Diffstat (limited to 'net')
-rw-r--r--net/6lowpan/ndisc.c6
-rw-r--r--net/802/garp.c2
-rw-r--r--net/802/mrp.c2
-rw-r--r--net/802/psnap.c4
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c46
-rw-r--r--net/8021q/vlan_netlink.c16
-rw-r--r--net/8021q/vlanproc.c50
-rw-r--r--net/9p/Kconfig9
-rw-r--r--net/9p/Makefile4
-rw-r--r--net/9p/client.c73
-rw-r--r--net/9p/trans_fd.c1
-rw-r--r--net/9p/trans_usbg.c956
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/9p/trans_xen.c9
-rw-r--r--net/Kconfig29
-rw-r--r--net/Kconfig.debug15
-rw-r--r--net/Makefile3
-rw-r--r--net/appletalk/Makefile2
-rw-r--r--net/appletalk/aarp.c45
-rw-r--r--net/appletalk/ddp.c19
-rw-r--r--net/appletalk/dev.c46
-rw-r--r--net/appletalk/sysctl_net_atalk.c1
-rw-r--r--net/atm/clip.c4
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/ioctl.c4
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/atm/svc.c8
-rw-r--r--net/ax25/Kconfig2
-rw-r--r--net/ax25/af_ax25.c37
-rw-r--r--net/ax25/ax25_dev.c55
-rw-r--r--net/ax25/ax25_ip.c3
-rw-r--r--net/ax25/ax25_out.c22
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/ax25/sysctl_net_ax25.c5
-rw-r--r--net/batman-adv/bat_iv_ogm.c7
-rw-r--r--net/batman-adv/bat_v.c2
-rw-r--r--net/batman-adv/bat_v_elp.c122
-rw-r--r--net/batman-adv/bat_v_elp.h2
-rw-r--r--net/batman-adv/bat_v_ogm.c3
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c41
-rw-r--r--net/batman-adv/distributed-arp-table.c23
-rw-r--r--net/batman-adv/gateway_client.c18
-rw-r--r--net/batman-adv/main.c23
-rw-r--r--net/batman-adv/main.h4
-rw-r--r--net/batman-adv/multicast.c17
-rw-r--r--net/batman-adv/netlink.c148
-rw-r--r--net/batman-adv/netlink.h5
-rw-r--r--net/batman-adv/originator.c145
-rw-r--r--net/batman-adv/soft-interface.c23
-rw-r--r--net/batman-adv/trace.h4
-rw-r--r--net/batman-adv/translation-table.c250
-rw-r--r--net/batman-adv/types.h9
-rw-r--r--net/bluetooth/6lowpan.c13
-rw-r--r--net/bluetooth/Kconfig8
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/a2mp.c1054
-rw-r--r--net/bluetooth/a2mp.h154
-rw-r--r--net/bluetooth/af_bluetooth.c35
-rw-r--r--net/bluetooth/amp.c590
-rw-r--r--net/bluetooth/amp.h60
-rw-r--r--net/bluetooth/bnep/core.c10
-rw-r--r--net/bluetooth/cmtp/Kconfig4
-rw-r--r--net/bluetooth/cmtp/capi.c32
-rw-r--r--net/bluetooth/coredump.c2
-rw-r--r--net/bluetooth/eir.c29
-rw-r--r--net/bluetooth/eir.h2
-rw-r--r--net/bluetooth/hci_conn.c568
-rw-r--r--net/bluetooth/hci_core.c562
-rw-r--r--net/bluetooth/hci_debugfs.c49
-rw-r--r--net/bluetooth/hci_event.c689
-rw-r--r--net/bluetooth/hci_request.c901
-rw-r--r--net/bluetooth/hci_request.h75
-rw-r--r--net/bluetooth/hci_sock.c38
-rw-r--r--net/bluetooth/hci_sync.c985
-rw-r--r--net/bluetooth/hci_sysfs.c34
-rw-r--r--net/bluetooth/hidp/Kconfig3
-rw-r--r--net/bluetooth/iso.c543
-rw-r--r--net/bluetooth/l2cap_core.c1434
-rw-r--r--net/bluetooth/l2cap_sock.c210
-rw-r--r--net/bluetooth/leds.c2
-rw-r--r--net/bluetooth/mgmt.c747
-rw-r--r--net/bluetooth/mgmt_util.c2
-rw-r--r--net/bluetooth/msft.c6
-rw-r--r--net/bluetooth/msft.h4
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c45
-rw-r--r--net/bluetooth/rfcomm/tty.c27
-rw-r--r--net/bluetooth/sco.c207
-rw-r--r--net/bluetooth/smp.c151
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c99
-rw-r--r--net/bpf/test_run.c79
-rw-r--r--net/bridge/br.c22
-rw-r--r--net/bridge/br_arp_nd_proxy.c2
-rw-r--r--net/bridge/br_device.c45
-rw-r--r--net/bridge/br_fdb.c61
-rw-r--r--net/bridge/br_forward.c29
-rw-r--r--net/bridge/br_input.c35
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_mst.c29
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/bridge/br_netfilter_hooks.c81
-rw-r--r--net/bridge/br_netlink.c17
-rw-r--r--net/bridge/br_netlink_tunnel.c4
-rw-r--r--net/bridge/br_private.h21
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_sysfs_br.c6
-rw-r--r--net/bridge/br_vlan.c67
-rw-r--r--net/bridge/br_vlan_tunnel.c9
-rw-r--r--net/bridge/netfilter/Kconfig13
-rw-r--r--net/bridge/netfilter/Makefile2
-rw-r--r--net/bridge/netfilter/ebtables.c8
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c20
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c9
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c3
-rw-r--r--net/caif/cfctrl.c8
-rw-r--r--net/caif/cfpkt_skbuff.c13
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/caif/cfsrvl.c6
-rw-r--r--net/caif/chnl_net.c2
-rw-r--r--net/can/Kconfig11
-rw-r--r--net/can/af_can.c15
-rw-r--r--net/can/af_can.h12
-rw-r--r--net/can/bcm.c75
-rw-r--r--net/can/gw.c29
-rw-r--r--net/can/isotp.c16
-rw-r--r--net/can/j1939/main.c6
-rw-r--r--net/can/j1939/socket.c4
-rw-r--r--net/can/j1939/transport.c36
-rw-r--r--net/can/proc.c46
-rw-r--r--net/can/raw.c108
-rw-r--r--net/ceph/crush/mapper.c7
-rw-r--r--net/ceph/crypto.c12
-rw-r--r--net/ceph/crypto.h1
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/ceph/osd_client.c36
-rw-r--r--net/ceph/pagelist.c38
-rw-r--r--net/ceph/pagevec.c52
-rw-r--r--net/core/Makefile8
-rw-r--r--net/core/bpf_sk_storage.c53
-rw-r--r--net/core/datagram.c90
-rw-r--r--net/core/dev.c1927
-rw-r--r--net/core/dev.h210
-rw-r--r--net/core/dev_addr_lists.c13
-rw-r--r--net/core/dev_addr_lists_test.c14
-rw-r--r--net/core/dev_ioctl.c95
-rw-r--r--net/core/devmem.c402
-rw-r--r--net/core/devmem.h180
-rw-r--r--net/core/drop_monitor.c60
-rw-r--r--net/core/dst.c23
-rw-r--r--net/core/dst_cache.c13
-rw-r--r--net/core/fib_notifier.c2
-rw-r--r--net/core/fib_rules.c82
-rw-r--r--net/core/filter.c857
-rw-r--r--net/core/flow_dissector.c154
-rw-r--r--net/core/gen_estimator.c2
-rw-r--r--net/core/gro.c89
-rw-r--r--net/core/gro_cells.c3
-rw-r--r--net/core/gso.c4
-rw-r--r--net/core/hotdata.c25
-rw-r--r--net/core/ieee8021q_helpers.c242
-rw-r--r--net/core/link_watch.c28
-rw-r--r--net/core/lwt_bpf.c21
-rw-r--r--net/core/lwtunnel.c65
-rw-r--r--net/core/mp_dmabuf_devmem.h44
-rw-r--r--net/core/neighbour.c467
-rw-r--r--net/core/net-procfs.c58
-rw-r--r--net/core/net-sysfs.c201
-rw-r--r--net/core/net-traces.c2
-rw-r--r--net/core/net_namespace.c213
-rw-r--r--net/core/net_test.c (renamed from net/core/gso_test.c)131
-rw-r--r--net/core/netdev-genl-gen.c69
-rw-r--r--net/core/netdev-genl-gen.h9
-rw-r--r--net/core/netdev-genl.c581
-rw-r--r--net/core/netdev_rx_queue.c82
-rw-r--r--net/core/netmem_priv.h31
-rw-r--r--net/core/netpoll.c118
-rw-r--r--net/core/page_pool.c665
-rw-r--r--net/core/page_pool_priv.h48
-rw-r--r--net/core/page_pool_user.c49
-rw-r--r--net/core/pktgen.c29
-rw-r--r--net/core/rtnetlink.c1565
-rw-r--r--net/core/rtnl_net_debug.c116
-rw-r--r--net/core/scm.c27
-rw-r--r--net/core/skb_fault_injection.c106
-rw-r--r--net/core/skbuff.c673
-rw-r--r--net/core/skmsg.c32
-rw-r--r--net/core/sock.c311
-rw-r--r--net/core/sock_diag.c118
-rw-r--r--net/core/sock_map.c329
-rw-r--r--net/core/sock_reuseport.c5
-rw-r--r--net/core/sysctl_net_core.c201
-rw-r--r--net/core/timestamping.c51
-rw-r--r--net/core/tso.c2
-rw-r--r--net/core/utils.c2
-rw-r--r--net/core/xdp.c351
-rw-r--r--net/dcb/dcbnl.c8
-rw-r--r--net/dccp/ackvec.c8
-rw-r--r--net/dccp/ccids/Kconfig2
-rw-r--r--net/dccp/ccids/ccid2.c1
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/diag.c1
-rw-r--r--net/dccp/feat.c6
-rw-r--r--net/dccp/ipv4.c22
-rw-r--r--net/dccp/ipv6.c25
-rw-r--r--net/dccp/minisocks.c12
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/dccp/output.c2
-rw-r--r--net/dccp/sysctl.c6
-rw-r--r--net/devlink/core.c8
-rw-r--r--net/devlink/dev.c32
-rw-r--r--net/devlink/devl_internal.h7
-rw-r--r--net/devlink/dpipe.c20
-rw-r--r--net/devlink/health.c92
-rw-r--r--net/devlink/netlink.c13
-rw-r--r--net/devlink/netlink_gen.c2
-rw-r--r--net/devlink/param.c7
-rw-r--r--net/devlink/port.c66
-rw-r--r--net/devlink/rate.c8
-rw-r--r--net/devlink/region.c15
-rw-r--r--net/devlink/resource.c101
-rw-r--r--net/devlink/trap.c34
-rw-r--r--net/dsa/Kconfig8
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/devlink.c26
-rw-r--r--net/dsa/dsa.c16
-rw-r--r--net/dsa/port.c275
-rw-r--r--net/dsa/tag.c5
-rw-r--r--net/dsa/tag.h141
-rw-r--r--net/dsa/tag_8021q.c84
-rw-r--r--net/dsa/tag_8021q.h7
-rw-r--r--net/dsa/tag_ksz.c13
-rw-r--r--net/dsa/tag_ocelot.c37
-rw-r--r--net/dsa/tag_ocelot_8021q.c6
-rw-r--r--net/dsa/tag_sja1105.c78
-rw-r--r--net/dsa/tag_vsc73xx_8021q.c68
-rw-r--r--net/dsa/trace.h34
-rw-r--r--net/dsa/user.c370
-rw-r--r--net/dsa/user.h2
-rw-r--r--net/ethernet/eth.c16
-rw-r--r--net/ethtool/Makefile3
-rw-r--r--net/ethtool/bitset.c48
-rw-r--r--net/ethtool/cabletest.c61
-rw-r--r--net/ethtool/channels.c22
-rw-r--r--net/ethtool/cmis.h129
-rw-r--r--net/ethtool/cmis_cdb.c678
-rw-r--r--net/ethtool/cmis_fw_update.c479
-rw-r--r--net/ethtool/coalesce.c274
-rw-r--r--net/ethtool/common.c366
-rw-r--r--net/ethtool/common.h31
-rw-r--r--net/ethtool/eee.c62
-rw-r--r--net/ethtool/eeprom.c6
-rw-r--r--net/ethtool/ioctl.c361
-rw-r--r--net/ethtool/linkinfo.c2
-rw-r--r--net/ethtool/linkmodes.c2
-rw-r--r--net/ethtool/linkstate.c67
-rw-r--r--net/ethtool/module.c394
-rw-r--r--net/ethtool/module_fw.h75
-rw-r--r--net/ethtool/netlink.c194
-rw-r--r--net/ethtool/netlink.h65
-rw-r--r--net/ethtool/phy.c306
-rw-r--r--net/ethtool/plca.c30
-rw-r--r--net/ethtool/pse-pd.c205
-rw-r--r--net/ethtool/rings.c62
-rw-r--r--net/ethtool/rss.c242
-rw-r--r--net/ethtool/stats.c55
-rw-r--r--net/ethtool/strset.c37
-rw-r--r--net/ethtool/ts.h20
-rw-r--r--net/ethtool/tsconfig.c457
-rw-r--r--net/ethtool/tsinfo.c417
-rw-r--r--net/ethtool/wol.c2
-rw-r--r--net/handshake/handshake-test.c2
-rw-r--r--net/handshake/netlink.c4
-rw-r--r--net/handshake/request.c1
-rw-r--r--net/handshake/tlshd.c1
-rw-r--r--net/hsr/hsr_debugfs.c9
-rw-r--r--net/hsr/hsr_device.c295
-rw-r--r--net/hsr/hsr_device.h4
-rw-r--r--net/hsr/hsr_forward.c152
-rw-r--r--net/hsr/hsr_framereg.c68
-rw-r--r--net/hsr/hsr_framereg.h6
-rw-r--r--net/hsr/hsr_main.c17
-rw-r--r--net/hsr/hsr_main.h20
-rw-r--r--net/hsr/hsr_netlink.c40
-rw-r--r--net/hsr/hsr_slave.c20
-rw-r--r--net/ieee802154/6lowpan/core.c5
-rw-r--r--net/ieee802154/6lowpan/reassembly.c10
-rw-r--r--net/ieee802154/core.c10
-rw-r--r--net/ieee802154/nl-mac.c15
-rw-r--r--net/ieee802154/nl802154.c26
-rw-r--r--net/ieee802154/socket.c13
-rw-r--r--net/ieee802154/sysfs.c2
-rw-r--r--net/ieee802154/sysfs.h2
-rw-r--r--net/ieee802154/trace.h2
-rw-r--r--net/ipv4/Kconfig3
-rw-r--r--net/ipv4/af_inet.c150
-rw-r--r--net/ipv4/arp.c212
-rw-r--r--net/ipv4/bpf_tcp_ca.c64
-rw-r--r--net/ipv4/cipso_ipv4.c91
-rw-r--r--net/ipv4/datagram.c19
-rw-r--r--net/ipv4/devinet.c697
-rw-r--r--net/ipv4/esp4.c38
-rw-r--r--net/ipv4/esp4_offload.c30
-rw-r--r--net/ipv4/fib_frontend.c93
-rw-r--r--net/ipv4/fib_notifier.c10
-rw-r--r--net/ipv4/fib_rules.c62
-rw-r--r--net/ipv4/fib_semantics.c114
-rw-r--r--net/ipv4/fib_trie.c22
-rw-r--r--net/ipv4/fou_bpf.c6
-rw-r--r--net/ipv4/fou_core.c33
-rw-r--r--net/ipv4/fou_nl.c4
-rw-r--r--net/ipv4/gre_demux.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/icmp.c219
-rw-r--r--net/ipv4/igmp.c99
-rw-r--r--net/ipv4/inet_connection_sock.c174
-rw-r--r--net/ipv4/inet_diag.c117
-rw-r--r--net/ipv4/inet_fragment.c76
-rw-r--r--net/ipv4/inet_hashtables.c20
-rw-r--r--net/ipv4/inet_timewait_sock.c112
-rw-r--r--net/ipv4/inetpeer.c63
-rw-r--r--net/ipv4/ip_fragment.c34
-rw-r--r--net/ipv4/ip_gre.c203
-rw-r--r--net/ipv4/ip_input.c39
-rw-r--r--net/ipv4/ip_options.c5
-rw-r--r--net/ipv4/ip_output.c86
-rw-r--r--net/ipv4/ip_sockglue.c27
-rw-r--r--net/ipv4/ip_tunnel.c199
-rw-r--r--net/ipv4/ip_tunnel_core.c82
-rw-r--r--net/ipv4/ip_vti.c51
-rw-r--r--net/ipv4/ipip.c43
-rw-r--r--net/ipv4/ipmr.c149
-rw-r--r--net/ipv4/ipmr_base.c12
-rw-r--r--net/ipv4/metrics.c8
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/Kconfig55
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/netfilter/arp_tables.c12
-rw-r--r--net/ipv4/netfilter/ip_tables.c12
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c3
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/iptable_nat.c18
-rw-r--r--net/ipv4/netfilter/nf_dup_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c10
-rw-r--r--net/ipv4/nexthop.c449
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/proc.c6
-rw-r--r--net/ipv4/raw.c33
-rw-r--r--net/ipv4/raw_diag.c1
-rw-r--r--net/ipv4/route.c426
-rw-r--r--net/ipv4/syncookies.c67
-rw-r--r--net/ipv4/sysctl_net_ipv4.c159
-rw-r--r--net/ipv4/tcp.c530
-rw-r--r--net/ipv4/tcp_ao.c133
-rw-r--r--net/ipv4/tcp_bbr.c10
-rw-r--r--net/ipv4/tcp_bpf.c74
-rw-r--r--net/ipv4/tcp_cong.c29
-rw-r--r--net/ipv4/tcp_cubic.c16
-rw-r--r--net/ipv4/tcp_dctcp.c21
-rw-r--r--net/ipv4/tcp_diag.c1
-rw-r--r--net/ipv4/tcp_fastopen.c11
-rw-r--r--net/ipv4/tcp_htcp.c2
-rw-r--r--net/ipv4/tcp_input.c438
-rw-r--r--net/ipv4/tcp_ipv4.c201
-rw-r--r--net/ipv4/tcp_metrics.c18
-rw-r--r--net/ipv4/tcp_minisocks.c133
-rw-r--r--net/ipv4/tcp_offload.c292
-rw-r--r--net/ipv4/tcp_output.c211
-rw-r--r--net/ipv4/tcp_sigpool.c17
-rw-r--r--net/ipv4/tcp_timer.c62
-rw-r--r--net/ipv4/udp.c425
-rw-r--r--net/ipv4/udp_diag.c2
-rw-r--r--net/ipv4/udp_offload.c120
-rw-r--r--net/ipv4/udp_tunnel_core.c8
-rw-r--r--net/ipv4/xfrm4_input.c23
-rw-r--r--net/ipv4/xfrm4_policy.c46
-rw-r--r--net/ipv4/xfrm4_protocol.c2
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/addrconf.c1226
-rw-r--r--net/ipv6/addrlabel.c42
-rw-r--r--net/ipv6/af_inet6.c29
-rw-r--r--net/ipv6/anycast.c106
-rw-r--r--net/ipv6/calipso.c7
-rw-r--r--net/ipv6/esp6.c30
-rw-r--r--net/ipv6/esp6_offload.c13
-rw-r--r--net/ipv6/exthdrs.c34
-rw-r--r--net/ipv6/fib6_notifier.c2
-rw-r--r--net/ipv6/fib6_rules.c112
-rw-r--r--net/ipv6/icmp.c85
-rw-r--r--net/ipv6/ila/ila.h1
-rw-r--r--net/ipv6/ila/ila_lwt.c13
-rw-r--r--net/ipv6/ila/ila_main.c6
-rw-r--r--net/ipv6/ila/ila_xlat.c42
-rw-r--r--net/ipv6/inet6_hashtables.c27
-rw-r--r--net/ipv6/ioam6.c84
-rw-r--r--net/ipv6/ioam6_iptunnel.c175
-rw-r--r--net/ipv6/ip6_fib.c208
-rw-r--r--net/ipv6/ip6_gre.c136
-rw-r--r--net/ipv6/ip6_input.c26
-rw-r--r--net/ipv6/ip6_offload.c35
-rw-r--r--net/ipv6/ip6_output.c98
-rw-r--r--net/ipv6/ip6_tunnel.c70
-rw-r--r--net/ipv6/ip6_vti.c27
-rw-r--r--net/ipv6/ip6mr.c123
-rw-r--r--net/ipv6/ipv6_sockglue.c15
-rw-r--r--net/ipv6/mcast.c165
-rw-r--r--net/ipv6/ndisc.c162
-rw-r--r--net/ipv6/netfilter.c7
-rw-r--r--net/ipv6/netfilter/Kconfig27
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c10
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c15
-rw-r--r--net/ipv6/netfilter/nf_dup_ipv6.c7
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c38
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c4
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c5
-rw-r--r--net/ipv6/output_core.c4
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/raw.c39
-rw-r--r--net/ipv6/reassembly.c12
-rw-r--r--net/ipv6/route.c264
-rw-r--r--net/ipv6/rpl_iptunnel.c83
-rw-r--r--net/ipv6/seg6.c34
-rw-r--r--net/ipv6/seg6_hmac.c50
-rw-r--r--net/ipv6/seg6_iptunnel.c123
-rw-r--r--net/ipv6/seg6_local.c44
-rw-r--r--net/ipv6/sit.c76
-rw-r--r--net/ipv6/syncookies.c36
-rw-r--r--net/ipv6/sysctl_net_ipv6.c18
-rw-r--r--net/ipv6/tcp_ipv6.c156
-rw-r--r--net/ipv6/tcpv6_offload.c154
-rw-r--r--net/ipv6/udp.c266
-rw-r--r--net/ipv6/udp_offload.c32
-rw-r--r--net/ipv6/xfrm6_input.c28
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/ipv6/xfrm6_policy.c51
-rw-r--r--net/ipv6/xfrm6_tunnel.c5
-rw-r--r--net/iucv/af_iucv.c46
-rw-r--r--net/iucv/iucv.c96
-rw-r--r--net/kcm/kcmsock.c32
-rw-r--r--net/key/af_key.c7
-rw-r--r--net/l2tp/l2tp_core.c843
-rw-r--r--net/l2tp/l2tp_core.h70
-rw-r--r--net/l2tp/l2tp_debugfs.c37
-rw-r--r--net/l2tp/l2tp_eth.c50
-rw-r--r--net/l2tp/l2tp_ip.c148
-rw-r--r--net/l2tp/l2tp_ip6.c127
-rw-r--r--net/l2tp/l2tp_netlink.c86
-rw-r--r--net/l2tp/l2tp_ppp.c164
-rw-r--r--net/llc/af_llc.c9
-rw-r--r--net/llc/llc_c_st.c500
-rw-r--r--net/llc/llc_conn.c20
-rw-r--r--net/llc/llc_input.c2
-rw-r--r--net/llc/llc_s_ac.c49
-rw-r--r--net/llc/llc_s_st.c26
-rw-r--r--net/llc/llc_sap.c12
-rw-r--r--net/llc/sysctl_net_llc.c12
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/agg-rx.c109
-rw-r--r--net/mac80211/agg-tx.c50
-rw-r--r--net/mac80211/airtime.c140
-rw-r--r--net/mac80211/cfg.c962
-rw-r--r--net/mac80211/chan.c1212
-rw-r--r--net/mac80211/debug.h28
-rw-r--r--net/mac80211/debugfs.c37
-rw-r--r--net/mac80211/debugfs_key.c56
-rw-r--r--net/mac80211/debugfs_key.h15
-rw-r--r--net/mac80211/debugfs_netdev.c16
-rw-r--r--net/mac80211/debugfs_sta.c9
-rw-r--r--net/mac80211/driver-ops.c63
-rw-r--r--net/mac80211/driver-ops.h69
-rw-r--r--net/mac80211/drop.h3
-rw-r--r--net/mac80211/eht.c28
-rw-r--r--net/mac80211/ethtool.c22
-rw-r--r--net/mac80211/he.c129
-rw-r--r--net/mac80211/ht.c25
-rw-r--r--net/mac80211/ibss.c70
-rw-r--r--net/mac80211/ieee80211_i.h424
-rw-r--r--net/mac80211/iface.c222
-rw-r--r--net/mac80211/key.c64
-rw-r--r--net/mac80211/link.c90
-rw-r--r--net/mac80211/main.c320
-rw-r--r--net/mac80211/mesh.c181
-rw-r--r--net/mac80211/mesh.h39
-rw-r--r--net/mac80211/mesh_hwmp.c8
-rw-r--r--net/mac80211/mesh_pathtbl.c62
-rw-r--r--net/mac80211/mesh_plink.c40
-rw-r--r--net/mac80211/mesh_sync.c2
-rw-r--r--net/mac80211/michael.c2
-rw-r--r--net/mac80211/mlme.c5270
-rw-r--r--net/mac80211/ocb.c11
-rw-r--r--net/mac80211/offchannel.c69
-rw-r--r--net/mac80211/parse.c1127
-rw-r--r--net/mac80211/pm.c6
-rw-r--r--net/mac80211/rate.c53
-rw-r--r--net/mac80211/rate.h10
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c2
-rw-r--r--net/mac80211/rx.c185
-rw-r--r--net/mac80211/scan.c140
-rw-r--r--net/mac80211/spectmgmt.c353
-rw-r--r--net/mac80211/sta_info.c66
-rw-r--r--net/mac80211/sta_info.h40
-rw-r--r--net/mac80211/status.c30
-rw-r--r--net/mac80211/tdls.c76
-rw-r--r--net/mac80211/tests/Makefile2
-rw-r--r--net/mac80211/tests/elems.c7
-rw-r--r--net/mac80211/tests/mfp.c2
-rw-r--r--net/mac80211/tests/tpe.c284
-rw-r--r--net/mac80211/tests/util.c313
-rw-r--r--net/mac80211/tests/util.h36
-rw-r--r--net/mac80211/tkip.c4
-rw-r--r--net/mac80211/trace.h382
-rw-r--r--net/mac80211/trace_msg.h2
-rw-r--r--net/mac80211/tx.c104
-rw-r--r--net/mac80211/util.c2223
-rw-r--r--net/mac80211/vht.c177
-rw-r--r--net/mac80211/wep.c2
-rw-r--r--net/mac80211/wpa.c50
-rw-r--r--net/mac802154/ieee802154_i.h3
-rw-r--r--net/mac802154/iface.c4
-rw-r--r--net/mac802154/llsec.c18
-rw-r--r--net/mac802154/main.c14
-rw-r--r--net/mac802154/rx.c2
-rw-r--r--net/mac802154/scan.c4
-rw-r--r--net/mac802154/tx.c23
-rw-r--r--net/mctp/Kconfig1
-rw-r--r--net/mctp/af_mctp.c126
-rw-r--r--net/mctp/device.c92
-rw-r--r--net/mctp/neigh.c31
-rw-r--r--net/mctp/route.c184
-rw-r--r--net/mctp/test/route-test.c610
-rw-r--r--net/mctp/test/utils.c2
-rw-r--r--net/mpls/af_mpls.c129
-rw-r--r--net/mpls/mpls_gso.c5
-rw-r--r--net/mpls/mpls_iptunnel.c8
-rw-r--r--net/mptcp/crypto.c2
-rw-r--r--net/mptcp/ctrl.c218
-rw-r--r--net/mptcp/diag.c5
-rw-r--r--net/mptcp/fastopen.c4
-rw-r--r--net/mptcp/mib.c12
-rw-r--r--net/mptcp/mib.h14
-rw-r--r--net/mptcp/mptcp_diag.c2
-rw-r--r--net/mptcp/mptcp_pm_gen.c10
-rw-r--r--net/mptcp/mptcp_pm_gen.h2
-rw-r--r--net/mptcp/options.c112
-rw-r--r--net/mptcp/pm.c86
-rw-r--r--net/mptcp/pm_netlink.c568
-rw-r--r--net/mptcp/pm_userspace.c461
-rw-r--r--net/mptcp/protocol.c349
-rw-r--r--net/mptcp/protocol.h247
-rw-r--r--net/mptcp/sched.c24
-rw-r--r--net/mptcp/sockopt.c167
-rw-r--r--net/mptcp/subflow.c291
-rw-r--r--net/mptcp/token_test.c7
-rw-r--r--net/ncsi/internal.h4
-rw-r--r--net/ncsi/ncsi-manage.c104
-rw-r--r--net/ncsi/ncsi-rsp.c41
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/core.c17
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c7
-rw-r--r--net/netfilter/ipset/ip_set_core.c97
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c58
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c36
-rw-r--r--net/netfilter/nf_bpf_link.c11
-rw-r--r--net/netfilter/nf_conncount.c37
-rw-r--r--net/netfilter/nf_conntrack_amanda.c2
-rw-r--r--net/netfilter/nf_conntrack_bpf.c72
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c169
-rw-r--r--net/netfilter/nf_conntrack_ecache.c23
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c68
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c36
-rw-r--r--net/netfilter/nf_flow_table_bpf.c121
-rw-r--r--net/netfilter/nf_flow_table_core.c201
-rw-r--r--net/netfilter/nf_flow_table_inet.c6
-rw-r--r--net/netfilter/nf_flow_table_ip.c27
-rw-r--r--net/netfilter/nf_flow_table_offload.c4
-rw-r--r--net/netfilter/nf_flow_table_xdp.c147
-rw-r--r--net/netfilter/nf_hooks_lwtunnel.c72
-rw-r--r--net/netfilter/nf_internals.h6
-rw-r--r--net/netfilter/nf_log.c16
-rw-r--r--net/netfilter/nf_nat_bpf.c4
-rw-r--r--net/netfilter/nf_nat_core.c129
-rw-r--r--net/netfilter/nf_queue.c106
-rw-r--r--net/netfilter/nf_synproxy_core.c4
-rw-r--r--net/netfilter/nf_tables_api.c1747
-rw-r--r--net/netfilter/nf_tables_core.c8
-rw-r--r--net/netfilter/nf_tables_offload.c40
-rw-r--r--net/netfilter/nf_tables_trace.c2
-rw-r--r--net/netfilter/nfnetlink.c24
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c217
-rw-r--r--net/netfilter/nft_bitwise.c170
-rw-r--r--net/netfilter/nft_byteorder.c4
-rw-r--r--net/netfilter/nft_chain_filter.c54
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_compat.c20
-rw-r--r--net/netfilter/nft_connlimit.c4
-rw-r--r--net/netfilter/nft_counter.c95
-rw-r--r--net/netfilter/nft_ct.c14
-rw-r--r--net/netfilter/nft_dup_netdev.c2
-rw-r--r--net/netfilter/nft_dynset.c24
-rw-r--r--net/netfilter/nft_exthdr.c14
-rw-r--r--net/netfilter/nft_fib.c11
-rw-r--r--net/netfilter/nft_flow_offload.c28
-rw-r--r--net/netfilter/nft_fwd_netdev.c9
-rw-r--r--net/netfilter/nft_hash.c5
-rw-r--r--net/netfilter/nft_immediate.c5
-rw-r--r--net/netfilter/nft_inner.c57
-rw-r--r--net/netfilter/nft_last.c4
-rw-r--r--net/netfilter/nft_limit.c14
-rw-r--r--net/netfilter/nft_log.c2
-rw-r--r--net/netfilter/nft_lookup.c11
-rw-r--r--net/netfilter/nft_masq.c7
-rw-r--r--net/netfilter/nft_meta.c13
-rw-r--r--net/netfilter/nft_nat.c11
-rw-r--r--net/netfilter/nft_numgen.c2
-rw-r--r--net/netfilter/nft_objref.c2
-rw-r--r--net/netfilter/nft_osf.c14
-rw-r--r--net/netfilter/nft_payload.c104
-rw-r--r--net/netfilter/nft_queue.c5
-rw-r--r--net/netfilter/nft_quota.c4
-rw-r--r--net/netfilter/nft_range.c2
-rw-r--r--net/netfilter/nft_redir.c7
-rw-r--r--net/netfilter/nft_reject.c3
-rw-r--r--net/netfilter/nft_reject_inet.c3
-rw-r--r--net/netfilter/nft_reject_netdev.c3
-rw-r--r--net/netfilter/nft_rt.c7
-rw-r--r--net/netfilter/nft_set_bitmap.c14
-rw-r--r--net/netfilter/nft_set_hash.c27
-rw-r--r--net/netfilter/nft_set_pipapo.c468
-rw-r--r--net/netfilter/nft_set_pipapo.h60
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c81
-rw-r--r--net/netfilter/nft_set_rbtree.c47
-rw-r--r--net/netfilter/nft_socket.c51
-rw-r--r--net/netfilter/nft_synproxy.c3
-rw-r--r--net/netfilter/nft_tproxy.c7
-rw-r--r--net/netfilter/nft_tunnel.c54
-rw-r--r--net/netfilter/nft_xfrm.c6
-rw-r--r--net/netfilter/utils.c37
-rw-r--r--net/netfilter/x_tables.c5
-rw-r--r--net/netfilter/xt_CHECKSUM.c33
-rw-r--r--net/netfilter/xt_CLASSIFY.c16
-rw-r--r--net/netfilter/xt_CONNSECMARK.c36
-rw-r--r--net/netfilter/xt_CT.c106
-rw-r--r--net/netfilter/xt_IDLETIMER.c115
-rw-r--r--net/netfilter/xt_LED.c43
-rw-r--r--net/netfilter/xt_NFLOG.c36
-rw-r--r--net/netfilter/xt_RATEEST.c39
-rw-r--r--net/netfilter/xt_SECMARK.c27
-rw-r--r--net/netfilter/xt_TRACE.c36
-rw-r--r--net/netfilter/xt_addrtype.c15
-rw-r--r--net/netfilter/xt_cluster.c33
-rw-r--r--net/netfilter/xt_connbytes.c4
-rw-r--r--net/netfilter/xt_connlimit.c54
-rw-r--r--net/netfilter/xt_connmark.c28
-rw-r--r--net/netfilter/xt_hashlimit.c6
-rw-r--r--net/netfilter/xt_mark.c42
-rw-r--r--net/netfilter/xt_recent.c8
-rw-r--r--net/netlabel/netlabel_kapi.c41
-rw-r--r--net/netlabel/netlabel_mgmt.c13
-rw-r--r--net/netlabel/netlabel_unlabeled.c46
-rw-r--r--net/netlabel/netlabel_user.c13
-rw-r--r--net/netlabel/netlabel_user.h2
-rw-r--r--net/netlink/af_netlink.c280
-rw-r--r--net/netlink/af_netlink.h6
-rw-r--r--net/netlink/diag.c3
-rw-r--r--net/netlink/genetlink.c76
-rw-r--r--net/netlink/genetlink.h11
-rw-r--r--net/netrom/af_netrom.c6
-rw-r--r--net/netrom/nr_route.c29
-rw-r--r--net/netrom/nr_timer.c3
-rw-r--r--net/netrom/sysctl_net_netrom.c1
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/nfc/hci/llc.c20
-rw-r--r--net/nfc/llcp_sock.c16
-rw-r--r--net/nfc/nci/core.c31
-rw-r--r--net/nfc/nci/hci.c2
-rw-r--r--net/nfc/nci/ntf.c32
-rw-r--r--net/nfc/netlink.c11
-rw-r--r--net/nsh/nsh.c14
-rw-r--r--net/openvswitch/Kconfig1
-rw-r--r--net/openvswitch/actions.c84
-rw-r--r--net/openvswitch/conntrack.c68
-rw-r--r--net/openvswitch/datapath.c25
-rw-r--r--net/openvswitch/datapath.h3
-rw-r--r--net/openvswitch/flow.c3
-rw-r--r--net/openvswitch/flow_netlink.c112
-rw-r--r--net/openvswitch/meter.h1
-rw-r--r--net/openvswitch/openvswitch_trace.h8
-rw-r--r--net/openvswitch/vport-internal_dev.c22
-rw-r--r--net/openvswitch/vport-netdev.c7
-rw-r--r--net/openvswitch/vport.c1
-rw-r--r--net/packet/af_packet.c162
-rw-r--r--net/packet/diag.c3
-rw-r--r--net/phonet/af_phonet.c2
-rw-r--r--net/phonet/pep.c12
-rw-r--r--net/phonet/pn_dev.c74
-rw-r--r--net/phonet/pn_netlink.c154
-rw-r--r--net/phonet/socket.c7
-rw-r--r--net/phonet/sysctl.c3
-rw-r--r--net/psample/psample.c50
-rw-r--r--net/qrtr/af_qrtr.c2
-rw-r--r--net/qrtr/mhi.c46
-rw-r--r--net/qrtr/ns.c44
-rw-r--r--net/rds/Kconfig9
-rw-r--r--net/rds/Makefile5
-rw-r--r--net/rds/connection.c4
-rw-r--r--net/rds/ib.h4
-rw-r--r--net/rds/ib_rdma.c4
-rw-r--r--net/rds/ib_sysctl.c1
-rw-r--r--net/rds/rdma.c2
-rw-r--r--net/rds/send.c5
-rw-r--r--net/rds/sysctl.c1
-rw-r--r--net/rds/tcp.c52
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_recv.c4
-rw-r--r--net/rfkill/core.c9
-rw-r--r--net/rfkill/rfkill-gpio.c30
-rw-r--r--net/rose/af_rose.c46
-rw-r--r--net/rose/rose_timer.c15
-rw-r--r--net/rose/sysctl_net_rose.c1
-rw-r--r--net/rxrpc/Makefile1
-rw-r--r--net/rxrpc/af_rxrpc.c25
-rw-r--r--net/rxrpc/ar-internal.h420
-rw-r--r--net/rxrpc/call_accept.c22
-rw-r--r--net/rxrpc/call_event.c572
-rw-r--r--net/rxrpc/call_object.c127
-rw-r--r--net/rxrpc/conn_client.c30
-rw-r--r--net/rxrpc/conn_event.c71
-rw-r--r--net/rxrpc/conn_object.c26
-rw-r--r--net/rxrpc/input.c821
-rw-r--r--net/rxrpc/input_rack.c418
-rw-r--r--net/rxrpc/insecure.c14
-rw-r--r--net/rxrpc/io_thread.c134
-rw-r--r--net/rxrpc/local_object.c6
-rw-r--r--net/rxrpc/misc.c12
-rw-r--r--net/rxrpc/output.c763
-rw-r--r--net/rxrpc/peer_event.c130
-rw-r--r--net/rxrpc/peer_object.c37
-rw-r--r--net/rxrpc/proc.c69
-rw-r--r--net/rxrpc/protocol.h19
-rw-r--r--net/rxrpc/recvmsg.c18
-rw-r--r--net/rxrpc/rtt.c121
-rw-r--r--net/rxrpc/rxkad.c91
-rw-r--r--net/rxrpc/rxperf.c14
-rw-r--r--net/rxrpc/security.c4
-rw-r--r--net/rxrpc/sendmsg.c172
-rw-r--r--net/rxrpc/sysctl.c23
-rw-r--r--net/rxrpc/txbuf.c114
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/act_api.c132
-rw-r--r--net/sched/act_bpf.c5
-rw-r--r--net/sched/act_connmark.c1
-rw-r--r--net/sched/act_csum.c1
-rw-r--r--net/sched/act_ct.c66
-rw-r--r--net/sched/act_ctinfo.c9
-rw-r--r--net/sched/act_gact.c1
-rw-r--r--net/sched/act_gate.c12
-rw-r--r--net/sched/act_ife.c1
-rw-r--r--net/sched/act_mirred.c1
-rw-r--r--net/sched/act_mpls.c19
-rw-r--r--net/sched/act_nat.c1
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c7
-rw-r--r--net/sched/act_sample.c13
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/act_skbedit.c1
-rw-r--r--net/sched/act_skbmod.c11
-rw-r--r--net/sched/act_tunnel_key.c37
-rw-r--r--net/sched/act_vlan.c2
-rw-r--r--net/sched/cls_api.c107
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_bpf.c7
-rw-r--r--net/sched/cls_cgroup.c1
-rw-r--r--net/sched/cls_flow.c4
-rw-r--r--net/sched/cls_flower.c274
-rw-r--r--net/sched/cls_fw.c1
-rw-r--r--net/sched/cls_matchall.c3
-rw-r--r--net/sched/cls_route.c1
-rw-r--r--net/sched/cls_u32.c23
-rw-r--r--net/sched/em_cmp.c2
-rw-r--r--net/sched/sch_api.c56
-rw-r--r--net/sched/sch_cake.c356
-rw-r--r--net/sched/sch_cbs.c23
-rw-r--r--net/sched/sch_choke.c26
-rw-r--r--net/sched/sch_codel.c67
-rw-r--r--net/sched/sch_drr.c1
-rw-r--r--net/sched/sch_etf.c11
-rw-r--r--net/sched/sch_ets.c28
-rw-r--r--net/sched/sch_fifo.c16
-rw-r--r--net/sched/sch_fq.c169
-rw-r--r--net/sched/sch_fq_codel.c61
-rw-r--r--net/sched/sch_fq_pie.c69
-rw-r--r--net/sched/sch_generic.c86
-rw-r--r--net/sched/sch_gred.c10
-rw-r--r--net/sched/sch_hfsc.c10
-rw-r--r--net/sched/sch_hhf.c36
-rw-r--r--net/sched/sch_htb.c27
-rw-r--r--net/sched/sch_ingress.c15
-rw-r--r--net/sched/sch_mqprio.c7
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c80
-rw-r--r--net/sched/sch_pie.c45
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_qfq.c6
-rw-r--r--net/sched/sch_red.c7
-rw-r--r--net/sched/sch_sfb.c5
-rw-r--r--net/sched/sch_sfq.c57
-rw-r--r--net/sched/sch_skbprio.c9
-rw-r--r--net/sched/sch_taprio.c137
-rw-r--r--net/sched/sch_tbf.c19
-rw-r--r--net/sched/sch_teql.c4
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/sctp/diag.c1
-rw-r--r--net/sctp/input.c19
-rw-r--r--net/sctp/ipv6.c23
-rw-r--r--net/sctp/protocol.c41
-rw-r--r--net/sctp/sm_statefuns.c25
-rw-r--r--net/sctp/socket.c52
-rw-r--r--net/sctp/stream.c2
-rw-r--r--net/sctp/sysctl.c54
-rw-r--r--net/shaper/Makefile8
-rw-r--r--net/shaper/shaper.c1438
-rw-r--r--net/shaper/shaper_nl_gen.c154
-rw-r--r--net/shaper/shaper_nl_gen.h44
-rw-r--r--net/smc/Kconfig13
-rw-r--r--net/smc/Makefile3
-rw-r--r--net/smc/af_smc.c273
-rw-r--r--net/smc/smc.h47
-rw-r--r--net/smc/smc_cdc.c36
-rw-r--r--net/smc/smc_clc.c29
-rw-r--r--net/smc/smc_clc.h56
-rw-r--r--net/smc/smc_core.c162
-rw-r--r--net/smc/smc_core.h18
-rw-r--r--net/smc/smc_diag.c1
-rw-r--r--net/smc/smc_ib.c30
-rw-r--r--net/smc/smc_inet.c176
-rw-r--r--net/smc/smc_inet.h22
-rw-r--r--net/smc/smc_ism.c88
-rw-r--r--net/smc/smc_ism.h20
-rw-r--r--net/smc/smc_llc.c21
-rw-r--r--net/smc/smc_loopback.c427
-rw-r--r--net/smc/smc_loopback.h60
-rw-r--r--net/smc/smc_pnet.c19
-rw-r--r--net/smc/smc_rx.c43
-rw-r--r--net/smc/smc_rx.h8
-rw-r--r--net/smc/smc_stats.c6
-rw-r--r--net/smc/smc_stats.h30
-rw-r--r--net/smc/smc_sysctl.c19
-rw-r--r--net/smc/smc_tracepoint.h4
-rw-r--r--net/smc/smc_wr.c48
-rw-r--r--net/socket.c448
-rw-r--r--net/strparser/strparser.c11
-rw-r--r--net/sunrpc/addr.c4
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/auth_gss_internal.h6
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c231
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c55
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_internal.h7
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c11
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_test.c2
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c1
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c27
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c12
-rw-r--r--net/sunrpc/cache.c77
-rw-r--r--net/sunrpc/clnt.c64
-rw-r--r--net/sunrpc/debugfs.c15
-rw-r--r--net/sunrpc/rpc_pipe.c17
-rw-r--r--net/sunrpc/sched.c6
-rw-r--r--net/sunrpc/stats.c2
-rw-r--r--net/sunrpc/sunrpc.h4
-rw-r--r--net/sunrpc/svc.c357
-rw-r--r--net/sunrpc/svc_xprt.c219
-rw-r--r--net/sunrpc/svcauth.c29
-rw-r--r--net/sunrpc/svcauth_unix.c3
-rw-r--r--net/sunrpc/svcsock.c30
-rw-r--r--net/sunrpc/sysctl.c5
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/sunrpc/xprt.c9
-rw-r--r--net/sunrpc/xprtmultipath.c17
-rw-r--r--net/sunrpc/xprtrdma/Makefile2
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c9
-rw-r--r--net/sunrpc/xprtrdma/ib_client.c184
-rw-r--r--net/sunrpc/xprtrdma/module.c18
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c22
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c181
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c150
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c112
-rw-r--r--net/sunrpc/xprtrdma/transport.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c96
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h5
-rw-r--r--net/sunrpc/xprtsock.c66
-rw-r--r--net/switchdev/switchdev.c124
-rw-r--r--net/sysctl_net.c14
-rw-r--r--net/tipc/Kconfig7
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/bcast.c2
-rw-r--r--net/tipc/bearer.c25
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/crypto.c4
-rw-r--r--net/tipc/diag.c1
-rw-r--r--net/tipc/link.c27
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/msg.c8
-rw-r--r--net/tipc/name_table.c4
-rw-r--r--net/tipc/name_table.h2
-rw-r--r--net/tipc/node.c3
-rw-r--r--net/tipc/socket.c27
-rw-r--r--net/tipc/sysctl.c1
-rw-r--r--net/tipc/trace.h16
-rw-r--r--net/tipc/udp_media.c14
-rw-r--r--net/tls/Kconfig1
-rw-r--r--net/tls/tls.h5
-rw-r--r--net/tls/tls_device.c14
-rw-r--r--net/tls/tls_device_fallback.c1
-rw-r--r--net/tls/tls_main.c94
-rw-r--r--net/tls/tls_proc.c5
-rw-r--r--net/tls/tls_strp.c7
-rw-r--r--net/tls/tls_sw.c152
-rw-r--r--net/tls/trace.h2
-rw-r--r--net/unix/Kconfig9
-rw-r--r--net/unix/Makefile2
-rw-r--r--net/unix/af_unix.c800
-rw-r--r--net/unix/diag.c60
-rw-r--r--net/unix/garbage.c664
-rw-r--r--net/unix/scm.c159
-rw-r--r--net/unix/scm.h10
-rw-r--r--net/unix/sysctl_net_unix.c3
-rw-r--r--net/unix/unix_bpf.c3
-rw-r--r--net/vmw_vsock/af_vsock.c224
-rw-r--r--net/vmw_vsock/diag.c1
-rw-r--r--net/vmw_vsock/hyperv_transport.c7
-rw-r--r--net/vmw_vsock/virtio_transport.c174
-rw-r--r--net/vmw_vsock/virtio_transport_common.c95
-rw-r--r--net/vmw_vsock/vsock_bpf.c21
-rw-r--r--net/vmw_vsock/vsock_loopback.c6
-rw-r--r--net/wireless/Kconfig45
-rw-r--r--net/wireless/Makefile7
-rw-r--r--net/wireless/chan.c830
-rw-r--r--net/wireless/core.c155
-rw-r--r--net/wireless/core.h70
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/lib80211.c257
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c448
-rw-r--r--net/wireless/lib80211_crypt_tkip.c738
-rw-r--r--net/wireless/lib80211_crypt_wep.c256
-rw-r--r--net/wireless/mesh.c5
-rw-r--r--net/wireless/mlme.c252
-rw-r--r--net/wireless/nl80211.c1537
-rw-r--r--net/wireless/nl80211.h7
-rw-r--r--net/wireless/pmsr.c22
-rw-r--r--net/wireless/radiotap.c4
-rw-r--r--net/wireless/rdev-ops.h133
-rw-r--r--net/wireless/reg.c116
-rw-r--r--net/wireless/reg.h13
-rw-r--r--net/wireless/scan.c1098
-rw-r--r--net/wireless/sme.c25
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/tests/Makefile2
-rw-r--r--net/wireless/tests/chan.c228
-rw-r--r--net/wireless/tests/fragmentation.c30
-rw-r--r--net/wireless/tests/scan.c281
-rw-r--r--net/wireless/trace.h445
-rw-r--r--net/wireless/util.c216
-rw-r--r--net/wireless/wext-compat.c328
-rw-r--r--net/wireless/wext-compat.h6
-rw-r--r--net/wireless/wext-core.c13
-rw-r--r--net/wireless/wext-sme.c43
-rw-r--r--net/wireless/wext-spy.c232
-rw-r--r--net/x25/Kconfig2
-rw-r--r--net/x25/af_x25.c8
-rw-r--r--net/x25/sysctl_net_x25.c1
-rw-r--r--net/xdp/xdp_umem.c9
-rw-r--r--net/xdp/xsk.c113
-rw-r--r--net/xdp/xsk_buff_pool.c128
-rw-r--r--net/xdp/xsk_diag.c1
-rw-r--r--net/xdp/xsk_queue.h7
-rw-r--r--net/xdp/xskmap.c2
-rw-r--r--net/xfrm/Kconfig16
-rw-r--r--net/xfrm/Makefile4
-rw-r--r--net/xfrm/espintcp.c4
-rw-r--r--net/xfrm/trace_iptfs.h218
-rw-r--r--net/xfrm/xfrm_compat.c17
-rw-r--r--net/xfrm/xfrm_device.c36
-rw-r--r--net/xfrm/xfrm_input.c52
-rw-r--r--net/xfrm/xfrm_interface_bpf.c4
-rw-r--r--net/xfrm/xfrm_interface_core.c32
-rw-r--r--net/xfrm/xfrm_iptfs.c2764
-rw-r--r--net/xfrm/xfrm_nat_keepalive.c292
-rw-r--r--net/xfrm/xfrm_output.c56
-rw-r--r--net/xfrm/xfrm_policy.c517
-rw-r--r--net/xfrm/xfrm_proc.c5
-rw-r--r--net/xfrm/xfrm_replay.c14
-rw-r--r--net/xfrm/xfrm_state.c447
-rw-r--r--net/xfrm/xfrm_state_bpf.c4
-rw-r--r--net/xfrm/xfrm_sysctl.c7
-rw-r--r--net/xfrm/xfrm_user.c346
1031 files changed, 65800 insertions, 36165 deletions
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
index 16be8f8b2f8c..c40b98f7743c 100644
--- a/net/6lowpan/ndisc.c
+++ b/net/6lowpan/ndisc.c
@@ -11,11 +11,6 @@
#include "6lowpan_i.h"
-static int lowpan_ndisc_is_useropt(u8 nd_opt_type)
-{
- return nd_opt_type == ND_OPT_6CO;
-}
-
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
#define NDISC_802154_SHORT_ADDR_LENGTH 1
static int lowpan_ndisc_parse_802154_options(const struct net_device *dev,
@@ -222,7 +217,6 @@ static void lowpan_ndisc_prefix_rcv_add_addr(struct net *net,
#endif
const struct ndisc_ops lowpan_ndisc_ops = {
- .is_useropt = lowpan_ndisc_is_useropt,
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
.parse_options = lowpan_ndisc_parse_options,
.update = lowpan_ndisc_update,
diff --git a/net/802/garp.c b/net/802/garp.c
index 6a743d004301..27f0ab146026 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -16,7 +16,7 @@
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/garp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static unsigned int garp_join_time __read_mostly = 200;
module_param(garp_join_time, uint, 0644);
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 3154d7409493..e0c96d0da8d5 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
diff --git a/net/802/psnap.c b/net/802/psnap.c
index fca9d454905f..389df460c8c4 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
rcu_read_lock();
- proto = find_snap_client(skb_transport_header(skb));
+ proto = find_snap_client(skb->data);
if (proto) {
/* Pass the frame on. */
- skb->transport_header += 5;
skb_pull_rcsum(skb, 5);
+ skb_reset_transport_header(skb);
rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
}
rcu_read_unlock();
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index e45187b88220..41be38264493 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
{
const char *name = real_dev->name;
- if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ if (real_dev->features & NETIF_F_VLAN_CHALLENGED ||
+ real_dev->type != ARPHRD_ETHER) {
pr_info("VLANs not supported on %s\n", name);
NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
return -EOPNOTSUPP;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f00158234505..9404dd551dfd 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -478,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
if (unlikely(!vhdr))
goto out;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
+
type = vhdr->h_vlan_encapsulated_proto;
ptype = gro_find_receive_by_type(type);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 407b2335f091..91d134961357 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -149,7 +149,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
if (max_mtu < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -504,28 +504,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
-}
-
static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
@@ -559,7 +537,7 @@ static const struct header_ops vlan_passthru_header_ops = {
.parse_protocol = vlan_parse_protocol,
};
-static struct device_type vlan_type = {
+static const struct device_type vlan_type = {
.name = "vlan",
};
@@ -586,17 +564,20 @@ static int vlan_dev_init(struct net_device *dev)
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
- NETIF_F_ALL_FCOE;
+ NETIF_F_FCOE_CRC | NETIF_F_FSO;
if (real_dev->vlan_features & NETIF_F_HW_MACSEC)
dev->hw_features |= NETIF_F_HW_MACSEC;
- dev->features |= dev->hw_features | NETIF_F_LLTX;
+ dev->features |= dev->hw_features;
+ dev->lltx = true;
+ dev->fcoe_mtu = true;
netif_inherit_tso_max(dev, real_dev);
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
- dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+ dev->vlan_features = real_dev->vlan_features &
+ ~(NETIF_F_FCOE_CRC | NETIF_F_FSO);
dev->hw_enc_features = vlan_tnl_features(real_dev);
dev->mpls_features = real_dev->mpls_features;
@@ -627,7 +608,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
@@ -677,7 +658,6 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
lower_features |= NETIF_F_HW_CSUM;
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
- features |= NETIF_F_LLTX;
return features;
}
@@ -699,7 +679,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
}
static int vlan_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return ethtool_get_ts_info_by_layer(vlan->real_dev, info);
@@ -745,7 +725,7 @@ static void vlan_dev_poll_controller(struct net_device *dev)
return;
}
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+static int vlan_dev_netpoll_setup(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
@@ -784,9 +764,9 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
static int vlan_dev_get_iflink(const struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- return real_dev->ifindex;
+ return READ_ONCE(real_dev->ifindex);
}
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index a3b68243fd4b..134419667d59 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -117,17 +117,15 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
- nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
- if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
- continue;
+ nla_for_each_nested_type(attr, IFLA_VLAN_QOS_MAPPING,
+ data[IFLA_VLAN_INGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
- nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
- if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
- continue;
+ nla_for_each_nested_type(attr, IFLA_VLAN_QOS_MAPPING,
+ data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
err = vlan_dev_set_egress_priority(dev, m->from, m->to);
if (err)
@@ -163,10 +161,8 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -ENODEV;
}
- if (data[IFLA_VLAN_PROTOCOL])
- proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
- else
- proto = htons(ETH_P_8021Q);
+ proto = nla_get_be16_default(data[IFLA_VLAN_PROTOCOL],
+ htons(ETH_P_8021Q));
vlan->vlan_proto = proto;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 7825c129742a..fa67374bda49 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -163,48 +163,34 @@ void vlan_proc_rem_dev(struct net_device *vlandev)
* The following few functions build the content of /proc/net/vlan/config
*/
-/* start read of /proc/net/vlan/config */
-static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos)
{
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct net *net = seq_file_net(seq);
- loff_t i = 1;
-
- rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
if (!is_vlan_dev(dev))
continue;
-
- if (i++ == *pos)
- return dev;
+ *pos = dev->ifindex;
+ return dev;
}
+ return NULL;
+}
+
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev;
- struct net *net = seq_file_net(seq);
-
++*pos;
-
- dev = v;
- if (v == SEQ_START_TOKEN)
- dev = net_device_entry(&net->dev_base_head);
-
- for_each_netdev_continue_rcu(net, dev) {
- if (!is_vlan_dev(dev))
- continue;
-
- return dev;
- }
-
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
@@ -252,9 +238,9 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
- "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n",
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %x\n",
vlandev->name, vlan->vlan_id,
- (int)(vlan->flags & 1), vlandev->priv_flags);
+ (int)(vlan->flags & 1), (u32)vlandev->priv_flags);
seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 00ebce9e5a65..22f8c167845d 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -5,6 +5,7 @@
menuconfig NET_9P
tristate "Plan 9 Resource Sharing Support (9P2000)"
+ select NETFS_SUPPORT
help
If you say Y here, you will get experimental support for
Plan 9 resource sharing via the 9P2000 protocol.
@@ -39,6 +40,14 @@ config NET_9P_XEN
This builds support for a transport for 9pfs between
two Xen domains.
+config NET_9P_USBG
+ tristate "9P USB Gadget Transport"
+ depends on USB_GADGET
+ select CONFIGFS_FS
+ select USB_LIBCOMPOSITE
+ help
+ This builds support for a transport for 9pfs over
+ usb gadget.
config NET_9P_RDMA
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 1df9b344c30b..22794a451c3f 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o
obj-$(CONFIG_NET_9P_XEN) += 9pnet_xen.o
obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
+obj-$(CONFIG_NET_9P_USBG) += 9pnet_usbg.o
9pnet-objs := \
mod.o \
@@ -23,3 +24,6 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
9pnet_rdma-objs := \
trans_rdma.o \
+
+9pnet_usbg-objs := \
+ trans_usbg.o \
diff --git a/net/9p/client.c b/net/9p/client.c
index e265a0ca6bdd..09f8ced9f8bb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -18,6 +18,7 @@
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
+#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
@@ -235,6 +236,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
if (!fc->sdata)
return -ENOMEM;
fc->capacity = alloc_msize;
+ fc->id = 0;
+ fc->tag = P9_NOTAG;
return 0;
}
@@ -974,8 +977,10 @@ error:
struct p9_client *p9_client_create(const char *dev_name, char *options)
{
int err;
+ static atomic_t seqno = ATOMIC_INIT(0);
struct p9_client *clnt;
char *client_id;
+ char *cache_name;
clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
@@ -1032,15 +1037,23 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (err)
goto close_trans;
+ cache_name = kasprintf(GFP_KERNEL,
+ "9p-fcall-cache-%u", atomic_inc_return(&seqno));
+ if (!cache_name) {
+ err = -ENOMEM;
+ goto close_trans;
+ }
+
/* P9_HDRSZ + 4 is the smallest packet header we can have that is
* followed by data accessed from userspace by read
*/
clnt->fcall_cache =
- kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize,
+ kmem_cache_create_usercopy(cache_name, clnt->msize,
0, 0, P9_HDRSZ + 4,
clnt->msize - (P9_HDRSZ + 4),
NULL);
+ kfree(cache_name);
return clnt;
close_trans:
@@ -1583,7 +1596,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
received = rsize;
}
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
@@ -1609,9 +1622,6 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
int total = 0;
*err = 0;
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
- fid->fid, offset, iov_iter_count(from));
-
while (iov_iter_count(from)) {
int count = iov_iter_count(from);
int rsize = fid->iounit;
@@ -1623,6 +1633,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
if (count < rsize)
rsize = count;
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
+ fid->fid, offset, rsize, count);
+
/* Don't bother zerocopy for small IO (< 1024) */
if (clnt->trans_mod->zc_request && rsize > 1024) {
req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
@@ -1650,7 +1663,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
written = rsize;
}
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
@@ -1661,6 +1674,54 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
}
EXPORT_SYMBOL(p9_client_write);
+void
+p9_client_write_subreq(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct p9_fid *fid = wreq->netfs_priv;
+ struct p9_client *clnt = fid->clnt;
+ struct p9_req_t *req;
+ unsigned long long start = subreq->start + subreq->transferred;
+ int written, len = subreq->len - subreq->transferred;
+ int err;
+
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu len %d\n",
+ fid->fid, start, len);
+
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && len > 1024) {
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, &subreq->io_iter,
+ 0, wreq->len, P9_ZC_HDR_SZ, "dqd",
+ fid->fid, start, len);
+ } else {
+ req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
+ start, len, &subreq->io_iter);
+ }
+ if (IS_ERR(req)) {
+ netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false);
+ return;
+ }
+
+ err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
+ if (err) {
+ trace_9p_protocol_dump(clnt, &req->rc);
+ p9_req_put(clnt, req);
+ netfs_write_subrequest_terminated(subreq, err, false);
+ return;
+ }
+
+ if (written > len) {
+ pr_err("bogus RWRITE count (%d > %u)\n", written, len);
+ written = len;
+ }
+
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);
+
+ p9_req_put(clnt, req);
+ netfs_write_subrequest_terminated(subreq, written, false);
+}
+EXPORT_SYMBOL(p9_client_write_subreq);
+
struct p9_wstat *p9_client_stat(struct p9_fid *fid)
{
int err;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 1a3948b8c493..196060dc6138 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -95,7 +95,6 @@ struct p9_poll_wait {
* @unsent_req_list: accounting for requests that haven't been sent
* @rreq: read request
* @wreq: write request
- * @req: current request being processed (if any)
* @tmp_buf: temporary buffer to read in header
* @rc: temporary fcall for reading current frame
* @wpos: write position for current frame
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
new file mode 100644
index 000000000000..6b694f117aef
--- /dev/null
+++ b/net/9p/trans_usbg.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * trans_usbg.c - USB peripheral usb9pfs configuration driver and transport.
+ *
+ * Copyright (C) 2024 Michael Grzeschik <m.grzeschik@pengutronix.de>
+ */
+
+/* Gadget usb9pfs only needs two bulk endpoints, and will use the usb9pfs
+ * transport to mount host exported filesystem via usb gadget.
+ */
+
+/* +--------------------------+ | +--------------------------+
+ * | 9PFS mounting client | | | 9PFS exporting server |
+ * SW | | | | |
+ * | (this:trans_usbg) | | |(e.g. diod or nfs-ganesha)|
+ * +-------------^------------+ | +-------------^------------+
+ * | | |
+ * ------------------|------------------------------------|-------------
+ * | | |
+ * +-------------v------------+ | +-------------v------------+
+ * | | | | |
+ * HW | USB Device Controller <---------> USB Host Controller |
+ * | | | | |
+ * +--------------------------+ | +--------------------------+
+ */
+
+#include <linux/cleanup.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
+
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+
+#define DEFAULT_BUFLEN 16384
+
+struct f_usb9pfs {
+ struct p9_client *client;
+
+ /* 9p request lock for en/dequeue */
+ spinlock_t lock;
+
+ struct usb_request *in_req;
+ struct usb_request *out_req;
+
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ struct completion send;
+ struct completion received;
+
+ unsigned int buflen;
+
+ struct usb_function function;
+};
+
+static inline struct f_usb9pfs *func_to_usb9pfs(struct usb_function *f)
+{
+ return container_of(f, struct f_usb9pfs, function);
+}
+
+struct f_usb9pfs_opts {
+ struct usb_function_instance func_inst;
+ unsigned int buflen;
+
+ struct f_usb9pfs_dev *dev;
+
+ /* Read/write access to configfs attributes is handled by configfs.
+ *
+ * This is to protect the data from concurrent access by read/write
+ * and create symlink/remove symlink.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+struct f_usb9pfs_dev {
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_opts *opts;
+ char tag[41];
+ bool inuse;
+
+ struct list_head usb9pfs_instance;
+};
+
+static DEFINE_MUTEX(usb9pfs_lock);
+static struct list_head usbg_instance_list;
+
+static int usb9pfs_queue_tx(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_tx_req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct usb_request *req = usb9pfs->in_req;
+ int ret;
+
+ if (!(p9_tx_req->tc.size % usb9pfs->in_ep->maxpacket))
+ req->zero = 1;
+
+ req->buf = p9_tx_req->tc.sdata;
+ req->length = p9_tx_req->tc.size;
+ req->context = p9_tx_req;
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs send --> %d/%d, zero: %d\n",
+ usb9pfs->in_ep->name, req->actual, req->length, req->zero);
+
+ ret = usb_ep_queue(usb9pfs->in_ep, req, gfp_flags);
+ if (ret)
+ req->context = NULL;
+
+ dev_dbg(&cdev->gadget->dev, "tx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_queue_rx(struct f_usb9pfs *usb9pfs, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ int ret;
+
+ ret = usb_ep_queue(usb9pfs->out_ep, req, gfp_flags);
+
+ dev_dbg(&cdev->gadget->dev, "rx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_transmit(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_req)
+{
+ int ret = 0;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ ret = usb9pfs_queue_tx(usb9pfs, p9_req, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ list_del(&p9_req->req_list);
+
+ p9_req_get(p9_req);
+
+ return ret;
+}
+
+static void usb9pfs_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_tx_req = req->context;
+ unsigned long flags;
+
+ /* reset zero packages */
+ req->zero = 0;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+
+ spin_lock_irqsave(&usb9pfs->lock, flags);
+ WRITE_ONCE(p9_tx_req->status, REQ_STATUS_SENT);
+
+ p9_req_put(usb9pfs->client, p9_tx_req);
+
+ req->context = NULL;
+
+ spin_unlock_irqrestore(&usb9pfs->lock, flags);
+
+ complete(&usb9pfs->send);
+}
+
+static struct p9_req_t *usb9pfs_rx_header(struct f_usb9pfs *usb9pfs, void *buf)
+{
+ struct p9_req_t *p9_rx_req;
+ struct p9_fcall rc;
+ int ret;
+
+ /* start by reading header */
+ rc.sdata = buf;
+ rc.offset = 0;
+ rc.capacity = P9_HDRSZ;
+ rc.size = P9_HDRSZ;
+
+ p9_debug(P9_DEBUG_TRANS, "mux %p got %zu bytes\n", usb9pfs,
+ rc.capacity - rc.offset);
+
+ ret = p9_parse_header(&rc, &rc.size, NULL, NULL, 0);
+ if (ret) {
+ p9_debug(P9_DEBUG_ERROR,
+ "error parsing header: %d\n", ret);
+ return NULL;
+ }
+
+ p9_debug(P9_DEBUG_TRANS,
+ "mux %p pkt: size: %d bytes tag: %d\n",
+ usb9pfs, rc.size, rc.tag);
+
+ p9_rx_req = p9_tag_lookup(usb9pfs->client, rc.tag);
+ if (!p9_rx_req || p9_rx_req->status != REQ_STATUS_SENT) {
+ p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", rc.tag);
+ return NULL;
+ }
+
+ if (rc.size > p9_rx_req->rc.capacity) {
+ p9_debug(P9_DEBUG_ERROR,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ rc.size, rc.tag, p9_rx_req->rc.capacity);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ if (!p9_rx_req->rc.sdata) {
+ p9_debug(P9_DEBUG_ERROR,
+ "No recv fcall for tag %d (req %p), disconnecting!\n",
+ rc.tag, p9_rx_req);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ return p9_rx_req;
+}
+
+static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_rx_req;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ p9_rx_req = usb9pfs_rx_header(usb9pfs, req->buf);
+ if (!p9_rx_req)
+ return;
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+
+ p9_rx_req->rc.size = req->actual;
+
+ p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+
+ complete(&usb9pfs->received);
+}
+
+static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+{
+ int value;
+
+ value = usb_ep_disable(ep);
+ if (value < 0)
+ dev_info(&cdev->gadget->dev,
+ "disable %s --> %d\n", ep->name, value);
+}
+
+static void disable_usb9pfs(struct f_usb9pfs *usb9pfs)
+{
+ struct usb_composite_dev *cdev =
+ usb9pfs->function.config->cdev;
+
+ if (usb9pfs->in_req) {
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+ usb9pfs->in_req = NULL;
+ }
+
+ if (usb9pfs->out_req) {
+ usb_ep_free_request(usb9pfs->out_ep, usb9pfs->out_req);
+ usb9pfs->out_req = NULL;
+ }
+
+ disable_ep(cdev, usb9pfs->in_ep);
+ disable_ep(cdev, usb9pfs->out_ep);
+ dev_dbg(&cdev->gadget->dev, "%s disabled\n",
+ usb9pfs->function.name);
+}
+
+static int alloc_requests(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs)
+{
+ int ret;
+
+ usb9pfs->in_req = usb_ep_alloc_request(usb9pfs->in_ep, GFP_ATOMIC);
+ if (!usb9pfs->in_req) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ usb9pfs->out_req = alloc_ep_req(usb9pfs->out_ep, usb9pfs->buflen);
+ if (!usb9pfs->out_req) {
+ ret = -ENOENT;
+ goto fail_in;
+ }
+
+ usb9pfs->in_req->complete = usb9pfs_tx_complete;
+ usb9pfs->out_req->complete = usb9pfs_rx_complete;
+
+ /* length will be set in complete routine */
+ usb9pfs->in_req->context = usb9pfs;
+ usb9pfs->out_req->context = usb9pfs;
+
+ return 0;
+
+fail_in:
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+fail:
+ return ret;
+}
+
+static int enable_endpoint(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs, struct usb_ep *ep)
+{
+ int ret;
+
+ ret = config_ep_by_speed(cdev->gadget, &usb9pfs->function, ep);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(ep);
+ if (ret < 0)
+ return ret;
+
+ ep->driver_data = usb9pfs;
+
+ return 0;
+}
+
+static int
+enable_usb9pfs(struct usb_composite_dev *cdev, struct f_usb9pfs *usb9pfs)
+{
+ struct p9_client *client;
+ int ret = 0;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->in_ep);
+ if (ret)
+ goto out;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->out_ep);
+ if (ret)
+ goto disable_in;
+
+ ret = alloc_requests(cdev, usb9pfs);
+ if (ret)
+ goto disable_out;
+
+ client = usb9pfs->client;
+ if (client)
+ client->status = Connected;
+
+ dev_dbg(&cdev->gadget->dev, "%s enabled\n", usb9pfs->function.name);
+ return 0;
+
+disable_out:
+ usb_ep_disable(usb9pfs->out_ep);
+disable_in:
+ usb_ep_disable(usb9pfs->in_ep);
+out:
+ return ret;
+}
+
+static int p9_usbg_create(struct p9_client *client, const char *devname, char *args)
+{
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs *usb9pfs;
+ int ret = -ENOENT;
+ int found = 0;
+
+ if (!devname)
+ return -EINVAL;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ list_for_each_entry(dev, &usbg_instance_list, usb9pfs_instance) {
+ if (!strncmp(devname, dev->tag, strlen(devname))) {
+ if (!dev->inuse) {
+ dev->inuse = true;
+ found = 1;
+ break;
+ }
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("no channels available for device %s\n", devname);
+ return ret;
+ }
+
+ usb9pfs = dev->usb9pfs;
+ if (!usb9pfs)
+ return -EINVAL;
+
+ client->trans = (void *)usb9pfs;
+ if (!usb9pfs->in_req)
+ client->status = Disconnected;
+ else
+ client->status = Connected;
+ usb9pfs->client = client;
+
+ client->trans_mod->maxsize = usb9pfs->buflen;
+
+ complete(&usb9pfs->received);
+
+ return 0;
+}
+
+static void usb9pfs_clear_tx(struct f_usb9pfs *usb9pfs)
+{
+ struct p9_req_t *req;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ req = usb9pfs->in_req->context;
+ if (!req)
+ return;
+
+ if (!req->t_err)
+ req->t_err = -ECONNRESET;
+
+ p9_client_cb(usb9pfs->client, req, REQ_STATUS_ERROR);
+}
+
+static void p9_usbg_close(struct p9_client *client)
+{
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs_opts *opts;
+
+ if (!client)
+ return;
+
+ usb9pfs = client->trans;
+ if (!usb9pfs)
+ return;
+
+ client->status = Disconnected;
+
+ usb9pfs_clear_tx(usb9pfs);
+
+ opts = container_of(usb9pfs->function.fi,
+ struct f_usb9pfs_opts, func_inst);
+
+ dev = opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ dev->inuse = false;
+ mutex_unlock(&usb9pfs_lock);
+}
+
+static int p9_usbg_request(struct p9_client *client, struct p9_req_t *p9_req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret;
+
+ if (client->status != Connected)
+ return -EBUSY;
+
+ ret = wait_for_completion_killable(&usb9pfs->received);
+ if (ret)
+ return ret;
+
+ ret = usb9pfs_transmit(usb9pfs, p9_req);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_killable(&usb9pfs->send);
+ if (ret)
+ return ret;
+
+ return usb9pfs_queue_rx(usb9pfs, usb9pfs->out_req, GFP_ATOMIC);
+}
+
+static int p9_usbg_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret = 1;
+
+ p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ if (req->status == REQ_STATUS_UNSENT) {
+ list_del(&req->req_list);
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
+ p9_req_put(client, req);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static struct p9_trans_module p9_usbg_trans = {
+ .name = "usbg",
+ .create = p9_usbg_create,
+ .close = p9_usbg_close,
+ .request = p9_usbg_request,
+ .cancel = p9_usbg_cancel,
+ .owner = THIS_MODULE,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_PROTOCOL_9PFS 0x09
+
+static struct usb_interface_descriptor usb9pfs_intf = {
+ .bLength = sizeof(usb9pfs_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_PROTOCOL_9PFS,
+
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&fs_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&fs_usb9pfs_source_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&hs_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&hs_usb9pfs_sink_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_source_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_sink_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *ss_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_comp_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_comp_desc,
+ NULL,
+};
+
+/* function-specific strings: */
+static struct usb_string strings_usb9pfs[] = {
+ [0].s = "usb9pfs input to output",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_usb9pfs = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_usb9pfs,
+};
+
+static struct usb_gadget_strings *usb9pfs_strings[] = {
+ &stringtab_usb9pfs,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int usb9pfs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret;
+ int id;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ usb9pfs_intf.bInterfaceNumber = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_usb9pfs[0].id = id;
+ usb9pfs_intf.iInterface = id;
+
+ /* allocate endpoints */
+ usb9pfs->in_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_source_desc);
+ if (!usb9pfs->in_ep)
+ goto autoconf_fail;
+
+ usb9pfs->out_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_sink_desc);
+ if (!usb9pfs->out_ep)
+ goto autoconf_fail;
+
+ /* support high speed hardware */
+ hs_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ hs_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ /* support super speed hardware */
+ ss_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ ss_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, fs_usb9pfs_descs, hs_usb9pfs_descs,
+ ss_usb9pfs_descs, ss_usb9pfs_descs);
+ if (ret)
+ return ret;
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+ opts->dev->usb9pfs = usb9pfs;
+
+ dev_dbg(&cdev->gadget->dev, "%s speed %s: IN/%s, OUT/%s\n",
+ (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+ f->name, usb9pfs->in_ep->name, usb9pfs->out_ep->name);
+
+ return 0;
+
+autoconf_fail:
+ ERROR(cdev, "%s: can't autoconfigure on %s\n",
+ f->name, cdev->gadget->name);
+ return -ENODEV;
+}
+
+static void usb9pfs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ disable_usb9pfs(usb9pfs);
+}
+
+static void usb9pfs_free_func(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+
+ kfree(usb9pfs);
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
+ usb_free_all_descriptors(f);
+}
+
+static int usb9pfs_set_alt(struct usb_function *f,
+ unsigned int intf, unsigned int alt)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ return enable_usb9pfs(cdev, usb9pfs);
+}
+
+static void usb9pfs_disable(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ usb9pfs_clear_tx(usb9pfs);
+}
+
+static struct usb_function *usb9pfs_alloc(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs *usb9pfs;
+
+ usb9pfs = kzalloc(sizeof(*usb9pfs), GFP_KERNEL);
+ if (!usb9pfs)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&usb9pfs->lock);
+
+ init_completion(&usb9pfs->send);
+ init_completion(&usb9pfs->received);
+
+ usb9pfs_opts = container_of(fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&usb9pfs_opts->lock);
+ usb9pfs_opts->refcnt++;
+ mutex_unlock(&usb9pfs_opts->lock);
+
+ usb9pfs->buflen = usb9pfs_opts->buflen;
+
+ usb9pfs->function.name = "usb9pfs";
+ usb9pfs->function.bind = usb9pfs_func_bind;
+ usb9pfs->function.unbind = usb9pfs_func_unbind;
+ usb9pfs->function.set_alt = usb9pfs_set_alt;
+ usb9pfs->function.disable = usb9pfs_disable;
+ usb9pfs->function.strings = usb9pfs_strings;
+
+ usb9pfs->function.free_func = usb9pfs_free_func;
+
+ return &usb9pfs->function;
+}
+
+static inline struct f_usb9pfs_opts *to_f_usb9pfs_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_usb9pfs_opts,
+ func_inst.group);
+}
+
+static inline struct f_usb9pfs_opts *fi_to_f_usb9pfs_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_usb9pfs_opts, func_inst);
+}
+
+static void usb9pfs_attr_release(struct config_item *item)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts = to_f_usb9pfs_opts(item);
+
+ usb_put_function_instance(&usb9pfs_opts->func_inst);
+}
+
+static struct configfs_item_operations usb9pfs_item_ops = {
+ .release = usb9pfs_attr_release,
+};
+
+static ssize_t f_usb9pfs_opts_buflen_show(struct config_item *item, char *page)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+
+ mutex_lock(&opts->lock);
+ ret = sysfs_emit(page, "%d\n", opts->buflen);
+ mutex_unlock(&opts->lock);
+
+ return ret;
+}
+
+static ssize_t f_usb9pfs_opts_buflen_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+ u32 num;
+
+ guard(mutex)(&opts->lock);
+
+ if (opts->refcnt)
+ return -EBUSY;
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ return ret;
+
+ opts->buflen = num;
+
+ return len;
+}
+
+CONFIGFS_ATTR(f_usb9pfs_opts_, buflen);
+
+static struct configfs_attribute *usb9pfs_attrs[] = {
+ &f_usb9pfs_opts_attr_buflen,
+ NULL,
+};
+
+static const struct config_item_type usb9pfs_func_type = {
+ .ct_item_ops = &usb9pfs_item_ops,
+ .ct_attrs = usb9pfs_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct f_usb9pfs_dev *_usb9pfs_do_find_dev(const char *tag)
+{
+ struct f_usb9pfs_dev *usb9pfs_dev;
+
+ if (!tag)
+ return NULL;
+
+ list_for_each_entry(usb9pfs_dev, &usbg_instance_list, usb9pfs_instance) {
+ if (strcmp(usb9pfs_dev->tag, tag) == 0)
+ return usb9pfs_dev;
+ }
+
+ return NULL;
+}
+
+static int usb9pfs_tag_instance(struct f_usb9pfs_dev *dev, const char *tag)
+{
+ struct f_usb9pfs_dev *existing;
+ int ret = 0;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ existing = _usb9pfs_do_find_dev(tag);
+ if (!existing)
+ strscpy(dev->tag, tag, ARRAY_SIZE(dev->tag));
+ else if (existing != dev)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static int usb9pfs_set_inst_tag(struct usb_function_instance *fi, const char *tag)
+{
+ if (strlen(tag) >= sizeof_field(struct f_usb9pfs_dev, tag))
+ return -ENAMETOOLONG;
+ return usb9pfs_tag_instance(fi_to_f_usb9pfs_opts(fi)->dev, tag);
+}
+
+static void usb9pfs_free_instance(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts =
+ container_of(fi, struct f_usb9pfs_opts, func_inst);
+ struct f_usb9pfs_dev *dev = usb9pfs_opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ list_del(&dev->usb9pfs_instance);
+ mutex_unlock(&usb9pfs_lock);
+
+ kfree(usb9pfs_opts);
+}
+
+static struct usb_function_instance *usb9pfs_alloc_instance(void)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs_dev *dev;
+
+ usb9pfs_opts = kzalloc(sizeof(*usb9pfs_opts), GFP_KERNEL);
+ if (!usb9pfs_opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&usb9pfs_opts->lock);
+
+ usb9pfs_opts->func_inst.set_inst_name = usb9pfs_set_inst_tag;
+ usb9pfs_opts->func_inst.free_func_inst = usb9pfs_free_instance;
+
+ usb9pfs_opts->buflen = DEFAULT_BUFLEN;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ kfree(usb9pfs_opts);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ usb9pfs_opts->dev = dev;
+ dev->opts = usb9pfs_opts;
+
+ config_group_init_type_name(&usb9pfs_opts->func_inst.group, "",
+ &usb9pfs_func_type);
+
+ mutex_lock(&usb9pfs_lock);
+ list_add_tail(&dev->usb9pfs_instance, &usbg_instance_list);
+ mutex_unlock(&usb9pfs_lock);
+
+ return &usb9pfs_opts->func_inst;
+}
+DECLARE_USB_FUNCTION(usb9pfs, usb9pfs_alloc_instance, usb9pfs_alloc);
+
+static int __init usb9pfs_modinit(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&usbg_instance_list);
+
+ ret = usb_function_register(&usb9pfsusb_func);
+ if (!ret)
+ v9fs_register_trans(&p9_usbg_trans);
+
+ return ret;
+}
+
+static void __exit usb9pfs_modexit(void)
+{
+ usb_function_unregister(&usb9pfsusb_func);
+ v9fs_unregister_trans(&p9_usbg_trans);
+}
+
+module_init(usb9pfs_modinit);
+module_exit(usb9pfs_modexit);
+
+MODULE_ALIAS_9P("usbg");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget 9pfs transport");
+MODULE_AUTHOR("Michael Grzeschik");
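For orientation, a minimal gadget-side usage sketch of the function added above. The configfs paths, gadget name and instance name are illustrative assumptions and not taken from this patch; only the function name "usb9pfs" and the "trans=usbg" mount option follow from the code itself (DECLARE_USB_FUNCTION() and p9_usbg_trans):

    # on the USB gadget (the "9PFS mounting client" in the diagram above)
    mkdir /sys/kernel/config/usb_gadget/g1/functions/usb9pfs.usb9pfs
    ln -s /sys/kernel/config/usb_gadget/g1/functions/usb9pfs.usb9pfs \
          /sys/kernel/config/usb_gadget/g1/configs/c.1/
    # once the host side runs a 9P server against the matching bulk endpoints:
    mount -t 9p -o trans=usbg usb9pfs /mnt/9p

The instance name chosen under functions/ becomes the transport tag (usb9pfs_set_inst_tag() above), so the device name passed to mount must match it.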
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e305071eb7b8..0b8086f58ad5 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -781,7 +781,6 @@ static struct virtio_driver p9_virtio_drv = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = p9_virtio_probe,
.remove = p9_virtio_remove,
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index dfdbe1ca5338..b9ff69c7522a 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -286,7 +286,7 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
if (!priv->rings[i].intf)
break;
if (priv->rings[i].irq > 0)
- unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
+ unbind_from_irqhandler(priv->rings[i].irq, ring);
if (priv->rings[i].data.in) {
for (j = 0;
j < (1 << priv->rings[i].intf->ring_order);
@@ -465,6 +465,7 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
goto error;
}
+ xenbus_switch_state(dev, XenbusStateInitialised);
return 0;
error_xenbus:
@@ -512,8 +513,10 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
break;
case XenbusStateInitWait:
- if (!xen_9pfs_front_init(dev))
- xenbus_switch_state(dev, XenbusStateInitialised);
+ if (dev->state != XenbusStateInitialising)
+ break;
+
+ xen_9pfs_front_init(dev);
break;
case XenbusStateConnected:
diff --git a/net/Kconfig b/net/Kconfig
index 4adc47d0c9c2..c3fca69a7c83 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -60,9 +60,21 @@ config NET_XGRESS
config NET_REDIRECT
bool
+config SKB_DECRYPTED
+ bool
+
config SKB_EXTENSIONS
bool
+config NET_DEVMEM
+ def_bool y
+ depends on DMA_SHARED_BUFFER
+ depends on GENERIC_ALLOCATOR
+ depends on PAGE_POOL
+
+config NET_SHAPER
+ bool
+
menu "Networking options"
source "net/packet/Kconfig"
@@ -287,15 +299,21 @@ config MAX_SKB_FRAGS
If unsure, say 17.
config RPS
- bool
+ bool "Receive packet steering"
depends on SMP && SYSFS
default y
+ help
+ Software receive side packet steering (RPS) distributes the
+ load of received packet processing across multiple CPUs.
config RFS_ACCEL
- bool
+ bool "Hardware acceleration of RFS"
depends on RPS
select CPU_RMAP
default y
+ help
+ Allow drivers for multiqueue hardware with flow filter tables to
+ accelerate RFS.
config SOCK_RX_QUEUE_MAPPING
bool
@@ -331,6 +349,7 @@ config NET_RX_BUSY_POLL
config BQL
bool
+ prompt "Enable Byte Queue Limits"
depends on SYSFS
select DQL
default y
@@ -347,7 +366,7 @@ config BPF_STREAM_PARSER
BPF_MAP_TYPE_SOCKMAP.
config NET_FLOW_LIMIT
- bool
+ bool "Net flow limit"
depends on RPS
default y
help
@@ -448,6 +467,9 @@ config GRO_CELLS
config SOCK_VALIDATE_XMIT
bool
+config NET_IEEE8021Q_HELPERS
+ bool
+
config NET_SELFTESTS
def_tristate PHYLIB
depends on PHYLIB && INET
@@ -495,6 +517,7 @@ config FAILOVER
config ETHTOOL_NETLINK
bool "Netlink interface for ethtool"
+ select DIMLIB
default y
help
An alternative userspace interface for ethtool based on generic
diff --git a/net/Kconfig.debug b/net/Kconfig.debug
index 5e3fffe707dd..277fab8c4d77 100644
--- a/net/Kconfig.debug
+++ b/net/Kconfig.debug
@@ -24,3 +24,18 @@ config DEBUG_NET
help
Enable extra sanity checks in networking.
This is mostly used by fuzzers, but is safe to select.
+
+config DEBUG_NET_SMALL_RTNL
+ bool "Add extra per-netns mutex inside RTNL"
+ depends on DEBUG_KERNEL && NET && LOCK_DEBUGGING_SUPPORT
+ select PROVE_LOCKING
+ default n
+ help
+ rtnl_lock() is being replaced with rtnl_net_lock() that
+ acquires the global RTNL and a small per-netns RTNL mutex.
+
+ During the conversion, rtnl_net_lock() just adds an extra
+ mutex in every RTNL scope and slows down the operations.
+
+ Once the conversion completes, rtnl_lock() will be removed
+ and rtnetlink will gain per-netns scalability.
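As a rough illustration of the locking pattern this help text describes (a sketch only: it assumes the rtnl_net_lock()/rtnl_net_unlock() helpers named above, which are not part of this hunk):

    static int example_per_netns_op(struct net *net)
    {
            /* during the conversion period this takes the global RTNL
             * plus the small per-netns mutex described above
             */
            rtnl_net_lock(net);
            /* ... rtnetlink work scoped to this netns ... */
            rtnl_net_unlock(net);
            return 0;
    }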
diff --git a/net/Makefile b/net/Makefile
index b06b5539e7a6..60ed5190eda8 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
-obj-$(CONFIG_UNIX_SCM) += unix/
+obj-$(CONFIG_UNIX) += unix/
obj-y += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
@@ -79,3 +79,4 @@ obj-$(CONFIG_XDP_SOCKETS) += xdp/
obj-$(CONFIG_MPTCP) += mptcp/
obj-$(CONFIG_MCTP) += mctp/
obj-$(CONFIG_NET_HANDSHAKE) += handshake/
+obj-$(CONFIG_NET_SHAPER) += shaper/
diff --git a/net/appletalk/Makefile b/net/appletalk/Makefile
index 33164d972d37..152312a15180 100644
--- a/net/appletalk/Makefile
+++ b/net/appletalk/Makefile
@@ -5,6 +5,6 @@
obj-$(CONFIG_ATALK) += appletalk.o
-appletalk-y := aarp.o ddp.o dev.o
+appletalk-y := aarp.o ddp.o
appletalk-$(CONFIG_PROC_FS) += atalk_proc.o
appletalk-$(CONFIG_SYSCTL) += sysctl_net_atalk.o
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9fa0b246902b..05cbb3c227c5 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -432,49 +432,18 @@ static struct atalk_addr *__aarp_proxy_find(struct net_device *dev,
return a ? sa : NULL;
}
-/*
- * Probe a Phase 1 device or a device that requires its Net:Node to
- * be set via an ioctl.
- */
-static void aarp_send_probe_phase1(struct atalk_iface *iface)
-{
- struct ifreq atreq;
- struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
- const struct net_device_ops *ops = iface->dev->netdev_ops;
-
- sa->sat_addr.s_node = iface->address.s_node;
- sa->sat_addr.s_net = ntohs(iface->address.s_net);
-
- /* We pass the Net:Node to the drivers/cards by a Device ioctl. */
- if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
- ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
- if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
- iface->address.s_node != sa->sat_addr.s_node)
- iface->status |= ATIF_PROBE_FAIL;
-
- iface->address.s_net = htons(sa->sat_addr.s_net);
- iface->address.s_node = sa->sat_addr.s_node;
- }
-}
-
-
void aarp_probe_network(struct atalk_iface *atif)
{
- if (atif->dev->type == ARPHRD_LOCALTLK ||
- atif->dev->type == ARPHRD_PPP)
- aarp_send_probe_phase1(atif);
- else {
- unsigned int count;
+ unsigned int count;
- for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) {
- aarp_send_probe(atif->dev, &atif->address);
+ for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) {
+ aarp_send_probe(atif->dev, &atif->address);
- /* Defer 1/10th */
- msleep(100);
+ /* Defer 1/10th */
+ msleep(100);
- if (atif->status & ATIF_PROBE_FAIL)
- break;
- }
+ if (atif->status & ATIF_PROBE_FAIL)
+ break;
}
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 198f5ba2feae..b068651984fe 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -88,6 +88,7 @@ static inline void atalk_remove_socket(struct sock *sk)
static struct sock *atalk_search_socket(struct sockaddr_at *to,
struct atalk_iface *atif)
{
+ struct sock *def_socket = NULL;
struct sock *s;
read_lock_bh(&atalk_sockets_lock);
@@ -98,8 +99,20 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
continue;
if (to->sat_addr.s_net == ATADDR_ANYNET &&
- to->sat_addr.s_node == ATADDR_BCAST)
- goto found;
+ to->sat_addr.s_node == ATADDR_BCAST) {
+ if (atif->address.s_node == at->src_node &&
+ atif->address.s_net == at->src_net) {
+ /* This socket's address matches the address of the interface
+ * that received the packet -- use it
+ */
+ goto found;
+ }
+
+ /* Continue searching for a socket matching the interface address,
+ * but use this socket by default if no other one is found
+ */
+ def_socket = s;
+ }
if (to->sat_addr.s_net == at->src_net &&
(to->sat_addr.s_node == at->src_node ||
@@ -116,7 +129,7 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
goto found;
}
}
- s = NULL;
+ s = def_socket;
found:
read_unlock_bh(&atalk_sockets_lock);
return s;
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
deleted file mode 100644
index 284c8e585533..000000000000
--- a/net/appletalk/dev.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Moved here from drivers/net/net_init.c, which is:
- * Written 1993,1994,1995 by Donald Becker.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-
-static void ltalk_setup(struct net_device *dev)
-{
- /* Fill in the fields of the device structure with localtalk-generic values. */
-
- dev->type = ARPHRD_LOCALTLK;
- dev->hard_header_len = LTALK_HLEN;
- dev->mtu = LTALK_MTU;
- dev->addr_len = LTALK_ALEN;
- dev->tx_queue_len = 10;
-
- dev->broadcast[0] = 0xFF;
-
- dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP;
-}
-
-/**
- * alloc_ltalkdev - Allocates and sets up an localtalk device
- * @sizeof_priv: Size of additional driver-private structure to be allocated
- * for this localtalk device
- *
- * Fill in the fields of the device structure with localtalk-generic
- * values. Basically does everything except registering the device.
- *
- * Constructs a new net device, complete with a private data area of
- * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
- * this private data area.
- */
-
-struct net_device *alloc_ltalkdev(int sizeof_priv)
-{
- return alloc_netdev(sizeof_priv, "lt%d", NET_NAME_UNKNOWN,
- ltalk_setup);
-}
-EXPORT_SYMBOL(alloc_ltalkdev);
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index d945b7c0176d..7aebfe903242 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -40,7 +40,6 @@ static struct ctl_table atalk_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { },
};
static struct ctl_table_header *atalk_table_header;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 294cb9efe3d3..42b910cb4e8e 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -345,7 +345,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- rt = (struct rtable *) dst;
+ rt = dst_rtable(dst);
if (rt->rt_gw_family == AF_INET)
daddr = &rt->rt_gw4;
else
@@ -463,7 +463,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
unlink_clip_vcc(clip_vcc);
return 0;
}
- rt = ip_route_output(&init_net, ip, 0, 1, 0);
+ rt = ip_route_output(&init_net, ip, 0, 0, 0, RT_SCOPE_LINK);
if (IS_ERR(rt))
return PTR_ERR(rt);
neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
diff --git a/net/atm/common.c b/net/atm/common.c
index 2a1ec014e901..9b75699992ff 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -116,7 +116,7 @@ static void vcc_write_space(struct sock *sk)
if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index f81f8d56f5c0..0f7a39aeccc8 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -68,7 +68,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
goto done;
}
error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
- (int __user *)argp) ? -EFAULT : 0;
+ (int __user *)argp);
goto done;
case SIOCINQ:
{
@@ -83,7 +83,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
spin_unlock_irq(&sk->sk_receive_queue.lock);
- error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
+ error = put_user(amount, (int __user *)argp);
goto done;
}
case ATM_SETSC:
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ffef658862db..a948dd47c3f3 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -181,6 +181,7 @@ static void
lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
+ unsigned int len = skb->len;
ATM_SKB(skb)->vcc = vcc;
atm_account_tx(vcc, skb);
@@ -191,7 +192,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += len;
}
static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 36a814f1fbd1..f8137ae693b0 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -324,8 +324,8 @@ out:
return error;
}
-static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int svc_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
@@ -336,7 +336,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
lock_sock(sk);
- error = svc_create(sock_net(sk), newsock, 0, kern);
+ error = svc_create(sock_net(sk), newsock, 0, arg->kern);
if (error)
goto out;
@@ -355,7 +355,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
error = -sk->sk_err;
break;
}
- if (flags & O_NONBLOCK) {
+ if (arg->flags & O_NONBLOCK) {
error = -EAGAIN;
break;
}
diff --git a/net/ax25/Kconfig b/net/ax25/Kconfig
index fdb666607f10..e23a3dc14b93 100644
--- a/net/ax25/Kconfig
+++ b/net/ax25/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig HAMRADIO
- depends on NET && !S390
+ depends on NET
bool "Amateur Radio support"
help
If you want to connect your Linux box to an amateur radio, answer Y
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 558e158c98d0..9f3b8b682adb 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -103,7 +103,7 @@ again:
s->ax25_dev = NULL;
if (sk->sk_socket) {
netdev_put(ax25_dev->dev,
- &ax25_dev->dev_tracker);
+ &s->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);
@@ -467,7 +467,7 @@ einval_put:
goto out_put;
}
-static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
+static void ax25_fillin_cb_from_dev(ax25_cb *ax25, const ax25_dev *ax25_dev)
{
ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
@@ -677,22 +677,33 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- rtnl_lock();
- dev = __dev_get_by_name(&init_net, devname);
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(&init_net, devname);
if (!dev) {
- rtnl_unlock();
+ rcu_read_unlock();
res = -ENODEV;
break;
}
+ if (ax25->ax25_dev) {
+ if (dev == ax25->ax25_dev->dev) {
+ rcu_read_unlock();
+ break;
+ }
+ netdev_put(ax25->ax25_dev->dev, &ax25->dev_tracker);
+ ax25_dev_put(ax25->ax25_dev);
+ }
+
ax25->ax25_dev = ax25_dev_ax25dev(dev);
if (!ax25->ax25_dev) {
- rtnl_unlock();
+ rcu_read_unlock();
res = -ENODEV;
break;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
- rtnl_unlock();
+ netdev_hold(dev, &ax25->dev_tracker, GFP_ATOMIC);
+ ax25_dev_hold(ax25->ax25_dev);
+ rcu_read_unlock();
break;
default:
@@ -1373,13 +1384,15 @@ out_release:
return err;
}
-static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int ax25_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sk_buff *skb;
struct sock *newsk;
+ ax25_dev *ax25_dev;
DEFINE_WAIT(wait);
struct sock *sk;
+ ax25_cb *ax25;
int err = 0;
if (sock->state != SS_UNCONNECTED)
@@ -1409,7 +1422,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
if (skb)
break;
- if (flags & O_NONBLOCK) {
+ if (arg->flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
@@ -1434,6 +1447,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
+ ax25 = sk_to_ax25(newsk);
+ ax25_dev = ax25->ax25_dev;
+ netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+ ax25_dev_hold(ax25_dev);
out:
release_sock(sk);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index c5462486dbca..3733c0254a50 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -22,11 +22,12 @@
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-ax25_dev *ax25_dev_list;
+static LIST_HEAD(ax25_dev_list);
DEFINE_SPINLOCK(ax25_dev_lock);
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
@@ -34,10 +35,11 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
ax25_dev *ax25_dev, *res = NULL;
spin_lock_bh(&ax25_dev_lock);
- for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
+ list_for_each_entry(ax25_dev, &ax25_dev_list, list)
if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
ax25_dev_hold(ax25_dev);
+ break;
}
spin_unlock_bh(&ax25_dev_lock);
@@ -59,7 +61,6 @@ void ax25_dev_device_up(struct net_device *dev)
}
refcount_set(&ax25_dev->refcount, 1);
- dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
ax25_dev->forward = NULL;
@@ -78,17 +79,19 @@ void ax25_dev_device_up(struct net_device *dev)
ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2;
ax25_dev->values[AX25_VALUES_PACLEN] = AX25_DEF_PACLEN;
ax25_dev->values[AX25_VALUES_PROTOCOL] = AX25_DEF_PROTOCOL;
+
+#ifdef CONFIG_AX25_DAMA_SLAVE
ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
+#endif
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_ds_setup_timer(ax25_dev);
#endif
spin_lock_bh(&ax25_dev_lock);
- ax25_dev->next = ax25_dev_list;
- ax25_dev_list = ax25_dev;
+ list_add(&ax25_dev->list, &ax25_dev_list);
+ rcu_assign_pointer(dev->ax25_ptr, ax25_dev);
spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_hold(ax25_dev);
ax25_register_dev_sysctl(ax25_dev);
}
@@ -105,38 +108,25 @@ void ax25_dev_device_down(struct net_device *dev)
spin_lock_bh(&ax25_dev_lock);
#ifdef CONFIG_AX25_DAMA_SLAVE
- ax25_ds_del_timer(ax25_dev);
+ timer_shutdown_sync(&ax25_dev->dama.slave_timer);
#endif
/*
* Remove any packet forwarding that points to this device.
*/
- for (s = ax25_dev_list; s != NULL; s = s->next)
+ list_for_each_entry(s, &ax25_dev_list, list)
if (s->forward == dev)
s->forward = NULL;
- if ((s = ax25_dev_list) == ax25_dev) {
- ax25_dev_list = s->next;
- goto unlock_put;
- }
-
- while (s != NULL && s->next != NULL) {
- if (s->next == ax25_dev) {
- s->next = ax25_dev->next;
- goto unlock_put;
+ list_for_each_entry(s, &ax25_dev_list, list) {
+ if (s == ax25_dev) {
+ list_del(&s->list);
+ break;
}
-
- s = s->next;
}
- spin_unlock_bh(&ax25_dev_lock);
- dev->ax25_ptr = NULL;
- ax25_dev_put(ax25_dev);
- return;
-unlock_put:
+ RCU_INIT_POINTER(dev->ax25_ptr, NULL);
spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_put(ax25_dev);
- dev->ax25_ptr = NULL;
netdev_put(dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
@@ -200,16 +190,13 @@ struct net_device *ax25_fwd_dev(struct net_device *dev)
*/
void __exit ax25_dev_free(void)
{
- ax25_dev *s, *ax25_dev;
+ ax25_dev *s, *n;
spin_lock_bh(&ax25_dev_lock);
- ax25_dev = ax25_dev_list;
- while (ax25_dev != NULL) {
- s = ax25_dev;
- netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
- ax25_dev = ax25_dev->next;
- kfree(s);
+ list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+ netdev_put(s->dev, &s->dev_tracker);
+ list_del(&s->list);
+ ax25_dev_put(s);
}
- ax25_dev_list = NULL;
spin_unlock_bh(&ax25_dev_lock);
}
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 36249776c021..215d4ccf12b9 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -122,6 +122,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
if (dev == NULL)
dev = skb->dev;
+ rcu_read_lock();
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
kfree_skb(skb);
goto put;
@@ -202,7 +203,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
ax25_queue_xmit(skb, dev);
put:
-
+ rcu_read_unlock();
ax25_route_lock_unuse();
return NETDEV_TX_OK;
}
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 3db76d2470e9..8bca2ace98e5 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -39,10 +39,14 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
* specified.
*/
if (paclen == 0) {
- if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
+ rcu_read_lock();
+ ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25_dev) {
+ rcu_read_unlock();
return NULL;
-
+ }
paclen = ax25_dev->values[AX25_VALUES_PACLEN];
+ rcu_read_unlock();
}
/*
@@ -53,13 +57,19 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
return ax25; /* It already existed */
}
- if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
+ rcu_read_lock();
+ ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25_dev) {
+ rcu_read_unlock();
return NULL;
+ }
- if ((ax25 = ax25_create_cb()) == NULL)
+ if ((ax25 = ax25_create_cb()) == NULL) {
+ rcu_read_unlock();
return NULL;
-
+ }
ax25_fillin_cb(ax25, ax25_dev);
+ rcu_read_unlock();
ax25->source_addr = *src;
ax25->dest_addr = *dest;
@@ -358,7 +368,9 @@ void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
+ rcu_read_lock();
skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
+ rcu_read_unlock();
ptr = skb_push(skb, 1);
*ptr = 0x00; /* KISS */
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index b7c4d656a94b..69de75db0c9c 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -406,6 +406,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
ax25_route_lock_unuse();
return -EHOSTUNREACH;
}
+ rcu_read_lock();
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
@@ -442,6 +443,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
put:
+ rcu_read_unlock();
ax25_route_lock_unuse();
return err;
}
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index db66e11e7fe8..68753aa30334 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -141,8 +141,6 @@ static const struct ctl_table ax25_param_table[] = {
.extra2 = &max_ds_timeout
},
#endif
-
- { } /* that's all, folks! */
};
int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
@@ -155,6 +153,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
if (!table)
return -ENOMEM;
+ BUILD_BUG_ON(ARRAY_SIZE(ax25_param_table) != AX25_MAX_VALUES);
for (k = 0; k < AX25_MAX_VALUES; k++)
table[k].data = &ax25_dev->values[k];
@@ -171,7 +170,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
{
struct ctl_table_header *header = ax25_dev->sysheader;
- struct ctl_table *table;
+ const struct ctl_table *table;
if (header) {
ax25_dev->sysheader = NULL;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 74b49c35ddc1..b12645949ae5 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
@@ -324,8 +325,7 @@ batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
/* check if there is enough space for the optional TVLV */
next_buff_pos += ntohs(ogm_packet->tvlv_len);
- return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+ return next_buff_pos <= packet_len;
}
/* send a batman ogm to a given interface */
@@ -371,8 +371,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
- ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ?
- "on" : "off"),
+ str_on_off(batadv_ogm_packet->flags & BATADV_DIRECTLINK),
hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index ac11f1f08db0..d35479c465e2 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -113,8 +113,6 @@ static void
batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
{
ewma_throughput_init(&hardif_neigh->bat_v.throughput);
- INIT_WORK(&hardif_neigh->bat_v.metric_work,
- batadv_v_elp_throughput_metric_update);
}
/**
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 1d704574e6bf..b065578b4436 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -18,6 +18,7 @@
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
@@ -26,6 +27,7 @@
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -42,6 +44,18 @@
#include "send.h"
/**
+ * struct batadv_v_metric_queue_entry - list of hardif neighbors which require
+ * a metric update
+ */
+struct batadv_v_metric_queue_entry {
+ /** @hardif_neigh: hardif neighbor scheduled for metric update */
+ struct batadv_hardif_neigh_node *hardif_neigh;
+
+ /** @list: list node for metric_queue */
+ struct list_head list;
+};
+
+/**
* batadv_v_elp_start_timer() - restart timer for ELP periodic work
* @hard_iface: the interface for which the timer has to be reset
*/
@@ -59,25 +73,36 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
/**
* batadv_v_elp_get_throughput() - get the throughput towards a neighbour
* @neigh: the neighbour for which the throughput has to be obtained
+ * @pthroughput: calculated throughput towards the given neighbour in multiples
+ * of 100kbps (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
*
- * Return: The throughput towards the given neighbour in multiples of 100kpbs
- * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
+ * Return: true when value behind @pthroughput was set
*/
-static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh,
+ u32 *pthroughput)
{
struct batadv_hard_iface *hard_iface = neigh->if_incoming;
+ struct net_device *soft_iface = hard_iface->soft_iface;
struct ethtool_link_ksettings link_settings;
struct net_device *real_netdev;
struct station_info sinfo;
u32 throughput;
int ret;
+ /* don't query throughput when no longer associated with any
+ * batman-adv interface
+ */
+ if (!soft_iface)
+ return false;
+
/* if the user specified a customised value for this interface, then
* return it directly
*/
throughput = atomic_read(&hard_iface->bat_v.throughput_override);
- if (throughput != 0)
- return throughput;
+ if (throughput != 0) {
+ *pthroughput = throughput;
+ return true;
+ }
/* if this is a wireless device, then ask its throughput through
* cfg80211 API
@@ -104,27 +129,39 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
* possible to delete this neighbor. For now set
* the throughput metric to 0.
*/
- return 0;
+ *pthroughput = 0;
+ return true;
}
if (ret)
goto default_throughput;
- if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
- return sinfo.expected_throughput / 100;
+ if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
+ *pthroughput = sinfo.expected_throughput / 100;
+ return true;
+ }
/* try to estimate the expected throughput based on reported tx
* rates
*/
- if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
- return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
+ if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
+ *pthroughput = cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
+ return true;
+ }
goto default_throughput;
}
+ /* only use rtnl_trylock because the elp worker will be cancelled while
+ * the rtnl_lock is held. cancel_delayed_work_sync() would otherwise
+ * wait forever when the elp work item was started and is then also
+ * trying to take the rtnl_lock
+ */
+ if (!rtnl_trylock())
+ return false;
+
/* if not a wifi interface, check if this device provides data via
* ethtool (e.g. an Ethernet adapter)
*/
- rtnl_lock();
ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
rtnl_unlock();
if (ret == 0) {
@@ -135,13 +172,15 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
throughput = link_settings.base.speed;
- if (throughput && throughput != SPEED_UNKNOWN)
- return throughput * 10;
+ if (throughput && throughput != SPEED_UNKNOWN) {
+ *pthroughput = throughput * 10;
+ return true;
+ }
}
default_throughput:
if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
- batadv_info(hard_iface->soft_iface,
+ batadv_info(soft_iface,
"WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
hard_iface->net_dev->name,
BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
@@ -150,31 +189,26 @@ default_throughput:
}
/* if none of the above cases apply, return the base_throughput */
- return BATADV_THROUGHPUT_DEFAULT_VALUE;
+ *pthroughput = BATADV_THROUGHPUT_DEFAULT_VALUE;
+ return true;
}
/**
* batadv_v_elp_throughput_metric_update() - worker updating the throughput
* metric of a single hop neighbour
- * @work: the work queue item
+ * @neigh: the neighbour to probe
*/
-void batadv_v_elp_throughput_metric_update(struct work_struct *work)
+static void
+batadv_v_elp_throughput_metric_update(struct batadv_hardif_neigh_node *neigh)
{
- struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
- struct batadv_hardif_neigh_node *neigh;
-
- neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
- metric_work);
- neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
- bat_v);
+ u32 throughput;
+ bool valid;
- ewma_throughput_add(&neigh->bat_v.throughput,
- batadv_v_elp_get_throughput(neigh));
+ valid = batadv_v_elp_get_throughput(neigh, &throughput);
+ if (!valid)
+ return;
- /* decrement refcounter to balance increment performed before scheduling
- * this task
- */
- batadv_hardif_neigh_put(neigh);
+ ewma_throughput_add(&neigh->bat_v.throughput, throughput);
}
/**
@@ -248,14 +282,16 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
*/
static void batadv_v_elp_periodic_work(struct work_struct *work)
{
+ struct batadv_v_metric_queue_entry *metric_entry;
+ struct batadv_v_metric_queue_entry *metric_safe;
struct batadv_hardif_neigh_node *hardif_neigh;
struct batadv_hard_iface *hard_iface;
struct batadv_hard_iface_bat_v *bat_v;
struct batadv_elp_packet *elp_packet;
+ struct list_head metric_queue;
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
- bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -291,6 +327,8 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
atomic_inc(&hard_iface->bat_v.elp_seqno);
+ INIT_LIST_HEAD(&metric_queue);
+
/* The throughput metric is updated on each sent packet. This way, if a
* node is dead and no longer sends packets, batman-adv is still able to
* react timely to its death.
@@ -315,16 +353,28 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
/* Reading the estimated throughput from cfg80211 is a task that
* may sleep and that is not allowed in an rcu protected
- * context. Therefore schedule a task for that.
+ * context. Therefore add it to metric_queue and process it
+ * outside rcu protected context.
*/
- ret = queue_work(batadv_event_workqueue,
- &hardif_neigh->bat_v.metric_work);
-
- if (!ret)
+ metric_entry = kzalloc(sizeof(*metric_entry), GFP_ATOMIC);
+ if (!metric_entry) {
batadv_hardif_neigh_put(hardif_neigh);
+ continue;
+ }
+
+ metric_entry->hardif_neigh = hardif_neigh;
+ list_add(&metric_entry->list, &metric_queue);
}
rcu_read_unlock();
+ list_for_each_entry_safe(metric_entry, metric_safe, &metric_queue, list) {
+ batadv_v_elp_throughput_metric_update(metric_entry->hardif_neigh);
+
+ batadv_hardif_neigh_put(metric_entry->hardif_neigh);
+ list_del(&metric_entry->list);
+ kfree(metric_entry);
+ }
+
restart_timer:
batadv_v_elp_start_timer(hard_iface);
out:
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index 9e2740195fa2..c9cb0a307100 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -10,7 +10,6 @@
#include "main.h"
#include <linux/skbuff.h>
-#include <linux/workqueue.h>
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
@@ -19,6 +18,5 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
int batadv_v_elp_packet_recv(struct sk_buff *skb,
struct batadv_hard_iface *if_incoming);
-void batadv_v_elp_throughput_metric_update(struct work_struct *work);
#endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index e503ee0d896b..8f89ffe6020c 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -839,8 +839,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
/* check if there is enough space for the optional TVLV */
next_buff_pos += ntohs(ogm2_packet->tvlv_len);
- return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+ return next_buff_pos <= packet_len;
}
/**
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5f46ca3d4bb8..8c814f790d17 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc16.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -33,11 +34,11 @@
#include <linux/sprintf.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -46,7 +47,6 @@
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "soft-interface.h"
#include "translation-table.h"
static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@ -1946,16 +1946,15 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim) {
+ bool local = batadv_is_my_client(bat_priv, ethhdr->h_source, vid);
+
/* possible optimization: race for a claim */
/* No claim exists yet, claim it for us!
*/
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
- __func__, ethhdr->h_source,
- batadv_is_my_client(bat_priv,
- ethhdr->h_source, vid) ?
- "yes" : "no");
+ __func__, ethhdr->h_source, str_yes_no(local));
batadv_handle_claim(bat_priv, primary_if,
primary_if->net_dev->dev_addr,
ethhdr->h_source, vid);
@@ -2233,25 +2232,16 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
- int ifindex;
int ret = 0;
- ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->bla.claim_hash;
@@ -2403,25 +2393,16 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
- int ifindex;
int ret = 0;
- ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->bla.backbone_hash;
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 28a939d56090..e5a07152d4ec 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -7,11 +7,11 @@
#include "distributed-arp-table.h"
#include "main.h"
-#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -32,11 +32,11 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/udp.h>
+#include <linux/unaligned.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
@@ -46,7 +46,6 @@
#include "netlink.h"
#include "originator.h"
#include "send.h"
-#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"
@@ -684,7 +683,7 @@ static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
- goto out;
+ return ret;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
@@ -728,7 +727,6 @@ free_orig:
batadv_orig_node_put(cand[i].orig_node);
}
-out:
kfree(cand);
return ret;
}
@@ -938,25 +936,16 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
- int ifindex;
int ret = 0;
- ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->dat.hash;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0ddd8b4b3f4c..f68e34ed1f62 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -10,6 +10,7 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -31,7 +32,6 @@
#include <linux/sprintf.h>
#include <linux/stddef.h>
#include <linux/udp.h>
-#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -40,7 +40,6 @@
#include "netlink.h"
#include "originator.h"
#include "routing.h"
-#include "soft-interface.h"
#include "translation-table.h"
/* These are the offsets of the "hw type" and "hw address length" in the dhcp
@@ -502,22 +501,13 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
- int ifindex;
int ret;
- ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 5fc754b0b3f7..333e947afcce 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -14,7 +14,6 @@
#include <linux/crc32c.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -38,6 +37,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
+#include <net/genetlink.h>
#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -637,6 +637,13 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+
+ /* VID 0 is only used to indicate "priority tag" frames which only
+ * contain priority information and no VID.
+ */
+ if (vid == 0)
+ return BATADV_NO_FLAGS;
+
vid |= BATADV_VLAN_HAS_TAG;
return vid;
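
batadv_get_vid() now treats VID 0 as "no VLAN": an 802.1Q tag whose VID field is 0 is a priority tag and carries only the PCP/DEI bits. A small runnable sketch of how those fields sit in the 16-bit TCI (mask and shift values as in <linux/if_vlan.h>):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13		/* PCP: upper 3 bits  */
#define VLAN_VID_MASK	0x0fff		/* VID: lower 12 bits */

int main(void)
{
	uint16_t tci = 0xA000;		/* PCP=5, DEI=0, VID=0: a priority tag */
	uint16_t vid = tci & VLAN_VID_MASK;
	uint16_t pcp = tci >> VLAN_PRIO_SHIFT;

	if (vid == 0)
		printf("priority-tagged frame (PCP %u), treat as untagged\n", pcp);
	else
		printf("VLAN %u\n", vid);
	return 0;
}
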
@@ -691,29 +698,31 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
"%s%s", BATADV_UEV_TYPE_VAR,
batadv_uev_type_str[type]);
if (!uevent_env[0])
- goto out;
+ goto report_error;
uevent_env[1] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_ACTION_VAR,
batadv_uev_action_str[action]);
if (!uevent_env[1])
- goto out;
+ goto free_first_env;
/* If the event is DEL, ignore the data field */
if (action != BATADV_UEV_DEL) {
uevent_env[2] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_DATA_VAR, data);
if (!uevent_env[2])
- goto out;
+ goto free_second_env;
}
ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
kfree(uevent_env[2]);
+free_second_env:
+ kfree(uevent_env[1]);
+free_first_env:
+ kfree(uevent_env[0]);
if (ret)
+report_error:
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
batadv_uev_type_str[type],
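
The batadv_throw_uevent() hunk restructures the cleanup from a single out: label into dedicated unwind labels, so each failure path frees only what was actually allocated before jumping to the error report. A self-contained sketch of that unwind-by-label pattern (the strings and function name below are made up for illustration, only the label naming mirrors the hunk):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate three env strings; on failure, unwind only what already succeeded. */
static int build_env(char *env[3])
{
	env[0] = strdup("TYPE=example");
	if (!env[0])
		goto report_error;

	env[1] = strdup("ACTION=add");
	if (!env[1])
		goto free_first_env;

	env[2] = strdup("DATA=42");
	if (!env[2])
		goto free_second_env;

	return 0;		/* caller owns env[0..2] */

free_second_env:
	free(env[1]);
free_first_env:
	free(env[0]);
report_error:
	fprintf(stderr, "failed to build uevent environment\n");
	return -1;
}

int main(void)
{
	char *env[3] = { NULL, NULL, NULL };

	if (build_env(env))
		return 1;

	printf("%s %s %s\n", env[0], env[1], env[2]);
	free(env[2]);
	free(env[1]);
	free(env[0]);
	return 0;
}
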
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 870dcd7f1786..964f3088af5b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -7,13 +7,13 @@
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <marek.lindner@mailbox.org>, " \
"Simon Wunderlich <sw@simonwunderlich.de>"
#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2024.0"
+#define BATADV_SOURCE_VERSION "2025.0"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 14088c4ff2f6..d95c418484fa 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -46,7 +47,6 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -56,7 +56,6 @@
#include "log.h"
#include "netlink.h"
#include "send.h"
-#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"
@@ -2104,21 +2103,13 @@ batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
struct batadv_hard_iface **primary_if)
{
struct batadv_hard_iface *hard_iface = NULL;
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
- int ifindex;
int ret = 0;
- ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 1f7ed9d4f6fd..eefba5600ded 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -15,8 +15,6 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -160,8 +158,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
*
* Return: interface index, or 0.
*/
-int
-batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
+static int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
{
struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
@@ -883,14 +880,14 @@ static int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv,
}
/**
- * batadv_netlink_get_hardif() - Get hardif attributes
+ * batadv_netlink_cmd_get_hardif() - Get hardif attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
-static int batadv_netlink_get_hardif(struct sk_buff *skb,
- struct genl_info *info)
+static int batadv_netlink_cmd_get_hardif(struct sk_buff *skb,
+ struct genl_info *info)
{
struct batadv_hard_iface *hard_iface = info->user_ptr[1];
struct batadv_priv *bat_priv = info->user_ptr[0];
@@ -966,28 +963,16 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb,
static int
batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hard_iface *hard_iface;
struct batadv_priv *bat_priv;
- int ifindex;
int portid = NETLINK_CB(cb->skb).portid;
int skip = cb->args[0];
int i = 0;
- ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface)
- return -ENODEV;
-
- if (!batadv_softif_is_valid(soft_iface)) {
- dev_put(soft_iface);
- return -ENODEV;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
@@ -1152,23 +1137,17 @@ static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info)
}
/**
- * batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
+ * batadv_netlink_get_softif_from_ifindex() - Get soft-iface from ifindex
* @net: the applicable net namespace
- * @info: receiver information
+ * @ifindex: index of the soft interface
*
* Return: Pointer to soft interface (with increased refcnt) on success, error
* pointer on error
*/
static struct net_device *
-batadv_get_softif_from_info(struct net *net, struct genl_info *info)
+batadv_netlink_get_softif_from_ifindex(struct net *net, int ifindex)
{
struct net_device *soft_iface;
- int ifindex;
-
- if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
- return ERR_PTR(-EINVAL);
-
- ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface)
@@ -1186,28 +1165,61 @@ err_put_softif:
}
/**
- * batadv_get_hardif_from_info() - Retrieve hardif from genl attributes
- * @bat_priv: the bat priv with all the soft interface information
+ * batadv_netlink_get_softif_from_info() - Get soft-iface from genl attributes
* @net: the applicable net namespace
* @info: receiver information
*
+ * Return: Pointer to soft interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct net_device *
+batadv_netlink_get_softif_from_info(struct net *net, struct genl_info *info)
+{
+ int ifindex;
+
+ if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
+
+ return batadv_netlink_get_softif_from_ifindex(net, ifindex);
+}
+
+/**
+ * batadv_netlink_get_softif() - Retrieve soft interface from netlink callback
+ * @cb: callback structure containing arguments
+ *
+ * Return: Pointer to soft interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+struct net_device *batadv_netlink_get_softif(struct netlink_callback *cb)
+{
+ int ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return ERR_PTR(-ENONET);
+
+ return batadv_netlink_get_softif_from_ifindex(sock_net(cb->skb->sk),
+ ifindex);
+}
+
+/**
+ * batadv_netlink_get_hardif_from_ifindex() - Get hard-iface from ifindex
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @ifindex: index of the hard interface
+ *
* Return: Pointer to hard interface (with increased refcnt) on success, error
* pointer on error
*/
static struct batadv_hard_iface *
-batadv_get_hardif_from_info(struct batadv_priv *bat_priv, struct net *net,
- struct genl_info *info)
+batadv_netlink_get_hardif_from_ifindex(struct batadv_priv *bat_priv,
+ struct net *net, int ifindex)
{
struct batadv_hard_iface *hard_iface;
struct net_device *hard_dev;
- unsigned int hardif_index;
-
- if (!info->attrs[BATADV_ATTR_HARD_IFINDEX])
- return ERR_PTR(-EINVAL);
-
- hardif_index = nla_get_u32(info->attrs[BATADV_ATTR_HARD_IFINDEX]);
- hard_dev = dev_get_by_index(net, hardif_index);
+ hard_dev = dev_get_by_index(net, ifindex);
if (!hard_dev)
return ERR_PTR(-ENODEV);
@@ -1232,6 +1244,51 @@ err_put_harddev:
}
/**
+ * batadv_netlink_get_hardif_from_info() - Get hard-iface from genl attributes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to hard interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct batadv_hard_iface *
+batadv_netlink_get_hardif_from_info(struct batadv_priv *bat_priv,
+ struct net *net, struct genl_info *info)
+{
+ int ifindex;
+
+ if (!info->attrs[BATADV_ATTR_HARD_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[BATADV_ATTR_HARD_IFINDEX]);
+
+ return batadv_netlink_get_hardif_from_ifindex(bat_priv, net, ifindex);
+}
+
+/**
+ * batadv_netlink_get_hardif() - Retrieve hard interface from netlink callback
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cb: callback structure containing arguments
+ *
+ * Return: Pointer to hard interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+struct batadv_hard_iface *
+batadv_netlink_get_hardif(struct batadv_priv *bat_priv,
+ struct netlink_callback *cb)
+{
+ int ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_HARD_IFINDEX);
+ if (!ifindex)
+ return ERR_PTR(-ENONET);
+
+ return batadv_netlink_get_hardif_from_ifindex(bat_priv,
+ sock_net(cb->skb->sk),
+ ifindex);
+}
+
+/**
* batadv_get_vlan_from_info() - Retrieve vlan from genl attributes
* @bat_priv: the bat priv with all the soft interface information
* @net: the applicable net namespace
@@ -1290,7 +1347,7 @@ static int batadv_pre_doit(const struct genl_split_ops *ops,
return -EINVAL;
if (ops->internal_flags & BATADV_FLAG_NEED_MESH) {
- soft_iface = batadv_get_softif_from_info(net, info);
+ soft_iface = batadv_netlink_get_softif_from_info(net, info);
if (IS_ERR(soft_iface))
return PTR_ERR(soft_iface);
@@ -1299,7 +1356,8 @@ static int batadv_pre_doit(const struct genl_split_ops *ops,
}
if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF) {
- hard_iface = batadv_get_hardif_from_info(bat_priv, net, info);
+ hard_iface = batadv_netlink_get_hardif_from_info(bat_priv, net,
+ info);
if (IS_ERR(hard_iface)) {
ret = PTR_ERR(hard_iface);
goto err_put_softif;
@@ -1392,7 +1450,7 @@ static const struct genl_small_ops batadv_netlink_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
/* can be retrieved by unprivileged users */
.dumpit = batadv_netlink_dump_hardif,
- .doit = batadv_netlink_get_hardif,
+ .doit = batadv_netlink_cmd_get_hardif,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_HARDIF,
},
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index 876d2806a67d..2097c2ae98f1 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -15,7 +15,10 @@
void batadv_netlink_register(void);
void batadv_netlink_unregister(void);
-int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype);
+struct net_device *batadv_netlink_get_softif(struct netlink_callback *cb);
+struct batadv_hard_iface *
+batadv_netlink_get_hardif(struct batadv_priv *bat_priv,
+ struct netlink_callback *cb);
int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
u8 result, u32 test_time, u64 total_bytes,
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 71c143d4b6d0..bcc2e20e0cd6 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -9,9 +9,11 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -25,9 +27,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
-#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
-#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "distributed-arp-table.h"
@@ -40,7 +40,6 @@
#include "netlink.h"
#include "network-coding.h"
#include "routing.h"
-#include "soft-interface.h"
#include "translation-table.h"
/* hash class keys */
@@ -132,6 +131,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
+ * @vid: the VLAN identifier
+ *
+ * Return: true when no vlan is set or when the VLAN is in the correct range,
+ * false otherwise
+ */
+static bool batadv_vlan_id_valid(unsigned short vid)
+{
+ unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
+
+ if (vid == 0)
+ return true;
+
+ if (!(vid & BATADV_VLAN_HAS_TAG))
+ return false;
+
+ if (non_vlan)
+ return false;
+
+ return true;
+}
+
+/**
* batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
* object
* @orig_node: the originator serving the VLAN
@@ -149,6 +171,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
{
struct batadv_orig_node_vlan *vlan;
+ if (!batadv_vlan_id_valid(vid))
+ return NULL;
+
spin_lock_bh(&orig_node->vlan_list_lock);
/* first look if an object for this vid already exists */
@@ -728,64 +753,48 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
*/
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net *net = sock_net(cb->skb->sk);
+ struct batadv_hard_iface *primary_if, *hard_iface;
struct net_device *soft_iface;
- struct net_device *hard_iface = NULL;
- struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
struct batadv_priv *bat_priv;
- struct batadv_hard_iface *primary_if = NULL;
int ret;
- int ifindex, hard_ifindex;
- ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
- goto out;
+ goto out_put_soft_iface;
}
- hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_HARD_IFINDEX);
- if (hard_ifindex) {
- hard_iface = dev_get_by_index(net, hard_ifindex);
- if (hard_iface)
- hardif = batadv_hardif_get_by_netdev(hard_iface);
-
- if (!hardif) {
- ret = -ENODEV;
- goto out;
- }
-
- if (hardif->soft_iface != soft_iface) {
- ret = -ENOENT;
- goto out;
- }
+ hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
+ if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
+ ret = PTR_ERR(hard_iface);
+ goto out_put_primary_if;
+ } else if (IS_ERR(hard_iface)) {
+ /* => PTR_ERR(hard_iface) == -ENONET
+ * => no hard-iface given, ok
+ */
+ hard_iface = BATADV_IF_DEFAULT;
}
if (!bat_priv->algo_ops->neigh.dump) {
ret = -EOPNOTSUPP;
- goto out;
+ goto out_put_hard_iface;
}
- bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);
+ bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hard_iface);
ret = msg->len;
- out:
- batadv_hardif_put(hardif);
- dev_put(hard_iface);
+out_put_hard_iface:
+ batadv_hardif_put(hard_iface);
+out_put_primary_if:
batadv_hardif_put(primary_if);
+out_put_soft_iface:
dev_put(soft_iface);
return ret;
@@ -1266,6 +1275,8 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
/* for all origins... */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
+ if (hlist_empty(head))
+ continue;
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
@@ -1313,64 +1324,48 @@ static void batadv_purge_orig(struct work_struct *work)
*/
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net *net = sock_net(cb->skb->sk);
+ struct batadv_hard_iface *primary_if, *hard_iface;
struct net_device *soft_iface;
- struct net_device *hard_iface = NULL;
- struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
struct batadv_priv *bat_priv;
- struct batadv_hard_iface *primary_if = NULL;
int ret;
- int ifindex, hard_ifindex;
- ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
- goto out;
+ goto out_put_soft_iface;
}
- hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
- BATADV_ATTR_HARD_IFINDEX);
- if (hard_ifindex) {
- hard_iface = dev_get_by_index(net, hard_ifindex);
- if (hard_iface)
- hardif = batadv_hardif_get_by_netdev(hard_iface);
-
- if (!hardif) {
- ret = -ENODEV;
- goto out;
- }
-
- if (hardif->soft_iface != soft_iface) {
- ret = -ENOENT;
- goto out;
- }
+ hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
+ if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
+ ret = PTR_ERR(hard_iface);
+ goto out_put_primary_if;
+ } else if (IS_ERR(hard_iface)) {
+ /* => PTR_ERR(hard_iface) == -ENONET
+ * => no hard-iface given, ok
+ */
+ hard_iface = BATADV_IF_DEFAULT;
}
if (!bat_priv->algo_ops->orig.dump) {
ret = -EOPNOTSUPP;
- goto out;
+ goto out_put_hard_iface;
}
- bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif);
+ bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hard_iface);
ret = msg->len;
- out:
- batadv_hardif_put(hardif);
- dev_put(hard_iface);
+out_put_hard_iface:
+ batadv_hardif_put(hard_iface);
+out_put_primary_if:
batadv_hardif_put(primary_if);
+out_put_soft_iface:
dev_put(soft_iface);
return ret;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 89c51b3cf430..822d788a5f86 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -159,7 +159,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
bat_priv->mtu_set_by_user = new_mtu;
return 0;
@@ -637,6 +637,14 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
if (proto != htons(ETH_P_8021Q))
return -EINVAL;
+ /* VID 0 is only used to indicate "priority tag" frames which only
+ * contain priority information and no VID. No management structures
+ * should be created for this VID and it should be handled like an
+ * untagged frame.
+ */
+ if (vid == 0)
+ return 0;
+
vid |= BATADV_VLAN_HAS_TAG;
/* if a new vlan is getting created and it already exists, it means that
@@ -684,6 +692,12 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
if (proto != htons(ETH_P_8021Q))
return -EINVAL;
+ /* "priority tag" frames are handled like "untagged" frames
+ * and no softif_vlan needs to be destroyed
+ */
+ if (vid == 0)
+ return 0;
+
vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
if (!vlan)
return -ENOENT;
@@ -783,13 +797,13 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->tt.vn, 0);
- atomic_set(&bat_priv->tt.local_changes, 0);
atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
atomic_set(&bat_priv->bla.num_requests, 0);
#endif
atomic_set(&bat_priv->tp_num, 0);
+ WRITE_ONCE(bat_priv->tt.local_changes, 0);
bat_priv->tt.last_changeset = NULL;
bat_priv->tt.last_changeset_len = 0;
bat_priv->isolation_mark = 0;
@@ -1020,9 +1034,10 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->netdev_ops = &batadv_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = batadv_softif_free;
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
/* can't call min_mtu, because the needed variables
* have not been initialized yet
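
The soft-interface hunks store dev->mtu and bat_priv->tt.local_changes with WRITE_ONCE() because other contexts read those fields without taking the writer's lock; the annotation keeps the store tear-free and documents the intentional race for tools like KCSAN, pairing with READ_ONCE() on the reader side (see the translation-table changes below). A kernel-style sketch of that pairing, with made-up type and function names, not a standalone program:

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_priv {
	spinlock_t lock;	/* serializes writers of @counter */
	size_t counter;		/* also read locklessly, hence the annotations */
};

/* Writer: still serialized by @lock, but annotated because readers do not lock. */
static void demo_bump(struct demo_priv *priv)
{
	spin_lock_bh(&priv->lock);
	WRITE_ONCE(priv->counter, priv->counter + 1);
	spin_unlock_bh(&priv->lock);
}

/* Reader: a single tear-free load without holding the lock. */
static bool demo_has_work(struct demo_priv *priv)
{
	return READ_ONCE(priv->counter) != 0;
}
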
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
index 5dd52bc5cabb..6b816cf1a953 100644
--- a/net/batman-adv/trace.h
+++ b/net/batman-adv/trace.h
@@ -40,8 +40,8 @@ TRACE_EVENT(batadv_dbg,
),
TP_fast_assign(
- __assign_str(device, bat_priv->soft_iface->name);
- __assign_str(driver, KBUILD_MODNAME);
+ __assign_str(device);
+ __assign_str(driver);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b95c36765d04..d4b71d34310f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc32c.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -28,6 +29,7 @@
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
+#include <linux/overflow.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
@@ -38,7 +40,6 @@
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -209,20 +210,6 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
/**
- * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
- * @rcu: rcu pointer of the tt_local_entry
- */
-static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_tt_local_entry *tt_local_entry;
-
- tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
- common.rcu);
-
- kmem_cache_free(batadv_tl_cache, tt_local_entry);
-}
-
-/**
* batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the nc_node
@@ -236,7 +223,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
batadv_softif_vlan_put(tt_local_entry->vlan);
- call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
+ kfree_rcu(tt_local_entry, common.rcu);
}
/**
@@ -255,20 +242,6 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
}
/**
- * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
- * @rcu: rcu pointer of the tt_global_entry
- */
-static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_tt_global_entry *tt_global_entry;
-
- tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
- common.rcu);
-
- kmem_cache_free(batadv_tg_cache, tt_global_entry);
-}
-
-/**
* batadv_tt_global_entry_release() - release tt_global_entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the nc_node
@@ -282,7 +255,7 @@ void batadv_tt_global_entry_release(struct kref *ref)
batadv_tt_global_del_orig_list(tt_global_entry);
- call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
+ kfree_rcu(tt_global_entry, common.rcu);
}
/**
@@ -408,19 +381,6 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
}
/**
- * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
- * @rcu: rcu pointer of the orig_entry
- */
-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_tt_orig_list_entry *orig_entry;
-
- orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
- kmem_cache_free(batadv_tt_orig_cache, orig_entry);
-}
-
-/**
* batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
@@ -433,7 +393,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
refcount);
batadv_orig_node_put(orig_entry->orig_node);
- call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+ kfree_rcu(orig_entry, rcu);
}
/**
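
The three *_free_rcu() trampolines above are removed because kfree_rcu(ptr, rcu_member) can queue the object for freeing after an RCU grace period directly; the conversion also assumes that kfree() can release objects that came from a kmem_cache, which current slab implementations support. A kernel-style sketch of the resulting release path (type and function names are made up, this is not a standalone program):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	struct kref refcount;
	struct rcu_head rcu;	/* reclaimed after a grace period */
	/* ... payload ... */
};

/* kref release callback: no hand-written call_rcu() trampoline needed,
 * kfree_rcu() schedules the kfree() for after the current grace period.
 */
static void demo_entry_release(struct kref *ref)
{
	struct demo_entry *entry = container_of(ref, struct demo_entry,
						refcount);

	kfree_rcu(entry, rcu);
}

static void demo_entry_put(struct demo_entry *entry)
{
	kref_put(&entry->refcount, demo_entry_release);
}
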
@@ -463,8 +423,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
struct batadv_tt_change_node *tt_change_node, *entry, *safe;
struct batadv_tt_common_entry *common = &tt_local_entry->common;
u8 flags = common->flags | event_flags;
- bool event_removed = false;
bool del_op_requested, del_op_entry;
+ size_t changes;
tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
if (!tt_change_node)
@@ -478,51 +438,45 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del_op_requested = flags & BATADV_TT_CLIENT_DEL;
- /* check for ADD+DEL or DEL+ADD events */
+ /* check for ADD+DEL, DEL+ADD, ADD+ADD or DEL+DEL events */
spin_lock_bh(&bat_priv->tt.changes_list_lock);
+ changes = READ_ONCE(bat_priv->tt.local_changes);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
if (!batadv_compare_eth(entry->change.addr, common->addr))
continue;
- /* DEL+ADD in the same orig interval have no effect and can be
- * removed to avoid silly behaviour on the receiver side. The
- * other way around (ADD+DEL) can happen in case of roaming of
- * a client still in the NEW state. Roaming of NEW clients is
- * now possible due to automatically recognition of "temporary"
- * clients
- */
del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
- if (!del_op_requested && del_op_entry)
- goto del;
- if (del_op_requested && !del_op_entry)
- goto del;
-
- /* this is a second add in the same originator interval. It
- * means that flags have been changed: update them!
- */
- if (!del_op_requested && !del_op_entry)
+ if (del_op_requested != del_op_entry) {
+ /* DEL+ADD in the same orig interval have no effect and
+ * can be removed to avoid silly behaviour on the
+ * receiver side. The other way around (ADD+DEL) can
+ * happen in case of roaming of a client still in the
+ * NEW state. Roaming of NEW clients is now possible due
+ * to automatic recognition of "temporary" clients
+ */
+ list_del(&entry->list);
+ kmem_cache_free(batadv_tt_change_cache, entry);
+ changes--;
+ } else {
+ /* this is a second add or del in the same originator
+ * interval. It could mean that flags have been changed
+ * (e.g. double add): update them
+ */
entry->change.flags = flags;
+ }
- continue;
-del:
- list_del(&entry->list);
- kmem_cache_free(batadv_tt_change_cache, entry);
kmem_cache_free(batadv_tt_change_cache, tt_change_node);
- event_removed = true;
- goto unlock;
+ goto update_changes;
}
/* track the change in the OGMinterval list */
list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
+ changes++;
-unlock:
+update_changes:
+ WRITE_ONCE(bat_priv->tt.local_changes, changes);
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
-
- if (event_removed)
- atomic_dec(&bat_priv->tt.local_changes);
- else
- atomic_inc(&bat_priv->tt.local_changes);
}
/**
@@ -856,8 +810,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
num_entries += atomic_read(&vlan->tt.num_entries);
}
- change_offset = sizeof(**tt_data);
- change_offset += num_vlan * sizeof(*tt_vlan);
+ change_offset = struct_size(*tt_data, vlan_data, num_vlan);
/* if tt_len is negative, allocate the space needed by the full table */
if (*tt_len < 0)
@@ -876,7 +829,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
(*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
(*tt_data)->num_vlan = htons(num_vlan);
- tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ tt_vlan = (*tt_data)->vlan_data;
hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
@@ -936,8 +889,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
total_entries += vlan_entries;
}
- change_offset = sizeof(**tt_data);
- change_offset += num_vlan * sizeof(*tt_vlan);
+ change_offset = struct_size(*tt_data, vlan_data, num_vlan);
/* if tt_len is negative, allocate the space needed by the full table */
if (*tt_len < 0)
@@ -956,7 +908,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
(*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
(*tt_data)->num_vlan = htons(num_vlan);
- tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ tt_vlan = (*tt_data)->vlan_data;
hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
@@ -990,16 +942,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
int tt_diff_len, tt_change_len = 0;
int tt_diff_entries_num = 0;
int tt_diff_entries_count = 0;
+ bool drop_changes = false;
+ size_t tt_extra_len = 0;
u16 tvlv_len;
- tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ tt_diff_entries_num = READ_ONCE(bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);
/* if we have too many changes for one packet don't send any
- * and wait for the tt table request which will be fragmented
+ * and wait for the tt table request so we can reply with the full
+ * (fragmented) table.
+ *
+ * The local change history should still be cleaned up so the next
+ * TT round can start again with a clean state.
*/
- if (tt_diff_len > bat_priv->soft_iface->mtu)
+ if (tt_diff_len > bat_priv->soft_iface->mtu) {
tt_diff_len = 0;
+ tt_diff_entries_num = 0;
+ drop_changes = true;
+ }
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
&tt_change, &tt_diff_len);
@@ -1008,11 +969,11 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_data->flags = BATADV_TT_OGM_DIFF;
- if (tt_diff_len == 0)
+ if (!drop_changes && tt_diff_len == 0)
goto container_register;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
- atomic_set(&bat_priv->tt.local_changes, 0);
+ WRITE_ONCE(bat_priv->tt.local_changes, 0);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
@@ -1027,6 +988,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+ tt_extra_len = batadv_tt_len(tt_diff_entries_num -
+ tt_diff_entries_count);
+
/* Keep the buffer for possible tt_request */
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
kfree(bat_priv->tt.last_changeset);
@@ -1035,6 +999,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_change_len = batadv_tt_len(tt_diff_entries_count);
/* check whether this new OGM has no changes due to size problems */
if (tt_diff_entries_count > 0) {
+ tt_diff_len -= tt_extra_len;
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
@@ -1047,6 +1012,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+ /* Remove extra packet space for OGM */
+ tvlv_len -= tt_extra_len;
container_register:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
tvlv_len);
@@ -1163,26 +1130,18 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
*/
int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
int ret;
- int ifindex;
int bucket = cb->args[0];
int idx = cb->args[1];
int portid = NETLINK_CB(cb->skb).portid;
- ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
@@ -1422,7 +1381,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
kmem_cache_free(batadv_tt_change_cache, entry);
}
- atomic_set(&bat_priv->tt.local_changes, 0);
+ WRITE_ONCE(bat_priv->tt.local_changes, 0);
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}
@@ -1938,28 +1897,20 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
*/
int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
struct hlist_head *head;
int ret;
- int ifindex;
int bucket = cb->args[0];
int idx = cb->args[1];
int sub = cb->args[2];
int portid = NETLINK_CB(cb->skb).portid;
- ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
- if (!ifindex)
- return -EINVAL;
-
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
+ soft_iface = batadv_netlink_get_softif(cb);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
@@ -2747,14 +2698,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
*
* Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
* is not provided then this becomes a no-op.
+ *
+ * Return: Remaining unused length in tvlv_buff.
*/
-static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
- struct batadv_hashtable *hash,
- void *tvlv_buff, u16 tt_len,
- bool (*valid_cb)(const void *,
- const void *,
- u8 *flags),
- void *cb_data)
+static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ void *tvlv_buff, u16 tt_len,
+ bool (*valid_cb)(const void *,
+ const void *,
+ u8 *flags),
+ void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
@@ -2768,7 +2721,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
tt_change = tvlv_buff;
if (!valid_cb)
- return;
+ return tt_len;
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
@@ -2794,6 +2747,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
}
}
rcu_read_unlock();
+
+ return batadv_tt_len(tt_tot - tt_num_entries);
}
/**
@@ -2916,7 +2871,6 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
{
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tt_req_node *tt_req_node = NULL;
- struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
struct batadv_hard_iface *primary_if;
bool ret = false;
int i, size;
@@ -2932,7 +2886,7 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
if (!tt_req_node)
goto out;
- size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+ size = struct_size(tvlv_tt_data, vlan_data, num_vlan);
tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
if (!tvlv_tt_data)
goto out;
@@ -2944,12 +2898,10 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
/* send all the CRCs within the request. This is needed by intermediate
* nodes to ensure they have the correct table before replying
*/
- tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
for (i = 0; i < num_vlan; i++) {
- tt_vlan_req->vid = tt_vlan->vid;
- tt_vlan_req->crc = tt_vlan->crc;
+ tvlv_tt_data->vlan_data[i].vid = tt_vlan->vid;
+ tvlv_tt_data->vlan_data[i].crc = tt_vlan->crc;
- tt_vlan_req++;
tt_vlan++;
}
@@ -3001,7 +2953,6 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
struct batadv_orig_node *res_dst_orig_node = NULL;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
- struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool ret = false, full_table;
u8 orig_ttvn, req_ttvn;
u16 tvlv_len;
@@ -3024,10 +2975,9 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
req_ttvn = tt_data->ttvn;
- tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
/* this node doesn't have the requested data */
if (orig_ttvn != req_ttvn ||
- !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+ !batadv_tt_global_check_crc(req_dst_orig_node, tt_data->vlan_data,
ntohs(tt_data->num_vlan)))
goto out;
@@ -3069,10 +3019,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
- tt_change, tt_len,
- batadv_tt_global_valid,
- req_dst_orig_node);
+ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
+ bat_priv->tt.global_hash,
+ tt_change, tt_len,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
}
/* Don't send the response, if larger than fragmented packet. */
@@ -3196,9 +3147,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
- tt_change, tt_len,
- batadv_tt_local_valid, NULL);
+ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
+ bat_priv->tt.local_hash,
+ tt_change, tt_len,
+ batadv_tt_local_valid,
+ NULL);
}
tvlv_tt_data->flags = BATADV_TT_RESPONSE;
@@ -3370,7 +3323,6 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node = NULL;
struct batadv_tvlv_tt_change *tt_change;
u8 *tvlv_ptr = (u8 *)tt_data;
- u16 change_offset;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
@@ -3383,10 +3335,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
spin_lock_bh(&orig_node->tt_lock);
- change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
- change_offset *= ntohs(tt_data->num_vlan);
- change_offset += sizeof(*tt_data);
- tvlv_ptr += change_offset;
+ tvlv_ptr += struct_size(tt_data, vlan_data, ntohs(tt_data->num_vlan));
tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
if (tt_data->flags & BATADV_TT_FULL_TABLE) {
@@ -3685,7 +3634,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
lockdep_assert_held(&bat_priv->tt.commit_lock);
- if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+ if (READ_ONCE(bat_priv->tt.local_changes) == 0) {
if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
batadv_tt_tvlv_container_update(bat_priv);
return;
@@ -3948,7 +3897,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
spin_lock_bh(&bat_priv->tt.commit_lock);
- while (true) {
+ while (timeout) {
table_size = batadv_tt_local_table_transmit_size(bat_priv);
if (packet_size_max >= table_size)
break;
@@ -3985,30 +3934,29 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
u8 flags, void *tvlv_value,
u16 tvlv_value_len)
{
- struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_tvlv_tt_data *tt_data;
u16 num_entries, num_vlan;
+ size_t tt_data_sz;
if (tvlv_value_len < sizeof(*tt_data))
return;
tt_data = tvlv_value;
- tvlv_value_len -= sizeof(*tt_data);
-
num_vlan = ntohs(tt_data->num_vlan);
- if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+ tt_data_sz = struct_size(tt_data, vlan_data, num_vlan);
+ if (tvlv_value_len < tt_data_sz)
return;
- tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
- tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
- tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+ tt_change = (struct batadv_tvlv_tt_change *)((void *)tt_data
+ + tt_data_sz);
+ tvlv_value_len -= tt_data_sz;
num_entries = batadv_tt_entries(tvlv_value_len);
- batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
- num_entries, tt_data->ttvn);
+ batadv_tt_update_orig(bat_priv, orig, tt_data->vlan_data, num_vlan,
+ tt_change, num_entries, tt_data->ttvn);
}
/**
@@ -4039,8 +3987,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
tt_data = tvlv_value;
tvlv_value_len -= sizeof(*tt_data);
- tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
- tt_vlan_len *= ntohs(tt_data->num_vlan);
+ tt_vlan_len = flex_array_size(tt_data, vlan_data,
+ ntohs(tt_data->num_vlan));
if (tvlv_value_len < tt_vlan_len)
return NET_RX_SUCCESS;
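
Several translation-table hunks replace the open-coded sizeof(*tt_data) + num_vlan * sizeof(*tt_vlan) arithmetic with struct_size() and flex_array_size() from <linux/overflow.h>, which compute the same sizes over the flexible vlan_data[] member but saturate instead of wrapping on overflow. A runnable userspace sketch of what that size covers (a simplified stand-in struct and plain sizeof arithmetic in place of the overflow-checked kernel macros):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a TVLV header with a flexible vlan_data[] array. */
struct tt_vlan_data {
	uint16_t vid;
	uint32_t crc;
};

struct tt_data {
	uint8_t flags;
	uint8_t ttvn;
	uint16_t num_vlan;
	struct tt_vlan_data vlan_data[];	/* flexible array member */
};

int main(void)
{
	size_t num_vlan = 3;

	/* Equivalent of struct_size(tt_data, vlan_data, num_vlan): the fixed
	 * header plus num_vlan trailing elements.  The kernel macro additionally
	 * saturates to SIZE_MAX if the multiplication or addition would overflow.
	 */
	size_t total = sizeof(struct tt_data) +
		       num_vlan * sizeof(struct tt_vlan_data);

	struct tt_data *tt = calloc(1, total);
	if (!tt)
		return 1;

	tt->num_vlan = (uint16_t)num_vlan;
	tt->vlan_data[2].vid = 42;	/* last element still inside the buffer */

	printf("allocated %zu bytes for %zu vlan entries\n", total, num_vlan);
	free(tt);
	return 0;
}
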
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 00840d5784fe..fe89f08533fe 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -287,7 +287,7 @@ struct batadv_frag_table_entry {
/** @lock: lock to protect the list of fragments */
spinlock_t lock;
- /** @timestamp: time (jiffie) of last received fragment */
+ /** @timestamp: time (jiffy) of last received fragment */
unsigned long timestamp;
/** @seqno: sequence number of the fragments in the list */
@@ -596,9 +596,6 @@ struct batadv_hardif_neigh_node_bat_v {
* neighbor
*/
unsigned long last_unicast_tx;
-
- /** @metric_work: work queue callback item for metric update */
- struct work_struct metric_work;
};
/**
@@ -1022,7 +1019,7 @@ struct batadv_priv_tt {
atomic_t ogm_append_cnt;
/** @local_changes: changes registered in an originator interval */
- atomic_t local_changes;
+ size_t local_changes;
/**
* @changes_list: tracks tt local changes within an originator interval
@@ -1044,7 +1041,7 @@ struct batadv_priv_tt {
*/
struct list_head roam_list;
- /** @changes_list_lock: lock protecting changes_list */
+ /** @changes_list_lock: lock protecting changes_list & local_changes */
spinlock_t changes_list_lock;
/** @req_list_lock: lock protecting req_list */
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 4eb1b3ced0d2..3c29778171c5 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -133,7 +133,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
struct in6_addr *daddr,
struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
int count = atomic_read(&dev->peer_count);
const struct in6_addr *nexthop;
struct lowpan_peer *peer;
@@ -572,7 +572,7 @@ static void netdev_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static struct device_type bt_type = {
+static const struct device_type bt_type = {
.name = "bluetooth",
};
@@ -825,11 +825,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb)
{
+ struct sk_buff *skb;
+
/* Note that we must allocate using GFP_ATOMIC here as
* this function is called originally from netdev hard xmit
* function in atomic context.
*/
- return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+ skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+ return skb;
}
static void chan_suspend_cb(struct l2cap_chan *chan)
@@ -892,7 +897,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
chan->ops = &bt_6lowpan_chan_ops;
err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
- addr, dst_type);
+ addr, dst_type, L2CAP_CONN_TIMEOUT);
BT_DBG("chan %p err %d", chan, err);
if (err < 0)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index da7cac0a1b71..6b2b65a66700 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -62,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
-config BT_HS
- bool "Bluetooth High Speed (HS) features"
- depends on BT_BREDR
- help
- Bluetooth High Speed includes support for off-loading
- Bluetooth connections via 802.11 (wifi) physical layer
- available with Bluetooth version 3.0 or later.
-
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 141ac1fda0bf..5a3835b7dfcd 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -14,14 +14,12 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
- ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
- eir.o hci_sync.o
+ ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o
bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_LE) += iso.o
-bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o
bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
deleted file mode 100644
index e7adb8a98cf9..000000000000
--- a/net/bluetooth/a2mp.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-#define A2MP_FEAT_EXT 0x8000
-
-/* Global AMP Manager list */
-static LIST_HEAD(amp_mgr_list);
-static DEFINE_MUTEX(amp_mgr_list_lock);
-
-/* A2MP build & send command helper functions */
-static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
-{
- struct a2mp_cmd *cmd;
- int plen;
-
- plen = sizeof(*cmd) + len;
- cmd = kzalloc(plen, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->code = code;
- cmd->ident = ident;
- cmd->len = cpu_to_le16(len);
-
- memcpy(cmd->data, data, len);
-
- return cmd;
-}
-
-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
-{
- struct l2cap_chan *chan = mgr->a2mp_chan;
- struct a2mp_cmd *cmd;
- u16 total_len = len + sizeof(*cmd);
- struct kvec iv;
- struct msghdr msg;
-
- cmd = __a2mp_build(code, ident, len, data);
- if (!cmd)
- return;
-
- iv.iov_base = cmd;
- iv.iov_len = total_len;
-
- memset(&msg, 0, sizeof(msg));
-
- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
-
- l2cap_chan_send(chan, &msg, total_len);
-
- kfree(cmd);
-}
-
-static u8 __next_ident(struct amp_mgr *mgr)
-{
- if (++mgr->ident == 0)
- mgr->ident = 1;
-
- return mgr->ident;
-}
-
-static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
- struct amp_mgr *mgr;
-
- mutex_lock(&amp_mgr_list_lock);
- list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (test_and_clear_bit(state, &mgr->state)) {
- amp_mgr_get(mgr);
- mutex_unlock(&amp_mgr_list_lock);
- return mgr;
- }
- }
- mutex_unlock(&amp_mgr_list_lock);
-
- return NULL;
-}
-
-/* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
-{
- struct hci_dev *hdev;
- int i = 1;
-
- cl[0].id = AMP_ID_BREDR;
- cl[0].type = AMP_TYPE_BREDR;
- cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP) {
- cl[i].id = hdev->id;
- cl[i].type = hdev->amp_type;
- if (test_bit(HCI_UP, &hdev->flags))
- cl[i].status = hdev->amp_status;
- else
- cl[i].status = AMP_STATUS_POWERED_DOWN;
- i++;
- }
- }
-}
-
-/* Processing A2MP messages */
-static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cmd_rej *rej = (void *) skb->data;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rej))
- return -EINVAL;
-
- BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
-
- skb_pull(skb, sizeof(*rej));
-
- return 0;
-}
-
-static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_req *req = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_discov_rsp *rsp;
- u16 ext_feat;
- u8 num_ctrl;
- struct hci_dev *hdev;
-
- if (len < sizeof(*req))
- return -EINVAL;
-
- skb_pull(skb, sizeof(*req));
-
- ext_feat = le16_to_cpu(req->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- read_lock(&hci_dev_list_lock);
-
- /* at minimum the BR/EDR needs to be listed */
- num_ctrl = 1;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP)
- num_ctrl++;
- }
-
- len = struct_size(rsp, cl, num_ctrl);
- rsp = kmalloc(len, GFP_ATOMIC);
- if (!rsp) {
- read_unlock(&hci_dev_list_lock);
- return -ENOMEM;
- }
-
- rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- rsp->ext_feat = 0;
-
- __a2mp_add_cl(mgr, rsp->cl);
-
- read_unlock(&hci_dev_list_lock);
-
- a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
-
- kfree(rsp);
- return 0;
-}
-
-static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_cl *cl;
- u16 ext_feat;
- bool found = false;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- len -= sizeof(*rsp);
- skb_pull(skb, sizeof(*rsp));
-
- ext_feat = le16_to_cpu(rsp->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- cl = (void *) skb->data;
- while (len >= sizeof(*cl)) {
- BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
- cl->status);
-
- if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
- struct a2mp_info_req req;
-
- found = true;
-
- memset(&req, 0, sizeof(req));
-
- req.id = cl->id;
- a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
- sizeof(req), &req);
- }
-
- len -= sizeof(*cl);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* Fall back to L2CAP init sequence */
- if (!found) {
- struct l2cap_conn *conn = mgr->l2cap_conn;
- struct l2cap_chan *chan;
-
- mutex_lock(&conn->chan_lock);
-
- list_for_each_entry(chan, &conn->chan_l, list) {
-
- BT_DBG("chan %p state %s", chan,
- state_to_string(chan->state));
-
- if (chan->scid == L2CAP_CID_A2MP)
- continue;
-
- l2cap_chan_lock(chan);
-
- if (chan->state == BT_CONNECT)
- l2cap_send_conn_req(chan);
-
- l2cap_chan_unlock(chan);
- }
-
- mutex_unlock(&conn->chan_lock);
- }
-
- return 0;
-}
-
-static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cl *cl = (void *) skb->data;
-
- while (skb->len >= sizeof(*cl)) {
- BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
- cl->status);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* TODO send A2MP_CHANGE_RSP */
-
- return 0;
-}
-
-static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct hci_request hreq;
- int err = 0;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->dev_type != HCI_AMP) {
- struct a2mp_info_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = req->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_req_init(&hreq, hdev);
- hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
- err = hci_req_run(&hreq, read_local_amp_info_complete);
- if (err < 0)
- a2mp_send_getinfo_rsp(hdev);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
- struct a2mp_amp_assoc_req req;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rsp))
- return -EINVAL;
-
- BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
-
- if (rsp->status)
- return -EINVAL;
-
- ctrl = amp_ctrl_add(mgr, rsp->id);
- if (!ctrl)
- return -ENOMEM;
-
- memset(&req, 0, sizeof(req));
-
- req.id = rsp->id;
- a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
- &req);
-
- skb_pull(skb, sizeof(*rsp));
- return 0;
-}
-
-static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct amp_mgr *tmp;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- /* Make sure that other request is not processed */
- tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
- struct a2mp_amp_assoc_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.id = req->id;
-
- if (tmp) {
- rsp.status = A2MP_STATUS_COLLISION_OCCURED;
- amp_mgr_put(tmp);
- } else {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- amp_read_loc_assoc(hdev, mgr);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct hci_dev *hdev;
- struct amp_ctrl *ctrl;
- struct hci_conn *hcon;
- size_t assoc_len;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- assoc_len = len - sizeof(*rsp);
-
- BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
- assoc_len);
-
- if (rsp->status)
- return -EINVAL;
-
- /* Save remote ASSOC data */
- ctrl = amp_ctrl_lookup(mgr, rsp->id);
- if (ctrl) {
- u8 *assoc;
-
- assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- /* Create Phys Link */
- hdev = hci_dev_get(rsp->id);
- if (!hdev)
- return -EINVAL;
-
- hcon = phylink_add(hdev, mgr, rsp->id, true);
- if (!hcon)
- goto done;
-
- BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
-
- mgr->bredr_chan->remote_amp_id = rsp->id;
-
- amp_create_phylink(hdev, mgr, hcon);
-
-done:
- hci_dev_put(hdev);
- skb_pull(skb, len);
- return 0;
-}
-
-static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
- if (!ctrl) {
- ctrl = amp_ctrl_add(mgr, rsp.remote_id);
- if (ctrl) {
- amp_ctrl_get(ctrl);
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- goto send_rsp;
- }
- }
-
- if (ctrl) {
- size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
- u8 *assoc;
-
- assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- hci_dev_put(hdev);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- hcon = phylink_add(hdev, mgr, req->local_id, false);
- if (hcon) {
- amp_accept_phylink(hdev, mgr, hcon);
- rsp.status = A2MP_STATUS_SUCCESS;
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- }
-
-send_rsp:
- if (hdev)
- hci_dev_put(hdev);
-
- /* Reply error now and success after HCI Write Remote AMP Assoc
- command complete with success status
- */
- if (rsp.status != A2MP_STATUS_SUCCESS) {
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
- sizeof(rsp), &rsp);
- } else {
- set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
- mgr->ident = hdr->ident;
- }
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
- rsp.status = A2MP_STATUS_SUCCESS;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (!hcon) {
- bt_dev_err(hdev, "no phys link exist");
- rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
- goto clean;
- }
-
- /* TODO Disconnect Phys Link here */
-
-clean:
- hci_dev_put(hdev);
-
-send_rsp:
- a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-/* Handle A2MP signalling */
-static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct a2mp_cmd *hdr;
- struct amp_mgr *mgr = chan->data;
- int err = 0;
-
- amp_mgr_get(mgr);
-
- while (skb->len >= sizeof(*hdr)) {
- u16 len;
-
- hdr = (void *) skb->data;
- len = le16_to_cpu(hdr->len);
-
- BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
-
- skb_pull(skb, sizeof(*hdr));
-
- if (len > skb->len || !hdr->ident) {
- err = -EINVAL;
- break;
- }
-
- mgr->ident = hdr->ident;
-
- switch (hdr->code) {
- case A2MP_COMMAND_REJ:
- a2mp_command_rej(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_REQ:
- err = a2mp_discover_req(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_NOTIFY:
- err = a2mp_change_notify(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_REQ:
- err = a2mp_getinfo_req(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_REQ:
- err = a2mp_getampassoc_req(mgr, skb, hdr);
- break;
-
- case A2MP_CREATEPHYSLINK_REQ:
- err = a2mp_createphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCONNPHYSLINK_REQ:
- err = a2mp_discphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_RSP:
- err = a2mp_discover_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_RSP:
- err = a2mp_getinfo_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_RSP:
- err = a2mp_getampassoc_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_RSP:
- case A2MP_CREATEPHYSLINK_RSP:
- case A2MP_DISCONNPHYSLINK_RSP:
- err = a2mp_cmd_rsp(mgr, skb, hdr);
- break;
-
- default:
- BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
- err = -EINVAL;
- break;
- }
- }
-
- if (err) {
- struct a2mp_cmd_rej rej;
-
- memset(&rej, 0, sizeof(rej));
-
- rej.reason = cpu_to_le16(0);
- hdr = (void *) skb->data;
-
- BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
-
- a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
- &rej);
- }
-
- /* Always free skb and return success error code to prevent
- from sending L2CAP Disconnect over A2MP channel */
- kfree_skb(skb);
-
- amp_mgr_put(mgr);
-
- return 0;
-}
-
-static void a2mp_chan_close_cb(struct l2cap_chan *chan)
-{
- l2cap_chan_put(chan);
-}
-
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
- int err)
-{
- struct amp_mgr *mgr = chan->data;
-
- if (!mgr)
- return;
-
- BT_DBG("chan %p state %s", chan, state_to_string(state));
-
- chan->state = state;
-
- switch (state) {
- case BT_CLOSED:
- if (mgr)
- amp_mgr_put(mgr);
- break;
- }
-}
-
-static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long hdr_len,
- unsigned long len, int nb)
-{
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- return skb;
-}
-
-static const struct l2cap_ops a2mp_chan_ops = {
- .name = "L2CAP A2MP channel",
- .recv = a2mp_chan_recv_cb,
- .close = a2mp_chan_close_cb,
- .state_change = a2mp_chan_state_change_cb,
- .alloc_skb = a2mp_chan_alloc_skb_cb,
-
- /* Not implemented for A2MP */
- .new_connection = l2cap_chan_no_new_connection,
- .teardown = l2cap_chan_no_teardown,
- .ready = l2cap_chan_no_ready,
- .defer = l2cap_chan_no_defer,
- .resume = l2cap_chan_no_resume,
- .set_shutdown = l2cap_chan_no_set_shutdown,
- .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
-};
-
-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
-{
- struct l2cap_chan *chan;
- int err;
-
- chan = l2cap_chan_create();
- if (!chan)
- return NULL;
-
- BT_DBG("chan %p", chan);
-
- chan->chan_type = L2CAP_CHAN_FIXED;
- chan->scid = L2CAP_CID_A2MP;
- chan->dcid = L2CAP_CID_A2MP;
- chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-
- chan->ops = &a2mp_chan_ops;
-
- l2cap_chan_set_defaults(chan);
- chan->remote_max_tx = chan->max_tx;
- chan->remote_tx_win = chan->tx_win;
-
- chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
-
- skb_queue_head_init(&chan->tx_q);
-
- chan->mode = L2CAP_MODE_ERTM;
-
- err = l2cap_ertm_init(chan);
- if (err < 0) {
- l2cap_chan_del(chan, 0);
- return NULL;
- }
-
- chan->conf_state = 0;
-
- if (locked)
- __l2cap_chan_add(conn, chan);
- else
- l2cap_chan_add(conn, chan);
-
- chan->remote_mps = chan->omtu;
- chan->mps = chan->omtu;
-
- chan->state = BT_CONNECTED;
-
- return chan;
-}
-
-/* AMP Manager functions */
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- kref_get(&mgr->kref);
-
- return mgr;
-}
-
-static void amp_mgr_destroy(struct kref *kref)
-{
- struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&amp_mgr_list_lock);
- list_del(&mgr->list);
- mutex_unlock(&amp_mgr_list_lock);
-
- amp_ctrl_list_flush(mgr);
- kfree(mgr);
-}
-
-int amp_mgr_put(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- return kref_put(&mgr->kref, &amp_mgr_destroy);
-}
-
-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
-{
- struct amp_mgr *mgr;
- struct l2cap_chan *chan;
-
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return NULL;
-
- BT_DBG("conn %p mgr %p", conn, mgr);
-
- mgr->l2cap_conn = conn;
-
- chan = a2mp_chan_open(conn, locked);
- if (!chan) {
- kfree(mgr);
- return NULL;
- }
-
- mgr->a2mp_chan = chan;
- chan->data = mgr;
-
- conn->hcon->amp_mgr = mgr;
-
- kref_init(&mgr->kref);
-
- /* Remote AMP ctrl list initialization */
- INIT_LIST_HEAD(&mgr->amp_ctrls);
- mutex_init(&mgr->amp_ctrls_lock);
-
- mutex_lock(&amp_mgr_list_lock);
- list_add(&mgr->list, &amp_mgr_list);
- mutex_unlock(&amp_mgr_list_lock);
-
- return mgr;
-}
-
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- struct amp_mgr *mgr;
-
- if (conn->hcon->type != ACL_LINK)
- return NULL;
-
- mgr = amp_mgr_create(conn, false);
- if (!mgr) {
- BT_ERR("Could not create AMP manager");
- return NULL;
- }
-
- BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
-
- return mgr->a2mp_chan;
-}
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
-{
- struct amp_mgr *mgr;
- struct a2mp_info_rsp rsp;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = hdev->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- if (hdev->amp_type != AMP_TYPE_BREDR) {
- rsp.status = 0;
- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
- }
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_amp_assoc_rsp *rsp;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
- rsp = kzalloc(len, GFP_KERNEL);
- if (!rsp) {
- amp_mgr_put(mgr);
- return;
- }
-
- rsp->id = hdev->id;
-
- if (status) {
- rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
- } else {
- rsp->status = A2MP_STATUS_SUCCESS;
- memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
- amp_mgr_put(mgr);
- kfree(rsp);
-}
-
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_physlink_req *req;
- struct l2cap_chan *bredr_chan;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
- if (!mgr)
- return;
-
- len = sizeof(*req) + loc_assoc->len;
-
- BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
-
- req = kzalloc(len, GFP_KERNEL);
- if (!req) {
- amp_mgr_put(mgr);
- return;
- }
-
- bredr_chan = mgr->bredr_chan;
- if (!bredr_chan)
- goto clean;
-
- req->local_id = hdev->id;
- req->remote_id = bredr_chan->remote_amp_id;
- memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
-
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
-
-clean:
- amp_mgr_put(mgr);
- kfree(req);
-}
-
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct a2mp_physlink_rsp rsp;
- struct hci_conn *hs_hcon;
-
- mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
- if (!mgr)
- return;
-
- memset(&rsp, 0, sizeof(rsp));
-
- hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
- if (!hs_hcon) {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- } else {
- rsp.remote_id = hs_hcon->remote_id;
- rsp.status = A2MP_STATUS_SUCCESS;
- }
-
- BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
- status);
-
- rsp.local_id = hdev->id;
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_discover_amp(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct a2mp_discov_req req;
-
- BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
-
- if (!mgr) {
- mgr = amp_mgr_create(conn, true);
- if (!mgr)
- return;
- }
-
- mgr->bredr_chan = chan;
-
- memset(&req, 0, sizeof(req));
-
- req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- req.ext_feat = 0;
- a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
-}
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
deleted file mode 100644
index 2fd253a61a2a..000000000000
--- a/net/bluetooth/a2mp.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __A2MP_H
-#define __A2MP_H
-
-#include <net/bluetooth/l2cap.h>
-
-enum amp_mgr_state {
- READ_LOC_AMP_INFO,
- READ_LOC_AMP_ASSOC,
- READ_LOC_AMP_ASSOC_FINAL,
- WRITE_REMOTE_AMP_ASSOC,
-};
-
-struct amp_mgr {
- struct list_head list;
- struct l2cap_conn *l2cap_conn;
- struct l2cap_chan *a2mp_chan;
- struct l2cap_chan *bredr_chan;
- struct kref kref;
- __u8 ident;
- __u8 handle;
- unsigned long state;
- unsigned long flags;
-
- struct list_head amp_ctrls;
- struct mutex amp_ctrls_lock;
-};
-
-struct a2mp_cmd {
- __u8 code;
- __u8 ident;
- __le16 len;
- __u8 data[];
-} __packed;
-
-/* A2MP command codes */
-#define A2MP_COMMAND_REJ 0x01
-struct a2mp_cmd_rej {
- __le16 reason;
- __u8 data[];
-} __packed;
-
-#define A2MP_DISCOVER_REQ 0x02
-struct a2mp_discov_req {
- __le16 mtu;
- __le16 ext_feat;
-} __packed;
-
-struct a2mp_cl {
- __u8 id;
- __u8 type;
- __u8 status;
-} __packed;
-
-#define A2MP_DISCOVER_RSP 0x03
-struct a2mp_discov_rsp {
- __le16 mtu;
- __le16 ext_feat;
- struct a2mp_cl cl[];
-} __packed;
-
-#define A2MP_CHANGE_NOTIFY 0x04
-#define A2MP_CHANGE_RSP 0x05
-
-#define A2MP_GETINFO_REQ 0x06
-struct a2mp_info_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETINFO_RSP 0x07
-struct a2mp_info_rsp {
- __u8 id;
- __u8 status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le16 pal_cap;
- __le16 assoc_size;
-} __packed;
-
-#define A2MP_GETAMPASSOC_REQ 0x08
-struct a2mp_amp_assoc_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETAMPASSOC_RSP 0x09
-struct a2mp_amp_assoc_rsp {
- __u8 id;
- __u8 status;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_REQ 0x0A
-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
-struct a2mp_physlink_req {
- __u8 local_id;
- __u8 remote_id;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_RSP 0x0B
-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
-struct a2mp_physlink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-/* A2MP response status */
-#define A2MP_STATUS_SUCCESS 0x00
-#define A2MP_STATUS_INVALID_CTRL_ID 0x01
-#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
-#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
-#define A2MP_STATUS_COLLISION_OCCURED 0x03
-#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
-#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
-#define A2MP_STATUS_SECURITY_VIOLATION 0x06
-
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-int amp_mgr_put(struct amp_mgr *mgr);
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb);
-void a2mp_discover_amp(struct l2cap_chan *chan);
-#else
-static inline int amp_mgr_put(struct amp_mgr *mgr)
-{
- return 0;
-}
-
-static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- return NULL;
-}
-
-static inline void a2mp_discover_amp(struct l2cap_chan *chan)
-{
-}
-#endif
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
-
-#endif /* __A2MP_H */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index b93464ac3517..0b4d0a8bd361 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -185,6 +185,28 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
}
EXPORT_SYMBOL(bt_sock_unlink);
+bool bt_sock_linked(struct bt_sock_list *l, struct sock *s)
+{
+ struct sock *sk;
+
+ if (!l || !s)
+ return false;
+
+ read_lock(&l->lock);
+
+ sk_for_each(sk, &l->head) {
+ if (s == sk) {
+ read_unlock(&l->lock);
+ return true;
+ }
+ }
+
+ read_unlock(&l->lock);
+
+ return false;
+}
+EXPORT_SYMBOL(bt_sock_linked);
+
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
const struct cred *old_cred;
@@ -309,14 +331,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- release_sock(sk);
return err;
}
@@ -346,8 +365,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
- release_sock(sk);
-
if (flags & MSG_TRUNC)
copied = skblen;
@@ -570,10 +587,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (sk->sk_state == BT_LISTEN)
return -EINVAL;
- lock_sock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
err = put_user(amount, (int __user *)arg);
break;
@@ -829,11 +847,14 @@ cleanup_sysfs:
bt_sysfs_cleanup();
cleanup_led:
bt_leds_cleanup();
+ debugfs_remove_recursive(bt_debugfs);
return err;
}
static void __exit bt_exit(void)
{
+ iso_exit();
+
mgmt_exit();
sco_exit();
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
deleted file mode 100644
index 5d698f19868c..000000000000
--- a/net/bluetooth/amp.c
+++ /dev/null
@@ -1,590 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci.h>
-#include <net/bluetooth/hci_core.h>
-#include <crypto/hash.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-/* Remote AMP Controllers interface */
-void amp_ctrl_get(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- kref_get(&ctrl->kref);
-}
-
-static void amp_ctrl_destroy(struct kref *kref)
-{
- struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
-
- BT_DBG("ctrl %p", ctrl);
-
- kfree(ctrl->assoc);
- kfree(ctrl);
-}
-
-int amp_ctrl_put(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- return kref_put(&ctrl->kref, &amp_ctrl_destroy);
-}
-
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return NULL;
-
- kref_init(&ctrl->kref);
- ctrl->id = id;
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_add(&ctrl->list, &mgr->amp_ctrls);
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- BT_DBG("mgr %p ctrl %p", mgr, ctrl);
-
- return ctrl;
-}
-
-void amp_ctrl_list_flush(struct amp_mgr *mgr)
-{
- struct amp_ctrl *ctrl, *n;
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
- list_del(&ctrl->list);
- amp_ctrl_put(ctrl);
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-}
-
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- BT_DBG("mgr %p id %u", mgr, id);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
- if (ctrl->id == id) {
- amp_ctrl_get(ctrl);
- mutex_unlock(&mgr->amp_ctrls_lock);
- return ctrl;
- }
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- return NULL;
-}
-
-/* Physical Link interface */
-static u8 __next_handle(struct amp_mgr *mgr)
-{
- if (++mgr->handle == 0)
- mgr->handle = 1;
-
- return mgr->handle;
-}
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out)
-{
- bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
- struct hci_conn *hcon;
- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
-
- hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
- if (!hcon)
- return NULL;
-
- BT_DBG("hcon %p dst %pMR", hcon, dst);
-
- hcon->state = BT_CONNECT;
- hcon->attempt++;
- hcon->remote_id = remote_id;
- hcon->amp_mgr = amp_mgr_get(mgr);
-
- return hcon;
-}
-
-/* AMP crypto key generation interface */
-static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
-{
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int ret;
-
- if (!ksize)
- return -EINVAL;
-
- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- if (IS_ERR(tfm)) {
- BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_setkey(tfm, key, ksize);
- if (ret) {
- BT_DBG("crypto_ahash_setkey failed: err %d", ret);
- goto failed;
- }
-
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto failed;
- }
-
- shash->tfm = tfm;
-
- ret = crypto_shash_digest(shash, plaintext, psize, output);
-
- kfree(shash);
-
-failed:
- crypto_free_shash(tfm);
- return ret;
-}
-
-int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
-{
- struct hci_dev *hdev = conn->hdev;
- struct link_key *key;
- u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
- u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
- int err;
-
- if (!hci_conn_check_link_mode(conn))
- return -EACCES;
-
- BT_DBG("conn %p key_type %d", conn, conn->key_type);
-
- /* Legacy key */
- if (conn->key_type < 3) {
- bt_dev_err(hdev, "legacy key type %u", conn->key_type);
- return -EACCES;
- }
-
- *type = conn->key_type;
- *len = HCI_AMP_LINK_KEY_SIZE;
-
- key = hci_find_link_key(hdev, &conn->dst);
- if (!key) {
- BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
- return -EACCES;
- }
-
- /* BR/EDR Link Key concatenated together with itself */
- memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
- memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
-
- /* Derive Generic AMP Link Key (gamp) */
- err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
- if (err) {
- bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err);
- return err;
- }
-
- if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
- BT_DBG("Use Generic AMP Key (gamp)");
- memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
- return err;
- }
-
- /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
- return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
-}
-
-static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto send_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-send_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
-}
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct hci_request req;
- int err;
-
- BT_DBG("%s handle %u", hdev->name, phy_handle);
-
- cp.phy_handle = phy_handle;
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
- cp.len_so_far = cpu_to_le16(loc_assoc->offset);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct hci_request req;
- int err;
-
- memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
- memset(&cp, 0, sizeof(cp));
-
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct hci_request req;
- int err;
-
- if (!mgr)
- return;
-
- cp.phy_handle = hcon->handle;
- cp.len_so_far = cpu_to_le16(0);
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
-
- /* Read Local AMP Assoc final link information data */
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
-/* Write AMP Assoc data fragments, returns true with last fragment written*/
-static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_write_remote_amp_assoc *cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct amp_ctrl *ctrl;
- struct hci_request req;
- u16 frag_len, len;
-
- ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
- if (!ctrl)
- return false;
-
- if (!ctrl->assoc_rem_len) {
- BT_DBG("all fragments are written");
- ctrl->assoc_rem_len = ctrl->assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- return true;
- }
-
- frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
- len = frag_len + sizeof(*cp);
-
- cp = kzalloc(len, GFP_KERNEL);
- if (!cp) {
- amp_ctrl_put(ctrl);
- return false;
- }
-
- BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
- hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
-
- cp->phy_handle = hcon->handle;
- cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
- cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
- memcpy(cp->frag, ctrl->assoc, frag_len);
-
- ctrl->assoc_len_so_far += frag_len;
- ctrl->assoc_rem_len -= frag_len;
-
- amp_ctrl_put(ctrl);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
- hci_req_run_skb(&req, write_remote_amp_assoc_complete);
-
- kfree(cp);
-
- return false;
-}
-
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- /* Send A2MP create phylink rsp when all fragments are written */
- if (amp_write_rem_assoc_frag(hdev, hcon))
- a2mp_send_create_phy_link_rsp(hdev, 0);
-}
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
-
- amp_write_rem_assoc_frag(hdev, hcon);
-}
-
-static void create_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_create_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, create_phylink_complete);
-}
-
-static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_accept_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, accept_phylink_complete);
-}
-
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
-{
- struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
- struct amp_mgr *mgr = hs_hcon->amp_mgr;
- struct l2cap_chan *bredr_chan;
-
- BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
-
- if (!bredr_hdev || !mgr || !mgr->bredr_chan)
- return;
-
- bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
- bredr_chan->remote_amp_id = hs_hcon->remote_id;
- bredr_chan->local_amp_id = hs_hcon->hdev->id;
- bredr_chan->hs_hcon = hs_hcon;
- bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
-
- __l2cap_physical_cfm(bredr_chan, 0);
-
- l2cap_chan_unlock(bredr_chan);
-
- hci_dev_put(bredr_hdev);
-}
-
-void amp_create_logical_link(struct l2cap_chan *chan)
-{
- struct hci_conn *hs_hcon = chan->hs_hcon;
- struct hci_cp_create_accept_logical_link cp;
- struct hci_dev *hdev;
-
- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
- &chan->conn->hcon->dst);
-
- if (!hs_hcon)
- return;
-
- hdev = hci_dev_hold(chan->hs_hcon->hdev);
- if (!hdev)
- return;
-
- cp.phy_handle = hs_hcon->handle;
-
- cp.tx_flow_spec.id = chan->local_id;
- cp.tx_flow_spec.stype = chan->local_stype;
- cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
- cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
- cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
-
- cp.rx_flow_spec.id = chan->remote_id;
- cp.rx_flow_spec.stype = chan->remote_stype;
- cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
- cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
- cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
- cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
-
- if (hs_hcon->out)
- hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
- &cp);
- else
- hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
- &cp);
-
- hci_dev_put(hdev);
-}
-
-void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
- struct hci_conn *hcon = hchan->conn;
- struct hci_cp_disconn_logical_link cp;
-
- if (hcon->state != BT_CONNECTED) {
- BT_DBG("hchan %p not connected", hchan);
- return;
- }
-
- cp.log_handle = cpu_to_le16(hchan->handle);
- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
-}
-
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
-{
- BT_DBG("hchan %p", hchan);
-
- hci_chan_del(hchan);
-}
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
deleted file mode 100644
index 97c87abd129f..000000000000
--- a/net/bluetooth/amp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __AMP_H
-#define __AMP_H
-
-struct amp_ctrl {
- struct list_head list;
- struct kref kref;
- __u8 id;
- __u16 assoc_len_so_far;
- __u16 assoc_rem_len;
- __u16 assoc_len;
- __u8 *assoc;
-};
-
-int amp_ctrl_put(struct amp_ctrl *ctrl);
-void amp_ctrl_get(struct amp_ctrl *ctrl);
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
-void amp_ctrl_list_flush(struct amp_mgr *mgr);
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out);
-
-int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon);
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-#else
-static inline void amp_create_logical_link(struct l2cap_chan *chan)
-{
-}
-
-static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
-}
-#endif
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
-
-#endif /* __AMP_H */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5a6a49885ab6..d44987d4515c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -29,7 +29,7 @@
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>
@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
case BNEP_COMPRESSED_DST_ONLY:
__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
- __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
+ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
+ put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
break;
case BNEP_GENERAL:
@@ -549,7 +550,7 @@ static struct device *bnep_get_device(struct bnep_session *session)
return &conn->hcon->dev;
}
-static struct device_type bnep_type = {
+static const struct device_type bnep_type = {
.name = "bluetooth",
};
@@ -744,8 +745,7 @@ static int __init bnep_init(void)
if (flt[0])
BT_INFO("BNEP filters: %s", flt);
- bnep_sock_init();
- return 0;
+ return bnep_sock_init();
}
static void __exit bnep_exit(void)
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index c8337786da6b..34e923466236 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config BT_CMTP
- tristate "CMTP protocol support"
- depends on BT_BREDR && ISDN_CAPI
+ tristate "CMTP protocol support (DEPRECATED)"
+ depends on BT_BREDR && ISDN_CAPI && DEPRECATED
help
CMTP (CAPI Message Transport Protocol) is a transport layer
for CAPI messages. CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index f3bedc3b613a..884703fda979 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -248,18 +248,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
break;
case CAPI_FUNCTION_GET_MANUFACTURER:
- if (skb->len < CAPI_MSG_BASELEN + 15)
- break;
-
- if (!info && ctrl) {
- int len = min_t(uint, CAPI_MANUFACTURER_LEN,
- skb->data[CAPI_MSG_BASELEN + 14]);
-
- memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN);
- strncpy(ctrl->manu,
- skb->data + CAPI_MSG_BASELEN + 15, len);
- }
-
+ if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 14)
+ strscpy_pad(ctrl->manu,
+ skb->data + CAPI_MSG_BASELEN + 15,
+ skb->data[CAPI_MSG_BASELEN + 14]);
break;
case CAPI_FUNCTION_GET_VERSION:
@@ -276,18 +268,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
break;
case CAPI_FUNCTION_GET_SERIAL_NUMBER:
- if (skb->len < CAPI_MSG_BASELEN + 17)
- break;
-
- if (!info && ctrl) {
- int len = min_t(uint, CAPI_SERIAL_LEN,
- skb->data[CAPI_MSG_BASELEN + 16]);
-
- memset(ctrl->serial, 0, CAPI_SERIAL_LEN);
- strncpy(ctrl->serial,
- skb->data + CAPI_MSG_BASELEN + 17, len);
- }
-
+ if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 16)
+ strscpy_pad(ctrl->serial,
+ skb->data + CAPI_MSG_BASELEN + 17,
+ skb->data[CAPI_MSG_BASELEN + 16]);
break;
}
diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c
index ec97a4bab1c9..c18df3a08607 100644
--- a/net/bluetooth/coredump.c
+++ b/net/bluetooth/coredump.c
@@ -5,7 +5,7 @@
#include <linux/devcoredump.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
index 9214189279e8..1bc51e2b05a3 100644
--- a/net/bluetooth/eir.c
+++ b/net/bluetooth/eir.c
@@ -13,48 +13,33 @@
#define PNP_INFO_SVCLASS_ID 0x1200
-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
-{
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- /* If data is already NULL terminated just pass it directly */
- if (data[data_len - 1] == '\0')
- return eir_append_data(eir, eir_len, type, data, data_len);
-
- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(eir, eir_len, type, name, sizeof(name));
-}
-
u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
size_t short_len;
size_t complete_len;
- /* no space left for name (+ NULL + type + len) */
- if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ /* no space left for name (+ type + len) */
+ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
return ad_len;
/* use complete name if present and fits */
complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len);
/* use short name if present */
short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
if (short_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->short_name,
- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
- short_len : short_len + 1);
+ short_len);
	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
*/
if (complete_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->dev_name,
HCI_MAX_SHORT_NAME_LENGTH);
diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h
index 0df19f2f4af9..5c89a05e8b29 100644
--- a/net/bluetooth/eir.h
+++ b/net/bluetooth/eir.h
@@ -5,7 +5,7 @@
* Copyright (C) 2021 Intel Corporation
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
void eir_create(struct hci_dev *hdev, u8 *data);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a41d2693f4d8..d097e308a755 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -34,9 +34,7 @@
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "smp.h"
-#include "a2mp.h"
#include "eir.h"
struct sco_param {
@@ -69,7 +67,7 @@ static const struct sco_param esco_param_msbc[] = {
};
/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
@@ -108,8 +106,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
* where a timeout + cancel does indicate an actual failure.
*/
if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
/* The connection attempt was doing scan for new RPA, and is
* in scan phase. If params are not associated with any other
@@ -179,64 +176,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_dev_put(hdev);
}
-static void hci_acl_create_connection(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct inquiry_entry *ie;
- struct hci_cp_create_conn cp;
-
- BT_DBG("hcon %p", conn);
-
- /* Many controllers disallow HCI Create Connection while it is doing
- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
- * Connection. This may cause the MGMT discovering state to become false
- * without user space's request but it is okay since the MGMT Discovery
- * APIs do not promise that discovery should be done forever. Instead,
- * the user space monitors the status of MGMT discovering and it may
- * request for discovery again when this flag becomes false.
- */
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- /* Put this connection to "pending" state so that it will be
- * executed after the inquiry cancel command complete event.
- */
- conn->state = BT_CONNECT2;
- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- return;
- }
-
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->role = HCI_ROLE_MASTER;
-
- conn->attempt++;
-
- conn->link_policy = hdev->link_policy;
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.bdaddr, &conn->dst);
- cp.pscan_rep_mode = 0x02;
-
- ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
- if (ie) {
- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
- cp.pscan_rep_mode = ie->data.pscan_rep_mode;
- cp.pscan_mode = ie->data.pscan_mode;
- cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
- }
-
- memcpy(conn->dev_class, ie->data.dev_class, 3);
- }
-
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
- cp.role_switch = 0x01;
- else
- cp.role_switch = 0x00;
-
- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
-}
-
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
BT_DBG("hcon %p", conn);
@@ -300,13 +239,13 @@ static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
__u8 vnd_len, *vnd_data = NULL;
struct hci_op_configure_data_path *cmd = NULL;
+	/* Do not treat the below 2 checks as errors since the 1st means the
+	 * user does not want to use HFP offload mode and the 2nd means the
+	 * vendor controller does not need to send the below HCI command.
+	 */
if (!codec->data_path || !hdev->get_codec_config_data)
return 0;
- /* Do not take me as error */
- if (!hdev->get_codec_config_data)
- return 0;
-
err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
&vnd_data);
if (err < 0)
@@ -350,6 +289,9 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
kfree(conn_handle);
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
bt_dev_dbg(hdev, "hcon %p", conn);
configure_datapath_sync(hdev, &conn->codec);
@@ -723,11 +665,6 @@ static void le_conn_timeout(struct work_struct *work)
hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
-struct iso_cig_params {
- struct hci_cp_le_set_cig_params cp;
- struct hci_cis_params cis[0x1f];
-};
-
struct iso_list_data {
union {
u8 cig;
@@ -843,7 +780,6 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
if (!d)
return -ENOMEM;
- memset(d, 0, sizeof(*d));
d->big = big;
d->sync_handle = conn->sync_handle;
@@ -963,16 +899,42 @@ static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
U16_MAX, GFP_ATOMIC);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role, u16 handle)
+static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
{
struct hci_conn *conn;
+ switch (type) {
+ case ACL_LINK:
+ if (!hdev->acl_mtu)
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ case ISO_LINK:
+ if (hdev->iso_mtu)
+ /* Dedicated ISO Buffer exists */
+ break;
+ fallthrough;
+ case LE_LINK:
+ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
+ return ERR_PTR(-ECONNREFUSED);
+ if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ if (!hdev->sco_pkts)
+ /* Controller does not support SCO or eSCO over HCI */
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ default:
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bacpy(&conn->dst, dst);
bacpy(&conn->src, &hdev->bdaddr);
@@ -990,6 +952,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
+ conn->sid = HCI_SID_INVALID;
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -1003,10 +966,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
switch (type) {
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
+ conn->mtu = hdev->acl_mtu;
break;
case LE_LINK:
/* conn->src should reflect the local identity address */
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+ conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
break;
case ISO_LINK:
/* conn->src should reflect the local identity address */
@@ -1018,6 +983,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
else if (conn->role == HCI_ROLE_MASTER)
conn->cleanup = cis_cleanup;
+ conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
+ hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
break;
case SCO_LINK:
if (lmp_esco_capable(hdev))
@@ -1025,9 +992,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
(hdev->esco_type & EDR_ESCO_MASK);
else
conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
+
+ conn->mtu = hdev->sco_mtu;
break;
case ESCO_LINK:
conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+ conn->mtu = hdev->sco_mtu;
break;
}
@@ -1070,9 +1040,18 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
handle = hci_conn_hash_alloc_unset(hdev);
if (unlikely(handle < 0))
- return NULL;
+ return ERR_PTR(-ECONNREFUSED);
+
+ return __hci_conn_add(hdev, type, dst, role, handle);
+}
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
+{
+ if (handle > HCI_CONN_HANDLE_MAX)
+ return ERR_PTR(-EINVAL);
- return hci_conn_add(hdev, type, dst, role, handle);
+ return __hci_conn_add(hdev, type, dst, role, handle);
}
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
@@ -1149,9 +1128,9 @@ void hci_conn_del(struct hci_conn *conn)
hci_conn_unlink(conn);
- cancel_delayed_work_sync(&conn->disc_work);
- cancel_delayed_work_sync(&conn->auto_accept_work);
- cancel_delayed_work_sync(&conn->idle_work);
+ disable_delayed_work_sync(&conn->disc_work);
+ disable_delayed_work_sync(&conn->auto_accept_work);
+ disable_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
/* Unacked frames */
@@ -1175,9 +1154,6 @@ void hci_conn_del(struct hci_conn *conn)
}
}
- if (conn->amp_mgr)
- amp_mgr_put(conn->amp_mgr);
-
skb_queue_purge(&conn->data_q);
/* Remove the connection from the list and cleanup its remaining
@@ -1186,6 +1162,9 @@ void hci_conn_del(struct hci_conn *conn)
* rest of hci_conn_del.
*/
hci_conn_cleanup(conn);
+
+ /* Dequeue callbacks using connection pointer as data */
+ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -1199,8 +1178,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
- hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
- d->dev_type != HCI_PRIMARY)
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
continue;
/* Simple routing:
@@ -1274,8 +1252,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
hci_le_conn_failed(conn, status);
break;
case ACL_LINK:
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
break;
}
@@ -1320,56 +1297,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
return 0;
}
-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return;
-
- bt_dev_dbg(hdev, "err %d", err);
-
- hci_dev_lock(hdev);
-
- if (!err) {
- hci_connect_le_scan_cleanup(conn, 0x00);
- goto done;
- }
-
- /* Check if connection is still pending */
- if (conn != hci_lookup_le_connect(hdev))
- goto done;
-
- /* Flush to make sure we send create conn cancel command if needed */
- flush_delayed_work(&conn->le_conn_timeout);
- hci_conn_failed(conn, bt_status(err));
-
-done:
- hci_dev_unlock(hdev);
-}
-
-static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
-
- bt_dev_dbg(hdev, "conn %p", conn);
-
- clear_bit(HCI_CONN_SCANNING, &conn->flags);
- conn->state = BT_CONNECT;
-
- return hci_le_create_conn_sync(hdev, conn);
-}
-
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role)
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct smp_irk *irk;
@@ -1423,8 +1353,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
bacpy(&conn->dst, dst);
} else {
conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(conn))
+ return conn;
hci_conn_hold(conn);
conn->pending_sec_level = sec_level;
}
@@ -1432,10 +1362,10 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
+ conn->le_adv_phy = phy;
+ conn->le_adv_sec_phy = sec_phy;
- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
- UINT_PTR(conn->handle),
- create_le_conn_complete);
+ err = hci_connect_le_sync(hdev, conn);
if (err) {
hci_conn_del(conn);
return ERR_PTR(err);
@@ -1600,8 +1530,8 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EADDRINUSE);
conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(conn))
+ return conn;
conn->state = BT_CONNECT;
@@ -1644,8 +1574,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
BT_DBG("requesting refresh of dst_addr");
conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(conn))
+ return conn;
if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
hci_conn_del(conn);
@@ -1669,7 +1599,7 @@ done:
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason)
+ enum conn_reasons conn_reason, u16 timeout)
{
struct hci_conn *acl;
@@ -1692,18 +1622,26 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
- if (!acl)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(acl))
+ return acl;
}
hci_conn_hold(acl);
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
+ int err;
+
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_create_connection(acl);
+ acl->conn_timeout = timeout;
+
+ err = hci_connect_acl_sync(hdev, acl);
+ if (err) {
+ hci_conn_del(acl);
+ return ERR_PTR(err);
+ }
}
return acl;
@@ -1738,23 +1676,24 @@ static struct hci_link *hci_conn_link(struct hci_conn *parent,
}
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec)
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout)
{
struct hci_conn *acl;
struct hci_conn *sco;
struct hci_link *link;
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
- CONN_REASON_SCO_CONNECT);
+ CONN_REASON_SCO_CONNECT, timeout);
if (IS_ERR(acl))
return acl;
sco = hci_conn_hash_lookup_ba(hdev, type, dst);
if (!sco) {
sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
- if (!sco) {
+ if (IS_ERR(sco)) {
hci_conn_drop(acl);
- return ERR_PTR(-ENOMEM);
+ return sco;
}
}
@@ -1819,34 +1758,33 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
+ DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
u8 cig_id = PTR_UINT(data);
struct hci_conn *conn;
struct bt_iso_qos *qos;
- struct iso_cig_params pdu;
+ u8 aux_num_cis = 0;
u8 cis_id;
conn = hci_conn_hash_lookup_cig(hdev, cig_id);
if (!conn)
return 0;
- memset(&pdu, 0, sizeof(pdu));
-
qos = &conn->iso_qos;
- pdu.cp.cig_id = cig_id;
- hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
- hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
- pdu.cp.sca = qos->ucast.sca;
- pdu.cp.packing = qos->ucast.packing;
- pdu.cp.framing = qos->ucast.framing;
- pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
- pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
+ pdu->cig_id = cig_id;
+ hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
+ hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
+ pdu->sca = qos->ucast.sca;
+ pdu->packing = qos->ucast.packing;
+ pdu->framing = qos->ucast.framing;
+ pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
+ pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
/* Reprogram all CIS(s) with the same CIG, valid range are:
* num_cis: 0x00 to 0x1F
* cis_id: 0x00 to 0xEF
*/
for (cis_id = 0x00; cis_id < 0xf0 &&
- pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
+ aux_num_cis < pdu->num_cis; cis_id++) {
struct hci_cis_params *cis;
conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
@@ -1855,7 +1793,7 @@ static int set_cig_params_sync(struct hci_dev *hdev, void *data)
qos = &conn->iso_qos;
- cis = &pdu.cis[pdu.cp.num_cis++];
+ cis = &pdu->cis[aux_num_cis++];
cis->cis_id = cis_id;
cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
@@ -1866,14 +1804,14 @@ static int set_cig_params_sync(struct hci_dev *hdev, void *data)
cis->c_rtn = qos->ucast.out.rtn;
cis->p_rtn = qos->ucast.in.rtn;
}
+ pdu->num_cis = aux_num_cis;
- if (!pdu.cp.num_cis)
+ if (!pdu->num_cis)
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
- sizeof(pdu.cp) +
- pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
- HCI_CMD_TIMEOUT);
+ struct_size(pdu, cis, pdu->num_cis),
+ pdu, HCI_CMD_TIMEOUT);
}
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
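The rewritten set_cig_params_sync() above builds the command in an on-stack structure with a flexible cis[] array and sizes the payload with struct_size() instead of a hand-written sizeof sum. A rough user-space sketch of that sizing idea, with invented structure names and a simplified stand-in for struct_size() (the kernel macro additionally guards against overflow):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct cis_params { uint8_t cis_id, c_sdu, p_sdu; };

    struct cig_cmd {
            uint8_t cig_id;
            uint8_t num_cis;
            struct cis_params cis[];            /* flexible array member */
    };

    /* Simplified stand-in for struct_size(): header plus n trailing elements. */
    #define STRUCT_SIZE(ptr, member, n) \
            (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

    int main(void)
    {
            uint8_t n = 3;
            struct cig_cmd *cmd = calloc(1, STRUCT_SIZE(cmd, cis, n));

            if (!cmd)
                    return 1;
            cmd->num_cis = n;
            printf("wire length = %zu bytes\n", STRUCT_SIZE(cmd, cis, cmd->num_cis));
            free(cmd);
            return 0;
    }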
@@ -1944,8 +1882,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
qos->ucast.cis);
if (!cis) {
cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!cis)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(cis))
+ return cis;
cis->cleanup = cis_cleanup;
cis->dst_type = dst_type;
cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
@@ -2080,14 +2018,8 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
struct bt_iso_io_qos *qos, __u8 phy)
{
/* Only set MTU if PHY is enabled */
- if (!qos->sdu && qos->phy) {
- if (hdev->iso_mtu > 0)
- qos->sdu = hdev->iso_mtu;
- else if (hdev->le_mtu > 0)
- qos->sdu = hdev->le_mtu;
- else
- qos->sdu = hdev->acl_mtu;
- }
+ if (!qos->sdu && qos->phy)
+ qos->sdu = conn->mtu;
/* Use the same PHY as ACL if set to any */
if (qos->phy == BT_ISO_PHY_ANY)
@@ -2131,89 +2063,217 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
- struct hci_cp_le_pa_create_sync *cp = data;
-
bt_dev_dbg(hdev, "");
if (err)
bt_dev_err(hdev, "Unable to create PA: %d", err);
+}
+
+static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
+{
+ if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
+ return false;
- kfree(cp);
+ return true;
}
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
- struct hci_cp_le_pa_create_sync *cp = data;
- int err;
+ struct hci_cp_le_pa_create_sync cp = {0};
+ struct hci_conn *conn;
+ int err = 0;
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
- sizeof(*cp), cp, HCI_CMD_TIMEOUT);
- if (err) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- return err;
+ hci_dev_lock(hdev);
+
+ rcu_read_lock();
+
+ /* The spec allows only one pending LE Periodic Advertising Create
+ * Sync command at a time. If the command is pending now, don't do
+ * anything. We check for pending connections after each PA Sync
+ * Established event.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2493:
+ *
+ * If the Host issues this command when another HCI_LE_Periodic_
+ * Advertising_Create_Sync command is pending, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
+ goto unlock;
+ }
+
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (hci_conn_check_create_pa_sync(conn)) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
+
+ cp.options = qos->bcast.options;
+ cp.sid = conn->sid;
+ cp.addr_type = conn->dst_type;
+ bacpy(&cp.addr, &conn->dst);
+ cp.skip = cpu_to_le16(qos->bcast.skip);
+ cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+ cp.sync_cte_type = qos->bcast.sync_cte_type;
+
+ break;
+ }
+ }
+
+unlock:
+ rcu_read_unlock();
+
+ hci_dev_unlock(hdev);
+
+ if (bacmp(&cp.addr, BDADDR_ANY)) {
+ hci_dev_set_flag(hdev, HCI_PA_SYNC);
+ set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (!err)
+ err = hci_update_passive_scan_sync(hdev);
+
+ if (err) {
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+ }
}
- return hci_update_passive_scan_sync(hdev);
+ return err;
}
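create_pa_sync() above enforces the spec's one-outstanding-command rule by scanning for an already pending request before arming the next candidate. A simplified sketch of that selection logic, without the RCU/locking and with invented field names:

    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
            bool pending;      /* stands in for HCI_CONN_CREATE_PA_SYNC */
            bool wants_sync;   /* has a valid SID, i.e. waiting for PA sync */
    };

    static struct conn *pick_next(struct conn *conns, int n)
    {
            int i;

            /* Only one PA Create Sync may be outstanding: bail if any is pending. */
            for (i = 0; i < n; i++)
                    if (conns[i].pending)
                            return NULL;

            /* Otherwise arm the first eligible candidate. */
            for (i = 0; i < n; i++)
                    if (conns[i].wants_sync) {
                            conns[i].pending = true;
                            return &conns[i];
                    }
            return NULL;
    }

    int main(void)
    {
            struct conn c[3] = { [1] = { .wants_sync = true } };
            struct conn *p = pick_next(c, 3);

            if (p)
                    printf("armed connection %td\n", p - c);   /* -> 1 */
            return 0;
    }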
-int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
- __u8 sid, struct bt_iso_qos *qos)
+int hci_pa_create_sync_pending(struct hci_dev *hdev)
{
- struct hci_cp_le_pa_create_sync *cp;
+ /* Queue start pa_create_sync and scan */
+ return hci_cmd_sync_queue(hdev, create_pa_sync,
+ NULL, create_pa_complete);
+}
- if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
- return -EBUSY;
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid,
+ struct bt_iso_qos *qos)
+{
+ struct hci_conn *conn;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- return -ENOMEM;
+ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+ if (IS_ERR(conn))
+ return conn;
+
+ conn->iso_qos = *qos;
+ conn->dst_type = dst_type;
+ conn->sid = sid;
+ conn->state = BT_LISTEN;
+
+ hci_conn_hold(conn);
+
+ hci_pa_create_sync_pending(hdev);
+
+ return conn;
+}
+
+static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
+{
+ if (!conn->num_bis)
+ return false;
+
+ return true;
+}
+
+static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
+{
+ bt_dev_dbg(hdev, "");
+
+ if (err)
+ bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
+}
+
+static int big_create_sync(struct hci_dev *hdev, void *data)
+{
+ DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
+ struct hci_conn *conn;
+
+ rcu_read_lock();
+
+ pdu->num_bis = 0;
+
+ /* The spec allows only one pending LE BIG Create Sync command at
+ * a time. If the command is pending now, don't do anything. We
+ * check for pending connections after each BIG Sync Established
+ * event.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2586:
+ *
+ * If the Host sends this command when the Controller is in the
+ * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
+ * Established event has not been generated, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
+ goto unlock;
}
- cp->options = qos->bcast.options;
- cp->sid = sid;
- cp->addr_type = dst_type;
- bacpy(&cp->addr, dst);
- cp->skip = cpu_to_le16(qos->bcast.skip);
- cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
- cp->sync_cte_type = qos->bcast.sync_cte_type;
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (hci_conn_check_create_big_sync(conn)) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
- /* Queue start pa_create_sync and scan */
- return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+
+ pdu->handle = qos->bcast.big;
+ pdu->sync_handle = cpu_to_le16(conn->sync_handle);
+ pdu->encryption = qos->bcast.encryption;
+ memcpy(pdu->bcode, qos->bcast.bcode,
+ sizeof(pdu->bcode));
+ pdu->mse = qos->bcast.mse;
+ pdu->timeout = cpu_to_le16(qos->bcast.timeout);
+ pdu->num_bis = conn->num_bis;
+ memcpy(pdu->bis, conn->bis, conn->num_bis);
+
+ break;
+ }
+ }
+
+unlock:
+ rcu_read_unlock();
+
+ if (!pdu->num_bis)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
+ struct_size(pdu, bis, pdu->num_bis), pdu);
+}
+
+int hci_le_big_create_sync_pending(struct hci_dev *hdev)
+{
+ /* Queue big_create_sync */
+ return hci_cmd_sync_queue_once(hdev, big_create_sync,
+ NULL, big_create_sync_complete);
}
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[])
{
- struct _packed {
- struct hci_cp_le_big_create_sync cp;
- __u8 bis[0x11];
- } pdu;
int err;
- if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
+ if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
return -EINVAL;
err = qos_set_big(hdev, qos);
if (err)
return err;
- if (hcon)
- hcon->iso_qos.bcast.big = qos->bcast.big;
+ if (hcon) {
+ /* Update hcon QoS */
+ hcon->iso_qos = *qos;
- memset(&pdu, 0, sizeof(pdu));
- pdu.cp.handle = qos->bcast.big;
- pdu.cp.sync_handle = cpu_to_le16(sync_handle);
- pdu.cp.encryption = qos->bcast.encryption;
- memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
- pdu.cp.mse = qos->bcast.mse;
- pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
- pdu.cp.num_bis = num_bis;
- memcpy(pdu.bis, bis, num_bis);
+ hcon->num_bis = num_bis;
+ memcpy(hcon->bis, bis, num_bis);
+ }
- return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
- sizeof(pdu.cp) + num_bis, &pdu);
+ return hci_le_big_create_sync_pending(hdev);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
@@ -2277,13 +2337,9 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
conn->iso_qos.bcast.big);
if (parent && parent != conn) {
link = hci_conn_link(parent, conn);
- if (!link) {
- hci_conn_drop(conn);
- return ERR_PTR(-ENOLINK);
- }
-
- /* Link takes the refcount */
hci_conn_drop(conn);
+ if (!link)
+ return ERR_PTR(-ENOLINK);
}
return conn;
@@ -2352,7 +2408,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
le = hci_connect_le(hdev, dst, dst_type, false,
BT_SECURITY_LOW,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, 0, 0);
else
le = hci_connect_le_scan(hdev, dst, dst_type,
BT_SECURITY_LOW,
@@ -2373,15 +2429,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
}
link = hci_conn_link(le, cis);
+ hci_conn_drop(cis);
if (!link) {
hci_conn_drop(le);
- hci_conn_drop(cis);
return ERR_PTR(-ENOLINK);
}
- /* Link takes the refcount */
- hci_conn_drop(cis);
-
cis->state = BT_CONNECT;
hci_le_create_cis_pending(hdev);
@@ -2647,22 +2700,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
}
}
-/* Check pending connect attempts */
-void hci_conn_check_pending(struct hci_dev *hdev)
-{
- struct hci_conn *conn;
-
- BT_DBG("hdev %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
- if (conn)
- hci_acl_create_connection(conn);
-
- hci_dev_unlock(hdev);
-}
-
static u32 get_link_mode(struct hci_conn *conn)
{
u32 link_mode = 0;
@@ -2978,12 +3015,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
+ struct hci_conn *conn = data;
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}
@@ -3011,14 +3046,21 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
*/
if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
switch (hci_skb_event(hdev->sent_cmd)) {
+ case HCI_EV_CONN_COMPLETE:
case HCI_EV_LE_CONN_COMPLETE:
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
case HCI_EVT_LE_CIS_ESTABLISHED:
- hci_cmd_sync_cancel(hdev, -ECANCELED);
+ hci_cmd_sync_cancel(hdev, ECANCELED);
break;
}
+ /* Cancel connect attempt if still queued/pending */
+ } else if (!hci_cancel_connect_sync(hdev, conn)) {
+ return 0;
}
- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
- NULL);
+ /* Run immediately if on cmd_sync_work since this may be called
+ * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
+ * queues its callback on cmd_sync_work.
+ */
+ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2821a42cefdc..012fc107901a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -33,14 +33,13 @@
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
@@ -63,50 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-static int hci_scan_req(struct hci_request *req, unsigned long opt)
-{
- __u8 scan = opt;
-
- BT_DBG("%s %x", req->hdev->name, scan);
-
- /* Inquiry and Page scans */
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- return 0;
-}
-
-static int hci_auth_req(struct hci_request *req, unsigned long opt)
-{
- __u8 auth = opt;
-
- BT_DBG("%s %x", req->hdev->name, auth);
-
- /* Authentication */
- hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
- return 0;
-}
-
-static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
-{
- __u8 encrypt = opt;
-
- BT_DBG("%s %x", req->hdev->name, encrypt);
-
- /* Encryption */
- hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
- return 0;
-}
-
-static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
-{
- __le16 policy = cpu_to_le16(opt);
-
- BT_DBG("%s %x", req->hdev->name, policy);
-
- /* Default link policy */
- hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
- return 0;
-}
-
/* Get HCI device by index.
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
@@ -149,8 +104,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
int old_state = hdev->discovery.state;
- BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
-
if (old_state == state)
return;
@@ -173,6 +126,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPING:
break;
}
+
+ bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
@@ -349,33 +304,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
return copied;
}
-static int hci_inq_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_inquiry cp;
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_INQUIRY, &hdev->flags))
- return 0;
-
- /* Start Inquiry */
- memcpy(&cp.lap, &ir->lap, 3);
- cp.length = ir->length;
- cp.num_rsp = ir->num_rsp;
- hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
- return 0;
-}
-
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0, max_rsp;
- long timeo;
__u8 *buf;
if (copy_from_user(&ir, ptr, sizeof(ir)))
@@ -395,11 +329,6 @@ int hci_inquiry(void __user *arg)
goto done;
}
- if (hdev->dev_type != HCI_PRIMARY) {
- err = -EOPNOTSUPP;
- goto done;
- }
-
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
@@ -419,11 +348,11 @@ int hci_inquiry(void __user *arg)
}
hci_dev_unlock(hdev);
- timeo = ir.length * msecs_to_jiffies(2000);
-
if (do_inquiry) {
- err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
- timeo, NULL);
+ hci_req_sync_lock(hdev);
+ err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
+ hci_req_sync_unlock(hdev);
+
if (err < 0)
goto done;
@@ -733,6 +662,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
+ __le16 policy;
int err = 0;
if (copy_from_user(&dr, arg, sizeof(dr)))
@@ -752,11 +682,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
goto done;
}
- if (hdev->dev_type != HCI_PRIMARY) {
- err = -EOPNOTSUPP;
- goto done;
- }
-
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
@@ -764,8 +689,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -776,19 +701,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_AUTH_ENABLE,
+ 1, &dr.dev_opt,
+ HCI_CMD_TIMEOUT);
if (err)
break;
}
- err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETSCAN:
- err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
/* Ensure that the connectable and discoverable states
* get correctly modified as this was a non-mgmt change.
@@ -798,8 +725,10 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
case HCISETLINKPOL:
- err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ policy = cpu_to_le16(dr.dev_opt);
+
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ 2, &policy, HCI_CMD_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -840,7 +769,7 @@ int hci_get_dev_list(void __user *arg)
struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- int n = 0, size, err;
+ int n = 0, err;
__u16 dev_num;
if (get_user(dev_num, (__u16 __user *) arg))
@@ -849,12 +778,11 @@ int hci_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
return -EINVAL;
- size = sizeof(*dl) + dev_num * sizeof(*dr);
-
- dl = kzalloc(size, GFP_KERNEL);
+ dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
+ dl->dev_num = dev_num;
dr = dl->dev_req;
read_lock(&hci_dev_list_lock);
@@ -868,8 +796,8 @@ int hci_get_dev_list(void __user *arg)
if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
- (dr + n)->dev_id = hdev->id;
- (dr + n)->dev_opt = flags;
+ dr[n].dev_id = hdev->id;
+ dr[n].dev_opt = flags;
if (++n >= dev_num)
break;
@@ -877,9 +805,7 @@ int hci_get_dev_list(void __user *arg)
read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
- size = sizeof(*dl) + n * sizeof(*dr);
-
- err = copy_to_user(arg, dl, size);
+ err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
kfree(dl);
return err ? -EFAULT : 0;
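hci_get_dev_list() above allocates room for the caller's maximum, fills in however many devices exist, and copies back only the populated portion. A small stand-alone sketch of that allocate-for-max, copy-what-was-filled pattern (structure layout invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct dev_req { uint16_t dev_id; uint32_t dev_opt; };

    struct dev_list {
            uint16_t dev_num;
            struct dev_req dev_req[];           /* flexible array member */
    };

    int main(void)
    {
            uint16_t want = 8, filled = 0;
            /* Allocate room for the caller's maximum... */
            struct dev_list *dl = calloc(1, sizeof(*dl) + want * sizeof(dl->dev_req[0]));

            if (!dl)
                    return 1;

            while (filled < 3) {                /* pretend only three devices exist */
                    dl->dev_req[filled].dev_id = filled;
                    dl->dev_req[filled].dev_opt = 0;
                    filled++;
            }
            dl->dev_num = filled;

            /* ...but copy back only the bytes that were actually populated. */
            printf("copy %zu of %zu allocated bytes\n",
                   sizeof(*dl) + filled * sizeof(dl->dev_req[0]),
                   sizeof(*dl) + want * sizeof(dl->dev_req[0]));
            free(dl);
            return 0;
    }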
@@ -908,9 +834,9 @@ int hci_get_dev_info(void __user *arg)
else
flags = hdev->flags;
- strcpy(di.name, hdev->name);
+ strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
- di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
+ di.type = (hdev->bus & 0x0f);
di.flags = flags;
di.pkt_type = hdev->pkt_type;
if (lmp_bredr_capable(hdev)) {
@@ -940,20 +866,51 @@ int hci_get_dev_info(void __user *arg)
/* ---- Interface to HCI drivers ---- */
+static int hci_dev_do_poweroff(struct hci_dev *hdev)
+{
+ int err;
+
+ BT_DBG("%s %p", hdev->name, hdev);
+
+ hci_req_sync_lock(hdev);
+
+ err = hci_set_powered_sync(hdev, false);
+
+ hci_req_sync_unlock(hdev);
+
+ return err;
+}
+
static int hci_rfkill_set_block(void *data, bool blocked)
{
struct hci_dev *hdev = data;
+ int err;
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
+ if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
+ return 0;
+
if (blocked) {
hci_dev_set_flag(hdev, HCI_RFKILLED);
+
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG))
- hci_dev_do_close(hdev);
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ err = hci_dev_do_poweroff(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
+ err);
+
+ /* Make sure the device is still closed even if
+ * anything during the power-off sequence (e.g.
+ * disconnecting devices) failed.
+ */
+ hci_dev_do_close(hdev);
+ }
+ }
} else {
hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
@@ -995,8 +952,7 @@ static void hci_power_on(struct work_struct *work)
*/
if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
- (hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev);
@@ -1491,17 +1447,18 @@ static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
cmd_timer.work);
- if (hdev->sent_cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(sent->opcode);
+ if (hdev->req_skb) {
+ u16 opcode = hci_skb_opcode(hdev->req_skb);
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
} else {
bt_dev_err(hdev, "command tx timeout");
}
- if (hdev->cmd_timeout)
- hdev->cmd_timeout(hdev);
+ if (hdev->reset)
+ hdev->reset(hdev);
atomic_set(&hdev->cmd_cnt, 1);
queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1687,12 +1644,12 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
struct adv_info *adv_instance, *n;
if (hdev->adv_instance_timeout) {
- cancel_delayed_work(&hdev->adv_instance_expire);
+ disable_delayed_work(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
- cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+ disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
list_del(&adv_instance->list);
kfree(adv_instance);
}
@@ -1737,6 +1694,15 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
adv->pending = true;
adv->instance = instance;
+
+ /* If the controller supports only one set and the instance is set to
+ * 1, then there is no option other than using handle 0x00.
+ */
+ if (hdev->le_num_of_adv_sets == 1 && instance == 1)
+ adv->handle = 0x00;
+ else
+ adv->handle = instance;
+
list_add(&adv->list, &hdev->adv_instances);
hdev->adv_instance_cnt++;
}
@@ -2216,26 +2182,6 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
return 0;
}
-int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
- u8 type)
-{
- struct bdaddr_list_with_flags *entry;
-
- if (!bacmp(bdaddr, BDADDR_ANY)) {
- hci_bdaddr_list_clear(list);
- return 0;
- }
-
- entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
- if (!entry)
- return -ENOENT;
-
- list_del(&entry->list);
- kfree(entry);
-
- return 0;
-}
-
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
@@ -2440,10 +2386,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
/* To avoid a potential race with hci_unregister_dev. */
hci_dev_hold(hdev);
- if (action == PM_SUSPEND_PREPARE)
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
ret = hci_suspend_dev(hdev);
- else if (action == PM_POST_SUSPEND)
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
ret = hci_resume_dev(hdev);
+ break;
+ }
if (ret)
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
@@ -2491,16 +2443,16 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
hdev->le_adv_channel_map = 0x07;
hdev->le_adv_min_interval = 0x0800;
hdev->le_adv_max_interval = 0x0800;
- hdev->le_scan_interval = 0x0060;
- hdev->le_scan_window = 0x0030;
- hdev->le_scan_int_suspend = 0x0400;
- hdev->le_scan_window_suspend = 0x0012;
+ hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
+ hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
+ hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
+ hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
- hdev->le_scan_int_adv_monitor = 0x0060;
- hdev->le_scan_window_adv_monitor = 0x0030;
- hdev->le_scan_int_connect = 0x0060;
- hdev->le_scan_window_connect = 0x0060;
+ hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
+ hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
+ hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
+ hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
hdev->le_conn_min_interval = 0x0018;
hdev->le_conn_max_interval = 0x0028;
hdev->le_conn_latency = 0x0000;
@@ -2517,7 +2469,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
- hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
+ hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
@@ -2578,7 +2530,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
hci_devcd_setup(hdev);
- hci_request_setup(hdev);
hci_init_sysfs(hdev);
discovery_init(hdev);
@@ -2603,20 +2554,7 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->send)
return -EINVAL;
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
- break;
- case HCI_AMP:
- id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
- break;
- default:
- return -EINVAL;
- }
-
+ id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -2668,12 +2606,10 @@ int hci_register_dev(struct hci_dev *hdev)
hci_dev_set_flag(hdev, HCI_SETUP);
hci_dev_set_flag(hdev, HCI_AUTO_OFF);
- if (hdev->dev_type == HCI_PRIMARY) {
- /* Assume BR/EDR support until proven otherwise (such as
- * through reading supported features during init.
- */
- hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
- }
+ /* Assume BR/EDR support until proven otherwise (such as
+ * through reading supported features during init.
+ */
+ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
@@ -2710,7 +2646,7 @@ err_wqueue:
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
err:
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
return error;
}
@@ -2729,14 +2665,16 @@ void hci_unregister_dev(struct hci_dev *hdev)
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
- cancel_work_sync(&hdev->power_on);
+ disable_work_sync(&hdev->rx_work);
+ disable_work_sync(&hdev->cmd_work);
+ disable_work_sync(&hdev->tx_work);
+ disable_work_sync(&hdev->power_on);
+ disable_work_sync(&hdev->error_reset);
hci_cmd_sync_clear(hdev);
hci_unregister_suspend_notifier(hdev);
- msft_unregister(hdev);
-
hci_dev_do_close(hdev);
if (!test_bit(HCI_INIT, &hdev->flags) &&
@@ -2790,11 +2728,13 @@ void hci_release_dev(struct hci_dev *hdev)
hci_discovery_filter_clear(hdev);
hci_blocked_keys_clear(hdev);
hci_codec_list_clear(&hdev->local_codecs);
+ msft_release(hdev);
hci_dev_unlock(hdev);
ida_destroy(&hdev->unset_handle_ida);
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
kfree_skb(hdev->sent_cmd);
+ kfree_skb(hdev->req_skb);
kfree_skb(hdev->recv_event);
kfree(hdev);
}
@@ -2826,6 +2766,29 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
return ret;
}
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+ disable_delayed_work_sync(&hdev->cmd_timer);
+ disable_delayed_work_sync(&hdev->ncmd_timer);
+ } else {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ }
+
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ hci_cmd_sync_cancel_sync(hdev, err);
+}
+
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
@@ -2843,7 +2806,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -2905,15 +2868,31 @@ int hci_reset_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_reset_dev);
+static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (hdev->classify_pkt_type)
+ return hdev->classify_pkt_type(hdev, skb);
+
+ return hci_skb_pkt_type(skb);
+}
+
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
+ u8 dev_pkt_type;
+
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
+ /* Check if the driver agrees with the packet type classification */
+ dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
+ if (hci_skb_pkt_type(skb) != dev_pkt_type) {
+ hci_skb_pkt_type(skb) = dev_pkt_type;
+ }
+
switch (hci_skb_pkt_type(skb)) {
case HCI_EVENT_PKT:
break;
@@ -3058,7 +3037,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command");
return -ENOMEM;
@@ -3093,7 +3072,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
return -EINVAL;
}
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
opcode);
@@ -3107,21 +3086,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
struct hci_command_hdr *hdr;
- if (!hdev->sent_cmd)
+ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
return NULL;
- hdr = (void *) hdev->sent_cmd->data;
+ hdr = (void *)skb->data;
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ return skb->data + HCI_COMMAND_HDR_SIZE;
+}
+
+/* Get data from the previously sent command */
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+{
+ void *data;
- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+ /* Check if opcode matches last sent command */
+ data = hci_cmd_data(hdev->sent_cmd, opcode);
+ if (!data)
+ /* Check if opcode matches last request */
+ data = hci_cmd_data(hdev->req_skb, opcode);
+
+ return data;
}
/* Get data from last received event */
@@ -3180,17 +3171,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- hci_add_acl_hdr(skb, conn->handle, flags);
- break;
- case HCI_AMP:
- hci_add_acl_hdr(skb, chan->handle, flags);
- break;
- default:
- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
- return;
- }
+ hci_add_acl_hdr(skb, conn->handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
@@ -3350,9 +3331,6 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
case ACL_LINK:
cnt = hdev->acl_cnt;
break;
- case AMP_LINK:
- cnt = hdev->block_cnt;
- break;
case SCO_LINK:
case ESCO_LINK:
cnt = hdev->sco_cnt;
@@ -3550,12 +3528,6 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
}
-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
-{
- /* Calculate count of blocks used by this packet */
- return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
-}
-
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
unsigned long last_tx;
@@ -3669,100 +3641,34 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static void hci_sched_acl_blk(struct hci_dev *hdev)
-{
- unsigned int cnt = hdev->block_cnt;
- struct hci_chan *chan;
- struct sk_buff *skb;
- int quote;
- u8 type;
-
- BT_DBG("%s", hdev->name);
-
- if (hdev->dev_type == HCI_AMP)
- type = AMP_LINK;
- else
- type = ACL_LINK;
-
- __check_timeout(hdev, cnt, type);
-
- while (hdev->block_cnt > 0 &&
- (chan = hci_chan_sent(hdev, type, &quote))) {
- u32 priority = (skb_peek(&chan->data_q))->priority;
- while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
- int blocks;
-
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
-
- /* Stop if priority has changed */
- if (skb->priority < priority)
- break;
-
- skb = skb_dequeue(&chan->data_q);
-
- blocks = __get_blocks(hdev, skb);
- if (blocks > hdev->block_cnt)
- return;
-
- hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
-
- hci_send_frame(hdev, skb);
- hdev->acl_last_tx = jiffies;
-
- hdev->block_cnt -= blocks;
- quote -= blocks;
-
- chan->sent += blocks;
- chan->conn->sent += blocks;
- }
- }
-
- if (cnt != hdev->block_cnt)
- hci_prio_recalculate(hdev, type);
-}
-
static void hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
/* No ACL link over BR/EDR controller */
- if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
+ if (!hci_conn_num(hdev, ACL_LINK))
return;
- /* No AMP link over AMP controller */
- if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
- return;
-
- switch (hdev->flow_ctl_mode) {
- case HCI_FLOW_CTL_MODE_PACKET_BASED:
- hci_sched_acl_pkt(hdev);
- break;
-
- case HCI_FLOW_CTL_MODE_BLOCK_BASED:
- hci_sched_acl_blk(hdev);
- break;
- }
+ hci_sched_acl_pkt(hdev);
}
static void hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt, tmp;
+ int quote, *cnt, tmp;
BT_DBG("%s", hdev->name);
if (!hci_conn_num(hdev, LE_LINK))
return;
- cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+ cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
- __check_timeout(hdev, cnt, LE_LINK);
+ __check_timeout(hdev, *cnt, LE_LINK);
- tmp = cnt;
- while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ tmp = *cnt;
+ while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
@@ -3777,7 +3683,7 @@ static void hci_sched_le(struct hci_dev *hdev)
hci_send_frame(hdev, skb);
hdev->le_last_tx = jiffies;
- cnt--;
+ (*cnt)--;
chan->sent++;
chan->conn->sent++;
@@ -3787,12 +3693,7 @@ static void hci_sched_le(struct hci_dev *hdev)
}
}
- if (hdev->le_pkts)
- hdev->le_cnt = cnt;
- else
- hdev->acl_cnt = cnt;
-
- if (cnt != tmp)
+ if (*cnt != tmp)
hci_prio_recalculate(hdev, LE_LINK);
}
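hci_sched_le() now keeps a pointer to whichever budget applies (dedicated LE credits or the shared ACL pool) and decrements it in place, rather than copying the value and writing it back afterwards. A trivial sketch of the pointer-selection idiom:

    #include <stdio.h>

    int main(void)
    {
            int le_cnt = 0, acl_cnt = 5;
            /* Pick the budget that applies; later updates hit the real counter. */
            int *cnt = le_cnt ? &le_cnt : &acl_cnt;

            while (*cnt > 0)
                    (*cnt)--;                   /* consume quota in place */

            printf("le=%d acl=%d\n", le_cnt, acl_cnt);   /* le=0 acl=0 */
            return 0;
    }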
@@ -3850,18 +3751,22 @@ static void hci_tx_work(struct work_struct *work)
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_acl_hdr *hdr = (void *) skb->data;
+ struct hci_acl_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
- skb_pull(skb, HCI_ACL_HDR_SIZE);
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr) {
+ bt_dev_err(hdev, "ACL packet too small");
+ goto drop;
+ }
handle = __le16_to_cpu(hdr->handle);
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
- handle, flags);
+ bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+ handle, flags);
hdev->stat.acl_rx++;
@@ -3880,24 +3785,29 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle);
}
+drop:
kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_sco_hdr *hdr = (void *) skb->data;
+ struct hci_sco_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
- skb_pull(skb, HCI_SCO_HDR_SIZE);
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr) {
+ bt_dev_err(hdev, "SCO packet too small");
+ goto drop;
+ }
handle = __le16_to_cpu(hdr->handle);
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
- handle, flags);
+ bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+ handle, flags);
hdev->stat.sco_rx++;
@@ -3915,6 +3825,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle);
}
+drop:
kfree_skb(skb);
}
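Both data-path handlers above now use skb_pull_data(), which returns NULL instead of pulling past the end of a short packet. A minimal sketch of the same bounds-checked header pull over a plain byte buffer (types and names invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct acl_hdr {
            uint16_t handle;
            uint16_t dlen;
    } __attribute__((packed));

    struct buf { uint8_t *data; size_t len; };

    /* Like skb_pull_data(): hand back the header and advance, or NULL if short. */
    static void *pull_data(struct buf *b, size_t n)
    {
            void *p = b->data;

            if (b->len < n)
                    return NULL;
            b->data += n;
            b->len -= n;
            return p;
    }

    int main(void)
    {
            uint8_t raw[3] = { 0x01, 0x00, 0x04 };          /* truncated: 3 < 4 */
            struct buf b = { .data = raw, .len = sizeof(raw) };
            struct acl_hdr *hdr = pull_data(&b, sizeof(*hdr));

            if (!hdr)
                    puts("ACL packet too small, dropping");     /* taken here */
            return 0;
    }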
@@ -4022,17 +3933,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
if (!status && !hci_req_is_complete(hdev))
return;
+ skb = hdev->req_skb;
+
/* If this was the last command in a request the complete
- * callback would be found in hdev->sent_cmd instead of the
+ * callback would be found in hdev->req_skb instead of the
* command queue (hdev->cmd_q).
*/
- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
return;
}
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ if (skb && bt_cb(skb)->hci.req_complete) {
+ *req_complete = bt_cb(skb)->hci.req_complete;
return;
}
@@ -4128,6 +4041,36 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "skb %p", skb);
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (!hdev->sent_cmd) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ return;
+ }
+
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, -err);
+ return;
+ }
+
+ if (hdev->req_status == HCI_REQ_PEND &&
+ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ }
+
+ atomic_dec(&hdev->cmd_cnt);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4142,30 +4085,15 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- kfree_skb(hdev->sent_cmd);
+ hci_send_cmd_sync(hdev, skb);
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
- if (hdev->sent_cmd) {
- int res;
- if (hci_req_status_pend(hdev))
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
- atomic_dec(&hdev->cmd_cnt);
-
- res = hci_send_frame(hdev, skb);
- if (res < 0)
- __hci_cmd_sync_cancel(hdev, -res);
-
- rcu_read_lock();
- if (test_bit(HCI_RESET, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- cancel_delayed_work(&hdev->cmd_timer);
- else
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
- rcu_read_unlock();
- } else {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
}
}
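The new hci_send_cmd_sync() clones the command before transmitting so a reference copy survives in hdev->sent_cmd, and requeues the original if the clone cannot be allocated. A toy sketch of that keep-a-copy-or-requeue idea (names invented, no real queueing):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg { size_t len; unsigned char data[32]; };

    static struct msg *last_sent;           /* analogous to hdev->sent_cmd */

    /* Keep a private copy of what goes on the wire; if that fails, tell the
     * caller so it can requeue the original message instead of losing it. */
    static int send_keeping_copy(const struct msg *m)
    {
            struct msg *copy = malloc(sizeof(*copy));

            if (!copy)
                    return -1;                  /* caller requeues m */
            memcpy(copy, m, sizeof(*copy));
            free(last_sent);
            last_sent = copy;
            /* ...hand m to the driver here... */
            return 0;
    }

    int main(void)
    {
            struct msg m = { .len = 3, .data = { 0x01, 0x02, 0x03 } };

            printf("%s\n", send_keeping_copy(&m) ? "requeue" : "sent");
            free(last_sent);
            return 0;
    }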
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 233453807b50..f625074d1f00 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -28,7 +28,6 @@
#include <net/bluetooth/hci_core.h>
#include "smp.h"
-#include "hci_request.h"
#include "hci_debugfs.h"
#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \
@@ -218,10 +217,12 @@ static int conn_info_min_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val > hdev->conn_info_max_age)
+ hci_dev_lock(hdev);
+ if (val == 0 || val > hdev->conn_info_max_age) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->conn_info_min_age = val;
hci_dev_unlock(hdev);
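The debugfs setters are reworked to take hci_dev_lock() before validating against the paired bound, so the range check and the store see a consistent min/max pair. A small pthread sketch of the same lock-then-validate pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int min_age = 1, max_age = 3;

    /* Validate against the paired bound only while holding the lock, so the
     * check and the store see a consistent min/max pair. */
    static int set_min_age(unsigned int val)
    {
            pthread_mutex_lock(&lock);
            if (val == 0 || val > max_age) {
                    pthread_mutex_unlock(&lock);
                    return -1;                  /* -EINVAL in the original */
            }
            min_age = val;
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", set_min_age(2), set_min_age(5));   /* 0 -1 */
            return 0;
    }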
@@ -246,10 +247,12 @@ static int conn_info_max_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val < hdev->conn_info_min_age)
+ hci_dev_lock(hdev);
+ if (val == 0 || val < hdev->conn_info_min_age) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->conn_info_max_age = val;
hci_dev_unlock(hdev);
@@ -567,10 +570,12 @@ static int sniff_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+ hci_dev_lock(hdev);
+ if (val == 0 || val % 2 || val > hdev->sniff_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->sniff_min_interval = val;
hci_dev_unlock(hdev);
@@ -595,10 +600,12 @@ static int sniff_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+ hci_dev_lock(hdev);
+ if (val == 0 || val % 2 || val < hdev->sniff_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->sniff_max_interval = val;
hci_dev_unlock(hdev);
@@ -850,10 +857,12 @@ static int conn_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_conn_min_interval = val;
hci_dev_unlock(hdev);
@@ -878,10 +887,12 @@ static int conn_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_conn_max_interval = val;
hci_dev_unlock(hdev);
@@ -990,10 +1001,12 @@ static int adv_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_adv_min_interval = val;
hci_dev_unlock(hdev);
@@ -1018,10 +1031,12 @@ static int adv_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_adv_max_interval = val;
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2a5f5a7d2412..903b0b52692a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -25,7 +25,7 @@
/* Bluetooth HCI event handling. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
@@ -33,11 +33,8 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
-#include "a2mp.h"
-#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"
@@ -45,8 +42,6 @@
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00"
-#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
-
/* Handle HCI Event packets */
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
@@ -95,11 +90,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
/* It is possible that we receive Inquiry Complete event right
* before we receive Inquiry Cancel Command Complete event, in
* which case the latter event should have status of Command
- * Disallowed (0x0c). This should not be treated as error, since
+ * Disallowed. This should not be treated as an error, since
* we actually achieve what Inquiry Cancel wants to achieve,
* which is to end the last Inquiry session.
*/
- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
rp->status = 0x00;
}
@@ -120,8 +115,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -152,8 +145,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -919,21 +910,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
return rp->status;
}
-static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_flow_control_mode *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->flow_ctl_mode = rp->mode;
-
- return rp->status;
-}
-
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -960,6 +936,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
+ if (!hdev->acl_mtu || !hdev->acl_pkts)
+ return HCI_ERROR_INVALID_PARAMETERS;
+
return rp->status;
}
@@ -1074,28 +1053,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
return rp->status;
}
-static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_data_block_size *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
- hdev->block_len = __le16_to_cpu(rp->block_len);
- hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
-
- hdev->block_cnt = hdev->num_blocks;
-
- BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
- hdev->block_cnt, hdev->block_len);
-
- return rp->status;
-}
-
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -1130,30 +1087,6 @@ unlock:
return rp->status;
}
-static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_info *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->amp_status = rp->amp_status;
- hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
- hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
- hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
- hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
- hdev->amp_type = rp->amp_type;
- hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
- hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
- hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
- hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
- return rp->status;
-}
-
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -1269,6 +1202,9 @@ static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
+ return HCI_ERROR_INVALID_PARAMETERS;
+
return rp->status;
}
@@ -1783,10 +1719,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
- if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+ if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
clear_pending_adv_report(hdev);
- if (hci_dev_test_flag(hdev, HCI_MESH))
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ }
break;
case LE_SCAN_DISABLE:
@@ -2314,10 +2250,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
bt_dev_dbg(hdev, "status 0x%2.2x", status);
- if (status) {
- hci_conn_check_pending(hdev);
+ if (status)
return;
- }
if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
set_bit(HCI_INQUIRY, &hdev->flags);
@@ -2342,19 +2276,16 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
- if (status != 0x0c || conn->attempt > 2) {
- conn->state = BT_CLOSED;
- hci_connect_cfm(conn, status);
- hci_conn_del(conn);
- } else
- conn->state = BT_CONNECT2;
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
}
} else {
if (!conn) {
conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
HCI_ROLE_MASTER);
- if (!conn)
- bt_dev_err(hdev, "no memory for new connection");
+ if (IS_ERR(conn))
+ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
}
}
@@ -2526,9 +2457,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
* Only those in BT_CONFIG or BT_CONNECTED states can be
* considered connected.
*/
- if (conn &&
- (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
mgmt_device_connected(hdev, conn, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
@@ -3039,8 +2968,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
@@ -3169,8 +3096,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
BDADDR_BREDR)) {
conn = hci_conn_add_unset(hdev, ev->link_type,
&ev->bdaddr, HCI_ROLE_SLAVE);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new conn");
+ if (IS_ERR(conn)) {
+ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
goto unlock;
}
} else {
@@ -3223,6 +3150,31 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
if (test_bit(HCI_ENCRYPT, &hdev->flags))
set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+ /* "Link key request" completed ahead of "connect request" completes */
+ if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
+ ev->link_type == ACL_LINK) {
+ struct link_key *key;
+ struct hci_cp_read_enc_key_size cp;
+
+ key = hci_find_link_key(hdev, &ev->bdaddr);
+ if (key) {
+ set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+
+ if (!read_key_size_capable(hdev)) {
+ conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ } else {
+ cp.handle = cpu_to_le16(conn->handle);
+ if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
+ sizeof(cp), &cp)) {
+ bt_dev_err(hdev, "sending read key size failed");
+ conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ }
+ }
+
+ hci_encrypt_cfm(conn, ev->status);
+ }
+ }
+
/* Get remote features */
if (conn->type == ACL_LINK) {
struct hci_cp_read_remote_features cp;
@@ -3262,8 +3214,6 @@ done:
unlock:
hci_dev_unlock(hdev);
-
- hci_conn_check_pending(hdev);
}
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -3335,8 +3285,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
if (!conn) {
conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
HCI_ROLE_SLAVE);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new connection");
+ if (IS_ERR(conn)) {
+ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
goto unlock;
}
}
@@ -3441,23 +3391,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
hci_update_scan(hdev);
}
- params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
- if (params) {
- switch (params->auto_connect) {
- case HCI_AUTO_CONN_LINK_LOSS:
- if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+ /* Re-enable passive scanning if disconnected device is marked
+ * as auto-connectable.
+ */
+ if (conn->type == LE_LINK) {
+ params = hci_conn_params_lookup(hdev, &conn->dst,
+ conn->dst_type);
+ if (params) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_LINK_LOSS:
+ if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+ break;
+ fallthrough;
+
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ hci_pend_le_list_del_init(params);
+ hci_pend_le_list_add(params,
+ &hdev->pend_le_conns);
+ hci_update_passive_scan(hdev);
break;
- fallthrough;
- case HCI_AUTO_CONN_DIRECT:
- case HCI_AUTO_CONN_ALWAYS:
- hci_pend_le_list_del_init(params);
- hci_pend_le_list_add(params, &hdev->pend_le_conns);
- hci_update_passive_scan(hdev);
- break;
-
- default:
- break;
+ default:
+ break;
+ }
}
}
@@ -3556,8 +3513,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -3660,7 +3615,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
* controller really supports it. If it doesn't, assume
* the default size (16).
*/
- if (!(hdev->commands[20] & 0x10)) {
+ if (!read_key_size_capable(hdev)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
}
@@ -3676,6 +3631,13 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
+ /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
+ * to avoid unexpected SMP command errors when pairing.
+ */
+ if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
+ &hdev->quirks))
+ goto notify;
+
/* Set the default Authenticated Payload Timeout after
* an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
* Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
@@ -3756,14 +3718,15 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
- if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
+ if (!ev->status) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3814,6 +3777,9 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
+ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
+ return HCI_ERROR_INVALID_PARAMETERS;
+
return rp->status;
}
@@ -3936,6 +3902,11 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
* last.
*/
hci_connect_cfm(conn, rp->status);
+
+ /* Notify device connected in case it is a BIG Sync */
+ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
+ mgmt_device_connected(hdev, conn, NULL, 0);
+
break;
}
@@ -4100,12 +4071,6 @@ static const struct hci_cc {
HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
sizeof(struct hci_rp_read_page_scan_type)),
HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
- HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
- sizeof(struct hci_rp_read_data_block_size)),
- HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
- sizeof(struct hci_rp_read_flow_control_mode)),
- HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
- sizeof(struct hci_rp_read_local_amp_info)),
HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
sizeof(struct hci_rp_read_clock)),
HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
@@ -4296,7 +4261,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
/* Remove connection if command failed */
- for (i = 0; cp->num_cis; cp->num_cis--, i++) {
+ for (i = 0; i < cp->num_cis; i++) {
struct hci_conn *conn;
u16 handle;
@@ -4312,6 +4277,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
hci_conn_del(conn);
}
}
+ cp->num_cis = 0;
if (pending)
hci_le_create_cis_pending(hdev);
@@ -4381,7 +4347,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
* (since for this kind of commands there will not be a command
* complete event).
*/
- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
+ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
req_complete_skb);
if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
@@ -4440,11 +4406,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
flex_array_size(ev, handles, ev->num)))
return;
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
- return;
- }
-
bt_dev_dbg(hdev, "num %d", ev->num);
for (i = 0; i < ev->num; i++) {
@@ -4512,78 +4473,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
- __u16 handle)
-{
- struct hci_chan *chan;
-
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- return hci_conn_hash_lookup_handle(hdev, handle);
- case HCI_AMP:
- chan = hci_chan_lookup_handle(hdev, handle);
- if (chan)
- return chan->conn;
- break;
- default:
- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
- break;
- }
-
- return NULL;
-}
-
-static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_num_comp_blocks *ev = data;
- int i;
-
- if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
- flex_array_size(ev, handles, ev->num_hndl)))
- return;
-
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d",
- hdev->flow_ctl_mode);
- return;
- }
-
- bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
- ev->num_hndl);
-
- for (i = 0; i < ev->num_hndl; i++) {
- struct hci_comp_blocks_info *info = &ev->handles[i];
- struct hci_conn *conn = NULL;
- __u16 handle, block_count;
-
- handle = __le16_to_cpu(info->handle);
- block_count = __le16_to_cpu(info->blocks);
-
- conn = __hci_conn_lookup_handle(hdev, handle);
- if (!conn)
- continue;
-
- conn->sent -= block_count;
-
- switch (conn->type) {
- case ACL_LINK:
- case AMP_LINK:
- hdev->block_cnt += block_count;
- if (hdev->block_cnt > hdev->num_blocks)
- hdev->block_cnt = hdev->num_blocks;
- break;
-
- default:
- bt_dev_err(hdev, "unknown type %d conn %p",
- conn->type, conn);
- break;
- }
- }
-
- queue_work(hdev->workqueue, &hdev->tx_work);
-}
-
static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -5010,8 +4899,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -5446,19 +5336,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
- /* If no side requires MITM protection; auto-accept */
+ /* If no side requires MITM protection, use JUST_CFM method */
if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
(!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
- /* If we're not the initiators request authorization to
- * proceed from user space (mgmt_user_confirm with
- * confirm_hint set to 1). The exception is if neither
- * side had MITM or if the local IO capability is
- * NoInputNoOutput, in which case we do auto-accept
+ /* If we're not the initiator of request authorization and the
+ * local IO capability is not NoInputNoOutput, use JUST_WORKS
+ * method (mgmt_user_confirm with confirm_hint set to 1).
*/
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
- conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
- (loc_mitm || rem_mitm)) {
+ conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
confirm_hint = 1;
goto confirm;
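Read together, the new condition means user-space confirmation (confirm_hint = 1) is only requested when the local side is the acceptor and has some IO capability; in every other no-MITM case the request is auto-accepted. The following is a standalone illustration of that decision with invented names, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Returns 1 when user space should confirm (confirm_hint = 1), 0 when the
 * request can be auto-accepted, and -1 when MITM protection is required
 * and the normal numeric comparison path applies.
 */
static int confirm_hint_for(bool loc_mitm, bool rem_mitm,
                            bool remote_no_io, bool local_no_io,
                            bool auth_pending)
{
        if ((!loc_mitm || remote_no_io) && (!rem_mitm || local_no_io)) {
                if (!auth_pending && !local_no_io)
                        return 1;   /* JUST_WORKS: ask user space */
                return 0;           /* JUST_CFM: auto-accept */
        }
        return -1;                  /* MITM required */
}

int main(void)
{
        /* Acceptor with usable IO, no MITM requirement on either side. */
        printf("%d\n", confirm_hint_for(false, false, false, false, false));
        return 0;
}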
@@ -5675,150 +5562,6 @@ unlock:
hci_dev_unlock(hdev);
}
-#if IS_ENABLED(CONFIG_BT_HS)
-static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_channel_selected *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
-static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_phy_link_complete *ev = data;
- struct hci_conn *hcon, *bredr_hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
- ev->status);
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- goto unlock;
-
- if (!hcon->amp_mgr)
- goto unlock;
-
- if (ev->status) {
- hci_conn_del(hcon);
- goto unlock;
- }
-
- bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
-
- hcon->state = BT_CONNECTED;
- bacpy(&hcon->dst, &bredr_hcon->dst);
-
- hci_conn_hold(hcon);
- hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_drop(hcon);
-
- hci_debugfs_create_conn(hcon);
- hci_conn_add_sysfs(hcon);
-
- amp_physical_cfm(bredr_hcon, hcon);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_logical_link_complete *ev = data;
- struct hci_conn *hcon;
- struct hci_chan *hchan;
- struct amp_mgr *mgr;
-
- bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- /* Create AMP hchan */
- hchan = hci_chan_create(hcon);
- if (!hchan)
- return;
-
- hchan->handle = le16_to_cpu(ev->handle);
- hchan->amp = true;
-
- BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
-
- mgr = hcon->amp_mgr;
- if (mgr && mgr->bredr_chan) {
- struct l2cap_chan *bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- bredr_chan->conn->mtu = hdev->block_mtu;
- l2cap_logical_cfm(bredr_chan, hchan, 0);
- hci_conn_hold(hcon);
-
- l2cap_chan_unlock(bredr_chan);
- }
-}
-
-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_logical_link_complete *ev = data;
- struct hci_chan *hchan;
-
- bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
- if (!hchan || !hchan->amp)
- goto unlock;
-
- amp_destroy_logical_link(hchan, ev->reason);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_phy_link_complete *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (hcon && hcon->type == AMP_LINK) {
- hcon->state = BT_CLOSED;
- hci_disconn_cfm(hcon, ev->reason);
- hci_conn_del(hcon);
- }
-
- hci_dev_unlock(hdev);
-}
-#endif
-
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *local_rpa)
{
@@ -5899,8 +5642,8 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
goto unlock;
conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new connection");
+ if (IS_ERR(conn)) {
+ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
goto unlock;
}
@@ -5984,8 +5727,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
goto unlock;
}
- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->state = BT_CONFIG;
@@ -6169,7 +5911,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type, bool addr_resolved,
- u8 adv_type)
+ u8 adv_type, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -6187,7 +5929,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
* while we have an existing one in peripheral role.
*/
if (hdev->conn_hash.le_num_peripheral > 0 &&
- (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
+ (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) ||
!(hdev->le_states[3] & 0x10)))
return NULL;
@@ -6224,7 +5966,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
- HCI_ROLE_MASTER);
+ HCI_ROLE_MASTER, phy, sec_phy);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -6259,8 +6001,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *direct_addr,
- u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
- bool ext_adv, bool ctl_time, u64 instant)
+ u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
+ u8 *data, u8 len, bool ext_adv, bool ctl_time,
+ u64 instant)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
@@ -6348,7 +6091,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* for advertising reports) and is already verified to be RPA above.
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
- type);
+ type, phy, sec_phy);
if (!ext_adv && conn && type == LE_ADV_IND &&
len <= max_adv_len(hdev)) {
/* Store report for later inclusion by
@@ -6494,7 +6237,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
if (info->length <= max_adv_len(hdev)) {
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
- info->bdaddr_type, NULL, 0, rssi,
+ info->bdaddr_type, NULL, 0,
+ HCI_ADV_PHY_1M, 0, rssi,
info->data, info->length, false,
false, instant);
} else {
@@ -6576,9 +6320,18 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
+
+ if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+ &hdev->quirks)) {
+ info->primary_phy &= 0x1f;
+ info->secondary_phy &= 0x1f;
+ }
+
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
+ info->primary_phy,
+ info->secondary_phy,
info->rssi, info->data, info->length,
!(evt_type & LE_EXT_ADV_LEGACY_PDU),
false, instant);
@@ -6604,7 +6357,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
- struct hci_conn *pa_sync;
+ struct hci_conn *pa_sync, *conn;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6612,6 +6365,20 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
+ ev->bdaddr_type);
+ if (!conn) {
+ bt_dev_err(hdev,
+ "Unable to find connection for dst %pMR sid 0x%2.2x",
+ &ev->bdaddr, ev->sid);
+ goto unlock;
+ }
+
+ clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+ conn->sync_handle = le16_to_cpu(ev->handle);
+ conn->sid = HCI_SID_INVALID;
+
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->handle);
@@ -6621,14 +6388,16 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
- if (ev->status) {
- /* Add connection to indicate the failed PA sync event */
- pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ /* Add connection to indicate PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
- if (!pa_sync)
- goto unlock;
+ if (IS_ERR(pa_sync))
+ goto unlock;
+ pa_sync->sync_handle = le16_to_cpu(ev->handle);
+
+ if (ev->status) {
set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
/* Notify iso layer */
@@ -6636,6 +6405,9 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
}
unlock:
+ /* Handle any other pending PA sync command */
+ hci_pa_create_sync_pending(hdev);
+
hci_dev_unlock(hdev);
}
@@ -6645,6 +6417,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_per_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
@@ -6652,8 +6425,28 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT))
- hci_le_pa_term_sync(hdev, ev->sync_handle);
+ goto unlock;
+
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+
+ pa_sync = hci_conn_hash_lookup_pa_sync_handle
+ (hdev,
+ le16_to_cpu(ev->sync_handle));
+
+ if (!pa_sync)
+ goto unlock;
+ if (ev->data_status == LE_PA_DATA_COMPLETE &&
+ !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, 0);
+
+ /* Notify MGMT layer */
+ mgmt_device_connected(hdev, pa_sync, NULL, 0);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -6684,7 +6477,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
* transition into connected state and mark it as
* successful.
*/
- if (!conn->out && ev->status == 0x1a &&
+ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
(hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
@@ -6861,8 +6654,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, &info->direct_addr,
- info->direct_addr_type, info->rssi, NULL, 0,
- false, false, instant);
+ info->direct_addr_type, HCI_ADV_PHY_1M, 0,
+ info->rssi, NULL, 0, false, false, instant);
}
hci_dev_unlock(hdev);
@@ -6900,6 +6693,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
struct bt_iso_qos *qos;
bool pending = false;
u16 handle = __le16_to_cpu(ev->handle);
+ u32 c_sdu_interval, p_sdu_interval;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6924,12 +6718,25 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
- /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
- qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
- qos->ucast.out.interval = qos->ucast.in.interval;
+ /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
+ * page 3075:
+ * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
+ * ISO_Interval + SDU_Interval_C_To_P
+ * ...
+ * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
+ * Transport_Latency
+ */
+ c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
+ (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
+ get_unaligned_le24(ev->c_latency);
+ p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
+ (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
+ get_unaligned_le24(ev->p_latency);
switch (conn->role) {
case HCI_ROLE_SLAVE:
+ qos->ucast.in.interval = c_sdu_interval;
+ qos->ucast.out.interval = p_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
@@ -6943,6 +6750,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
qos->ucast.out.phy = ev->p_phy;
break;
case HCI_ROLE_MASTER:
+ qos->ucast.in.interval = p_sdu_interval;
+ qos->ucast.out.interval = c_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
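To make the arithmetic in the spec comment concrete, here is a standalone example with made-up numbers (illustration only, not kernel code): a 5 ms CIG sync delay, a 10 ms ISO interval (8 slots of 1.25 ms), a flush timeout of 2 and a 10 ms transport latency work out to a 15 ms SDU interval.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative values; names mirror the event fields used above. */
        uint32_t cig_sync_delay = 5000;   /* us */
        uint32_t c_latency      = 10000;  /* Transport_Latency_C_To_P, us */
        uint16_t interval       = 8;      /* ISO_Interval in 1.25 ms slots */
        uint8_t  c_ft           = 2;      /* flush timeout C->P */

        /* SDU_Interval = (CIG_Sync_Delay + FT * ISO_Interval) - Transport_Latency */
        uint32_t c_sdu_interval = (cig_sync_delay + c_ft * interval * 1250) -
                                  c_latency;

        printf("c_sdu_interval = %u us\n", c_sdu_interval);   /* 15000 */
        return 0;
}

The peripheral-to-central direction is computed the same way from p_ft and p_latency.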
@@ -7026,7 +6835,7 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
if (!cis) {
cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
cis_handle);
- if (!cis) {
+ if (IS_ERR(cis)) {
hci_le_reject_cis(hdev, ev->cis_handle);
goto unlock;
}
@@ -7068,38 +6877,27 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
return;
hci_dev_lock(hdev);
- rcu_read_lock();
/* Connect all BISes that are bound to the BIG */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (bacmp(&conn->dst, BDADDR_ANY) ||
- conn->type != ISO_LINK ||
- conn->iso_qos.bcast.big != ev->handle)
+ while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
+ BT_BOUND))) {
+ if (ev->status) {
+ hci_connect_cfm(conn, ev->status);
+ hci_conn_del(conn);
continue;
+ }
if (hci_conn_set_handle(conn,
__le16_to_cpu(ev->bis_handle[i++])))
continue;
- if (!ev->status) {
- conn->state = BT_CONNECTED;
- set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
- rcu_read_unlock();
- hci_debugfs_create_conn(conn);
- hci_conn_add_sysfs(conn);
- hci_iso_setup_path(conn);
- rcu_read_lock();
- continue;
- }
-
- hci_connect_cfm(conn, ev->status);
- rcu_read_unlock();
- hci_conn_del(conn);
- rcu_read_lock();
+ conn->state = BT_CONNECTED;
+ set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
+ hci_debugfs_create_conn(conn);
+ hci_conn_add_sysfs(conn);
+ hci_iso_setup_path(conn);
}
- rcu_read_unlock();
-
if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
@@ -7116,7 +6914,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_big_sync_estabilished *ev = data;
- struct hci_conn *bis;
+ struct hci_conn *bis, *conn;
int i;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -7127,15 +6925,33 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
+ ev->num_bis);
+ if (!conn) {
+ bt_dev_err(hdev,
+ "Unable to find connection for big 0x%2.2x",
+ ev->handle);
+ goto unlock;
+ }
+
+ clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+
+ conn->num_bis = 0;
+ memset(conn->bis, 0, sizeof(conn->num_bis));
+
for (i = 0; i < ev->num_bis; i++) {
u16 handle = le16_to_cpu(ev->bis[i]);
__le32 interval;
bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis) {
+ if (handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_dbg(hdev, "ignore too large handle %u", handle);
+ continue;
+ }
bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE, handle);
- if (!bis)
+ if (IS_ERR(bis))
continue;
}
@@ -7143,6 +6959,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
/* Mark PA sync as established */
set_bit(HCI_CONN_PA_SYNC, &bis->flags);
+ bis->sync_handle = conn->sync_handle;
bis->iso_qos.bcast.big = ev->handle;
memset(&interval, 0, sizeof(interval));
memcpy(&interval, ev->latency, sizeof(ev->latency));
@@ -7165,11 +6982,17 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
u16 handle = le16_to_cpu(ev->bis[i]);
bis = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!bis)
+ continue;
set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
hci_connect_cfm(bis, ev->status);
}
+unlock:
+ /* Handle any other pending BIG sync command */
+ hci_le_big_create_sync_pending(hdev);
+
hci_dev_unlock(hdev);
}
@@ -7186,10 +7009,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
- if (!(mask & HCI_LM_ACCEPT)) {
- hci_le_pa_term_sync(hdev, ev->sync_handle);
+ if (!(mask & HCI_LM_ACCEPT))
goto unlock;
- }
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
@@ -7198,21 +7019,13 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
(hdev,
le16_to_cpu(ev->sync_handle));
- if (pa_sync)
- goto unlock;
-
- /* Add connection to indicate the PA sync event */
- pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
-
if (!pa_sync)
goto unlock;
- pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
- set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
+ pa_sync->iso_qos.bcast.encryption = ev->encryption;
/* Notify iso layer */
- hci_connect_cfm(pa_sync, 0x00);
+ hci_connect_cfm(pa_sync, 0);
unlock:
hci_dev_unlock(hdev);
@@ -7324,10 +7137,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
/* Only match event if command OGF is for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
- *opcode = hci_skb_opcode(hdev->sent_cmd);
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
+ hci_skb_event(hdev->req_skb) == ev->subevent) {
+ *opcode = hci_skb_opcode(hdev->req_skb);
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
req_complete_skb);
}
@@ -7626,28 +7439,6 @@ static const struct hci_ev {
/* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
-#if IS_ENABLED(CONFIG_BT_HS)
- /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
- sizeof(struct hci_ev_phy_link_complete)),
- /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
- HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
- sizeof(struct hci_ev_channel_selected)),
- /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
- hci_disconn_loglink_complete_evt,
- sizeof(struct hci_ev_disconn_logical_link_complete)),
- /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
- sizeof(struct hci_ev_logical_link_complete)),
- /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
- hci_disconn_phylink_complete_evt,
- sizeof(struct hci_ev_disconn_phy_link_complete)),
-#endif
- /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
- HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
- sizeof(struct hci_ev_num_comp_blocks)),
/* [0xff = HCI_EV_VENDOR] */
HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
@@ -7714,10 +7505,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Only match event if command OGF is not for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
- hci_skb_event(hdev->sent_cmd) == event) {
- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
+ hci_skb_event(hdev->req_skb) == event) {
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
status, &req_complete, &req_complete_skb);
req_evt = event;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
deleted file mode 100644
index 6e023b0104b0..000000000000
--- a/net/bluetooth/hci_request.c
+++ /dev/null
@@ -1,901 +0,0 @@
-/*
- BlueZ - Bluetooth protocol stack for Linux
-
- Copyright (C) 2014 Intel Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation;
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
- SOFTWARE IS DISCLAIMED.
-*/
-
-#include <linux/sched/signal.h>
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/mgmt.h>
-
-#include "smp.h"
-#include "hci_request.h"
-#include "msft.h"
-#include "eir.h"
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
-{
- skb_queue_head_init(&req->cmd_q);
- req->hdev = hdev;
- req->err = 0;
-}
-
-void hci_req_purge(struct hci_request *req)
-{
- skb_queue_purge(&req->cmd_q);
-}
-
-bool hci_req_status_pend(struct hci_dev *hdev)
-{
- return hdev->req_status == HCI_REQ_PEND;
-}
-
-static int req_run(struct hci_request *req, hci_req_complete_t complete,
- hci_req_complete_skb_t complete_skb)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
- unsigned long flags;
-
- bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
-
- /* If an error occurred during request building, remove all HCI
- * commands queued on the HCI request queue.
- */
- if (req->err) {
- skb_queue_purge(&req->cmd_q);
- return req->err;
- }
-
- /* Do not allow empty requests */
- if (skb_queue_empty(&req->cmd_q))
- return -ENODATA;
-
- skb = skb_peek_tail(&req->cmd_q);
- if (complete) {
- bt_cb(skb)->hci.req_complete = complete;
- } else if (complete_skb) {
- bt_cb(skb)->hci.req_complete_skb = complete_skb;
- bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
- }
-
- spin_lock_irqsave(&hdev->cmd_q.lock, flags);
- skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
- spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
- queue_work(hdev->workqueue, &hdev->cmd_work);
-
- return 0;
-}
-
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
-{
- return req_run(req, complete, NULL);
-}
-
-int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
-{
- return req_run(req, NULL, complete);
-}
-
-void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb)
-{
- bt_dev_dbg(hdev, "result 0x%2.2x", result);
-
- if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = result;
- hdev->req_status = HCI_REQ_DONE;
- if (skb)
- hdev->req_skb = skb_get(skb);
- wake_up_interruptible(&hdev->req_wait_q);
- }
-}
-
-/* Execute request and wait for completion. */
-int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status)
-{
- struct hci_request req;
- int err = 0;
-
- bt_dev_dbg(hdev, "start");
-
- hci_req_init(&req, hdev);
-
- hdev->req_status = HCI_REQ_PEND;
-
- err = func(&req, opt);
- if (err) {
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- return err;
- }
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0) {
- hdev->req_status = 0;
-
- /* ENODATA means the HCI request command queue is empty.
- * This can happen when a request with conditionals doesn't
- * trigger any commands to be sent. This is normal behavior
- * and should not trigger an error return.
- */
- if (err == -ENODATA) {
- if (hci_status)
- *hci_status = 0;
- return 0;
- }
-
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
-
- return err;
- }
-
- err = wait_event_interruptible_timeout(hdev->req_wait_q,
- hdev->req_status != HCI_REQ_PEND, timeout);
-
- if (err == -ERESTARTSYS)
- return -EINTR;
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- if (hci_status)
- *hci_status = hdev->req_result;
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- break;
-
- default:
- err = -ETIMEDOUT;
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- break;
- }
-
- kfree_skb(hdev->req_skb);
- hdev->req_skb = NULL;
- hdev->req_status = hdev->req_result = 0;
-
- bt_dev_dbg(hdev, "end: err %d", err);
-
- return err;
-}
-
-int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status)
-{
- int ret;
-
- /* Serialize all requests */
- hci_req_sync_lock(hdev);
- /* check the state after obtaing the lock to protect the HCI_UP
- * against any races from hci_dev_do_close when the controller
- * gets removed.
- */
- if (test_bit(HCI_UP, &hdev->flags))
- ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
- else
- ret = -ENETDOWN;
- hci_req_sync_unlock(hdev);
-
- return ret;
-}
-
-struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param)
-{
- int len = HCI_COMMAND_HDR_SIZE + plen;
- struct hci_command_hdr *hdr;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hdr->opcode = cpu_to_le16(opcode);
- hdr->plen = plen;
-
- if (plen)
- skb_put_data(skb, param, plen);
-
- bt_dev_dbg(hdev, "skb len %d", skb->len);
-
- hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
- hci_skb_opcode(skb) = opcode;
-
- return skb;
-}
-
-/* Queue a command to an asynchronous HCI request */
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
-
- bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
-
- /* If an error occurred during request building, there is no point in
- * queueing the HCI command. We can simply return.
- */
- if (req->err)
- return;
-
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
- if (!skb) {
- bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
- opcode);
- req->err = -ENOMEM;
- return;
- }
-
- if (skb_queue_empty(&req->cmd_q))
- bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
-
- hci_skb_event(skb) = event;
-
- skb_queue_tail(&req->cmd_q, skb);
-}
-
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param)
-{
- bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
- hci_req_add_ev(req, opcode, plen, param, 0);
-}
-
-static void start_interleave_scan(struct hci_dev *hdev)
-{
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
- queue_delayed_work(hdev->req_workqueue,
- &hdev->interleave_scan, 0);
-}
-
-static bool is_interleave_scanning(struct hci_dev *hdev)
-{
- return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
-}
-
-static void cancel_interleave_scan(struct hci_dev *hdev)
-{
- bt_dev_dbg(hdev, "cancelling interleave scan");
-
- cancel_delayed_work_sync(&hdev->interleave_scan);
-
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
-}
-
-/* Return true if interleave_scan wasn't started until exiting this function,
- * otherwise, return false
- */
-static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
-{
- /* Do interleaved scan only if all of the following are true:
- * - There is at least one ADV monitor
- * - At least one pending LE connection or one device to be scanned for
- * - Monitor offloading is not supported
- * If so, we should alternate between allowlist scan and one without
- * any filters to save power.
- */
- bool use_interleaving = hci_is_adv_monitoring(hdev) &&
- !(list_empty(&hdev->pend_le_conns) &&
- list_empty(&hdev->pend_le_reports)) &&
- hci_get_adv_monitor_offload_ext(hdev) ==
- HCI_ADV_MONITOR_EXT_NONE;
- bool is_interleaving = is_interleave_scanning(hdev);
-
- if (use_interleaving && !is_interleaving) {
- start_interleave_scan(hdev);
- bt_dev_dbg(hdev, "starting interleave scan");
- return true;
- }
-
- if (!use_interleaving && is_interleaving)
- cancel_interleave_scan(hdev);
-
- return false;
-}
-
-void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- if (use_ext_scan(hdev)) {
- struct hci_cp_le_set_ext_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
- &cp);
- } else {
- struct hci_cp_le_set_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
- }
-
- /* Disable address resolution */
- if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
- __u8 enable = 0x00;
-
- hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
- }
-}
-
-static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
- u8 bdaddr_type)
-{
- struct hci_cp_le_del_from_accept_list cp;
-
- cp.bdaddr_type = bdaddr_type;
- bacpy(&cp.bdaddr, bdaddr);
-
- bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
- cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
-
- if (use_ll_privacy(req->hdev)) {
- struct smp_irk *irk;
-
- irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
- if (irk) {
- struct hci_cp_le_del_from_resolv_list cp;
-
- cp.bdaddr_type = bdaddr_type;
- bacpy(&cp.bdaddr, bdaddr);
-
- hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
- sizeof(cp), &cp);
- }
- }
-}
-
-/* Adds connection to accept list if needed. On error, returns -1. */
-static int add_to_accept_list(struct hci_request *req,
- struct hci_conn_params *params, u8 *num_entries,
- bool allow_rpa)
-{
- struct hci_cp_le_add_to_accept_list cp;
- struct hci_dev *hdev = req->hdev;
-
- /* Already in accept list */
- if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
- params->addr_type))
- return 0;
-
- /* Select filter policy to accept all advertising */
- if (*num_entries >= hdev->le_accept_list_size)
- return -1;
-
- /* Accept list can not be used with RPAs */
- if (!allow_rpa &&
- !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
- return -1;
- }
-
- /* During suspend, only wakeable devices can be in accept list */
- if (hdev->suspended &&
- !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
- return 0;
-
- *num_entries += 1;
- cp.bdaddr_type = params->addr_type;
- bacpy(&cp.bdaddr, &params->addr);
-
- bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
- cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
-
- if (use_ll_privacy(hdev)) {
- struct smp_irk *irk;
-
- irk = hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type);
- if (irk) {
- struct hci_cp_le_add_to_resolv_list cp;
-
- cp.bdaddr_type = params->addr_type;
- bacpy(&cp.bdaddr, &params->addr);
- memcpy(cp.peer_irk, irk->val, 16);
-
- if (hci_dev_test_flag(hdev, HCI_PRIVACY))
- memcpy(cp.local_irk, hdev->irk, 16);
- else
- memset(cp.local_irk, 0, 16);
-
- hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
- sizeof(cp), &cp);
- }
- }
-
- return 0;
-}
-
-static u8 update_accept_list(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_conn_params *params;
- struct bdaddr_list *b;
- u8 num_entries = 0;
- bool pend_conn, pend_report;
- /* We allow usage of accept list even with RPAs in suspend. In the worst
- * case, we won't be able to wake from devices that use the privacy1.2
- * features. Additionally, once we support privacy1.2 and IRK
- * offloading, we can update this to also check for those conditions.
- */
- bool allow_rpa = hdev->suspended;
-
- if (use_ll_privacy(hdev))
- allow_rpa = true;
-
- /* Go through the current accept list programmed into the
- * controller one by one and check if that address is still
- * in the list of pending connections or list of devices to
- * report. If not present in either list, then queue the
- * command to remove it from the controller.
- */
- list_for_each_entry(b, &hdev->le_accept_list, list) {
- pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
- &b->bdaddr,
- b->bdaddr_type);
- pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
- &b->bdaddr,
- b->bdaddr_type);
-
- /* If the device is not likely to connect or report,
- * remove it from the accept list.
- */
- if (!pend_conn && !pend_report) {
- del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
- continue;
- }
-
- /* Accept list can not be used with RPAs */
- if (!allow_rpa &&
- !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
- return 0x00;
- }
-
- num_entries++;
- }
-
- /* Since all no longer valid accept list entries have been
- * removed, walk through the list of pending connections
- * and ensure that any new device gets programmed into
- * the controller.
- *
- * If the list of the devices is larger than the list of
- * available accept list entries in the controller, then
- * just abort and return filer policy value to not use the
- * accept list.
- */
- list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (add_to_accept_list(req, params, &num_entries, allow_rpa))
- return 0x00;
- }
-
- /* After adding all new pending connections, walk through
- * the list of pending reports and also add these to the
- * accept list if there is still space. Abort if space runs out.
- */
- list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (add_to_accept_list(req, params, &num_entries, allow_rpa))
- return 0x00;
- }
-
- /* Use the allowlist unless the following conditions are all true:
- * - We are not currently suspending
- * - There are 1 or more ADV monitors registered and it's not offloaded
- * - Interleaved scanning is not currently using the allowlist
- */
- if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
- hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
- hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
- return 0x00;
-
- /* Select filter policy to use accept list */
- return 0x01;
-}
-
-static bool scan_use_rpa(struct hci_dev *hdev)
-{
- return hci_dev_test_flag(hdev, HCI_PRIVACY);
-}
-
-static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
- u16 window, u8 own_addr_type, u8 filter_policy,
- bool filter_dup, bool addr_resolv)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- if (use_ll_privacy(hdev) && addr_resolv) {
- u8 enable = 0x01;
-
- hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
- }
-
- /* Use ext scanning if set ext scan param and ext scan enable is
- * supported
- */
- if (use_ext_scan(hdev)) {
- struct hci_cp_le_set_ext_scan_params *ext_param_cp;
- struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
- struct hci_cp_le_scan_phy_params *phy_params;
- u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
- u32 plen;
-
- ext_param_cp = (void *)data;
- phy_params = (void *)ext_param_cp->data;
-
- memset(ext_param_cp, 0, sizeof(*ext_param_cp));
- ext_param_cp->own_addr_type = own_addr_type;
- ext_param_cp->filter_policy = filter_policy;
-
- plen = sizeof(*ext_param_cp);
-
- if (scan_1m(hdev) || scan_2m(hdev)) {
- ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
-
- memset(phy_params, 0, sizeof(*phy_params));
- phy_params->type = type;
- phy_params->interval = cpu_to_le16(interval);
- phy_params->window = cpu_to_le16(window);
-
- plen += sizeof(*phy_params);
- phy_params++;
- }
-
- if (scan_coded(hdev)) {
- ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
- memset(phy_params, 0, sizeof(*phy_params));
- phy_params->type = type;
- phy_params->interval = cpu_to_le16(interval);
- phy_params->window = cpu_to_le16(window);
-
- plen += sizeof(*phy_params);
- phy_params++;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
- plen, ext_param_cp);
-
- memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
- ext_enable_cp.enable = LE_SCAN_ENABLE;
- ext_enable_cp.filter_dup = filter_dup;
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
- sizeof(ext_enable_cp), &ext_enable_cp);
- } else {
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
-
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = type;
- param_cp.interval = cpu_to_le16(interval);
- param_cp.window = cpu_to_le16(window);
- param_cp.own_address_type = own_addr_type;
- param_cp.filter_policy = filter_policy;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = filter_dup;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
- }
-}
-
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
-static int hci_update_random_address(struct hci_request *req,
- bool require_privacy, bool use_rpa,
- u8 *own_addr_type)
-{
- struct hci_dev *hdev = req->hdev;
- int err;
-
- /* If privacy is enabled use a resolvable private address. If
- * current RPA has expired or there is something else than
- * the current RPA in use, then generate a new one.
- */
- if (use_rpa) {
- /* If Controller supports LL Privacy use own address type is
- * 0x03
- */
- if (use_ll_privacy(hdev))
- *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
- else
- *own_addr_type = ADDR_LE_DEV_RANDOM;
-
- if (rpa_valid(hdev))
- return 0;
-
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
- if (err < 0) {
- bt_dev_err(hdev, "failed to generate new RPA");
- return err;
- }
-
- set_random_addr(req, &hdev->rpa);
-
- return 0;
- }
-
- /* In case of required privacy without resolvable private address,
- * use an non-resolvable private address. This is useful for active
- * scanning and non-connectable advertising.
- */
- if (require_privacy) {
- bdaddr_t nrpa;
-
- while (true) {
- /* The non-resolvable private address is generated
- * from random six bytes with the two most significant
- * bits cleared.
- */
- get_random_bytes(&nrpa, 6);
- nrpa.b[5] &= 0x3f;
-
- /* The non-resolvable private address shall not be
- * equal to the public address.
- */
- if (bacmp(&hdev->bdaddr, &nrpa))
- break;
- }
-
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &nrpa);
- return 0;
- }
-
- /* If forcing static address is in use or there is no public
- * address use the static address as random address (but skip
- * the HCI command if the current random address is already the
- * static one.
- *
- * In case BR/EDR has been disabled on a dual-mode controller
- * and a static address has been configured, then use that
- * address instead of the public BR/EDR address.
- */
- if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
- bacmp(&hdev->static_addr, BDADDR_ANY))) {
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- if (bacmp(&hdev->static_addr, &hdev->random_addr))
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
- &hdev->static_addr);
- return 0;
- }
-
- /* Neither privacy nor static address is being used so use a
- * public address.
- */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
-
- return 0;
-}
-
-/* Ensure to call hci_req_add_le_scan_disable() first to disable the
- * controller based address resolution to be able to reconfigure
- * resolving list.
- */
-void hci_req_add_le_passive_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 own_addr_type;
- u8 filter_policy;
- u16 window, interval;
- /* Default is to enable duplicates filter */
- u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- /* Background scanning should run with address resolution */
- bool addr_resolv = true;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- /* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an non-resolvable address
- * here is important so that peer devices using direct
- * advertising with our address will be correctly reported
- * by the controller.
- */
- if (hci_update_random_address(req, false, scan_use_rpa(hdev),
- &own_addr_type))
- return;
-
- if (hdev->enable_advmon_interleave_scan &&
- __hci_update_interleaved_scan(hdev))
- return;
-
- bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
- /* Adding or removing entries from the accept list must
- * happen before enabling scanning. The controller does
- * not allow accept list modification while scanning.
- */
- filter_policy = update_accept_list(req);
-
- /* When the controller is using random resolvable addresses and
- * with that having LE privacy enabled, then controllers with
- * Extended Scanner Filter Policies support can now enable support
- * for handling directed advertising.
- *
- * So instead of using filter polices 0x00 (no accept list)
- * and 0x01 (accept list enabled) use the new filter policies
- * 0x02 (no accept list) and 0x03 (accept list enabled).
- */
- if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
- (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
- filter_policy |= 0x02;
-
- if (hdev->suspended) {
- window = hdev->le_scan_window_suspend;
- interval = hdev->le_scan_int_suspend;
- } else if (hci_is_le_conn_scanning(hdev)) {
- window = hdev->le_scan_window_connect;
- interval = hdev->le_scan_int_connect;
- } else if (hci_is_adv_monitoring(hdev)) {
- window = hdev->le_scan_window_adv_monitor;
- interval = hdev->le_scan_int_adv_monitor;
-
- /* Disable duplicates filter when scanning for advertisement
- * monitor for the following reasons.
- *
- * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
- * controllers ignore RSSI_Sampling_Period when the duplicates
- * filter is enabled.
- *
- * For SW pattern filtering, when we're not doing interleaved
- * scanning, it is necessary to disable duplicates filter,
- * otherwise hosts can only receive one advertisement and it's
- * impossible to know if a peer is still in range.
- */
- filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
- } else {
- window = hdev->le_scan_window;
- interval = hdev->le_scan_interval;
- }
-
- bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
- filter_policy);
- hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
- own_addr_type, filter_policy, filter_dup,
- addr_resolv);
-}
-
-static int hci_req_add_le_interleaved_scan(struct hci_request *req,
- unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
- int ret = 0;
-
- hci_dev_lock(hdev);
-
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req, false);
- hci_req_add_le_passive_scan(req);
-
- switch (hdev->interleave_scan_state) {
- case INTERLEAVE_SCAN_ALLOWLIST:
- bt_dev_dbg(hdev, "next state: allowlist");
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
- break;
- case INTERLEAVE_SCAN_NO_FILTER:
- bt_dev_dbg(hdev, "next state: no filter");
- hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
- break;
- case INTERLEAVE_SCAN_NONE:
- BT_ERR("unexpected error");
- ret = -1;
- }
-
- hci_dev_unlock(hdev);
-
- return ret;
-}
-
-static void interleave_scan_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- interleave_scan.work);
- u8 status;
- unsigned long timeout;
-
- if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
- timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
- } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
- timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
- } else {
- bt_dev_err(hdev, "unexpected error");
- return;
- }
-
- hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
- HCI_CMD_TIMEOUT, &status);
-
- /* Don't continue interleaving if it was canceled */
- if (is_interleave_scanning(hdev))
- queue_delayed_work(hdev->req_workqueue,
- &hdev->interleave_scan, timeout);
-}
-
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
- * subsequently created connection will be undefined (some
- * controllers use the new address and others the one we had
- * when the operation started).
- *
- * In this kind of scenario skip the update and let the random
- * address be updated at the next cycle.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_lookup_le_connect(hdev)) {
- bt_dev_dbg(hdev, "Deferring random address update");
- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- return;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
-}
-
-void hci_request_setup(struct hci_dev *hdev)
-{
- INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
-}
-
-void hci_request_cancel_all(struct hci_dev *hdev)
-{
- __hci_cmd_sync_cancel(hdev, ENODEV);
-
- cancel_interleave_scan(hdev);
-}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
deleted file mode 100644
index 0be75cf0efed..000000000000
--- a/net/bluetooth/hci_request.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2014 Intel Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation;
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
- SOFTWARE IS DISCLAIMED.
-*/
-
-#include <asm/unaligned.h>
-
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
-#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
-#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
-
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
-struct hci_request {
- struct hci_dev *hdev;
- struct sk_buff_head cmd_q;
-
- /* If something goes wrong when building the HCI request, the error
- * value is stored in this field.
- */
- int err;
-};
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
-void hci_req_purge(struct hci_request *req);
-bool hci_req_status_pend(struct hci_dev *hdev);
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
-int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
-void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb);
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param);
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
- hci_req_complete_t *req_complete,
- hci_req_complete_skb_t *req_complete_skb);
-
-int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status);
-int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status);
-
-struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param);
-
-void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
-void hci_req_add_le_passive_scan(struct hci_request *req);
-
-void hci_request_setup(struct hci_dev *hdev);
-void hci_request_cancel_all(struct hci_dev *hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 3e7cd330d731..022b86797acd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -27,7 +27,7 @@
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -101,7 +101,7 @@ static bool hci_sock_gen_cookie(struct sock *sk)
int id = hci_pi(sk)->cookie;
if (!id) {
- id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
if (id < 0)
id = 0xffffffff;
@@ -119,7 +119,7 @@ static void hci_sock_free_cookie(struct sock *sk)
if (id) {
hci_pi(sk)->cookie = 0xffffffff;
- ida_simple_remove(&sock_cookie_ida, id);
+ ida_free(&sock_cookie_ida, id);
}
}
@@ -485,7 +485,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return NULL;
ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
- ni->type = hdev->dev_type;
+ ni->type = 0x00; /* Old hdev->dev_type */
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
@@ -1007,9 +1007,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return -EOPNOTSUPP;
- if (hdev->dev_type != HCI_PRIMARY)
- return -EOPNOTSUPP;
-
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
@@ -1929,7 +1926,7 @@ drop:
}
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -1946,10 +1943,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
switch (optname) {
case HCI_DATA_DIR:
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
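copy_safe_from_sockptr() is not defined in this diff; in kernels of this vintage it is presumably the length-checked wrapper sketched below (not quoted from the tree), which is what lets the handlers here drop both the explicit -EFAULT assignment and the min_t() clamp on the user-supplied length:

static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
                                         sockptr_t optval, unsigned int optlen)
{
        /* Reject user buffers shorter than the kernel object, then fall
         * back to the ordinary sockptr copy.
         */
        if (optlen < ksize)
                return -EINVAL;
        return copy_from_sockptr(dst, optval, ksize);
}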
@@ -1958,10 +1954,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
break;
case HCI_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
@@ -1979,11 +1974,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
- len = min_t(unsigned int, len, sizeof(uf));
- if (copy_from_sockptr(&uf, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
+ if (err)
break;
- }
if (!capable(CAP_NET_RAW)) {
uf.type_mask &= hci_sec_filter.type_mask;
@@ -2012,7 +2005,7 @@ done:
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
@@ -2022,7 +2015,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
if (level == SOL_HCI)
return hci_sock_setsockopt_old(sock, level, optname, optval,
- len);
+ optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
@@ -2042,10 +2035,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
goto done;
}
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
hci_pi(sk)->mtu = opt;
break;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5716345a26df..dd770ef5ec36 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -12,7 +12,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
@@ -32,6 +31,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
+ /* Free the request command so it is not used as response */
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+
if (skb) {
struct sock *sk = hci_skb_sk(skb);
@@ -39,15 +42,14 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (sk)
sock_put(sk);
- hdev->req_skb = skb_get(skb);
+ hdev->req_rsp = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
}
-static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
- u32 plen, const void *param,
- struct sock *sk)
+struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, struct sock *sk)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
@@ -110,7 +112,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
skb_queue_tail(&req->cmd_q, skb);
}
-static int hci_cmd_sync_run(struct hci_request *req)
+static int hci_req_sync_run(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
@@ -143,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req)
return 0;
}
+static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
+{
+ skb_queue_head_init(&req->cmd_q);
+ req->hdev = hdev;
+ req->err = 0;
+}
+
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
@@ -154,13 +163,13 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
- hci_req_init(&req, hdev);
+ hci_request_init(&req, hdev);
hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
hdev->req_status = HCI_REQ_PEND;
- err = hci_cmd_sync_run(&req);
+ err = hci_req_sync_run(&req);
if (err < 0)
return ERR_PTR(err);
@@ -187,8 +196,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = 0;
hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
+ skb = hdev->req_rsp;
+ hdev->req_rsp = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
@@ -197,6 +206,12 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
return ERR_PTR(err);
}
+ /* If the command returns only a status event, skb will be set to NULL
+ * as there are no parameters.
+ */
+ if (!skb)
+ return ERR_PTR(-ENODATA);
+
return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);
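
With this change a status-only completion no longer yields a NULL sk_buff;
callers of __hci_cmd_sync_sk() now get ERR_PTR(-ENODATA) and must treat it as
success without parameters. A hedged caller sketch (opcode, plen and param are
placeholders):

	struct sk_buff *skb;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, 0,
				HCI_CMD_TIMEOUT, NULL);
	if (IS_ERR(skb)) {
		/* Command completed with a status event only */
		if (PTR_ERR(skb) == -ENODATA)
			return 0;
		return PTR_ERR(skb);
	}

	/* ... parse skb->data ... */
	kfree_skb(skb);
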
@@ -246,6 +261,11 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
u8 status;
skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+
+ /* If the command returned only a status event, skb is ERR_PTR(-ENODATA) */
+ if (skb == ERR_PTR(-ENODATA))
+ return 0;
+
if (IS_ERR(skb)) {
if (!event)
bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
@@ -253,13 +273,6 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
return PTR_ERR(skb);
}
- /* If command return a status event skb will be set to NULL as there are
- * no parameters, in case of failure IS_ERR(skb) would have be set to
- * the actual error would be found with PTR_ERR(skb).
- */
- if (!skb)
- return 0;
-
status = skb->data[0];
kfree_skb(skb);
@@ -276,6 +289,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
+int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ int err;
+
+ hci_req_sync_lock(hdev);
+ err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
+ hci_req_sync_unlock(hdev);
+
+ return err;
+}
+EXPORT_SYMBOL(hci_cmd_sync_status);
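
The newly exported hci_cmd_sync_status() is __hci_cmd_sync_status() wrapped in
hci_req_sync_lock()/hci_req_sync_unlock(), so drivers can issue a command and
check its status without managing the request lock themselves. An illustrative
call (the opcode is only an example):

	int err;

	/* Takes and releases hdev->req_lock internally */
	err = hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
				  HCI_CMD_TIMEOUT);
	if (err)
		bt_dev_err(hdev, "reset failed (%d)", err);
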
+
static void hci_cmd_sync_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
@@ -330,10 +356,9 @@ static int scan_disable_sync(struct hci_dev *hdev, void *data)
return hci_scan_disable_sync(hdev);
}
-static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
- return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}
static void le_scan_disable(struct work_struct *work)
@@ -354,8 +379,6 @@ static void le_scan_disable(struct work_struct *work)
goto _return;
}
- hdev->discovery.scan_start = 0;
-
/* If we were running LE only scan, change discovery state. If
* we were running both LE and BR/EDR inquiry simultaneously,
* and BR/EDR inquiry is already finished, stop discovery,
@@ -553,6 +576,53 @@ unlock:
hci_dev_unlock(hdev);
}
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev);
+
+static void interleave_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ interleave_scan.work);
+ unsigned long timeout;
+
+ if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
+ timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
+ } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
+ timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
+ } else {
+ bt_dev_err(hdev, "unexpected error");
+ return;
+ }
+
+ hci_passive_scan_sync(hdev);
+
+ hci_dev_lock(hdev);
+
+ switch (hdev->interleave_scan_state) {
+ case INTERLEAVE_SCAN_ALLOWLIST:
+ bt_dev_dbg(hdev, "next state: allowlist");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ break;
+ case INTERLEAVE_SCAN_NO_FILTER:
+ bt_dev_dbg(hdev, "next state: no filter");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
+ break;
+ case INTERLEAVE_SCAN_NONE:
+ bt_dev_err(hdev, "unexpected error");
+ }
+
+ hci_dev_unlock(hdev);
+
+ /* Don't continue interleaving if it was canceled */
+ if (is_interleave_scanning(hdev))
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, timeout);
+}
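
interleave_scan_work() alternates the passive scan between the allowlist and
no-filter phases and re-queues itself for the configured duration. A rough
sketch of how the cycle is started and stopped, mirroring
hci_start_interleave_scan() and cancel_interleave_scan() later in this file:

	/* Start interleaving in the allowlist phase */
	hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
	queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, 0);

	/* Stop interleaving: clear the state so the work won't re-queue */
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
	cancel_delayed_work(&hdev->interleave_scan);
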
+
void hci_cmd_sync_init(struct hci_dev *hdev)
{
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
@@ -564,6 +634,18 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
+}
+
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry,
+ int err)
+{
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, err);
+
+ list_del(&entry->list);
+ kfree(entry);
}
void hci_cmd_sync_clear(struct hci_dev *hdev)
@@ -574,17 +656,12 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
cancel_work_sync(&hdev->reenable_adv_work);
mutex_lock(&hdev->cmd_sync_work_lock);
- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
- if (entry->destroy)
- entry->destroy(hdev, entry->data, -ECANCELED);
-
- list_del(&entry->list);
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
mutex_unlock(&hdev->cmd_sync_work_lock);
}
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -592,26 +669,31 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- cancel_delayed_work_sync(&hdev->cmd_timer);
- cancel_delayed_work_sync(&hdev->ncmd_timer);
- atomic_set(&hdev->cmd_cnt, 1);
-
- wake_up_interruptible(&hdev->req_wait_q);
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
}
}
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+/* Cancel ongoing command request synchronously:
+ *
+ * - Set result and mark status to HCI_REQ_CANCELED
+ * - Wake up the command sync thread
+ */
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = err;
+ /* req_result is __u32 so error must be positive to be properly
+ * propagated.
+ */
+ hdev->req_result = err < 0 ? -err : err;
hdev->req_status = HCI_REQ_CANCELED;
- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ wake_up_interruptible(&hdev->req_wait_q);
}
}
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
/* Submit HCI command to be run in as cmd_sync_work:
*
@@ -667,6 +749,153 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (func && entry->func != func)
+ continue;
+
+ if (data && entry->data != data)
+ continue;
+
+ if (destroy && entry->destroy != destroy)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Queue HCI command entry once:
+ *
+ * - Look up whether an entry already exists and, only if it doesn't,
+ * create a new entry and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
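
hci_cmd_sync_queue_once() makes queuing idempotent: if an entry matching the
given func/data/destroy is already pending it returns 0 without adding a
duplicate. This is how the passive scan update later in this patch avoids
piling up identical work items:

	/* At most one update_passive_scan_sync entry pending at a time */
	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
				       NULL);
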
+
+/* Run HCI command:
+ *
+ * - hdev must be running
+ * - if already on cmd_sync_work, run immediately; otherwise queue
+ */
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ /* Only queue the command if hdev is running, which means it has been
+ * opened and is either in its init phase or already up.
+ */
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -ENETDOWN;
+
+ /* If already on cmd_sync_work, run immediately; otherwise queue */
+ if (current_work() == &hdev->cmd_sync_work)
+ return func(hdev, data);
+
+ return hci_cmd_sync_submit(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run);
+
+/* Run HCI command entry once:
+ *
+ * - Look up whether an entry already exists and, only if it doesn't,
+ * create a new entry and run it.
+ * - if already on cmd_sync_work, run immediately; otherwise queue
+ */
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_run(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run_once);
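
hci_cmd_sync_run() and hci_cmd_sync_run_once() mirror the queue variants but,
when already executing on cmd_sync_work, call the function synchronously
instead of queuing it (and return -ENETDOWN if the device is not running). A
hedged sketch with a hypothetical callback:

static int example_sync(struct hci_dev *hdev, void *data)
{
	/* hypothetical callback, runs in hci_cmd_sync context */
	return 0;
}

	/* Runs immediately if on cmd_sync_work, otherwise queued once */
	err = hci_cmd_sync_run_once(hdev, example_sync, NULL, NULL);
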
+
+/* Lookup HCI command entry:
+ *
+ * - Return first entry that matches by function callback or data or
+ * destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry)
+{
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Lookup and cancel first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry)
+ return false;
+
+ hci_cmd_sync_cancel_entry(hdev, entry);
+
+ return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Lookup and cancel any entry that matches by function callback or data or
+ * destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+ bool ret = false;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+ destroy))) {
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ ret = true;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
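
The dequeue helpers are the inverse of the queue/run family:
hci_cmd_sync_dequeue_once() cancels the first matching pending entry and
hci_cmd_sync_dequeue() all of them, invoking each entry's destroy callback
with -ECANCELED. The new hci_cancel_connect_sync() at the end of this file
builds on exactly this, roughly:

	/* Abort a queued LE connection attempt that has not run yet */
	if (!hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete))
		return -ENOENT;	/* nothing was pending */
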
+
int hci_update_eir_sync(struct hci_dev *hdev)
{
struct hci_cp_write_eir cp;
@@ -802,9 +1031,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
+ /* If a random_addr has been set and we're advertising or initiating an
+ * LE connection, we can't go ahead and change the random address at
+ * this time. This is because the eventual initiator address used for the
* subsequently created connection will be undefined (some
* controllers use the new address and others the one we had
* when the operation started).
@@ -812,8 +1041,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
* In this kind of scenario skip the update and let the random
* address be updated at the next cycle.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_lookup_le_connect(hdev)) {
+ if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
+ (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+ hci_lookup_le_connect(hdev))) {
bt_dev_dbg(hdev, "Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return 0;
@@ -836,7 +1066,7 @@ int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev))
+ if (ll_privacy_capable(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -919,11 +1149,10 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
struct hci_cp_ext_adv_set *set;
u8 data[sizeof(*cp) + sizeof(*set) * 1];
u8 size;
+ struct adv_info *adv = NULL;
/* If request specifies an instance that doesn't exist, fail */
if (instance > 0) {
- struct adv_info *adv;
-
adv = hci_find_adv_instance(hdev, instance);
if (!adv)
return -EINVAL;
@@ -942,7 +1171,7 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp->num_of_sets = !!instance;
cp->enable = 0x00;
- set->handle = instance;
+ set->handle = adv ? adv->handle : instance;
size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
@@ -1071,7 +1300,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.handle = instance;
+ cp.handle = adv ? adv->handle : instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
cp.primary_phy = HCI_ADV_PHY_1M;
@@ -1111,31 +1340,27 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_ext_scan_rsp_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
+ HCI_MAX_EXT_AD_LENGTH);
u8 len;
struct adv_info *adv = NULL;
int err;
- memset(&pdu, 0, sizeof(pdu));
-
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->scan_rsp_changed)
return 0;
}
- len = eir_create_scan_rsp(hdev, instance, pdu.data);
+ len = eir_create_scan_rsp(hdev, instance, pdu->data);
- pdu.cp.handle = instance;
- pdu.cp.length = len;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->length = len;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
- sizeof(pdu.cp) + len, &pdu.cp,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
if (err)
return err;
@@ -1143,7 +1368,7 @@ static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
if (adv) {
adv->scan_rsp_changed = false;
} else {
- memcpy(hdev->scan_rsp_data, pdu.data, len);
+ memcpy(hdev->scan_rsp_data, pdu->data, len);
hdev->scan_rsp_data_len = len;
}
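
The DEFINE_FLEX()/struct_size() conversions above and below replace the
hand-rolled "command struct plus data[] buffer" wrappers with an on-stack,
zero-initialized flexible-array object whose counter member is preset to the
maximum element count; both helpers come from the kernel's overflow macros. A
generic sketch of the pattern (example_cmd and the opcode are illustrative
only):

struct example_cmd {
	__u8 handle;
	__u8 length;	/* counter for the trailing data[] */
	__u8 data[];
};

static int example_send(struct hci_dev *hdev)
{
	/* Room for 32 data bytes; DEFINE_FLEX() presets cmd->length to 32 */
	DEFINE_FLEX(struct example_cmd, cmd, data, length, 32);
	u8 len = 0;	/* bytes actually written into cmd->data */

	cmd->length = len;

	/* struct_size() sizes the header plus only the bytes in use */
	return __hci_cmd_sync_status(hdev, 0x0000 /* illustrative opcode */,
				     struct_size(cmd, data, len), cmd,
				     HCI_CMD_TIMEOUT);
}
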
@@ -1211,7 +1436,7 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
memset(set, 0, sizeof(*set));
- set->handle = instance;
+ set->handle = adv ? adv->handle : instance;
/* Set duration per instance since controller is responsible for
* scheduling it.
@@ -1287,29 +1512,25 @@ static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_per_adv_data cp;
- u8 data[HCI_MAX_PER_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
+ HCI_MAX_PER_AD_LENGTH);
u8 len;
-
- memset(&pdu, 0, sizeof(pdu));
+ struct adv_info *adv = NULL;
if (instance) {
- struct adv_info *adv = hci_find_adv_instance(hdev, instance);
-
+ adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->periodic)
return 0;
}
- len = eir_create_per_adv_data(hdev, instance, pdu.data);
+ len = eir_create_per_adv_data(hdev, instance, pdu->data);
- pdu.cp.length = len;
- pdu.cp.handle = instance;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->length = len;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
- sizeof(pdu.cp) + len, &pdu,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
}
@@ -1565,30 +1786,6 @@ int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
HCI_CMD_TIMEOUT, sk);
}
-static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
-{
- struct adv_info *adv = data;
- u8 instance = 0;
-
- if (adv)
- instance = adv->instance;
-
- return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
-}
-
-int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
-{
- struct adv_info *adv = NULL;
-
- if (instance) {
- adv = hci_find_adv_instance(hdev, instance);
- if (!adv)
- return -EINVAL;
- }
-
- return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
-}
-
int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
struct hci_cp_le_term_big cp;
@@ -1603,31 +1800,27 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_ext_adv_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
+ HCI_MAX_EXT_AD_LENGTH);
u8 len;
struct adv_info *adv = NULL;
int err;
- memset(&pdu, 0, sizeof(pdu));
-
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->adv_data_changed)
return 0;
}
- len = eir_create_adv_data(hdev, instance, pdu.data);
+ len = eir_create_adv_data(hdev, instance, pdu->data);
- pdu.cp.length = len;
- pdu.cp.handle = instance;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu->length = len;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
- sizeof(pdu.cp) + len, &pdu.cp,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
if (err)
return err;
@@ -1636,7 +1829,7 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
if (adv) {
adv->adv_data_changed = false;
} else {
- memcpy(hdev->adv_data, pdu.data, len);
+ memcpy(hdev->adv_data, pdu->data, len);
hdev->adv_data_len = len;
}
@@ -1945,7 +2138,7 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
- if (!use_ll_privacy(hdev))
+ if (!ll_privacy_capable(hdev))
return 0;
/* If controller is not/already resolving we are done. */
@@ -1990,11 +2183,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev)
&hdev->interleave_scan, 0);
}
-static bool is_interleave_scanning(struct hci_dev *hdev)
-{
- return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
-}
-
static void cancel_interleave_scan(struct hci_dev *hdev)
{
bt_dev_dbg(hdev, "cancelling interleave scan");
@@ -2042,7 +2230,7 @@ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
struct hci_cp_le_del_from_resolv_list cp;
struct bdaddr_list_with_irk *entry;
- if (!use_ll_privacy(hdev))
+ if (!ll_privacy_capable(hdev))
return 0;
/* Check if the IRK has been programmed */
@@ -2107,7 +2295,7 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
struct bdaddr_list_with_irk *entry;
struct hci_conn_params *p;
- if (!use_ll_privacy(hdev))
+ if (!ll_privacy_capable(hdev))
return 0;
/* Attempt to program local identity address, type and irk if params is
@@ -2120,7 +2308,8 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
memcpy(cp.peer_irk, hdev->irk, 16);
goto done;
- }
+ } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
+ return 0;
irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
if (!irk)
@@ -2167,6 +2356,10 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
struct hci_cp_le_set_privacy_mode cp;
struct smp_irk *irk;
+ if (!ll_privacy_capable(hdev) ||
+ !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
+ return 0;
+
/* If device privacy mode has already been set there is nothing to do */
if (params->privacy_mode == HCI_DEVICE_PRIVACY)
return 0;
@@ -2216,11 +2409,6 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
if (*num_entries >= hdev->le_accept_list_size)
return -ENOSPC;
- /* Accept list can not be used with RPAs */
- if (!use_ll_privacy(hdev) &&
- hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
- return -EINVAL;
-
/* Attempt to program the device in the resolving list first to avoid
* having to rollback in case it fails since the resolving list is
* dynamic it can probably be smaller than the accept list.
@@ -2355,7 +2543,7 @@ static int hci_pause_addr_resolution(struct hci_dev *hdev)
{
int err;
- if (!use_ll_privacy(hdev))
+ if (!ll_privacy_capable(hdev))
return 0;
if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
@@ -2445,16 +2633,26 @@ static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
return p;
}
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
/* Device must not be scanning when updating the accept list.
*
* Update is done using the following sequence:
*
- * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
* Remove Devices From Accept List ->
- * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
+ * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))->
* Add Devices to Accept List ->
- * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
- * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) ->
+ * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
* Enable Scanning
*
* In case of failure advertising shall be restored to its original state and
@@ -2475,7 +2673,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
/* Pause advertising if resolving list can be used as controllers
* cannot accept resolving list modifications while advertising.
*/
- if (use_ll_privacy(hdev)) {
+ if (ll_privacy_capable(hdev)) {
err = hci_pause_advertising_sync(hdev);
if (err) {
bt_dev_err(hdev, "pause advertising failed: %d", err);
@@ -2493,6 +2691,31 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
goto done;
}
+ /* Force address filtering if PA Sync is in progress */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_pa_create_sync *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
+ if (sent) {
+ struct conn_params pa;
+
+ memset(&pa, 0, sizeof(pa));
+
+ bacpy(&pa.addr, &sent->addr);
+ pa.addr_type = sent->addr_type;
+
+ /* Clear first since there could be addresses left
+ * behind.
+ */
+ hci_le_clear_accept_list_sync(hdev);
+
+ num_entries = 1;
+ err = hci_le_add_accept_list_sync(hdev, &pa,
+ &num_entries);
+ goto done;
+ }
+ }
+
/* Go through the current accept list programmed into the
* controller one by one and check if that address is connected or is
* still in the list of pending connections or list of devices to
@@ -2595,13 +2818,21 @@ done:
bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
/* Resume advertising if it was paused */
- if (use_ll_privacy(hdev))
+ if (ll_privacy_capable(hdev))
hci_resume_advertising_sync(hdev);
/* Select filter policy to use accept list */
return filter_policy;
}
+static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
+ u8 type, u16 interval, u16 window)
+{
+ cp->type = type;
+ cp->interval = cpu_to_le16(interval);
+ cp->window = cpu_to_le16(window);
+}
+
static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
u16 interval, u16 window,
u8 own_addr_type, u8 filter_policy)
@@ -2609,7 +2840,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
struct hci_cp_le_set_ext_scan_params *cp;
struct hci_cp_le_scan_phy_params *phy;
u8 data[sizeof(*cp) + sizeof(*phy) * 2];
- u8 num_phy = 0;
+ u8 num_phy = 0x00;
cp = (void *)data;
phy = (void *)cp->data;
@@ -2619,28 +2850,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
cp->own_addr_type = own_addr_type;
cp->filter_policy = filter_policy;
+ /* If PA Sync is in progress, select the PHY based on the
+ * hci_conn.iso_qos.
+ */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_add_to_accept_list *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
+ if (sent) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
+ &sent->bdaddr);
+ if (conn) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
+ qos->bcast.in.phy & BT_ISO_PHY_2M) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+ hci_le_scan_phy_params(phy, type,
+ interval,
+ window);
+ num_phy++;
+ phy++;
+ }
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+ hci_le_scan_phy_params(phy, type,
+ interval * 3,
+ window * 3);
+ num_phy++;
+ phy++;
+ }
+
+ if (num_phy)
+ goto done;
+ }
+ }
+ }
+
if (scan_1m(hdev) || scan_2m(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_1M;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval, window);
num_phy++;
phy++;
}
if (scan_coded(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
num_phy++;
phy++;
}
+done:
+ if (!num_phy)
+ return -EINVAL;
+
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
sizeof(*cp) + sizeof(*phy) * num_phy,
data, HCI_CMD_TIMEOUT);
@@ -2728,6 +2995,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
*/
filter_policy = hci_update_accept_list_sync(hdev);
+ /* If suspended and filter_policy set to 0x00 (no acceptlist) then
+ * passive scanning cannot be started since that would require the host
+ * to be woken up to process the reports.
+ */
+ if (hdev->suspended && !filter_policy) {
+ /* Check if accept list is empty then there is no need to scan
+ * while suspended.
+ */
+ if (list_empty(&hdev->le_accept_list))
+ return 0;
+
+ /* If there are devices in the accept_list, it means some devices
+ * could not be programmed, which in the non-suspended case means
+ * filter_policy needs to be set to 0x00 so the host filters.
+ * Since we are handling the suspended case here, we can ignore the
+ * devices needing host filtering so that the devices that are in
+ * the acceptlist are still able to wake up the system.
+ */
+ filter_policy = 0x01;
+ }
+
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
@@ -2750,6 +3038,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
} else if (hci_is_adv_monitoring(hdev)) {
window = hdev->le_scan_window_adv_monitor;
interval = hdev->le_scan_int_adv_monitor;
+
+ /* Disable duplicates filter when scanning for advertisement
+ * monitor for the following reasons.
+ *
+ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
+ * controllers ignore RSSI_Sampling_Period when the duplicates
+ * filter is enabled.
+ *
+ * For SW pattern filtering, when we're not doing interleaved
+ * scanning, it is necessary to disable duplicates filter,
+ * otherwise hosts can only receive one advertisement and it's
+ * impossible to know if a peer is still in range.
+ */
+ filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
@@ -2774,7 +3076,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
* If there are devices to scan:
*
* Disable Scanning -> Update Accept List ->
- * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
* Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
* Enable Scanning
*
@@ -2879,7 +3181,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+ NULL);
}
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -3127,7 +3430,7 @@ int hci_update_name_sync(struct hci_dev *hdev)
*
* HCI_SSP_ENABLED(Enable SSP)
* HCI_LE_ENABLED(Enable LE)
- * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
+ * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
* Update adv data)
* Enable Authentication
* lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
@@ -3215,7 +3518,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
return;
- bacpy(&hdev->public_addr, &ba);
+ if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
+ baswap(&hdev->public_addr, &ba);
+ else
+ bacpy(&hdev->public_addr, &ba);
}
struct hci_init_stage {
@@ -3316,10 +3622,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
/* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev)
{
- /* Not all AMP controllers support this command */
- if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
- return 0;
-
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
0, NULL, HCI_CMD_TIMEOUT);
}
@@ -3354,51 +3656,6 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
return 0;
}
-/* Read Local AMP Info */
-static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Data Blk size */
-static int hci_read_data_block_size_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Flow Control Mode */
-static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Location Data */
-static int hci_read_location_data_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* AMP Controller init stage 1 command sequence */
-static const struct hci_init_stage amp_init1[] = {
- /* HCI_OP_READ_LOCAL_VERSION */
- HCI_INIT(hci_read_local_version_sync),
- /* HCI_OP_READ_LOCAL_COMMANDS */
- HCI_INIT(hci_read_local_cmds_sync),
- /* HCI_OP_READ_LOCAL_AMP_INFO */
- HCI_INIT(hci_read_local_amp_info_sync),
- /* HCI_OP_READ_DATA_BLOCK_SIZE */
- HCI_INIT(hci_read_data_block_size_sync),
- /* HCI_OP_READ_FLOW_CONTROL_MODE */
- HCI_INIT(hci_read_flow_control_mode_sync),
- /* HCI_OP_READ_LOCATION_DATA */
- HCI_INIT(hci_read_location_data_sync),
- {}
-};
-
static int hci_init1_sync(struct hci_dev *hdev)
{
int err;
@@ -3412,28 +3669,9 @@ static int hci_init1_sync(struct hci_dev *hdev)
return err;
}
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
- return hci_init_stage_sync(hdev, br_init1);
- case HCI_AMP:
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
- return hci_init_stage_sync(hdev, amp_init1);
- default:
- bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
- break;
- }
-
- return 0;
+ return hci_init_stage_sync(hdev, br_init1);
}
-/* AMP Controller init stage 2 command sequence */
-static const struct hci_init_stage amp_init2[] = {
- /* HCI_OP_READ_LOCAL_FEATURES */
- HCI_INIT(hci_read_local_features_sync),
- {}
-};
-
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
@@ -3691,9 +3929,6 @@ static int hci_init2_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "");
- if (hdev->dev_type == HCI_AMP)
- return hci_init_stage_sync(hdev, amp_init2);
-
err = hci_init_stage_sync(hdev, hci_init2);
if (err)
return err;
@@ -3970,6 +4205,14 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
if (use_enhanced_conn_complete(hdev))
events[1] |= 0x02; /* LE Enhanced Connection Complete */
+ /* Mark Device Privacy if Privacy Mode is supported */
+ if (privacy_mode_capable(hdev))
+ hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
+
+ /* Mark Address Resolution if LL Privacy is supported */
+ if (ll_privacy_capable(hdev))
+ hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION;
+
/* If the controller supports Extended Scanner Filter
* Policies, enable the corresponding event.
*/
@@ -4098,16 +4341,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
0, NULL, HCI_CMD_TIMEOUT);
}
-/* Clear LE Accept List */
-static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
-{
- if (!(hdev->commands[26] & 0x80))
- return 0;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
- HCI_CMD_TIMEOUT);
-}
-
/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
@@ -4531,13 +4764,6 @@ static int hci_init_sync(struct hci_dev *hdev)
if (err < 0)
return err;
- /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
- * BR/EDR/LE type controllers. AMP controllers only need the
- * first two stages of init.
- */
- if (hdev->dev_type != HCI_PRIMARY)
- return 0;
-
err = hci_init3_sync(hdev);
if (err < 0)
return err;
@@ -4601,6 +4827,13 @@ static const struct {
HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
"HCI LE Set Random Private Address Timeout command is "
"advertised, but not supported."),
+ HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
+ "HCI LE Extended Create Connection command is "
+ "advertised, but not supported."),
+ HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
+ "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
+ "to unexpected SMP errors when pairing "
+ "and will not be used."),
HCI_QUIRK_BROKEN(LE_CODED,
"HCI LE Coded PHY feature bit is set, "
"but its usage is not supported.")
@@ -4766,12 +4999,8 @@ int hci_dev_open_sync(struct hci_dev *hdev)
* In case of user channel usage, it is not important
* if a public address or static random address is
* available.
- *
- * This check is only valid for BR/EDR controllers
- * since AMP controllers do not have an address.
*/
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hdev->dev_type == HCI_PRIMARY &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY)) {
ret = -EADDRNOTAVAIL;
@@ -4806,8 +5035,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT) &&
- hdev->dev_type == HCI_PRIMARY) {
+ hci_dev_test_flag(hdev, HCI_MGMT)) {
ret = hci_powered_update_sync(hdev);
mgmt_power_on(hdev, ret);
}
@@ -4834,6 +5062,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -4894,11 +5127,19 @@ int hci_dev_close_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "");
- cancel_delayed_work(&hdev->power_off);
- cancel_delayed_work(&hdev->ncmd_timer);
- cancel_delayed_work(&hdev->le_scan_disable);
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+ disable_delayed_work(&hdev->power_off);
+ disable_delayed_work(&hdev->ncmd_timer);
+ disable_delayed_work(&hdev->le_scan_disable);
+ } else {
+ cancel_delayed_work(&hdev->power_off);
+ cancel_delayed_work(&hdev->ncmd_timer);
+ cancel_delayed_work(&hdev->le_scan_disable);
+ }
- hci_request_cancel_all(hdev);
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
+
+ cancel_interleave_scan(hdev);
if (hdev->adv_instance_timeout) {
cancel_delayed_work_sync(&hdev->adv_instance_expire);
@@ -4947,8 +5188,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
- if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev);
@@ -4994,6 +5234,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ /* Drop last request */
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -5004,9 +5250,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hci_dev_clear_volatile_flags(hdev);
- /* Controller radio is available but is currently powered down */
- hdev->amp_status = AMP_STATUS_POWERED_DOWN;
-
memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY);
@@ -5043,8 +5286,7 @@ static int hci_power_on_sync(struct hci_dev *hdev)
*/
if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
- (hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_close_sync(hdev);
@@ -5127,7 +5369,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
}
/* Resume advertising if it was paused */
- if (use_ll_privacy(hdev))
+ if (ll_privacy_capable(hdev))
hci_resume_advertising_sync(hdev);
/* No further actions needed for LE-only discovery */
@@ -5140,33 +5382,20 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
if (!e)
return 0;
- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+ /* Ignore cancel errors since they should not interfere with stopping
+ * the discovery.
+ */
+ hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
}
return 0;
}
-static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
- u8 reason)
-{
- struct hci_cp_disconn_phy_link cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.phy_handle = HCI_PHY_HANDLE(handle);
- cp.reason = reason;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
-}
-
static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 reason)
{
struct hci_cp_disconnect cp;
- if (conn->type == AMP_LINK)
- return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
-
if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
/* This is a BIS connection, hci_conn_del will
* do the necessary cleanup.
@@ -5403,27 +5632,33 @@ static int hci_power_off_sync(struct hci_dev *hdev)
if (!test_bit(HCI_UP, &hdev->flags))
return 0;
+ hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
+
if (test_bit(HCI_ISCAN, &hdev->flags) ||
test_bit(HCI_PSCAN, &hdev->flags)) {
err = hci_write_scan_enable_sync(hdev, 0x00);
if (err)
- return err;
+ goto out;
}
err = hci_clear_adv_sync(hdev, NULL, false);
if (err)
- return err;
+ goto out;
err = hci_stop_discovery_sync(hdev);
if (err)
- return err;
+ goto out;
/* Terminated due to Power Off */
err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
if (err)
- return err;
+ goto out;
+
+ err = hci_dev_close_sync(hdev);
- return hci_dev_close_sync(hdev);
+out:
+ hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
+ return err;
}
int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
@@ -5554,7 +5789,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev)
return hci_update_passive_scan_sync(hdev);
}
-static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
const u8 giac[3] = { 0x33, 0x8b, 0x9e };
const u8 liac[3] = { 0x00, 0x8b, 0x9e };
@@ -5577,6 +5812,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
memcpy(&cp.lap, giac, sizeof(cp.lap));
cp.length = length;
+ cp.num_rsp = num_rsp;
return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -5645,7 +5881,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
failed:
/* Resume advertising if it was paused */
- if (use_ll_privacy(hdev))
+ if (ll_privacy_capable(hdev))
hci_resume_advertising_sync(hdev);
/* Resume passive scanning */
@@ -5663,7 +5899,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
if (err)
return err;
- return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}
int hci_start_discovery_sync(struct hci_dev *hdev)
@@ -5675,7 +5911,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
- return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
case DISCOV_TYPE_INTERLEAVED:
/* When running simultaneous discovery, the LE scanning time
* should occupy the whole discovery time since BR/EDR inquiry
@@ -5745,7 +5981,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev)
return err;
hdev->discovery_paused = true;
- hdev->discovery_old_state = old_state;
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
return 0;
@@ -6132,7 +6367,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen = sizeof(*cp);
- if (scan_1m(hdev)) {
+ if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);
@@ -6140,7 +6376,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}
- if (scan_2m(hdev)) {
+ if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);
@@ -6148,7 +6385,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}
- if (scan_coded(hdev)) {
+ if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);
@@ -6161,12 +6399,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
conn->conn_timeout, NULL);
}
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_create_conn cp;
struct hci_conn_params *params;
u8 own_addr_type;
int err;
+ struct hci_conn *conn = data;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
+ conn->state = BT_CONNECT;
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -6222,7 +6469,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
&own_addr_type);
if (err)
goto done;
-
+ /* Send command LE Extended Create Connection if supported */
if (use_ext_conn(hdev)) {
err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
goto done;
@@ -6267,10 +6514,8 @@ done:
int hci_le_create_cis_sync(struct hci_dev *hdev)
{
- struct {
- struct hci_cp_le_create_cis cp;
- struct hci_cis cis[0x1f];
- } cmd;
+ DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
+ size_t aux_num_cis = 0;
struct hci_conn *conn;
u8 cig = BT_ISO_QOS_CIG_UNSET;
@@ -6297,8 +6542,6 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
* remains pending.
*/
- memset(&cmd, 0, sizeof(cmd));
-
hci_dev_lock(hdev);
rcu_read_lock();
@@ -6335,7 +6578,7 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
goto done;
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
+ struct hci_cis *cis = &cmd->cis[aux_num_cis];
if (hci_conn_check_create_cis(conn) ||
conn->iso_qos.ucast.cig != cig)
@@ -6344,25 +6587,25 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
cis->acl_handle = cpu_to_le16(conn->parent->handle);
cis->cis_handle = cpu_to_le16(conn->handle);
- cmd.cp.num_cis++;
+ aux_num_cis++;
- if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
+ if (aux_num_cis >= cmd->num_cis)
break;
}
+ cmd->num_cis = aux_num_cis;
done:
rcu_read_unlock();
hci_dev_unlock(hdev);
- if (!cmd.cp.num_cis)
+ if (!aux_num_cis)
return 0;
/* Wait for HCI_LE_CIS_Established */
return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
- sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
- cmd.cp.num_cis, &cmd,
- HCI_EVT_LE_CIS_ESTABLISHED,
+ struct_size(cmd, cis, cmd->num_cis),
+ cmd, HCI_EVT_LE_CIS_ESTABLISHED,
conn->conn_timeout, NULL);
}
@@ -6414,7 +6657,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev))
+ if (ll_privacy_capable(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -6484,3 +6727,143 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
UINT_PTR(instance), NULL);
}
+
+static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn = data;
+ struct inquiry_entry *ie;
+ struct hci_cp_create_conn cp;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ /* Many controllers disallow HCI Create Connection while it is doing
+ * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
+ * Connection. This may cause the MGMT discovering state to become false
+ * without user space's request but it is okay since the MGMT Discovery
+ * APIs do not promise that discovery should be done forever. Instead,
+ * the user space monitors the status of MGMT discovering and it may
+ * request for discovery again when this flag becomes false.
+ */
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+ NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
+ }
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->role = HCI_ROLE_MASTER;
+
+ conn->attempt++;
+
+ conn->link_policy = hdev->link_policy;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
+ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+ cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+ cp.pscan_mode = ie->data.pscan_mode;
+ cp.clock_offset = ie->data.clock_offset |
+ cpu_to_le16(0x8000);
+ }
+
+ memcpy(conn->dev_class, ie->data.dev_class, 3);
+ }
+
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+ cp.role_switch = 0x01;
+ else
+ cp.role_switch = 0x00;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
+ sizeof(cp), &cp,
+ HCI_EV_CONN_COMPLETE,
+ conn->conn_timeout, NULL);
+}
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
+ NULL);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!hci_conn_valid(hdev, conn))
+ goto done;
+
+ if (!err) {
+ hci_connect_le_scan_cleanup(conn, 0x00);
+ goto done;
+ }
+
+ /* Check if connection is still pending */
+ if (conn != hci_lookup_le_connect(hdev))
+ goto done;
+
+ /* Flush to make sure we send create conn cancel command if needed */
+ flush_delayed_work(&conn->le_conn_timeout);
+ hci_conn_failed(conn, bt_status(err));
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
+ create_le_conn_complete);
+}
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->state != BT_OPEN)
+ return -EINVAL;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev,
+ hci_acl_create_conn_sync,
+ conn, NULL);
+ case LE_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
+ conn, create_le_conn_complete);
+ }
+
+ return -ENOENT;
+}
+
+int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_conn_update cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
+ cp.conn_latency = cpu_to_le16(params->conn_latency);
+ cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
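
hci_le_conn_update_sync() issues HCI_OP_LE_CONN_UPDATE using the values stored
in hci_conn_params. A hedged caller sketch with illustrative values (connection
interval in 1.25 ms units, supervision timeout in 10 ms units):

	struct hci_conn_params params = {
		.conn_min_interval   = 0x0006,	/* 7.5 ms */
		.conn_max_interval   = 0x0018,	/* 30 ms */
		.conn_latency        = 0x0000,
		.supervision_timeout = 0x0190,	/* 4 s */
	};
	int err;

	err = hci_le_conn_update_sync(hdev, conn, &params);
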
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 367e32fe30eb..041ce9adc378 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -21,16 +21,6 @@ static const struct device_type bt_link = {
.release = bt_link_release,
};
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "rfcomm", 6);
-}
-
void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -73,10 +63,13 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
return;
}
+ /* If there are devices using the connection as parent, reset it to NULL
+ * before unregistering the device.
+ */
while (1) {
struct device *dev;
- dev = device_find_child(&conn->dev, NULL, __match_tty);
+ dev = device_find_any_child(&conn->dev);
if (!dev)
break;
device_move(dev, NULL, DPM_ORDER_DEV_LAST);
@@ -97,9 +90,28 @@ static void bt_host_release(struct device *dev)
module_put(THIS_MODULE);
}
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hci_dev *hdev = to_hci_dev(dev);
+
+ if (hdev->reset)
+ hdev->reset(hdev);
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *bt_host_attrs[] = {
+ &dev_attr_reset.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(bt_host);
+
static const struct device_type bt_host = {
.name = "host",
.release = bt_host_release,
+ .groups = bt_host_groups,
};
void hci_init_sysfs(struct hci_dev *hdev)
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 6746be07e222..e08aae35351a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT_BREDR && INPUT && HID_SUPPORT
- select HID
+ depends on BT_BREDR && HID
help
HIDP (Human Interface Device Protocol) is a transport layer
for HID reports. HIDP is required for the Bluetooth Human
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 04f6572d35f1..0cb52a3308ba 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -3,7 +3,7 @@
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2022 Intel Corporation
- * Copyright 2023 NXP
+ * Copyright 2023-2024 NXP
*/
#include <linux/module.h>
@@ -35,6 +35,7 @@ struct iso_conn {
struct sk_buff *rx_skb;
__u32 rx_len;
__u16 tx_sn;
+ struct kref ref;
};
#define iso_conn_lock(c) spin_lock(&(c)->lock)
@@ -54,7 +55,6 @@ static void iso_sock_kill(struct sock *sk);
enum {
BT_SK_BIG_SYNC,
BT_SK_PA_SYNC,
- BT_SK_PA_SYNC_TERM,
};
struct iso_pinfo {
@@ -81,28 +81,86 @@ static bool check_ucast_qos(struct bt_iso_qos *qos);
static bool check_bcast_qos(struct bt_iso_qos *qos);
static bool iso_match_sid(struct sock *sk, void *data);
static bool iso_match_sync_handle(struct sock *sk, void *data);
+static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data);
static void iso_sock_disconn(struct sock *sk);
typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
- iso_sock_match_t match, void *data);
+static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
+ enum bt_sock_state state,
+ iso_sock_match_t match, void *data);
/* ---- ISO timers ---- */
#define ISO_CONN_TIMEOUT (HZ * 40)
#define ISO_DISCONN_TIMEOUT (HZ * 2)
+static void iso_conn_free(struct kref *ref)
+{
+ struct iso_conn *conn = container_of(ref, struct iso_conn, ref);
+
+ BT_DBG("conn %p", conn);
+
+ if (conn->sk)
+ iso_pi(conn->sk)->conn = NULL;
+
+ if (conn->hcon) {
+ conn->hcon->iso_data = NULL;
+ hci_conn_drop(conn->hcon);
+ }
+
+ /* Ensure no more work items will run since hci_conn has been dropped */
+ disable_delayed_work_sync(&conn->timeout_work);
+
+ kfree(conn);
+}
+
+static void iso_conn_put(struct iso_conn *conn)
+{
+ if (!conn)
+ return;
+
+ BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref));
+
+ kref_put(&conn->ref, iso_conn_free);
+}
+
+static struct iso_conn *iso_conn_hold_unless_zero(struct iso_conn *conn)
+{
+ if (!conn)
+ return NULL;
+
+ BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
+
+ if (!kref_get_unless_zero(&conn->ref))
+ return NULL;
+
+ return conn;
+}
+
+static struct sock *iso_sock_hold(struct iso_conn *conn)
+{
+ if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk))
+ return NULL;
+
+ sock_hold(conn->sk);
+
+ return conn->sk;
+}
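
The new kref makes iso_conn a reference-counted object:
iso_conn_hold_unless_zero() only succeeds while the connection is still live,
and the final iso_conn_put() frees it through iso_conn_free(). The timeout
handler below follows the canonical take-ref/use/put pattern; condensed, it
looks roughly like this:

	conn = iso_conn_hold_unless_zero(conn);
	if (!conn)
		return;		/* already being torn down */

	iso_conn_lock(conn);
	sk = iso_sock_hold(conn);	/* grabs a ref on conn->sk if linked */
	iso_conn_unlock(conn);
	iso_conn_put(conn);

	if (!sk)
		return;

	/* ... use sk ... */
	sock_put(sk);
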
+
static void iso_sock_timeout(struct work_struct *work)
{
struct iso_conn *conn = container_of(work, struct iso_conn,
timeout_work.work);
struct sock *sk;
+ conn = iso_conn_hold_unless_zero(conn);
+ if (!conn)
+ return;
+
iso_conn_lock(conn);
- sk = conn->sk;
- if (sk)
- sock_hold(sk);
+ sk = iso_sock_hold(conn);
iso_conn_unlock(conn);
+ iso_conn_put(conn);
if (!sk)
return;
@@ -140,9 +198,14 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
{
struct iso_conn *conn = hcon->iso_data;
+ conn = iso_conn_hold_unless_zero(conn);
if (conn) {
- if (!conn->hcon)
+ if (!conn->hcon) {
+ iso_conn_lock(conn);
conn->hcon = hcon;
+ iso_conn_unlock(conn);
+ }
+ iso_conn_put(conn);
return conn;
}
@@ -150,6 +213,7 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
if (!conn)
return NULL;
+ kref_init(&conn->ref);
spin_lock_init(&conn->lock);
INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout);
@@ -169,17 +233,15 @@ static void iso_chan_del(struct sock *sk, int err)
struct sock *parent;
conn = iso_pi(sk)->conn;
+ iso_pi(sk)->conn = NULL;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
iso_conn_lock(conn);
conn->sk = NULL;
- iso_pi(sk)->conn = NULL;
iso_conn_unlock(conn);
-
- if (conn->hcon)
- hci_conn_drop(conn->hcon);
+ iso_conn_put(conn);
}
sk->sk_state = BT_CLOSED;
@@ -196,22 +258,12 @@ static void iso_chan_del(struct sock *sk, int err)
sock_set_flag(sk, SOCK_ZAPPED);
}
-static bool iso_match_conn_sync_handle(struct sock *sk, void *data)
-{
- struct hci_conn *hcon = data;
-
- if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags))
- return false;
-
- return hcon->sync_handle == iso_pi(sk)->sync_handle;
-}
-
static void iso_conn_del(struct hci_conn *hcon, int err)
{
struct iso_conn *conn = hcon->iso_data;
struct sock *sk;
- struct sock *parent;
+ conn = iso_conn_hold_unless_zero(conn);
if (!conn)
return;
@@ -219,43 +271,20 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
/* Kill socket */
iso_conn_lock(conn);
- sk = conn->sk;
- if (sk)
- sock_hold(sk);
+ sk = iso_sock_hold(conn);
iso_conn_unlock(conn);
+ iso_conn_put(conn);
- if (sk) {
- lock_sock(sk);
-
- /* While a PA sync hcon is in the process of closing,
- * mark parent socket with a flag, so that any residual
- * BIGInfo adv reports that arrive before PA sync is
- * terminated are not processed anymore.
- */
- if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_conn_sync_handle,
- hcon);
-
- if (parent) {
- set_bit(BT_SK_PA_SYNC_TERM,
- &iso_pi(parent)->flags);
- sock_put(parent);
- }
- }
-
- iso_sock_clear_timer(sk);
- iso_chan_del(sk, err);
- release_sock(sk);
- sock_put(sk);
+ if (!sk) {
+ iso_conn_put(conn);
+ return;
}
- /* Ensure no more work items will run before freeing conn. */
- cancel_delayed_work_sync(&conn->timeout_work);
-
- hcon->iso_data = NULL;
- kfree(conn);
+ lock_sock(sk);
+ iso_sock_clear_timer(sk);
+ iso_chan_del(sk, err);
+ release_sock(sk);
+ sock_put(sk);
}
static int __iso_chan_add(struct iso_conn *conn, struct sock *sk,
@@ -581,22 +610,23 @@ static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc,
return NULL;
}
-/* Find socket listening:
+/* Find socket in given state:
* source bdaddr (Unicast)
* destination bdaddr (Broadcast only)
* match func - pass NULL to ignore
* match func data - pass -1 to ignore
* Returns closest match.
*/
-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
- iso_sock_match_t match, void *data)
+static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
+ enum bt_sock_state state,
+ iso_sock_match_t match, void *data)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&iso_sk_list.lock);
sk_for_each(sk, &iso_sk_list.head) {
- if (sk->sk_state != BT_LISTEN)
+ if (sk->sk_state != state)
continue;
/* Match Broadcast destination */
@@ -674,6 +704,8 @@ static void iso_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ iso_conn_put(iso_pi(sk)->conn);
+
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
@@ -690,11 +722,8 @@ static void iso_sock_cleanup_listen(struct sock *parent)
iso_sock_kill(sk);
}
- /* If listening socket stands for a PA sync connection,
- * properly disconnect the hcon and socket.
- */
- if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon &&
- test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) {
+ /* If listening socket has a hcon, properly disconnect it */
+ if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon) {
iso_sock_disconn(parent);
return;
}
@@ -736,6 +765,7 @@ static void iso_sock_disconn(struct sock *sk)
*/
if (bis_sk) {
hcon->state = BT_OPEN;
+ hcon->iso_data = NULL;
iso_pi(sk)->conn->hcon = NULL;
iso_sock_clear_timer(sk);
iso_chan_del(sk, bt_to_errno(hcon->abort_reason));
@@ -745,7 +775,6 @@ static void iso_sock_disconn(struct sock *sk)
}
sk->sk_state = BT_DISCONN;
- iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT);
iso_conn_lock(iso_pi(sk)->conn);
hci_conn_drop(iso_pi(sk)->conn->hcon);
iso_pi(sk)->conn->hcon = NULL;
@@ -837,10 +866,10 @@ static struct bt_iso_qos default_qos = {
.bcode = {0x00},
.options = 0x00,
.skip = 0x0000,
- .sync_timeout = 0x4000,
+ .sync_timeout = BT_ISO_SYNC_TIMEOUT,
.sync_cte_type = 0x00,
.mse = 0x00,
- .timeout = 0x4000,
+ .timeout = BT_ISO_SYNC_TIMEOUT,
},
};
@@ -860,6 +889,7 @@ static struct sock *iso_sock_alloc(struct net *net, struct socket *sock,
iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
iso_pi(sk)->qos = default_qos;
+ iso_pi(sk)->sync_handle = -1;
bt_sock_link(&iso_sk_list, sk);
return sk;
@@ -907,7 +937,6 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type;
- iso_pi(sk)->sync_handle = -1;
if (sa->iso_bc->bc_sid > 0x0f)
return -EINVAL;
@@ -984,7 +1013,8 @@ static int iso_sock_bind(struct socket *sock, struct sockaddr *addr,
/* Allow the user to bind a PA sync socket to a number
* of BISes to sync to.
*/
- if (sk->sk_state == BT_CONNECT2 &&
+ if ((sk->sk_state == BT_CONNECT2 ||
+ sk->sk_state == BT_CONNECTED) &&
test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
err = iso_sock_bind_pa_sk(sk, sa, addr_len);
goto done;
@@ -1076,6 +1106,8 @@ static int iso_listen_bis(struct sock *sk)
{
struct hci_dev *hdev;
int err = 0;
+ struct iso_conn *conn;
+ struct hci_conn *hcon;
BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src,
&iso_pi(sk)->dst, iso_pi(sk)->bc_sid);
@@ -1096,18 +1128,41 @@ static int iso_listen_bis(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;
+ hci_dev_lock(hdev);
+ lock_sock(sk);
+
/* Fail if user set invalid QoS */
if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) {
iso_pi(sk)->qos = default_qos;
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
- err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
- le_addr_type(iso_pi(sk)->dst_type),
- iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ hcon = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
+ le_addr_type(iso_pi(sk)->dst_type),
+ iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
+ }
- hci_dev_put(hdev);
+ conn = iso_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto unlock;
+ }
+ err = iso_chan_add(conn, sk, NULL);
+ if (err) {
+ hci_conn_drop(hcon);
+ goto unlock;
+ }
+
+unlock:
+ release_sock(sk);
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
return err;
}
@@ -1134,6 +1189,7 @@ static int iso_sock_listen(struct socket *sock, int backlog)
BT_DBG("sk %p backlog %d", sk, backlog);
+ sock_hold(sk);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
@@ -1146,10 +1202,16 @@ static int iso_sock_listen(struct socket *sock, int backlog)
goto done;
}
- if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
+ if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) {
err = iso_listen_cis(sk);
- else
+ } else {
+ /* Drop sock lock to avoid potential
+ * deadlock with the hdev lock.
+ */
+ release_sock(sk);
err = iso_listen_bis(sk);
+ lock_sock(sk);
+ }
if (err)
goto done;
@@ -1161,20 +1223,25 @@ static int iso_sock_listen(struct socket *sock, int backlog)
done:
release_sock(sk);
+ sock_put(sk);
return err;
}
static int iso_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
- lock_sock(sk);
+ /* Use explicit nested locking to avoid lockdep warnings generated
+ * because the parent socket and the child socket are locked on the
+ * same thread.
+ */
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
@@ -1203,7 +1270,7 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1214,6 +1281,42 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
BT_DBG("new socket %p", ch);
+ /* A Broadcast Sink might require BIG sync to be terminated
+ * and re-established multiple times, while keeping the same
+ * PA sync handle active. To allow this, once all BIS
+ * connections have been accepted on a PA sync parent socket,
+ * "reset" socket state, to allow future BIG re-sync procedures.
+ */
+ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+ /* Iterate through the list of bound BIS indices
+ * and clear each BIS as they are accepted by the
+ * user space, one by one.
+ */
+ for (int i = 0; i < iso_pi(sk)->bc_num_bis; i++) {
+ if (iso_pi(sk)->bc_bis[i] > 0) {
+ iso_pi(sk)->bc_bis[i] = 0;
+ iso_pi(sk)->bc_num_bis--;
+ break;
+ }
+ }
+
+ if (iso_pi(sk)->bc_num_bis == 0) {
+ /* Once the last BIS was accepted, reset parent
+ * socket parameters to mark that the listening
+ * process for BIS connections has been completed:
+ *
+ * 1. Reset the DEFER setup flag on the parent sk.
+ * 2. Clear the flag marking that the BIG create
+ * sync command is pending.
+ * 3. Transition socket state from BT_LISTEN to
+ * BT_CONNECTED.
+ */
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ clear_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags);
+ sk->sk_state = BT_CONNECTED;
+ }
+ }
+
done:
release_sock(sk);
return err;
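
Each accepted BIS connection consumes one entry from the parent's bound BIS list (bc_bis/bc_num_bis in the code above); once the last one is gone the parent is flipped back into a re-listenable state. A toy version of that bookkeeping, with invented struct and field names:

	#include <stdio.h>

	#define MAX_BIS 31

	struct parent {
		unsigned char bis[MAX_BIS];	/* bound BIS indices, 0 = unused slot */
		int num_bis;
		int resynced;			/* set once all BISes were accepted */
	};

	/* Clear one bound BIS per accept; reset the parent when none are left. */
	static void consume_one_bis(struct parent *p)
	{
		for (int i = 0; i < MAX_BIS; i++) {
			if (p->bis[i]) {
				p->bis[i] = 0;
				p->num_bis--;
				break;
			}
		}

		if (p->num_bis == 0)
			p->resynced = 1;	/* the kernel resets DEFER_SETUP and state here */
	}

	int main(void)
	{
		struct parent p = { .bis = { 1, 2 }, .num_bis = 2 };

		consume_one_bis(&p);
		consume_one_bis(&p);
		printf("resynced=%d\n", p.resynced);
		return 0;
	}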
@@ -1264,7 +1367,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
return -ENOTCONN;
}
- mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu;
+ mtu = iso_pi(sk)->conn->hcon->mtu;
release_sock(sk);
@@ -1338,6 +1441,14 @@ static void iso_conn_big_sync(struct sock *sk)
if (!hdev)
return;
+ /* hci_le_big_create_sync requires hdev lock to be held, since
+ * it enqueues the HCI LE BIG Create Sync command via
+ * hci_cmd_sync_queue_once, which checks hdev flags that might
+ * change.
+ */
+ hci_dev_lock(hdev);
+ lock_sock(sk);
+
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
&iso_pi(sk)->qos,
@@ -1348,6 +1459,9 @@ static void iso_conn_big_sync(struct sock *sk)
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
}
+
+ release_sock(sk);
+ hci_dev_unlock(hdev);
}
static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -1355,30 +1469,57 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct iso_pinfo *pi = iso_pi(sk);
+ bool early_ret = false;
+ int err = 0;
BT_DBG("sk %p", sk);
if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sock_hold(sk);
lock_sock(sk);
+
switch (sk->sk_state) {
case BT_CONNECT2:
- if (pi->conn->hcon &&
- test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
+ if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+ release_sock(sk);
iso_conn_big_sync(sk);
+ lock_sock(sk);
+
sk->sk_state = BT_LISTEN;
} else {
iso_conn_defer_accept(pi->conn->hcon);
sk->sk_state = BT_CONFIG;
}
- release_sock(sk);
- return 0;
+
+ early_ret = true;
+ break;
+ case BT_CONNECTED:
+ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+ release_sock(sk);
+ iso_conn_big_sync(sk);
+ lock_sock(sk);
+
+ sk->sk_state = BT_LISTEN;
+ early_ret = true;
+ }
+
+ break;
case BT_CONNECT:
release_sock(sk);
- return iso_connect_cis(sk);
+ err = iso_connect_cis(sk);
+ lock_sock(sk);
+
+ early_ret = true;
+ break;
default:
- release_sock(sk);
break;
}
+
+ release_sock(sk);
+ sock_put(sk);
+
+ if (early_ret)
+ return err;
}
return bt_sock_recvmsg(sock, msg, len, flags);
@@ -1430,8 +1571,8 @@ static bool check_ucast_qos(struct bt_iso_qos *qos)
static bool check_bcast_qos(struct bt_iso_qos *qos)
{
- if (qos->bcast.sync_factor == 0x00)
- return false;
+ if (!qos->bcast.sync_factor)
+ qos->bcast.sync_factor = 0x01;
if (qos->bcast.packing > 0x01)
return false;
@@ -1454,6 +1595,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
if (qos->bcast.skip > 0x01f3)
return false;
+ if (!qos->bcast.sync_timeout)
+ qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
+
if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000)
return false;
@@ -1463,6 +1607,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
if (qos->bcast.mse > 0x1f)
return false;
+ if (!qos->bcast.timeout)
+ qos->bcast.timeout = BT_ISO_SYNC_TIMEOUT;
+
if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000)
return false;
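
Worked example for the defaults above (values are illustrative): a zero sync_timeout or timeout is replaced by BT_ISO_SYNC_TIMEOUT before the range check, while anything outside 0x000a..0x4000 is still rejected. Since these fields are expressed in units of 10 ms per the Core specification, the accepted window corresponds to roughly 100 ms up to about 163.8 s.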
@@ -1473,7 +1620,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int len, err = 0;
+ int err = 0;
struct bt_iso_qos qos = default_qos;
u32 opt;
@@ -1488,10 +1635,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -1500,10 +1646,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
@@ -1513,22 +1658,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
case BT_ISO_QOS:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
- sk->sk_state != BT_CONNECT2) {
+ sk->sk_state != BT_CONNECT2 &&
+ (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags) ||
+ sk->sk_state != BT_CONNECTED)) {
err = -EINVAL;
break;
}
- len = min_t(unsigned int, sizeof(qos), optlen);
-
- if (copy_from_sockptr(&qos, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) {
- err = -EINVAL;
+ err = copy_safe_from_sockptr(&qos, sizeof(qos), optval, optlen);
+ if (err)
break;
- }
iso_pi(sk)->qos = qos;
iso_pi(sk)->qos_user_set = true;
@@ -1543,18 +1682,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
}
if (optlen > sizeof(iso_pi(sk)->base)) {
- err = -EOVERFLOW;
+ err = -EINVAL;
break;
}
- len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen);
-
- if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(iso_pi(sk)->base, optlen, optval,
+ optlen);
+ if (err)
break;
- }
- iso_pi(sk)->base_len = len;
+ iso_pi(sk)->base_len = optlen;
break;
@@ -1721,11 +1858,6 @@ static void iso_sock_ready(struct sock *sk)
release_sock(sk);
}
-struct iso_list_data {
- struct hci_conn *hcon;
- int count;
-};
-
static bool iso_match_big(struct sock *sk, void *data)
{
struct hci_evt_le_big_sync_estabilished *ev = data;
@@ -1733,6 +1865,13 @@ static bool iso_match_big(struct sock *sk, void *data)
return ev->handle == iso_pi(sk)->qos.bcast.big;
}
+static bool iso_match_big_hcon(struct sock *sk, void *data)
+{
+ struct hci_conn *hcon = data;
+
+ return hcon->iso_qos.bcast.big == iso_pi(sk)->qos.bcast.big;
+}
+
static bool iso_match_pa_sync_flag(struct sock *sk, void *data)
{
return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
@@ -1744,7 +1883,7 @@ static void iso_conn_ready(struct iso_conn *conn)
struct sock *sk = conn->sk;
struct hci_ev_le_big_sync_estabilished *ev = NULL;
struct hci_ev_le_pa_sync_established *ev2 = NULL;
- struct hci_evt_le_big_info_adv_report *ev3 = NULL;
+ struct hci_ev_le_per_adv_report *ev3 = NULL;
struct hci_conn *hcon;
BT_DBG("conn %p", conn);
@@ -1756,38 +1895,51 @@ static void iso_conn_ready(struct iso_conn *conn)
if (!hcon)
return;
- if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) ||
- test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
+ if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags)) {
+ /* A BIS slave hcon is notified to the ISO layer
+ * after the Command Complete for the LE Setup
+ * ISO Data Path command is received. Get the
+ * parent socket that matches the hcon BIG handle.
+ */
+ parent = iso_get_sock(&hcon->src, &hcon->dst,
+ BT_LISTEN, iso_match_big_hcon,
+ hcon);
+ } else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
ev = hci_recv_event_data(hcon->hdev,
HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
/* Get reference to PA sync parent socket, if it exists */
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_pa_sync_flag, NULL);
+ parent = iso_get_sock(&hcon->src, &hcon->dst,
+ BT_LISTEN,
+ iso_match_pa_sync_flag,
+ NULL);
if (!parent && ev)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_big, ev);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_big, ev);
} else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
ev2 = hci_recv_event_data(hcon->hdev,
HCI_EV_LE_PA_SYNC_ESTABLISHED);
if (ev2)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_sid, ev2);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_sid, ev2);
} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
ev3 = hci_recv_event_data(hcon->hdev,
- HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ HCI_EV_LE_PER_ADV_REPORT);
if (ev3)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_sync_handle, ev3);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_sync_handle_pa_report,
+ ev3);
}
if (!parent)
- parent = iso_get_sock_listen(&hcon->src,
- BDADDR_ANY, NULL, NULL);
+ parent = iso_get_sock(&hcon->src, BDADDR_ANY,
+ BT_LISTEN, NULL, NULL);
if (!parent)
return;
@@ -1819,12 +1971,10 @@ static void iso_conn_ready(struct iso_conn *conn)
if (!bacmp(&hcon->dst, BDADDR_ANY)) {
bacpy(&hcon->dst, &iso_pi(parent)->dst);
hcon->dst_type = iso_pi(parent)->dst_type;
- hcon->sync_handle = iso_pi(parent)->sync_handle;
}
if (ev3) {
iso_pi(sk)->qos = iso_pi(parent)->qos;
- iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
hcon->iso_qos = iso_pi(sk)->qos;
iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
@@ -1889,7 +2039,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
struct hci_evt_le_big_info_adv_report *ev2;
struct hci_ev_le_per_adv_report *ev3;
struct sock *sk;
- int lm = 0;
bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
@@ -1909,8 +2058,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
*/
ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED);
if (ev1) {
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid,
- ev1);
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+ iso_match_sid, ev1);
if (sk && !ev1->status)
iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle);
@@ -1919,32 +2068,37 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT);
if (ev2) {
- /* Try to get PA sync listening socket, if it exists */
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_pa_sync_flag, NULL);
-
- if (!sk) {
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_sync_handle, ev2);
-
- /* If PA Sync is in process of terminating,
- * do not handle any more BIGInfo adv reports.
- */
-
- if (sk && test_bit(BT_SK_PA_SYNC_TERM,
- &iso_pi(sk)->flags))
- return lm;
+ /* Check if BIGInfo report has already been handled */
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECTED,
+ iso_match_sync_handle, ev2);
+ if (sk) {
+ sock_put(sk);
+ sk = NULL;
+ goto done;
}
+ /* Try to get PA sync socket, if it exists */
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECT2,
+ iso_match_sync_handle, ev2);
+ if (!sk)
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr,
+ BT_LISTEN,
+ iso_match_sync_handle,
+ ev2);
+
if (sk) {
int err;
+ struct hci_conn *hcon = iso_pi(sk)->conn->hcon;
+
+ iso_pi(sk)->qos.bcast.encryption = ev2->encryption;
if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
iso_pi(sk)->bc_num_bis = ev2->num_bis;
if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
- err = hci_le_big_create_sync(hdev, NULL,
+ err = hci_le_big_create_sync(hdev,
+ hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,
@@ -1957,37 +2111,80 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
}
}
}
+
+ goto done;
}
ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
if (ev3) {
- size_t base_len = ev3->length;
+ size_t base_len = 0;
u8 *base;
+ struct hci_conn *hcon;
+
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+ iso_match_sync_handle_pa_report, ev3);
+ if (!sk)
+ goto done;
+
+ hcon = iso_pi(sk)->conn->hcon;
+ if (!hcon)
+ goto done;
+
+ if (ev3->data_status == LE_PA_DATA_TRUNCATED) {
+ /* The controller was unable to retrieve PA data. */
+ memset(hcon->le_per_adv_data, 0,
+ HCI_MAX_PER_AD_TOT_LEN);
+ hcon->le_per_adv_data_len = 0;
+ hcon->le_per_adv_data_offset = 0;
+ goto done;
+ }
+
+ if (hcon->le_per_adv_data_offset + ev3->length >
+ HCI_MAX_PER_AD_TOT_LEN)
+ goto done;
+
+ memcpy(hcon->le_per_adv_data + hcon->le_per_adv_data_offset,
+ ev3->data, ev3->length);
+ hcon->le_per_adv_data_offset += ev3->length;
+
+ if (ev3->data_status == LE_PA_DATA_COMPLETE) {
+ /* All PA data has been received. */
+ hcon->le_per_adv_data_len =
+ hcon->le_per_adv_data_offset;
+ hcon->le_per_adv_data_offset = 0;
+
+ /* Extract BASE */
+ base = eir_get_service_data(hcon->le_per_adv_data,
+ hcon->le_per_adv_data_len,
+ EIR_BAA_SERVICE_UUID,
+ &base_len);
+
+ if (!base || base_len > BASE_MAX_LENGTH)
+ goto done;
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_sync_handle_pa_report, ev3);
- base = eir_get_service_data(ev3->data, ev3->length,
- EIR_BAA_SERVICE_UUID, &base_len);
- if (base && sk && base_len <= sizeof(iso_pi(sk)->base)) {
memcpy(iso_pi(sk)->base, base, base_len);
iso_pi(sk)->base_len = base_len;
+ } else {
+ /* This is a PA data fragment. Keep le_per_adv_data_len set to 0
+ * until all data has been reassembled.
+ */
+ hcon->le_per_adv_data_len = 0;
}
} else {
- sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
+ sk = iso_get_sock(&hdev->bdaddr, BDADDR_ANY,
+ BT_LISTEN, NULL, NULL);
}
done:
if (!sk)
- return lm;
-
- lm |= HCI_LM_ACCEPT;
+ return 0;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
*flags |= HCI_PROTO_DEFER;
sock_put(sk);
- return lm;
+ return HCI_LM_ACCEPT;
}
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
@@ -2258,13 +2455,9 @@ int iso_init(void)
hci_register_cb(&iso_cb);
- if (IS_ERR_OR_NULL(bt_debugfs))
- return 0;
-
- if (!iso_debugfs) {
+ if (!IS_ERR_OR_NULL(bt_debugfs))
iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
NULL, &iso_debugfs_fops);
- }
iso_inited = true;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 656f49b299d2..c27ea70f71e1 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -39,8 +39,6 @@
#include <net/bluetooth/l2cap.h>
#include "smp.h"
-#include "a2mp.h"
-#include "amp.h"
#define LE_FLOWCTL_MAX_CREDITS 65535
@@ -121,7 +119,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
{
struct l2cap_chan *c;
- mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c) {
/* Only lock if chan reference is not 0 */
@@ -129,7 +126,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
if (c)
l2cap_chan_lock(c);
}
- mutex_unlock(&conn->chan_lock);
return c;
}
@@ -142,7 +138,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
{
struct l2cap_chan *c;
- mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_dcid(conn, cid);
if (c) {
/* Only lock if chan reference is not 0 */
@@ -150,7 +145,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
if (c)
l2cap_chan_lock(c);
}
- mutex_unlock(&conn->chan_lock);
return c;
}
@@ -167,24 +161,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
return NULL;
}
-static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
- u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- if (c) {
- /* Only lock if chan reference is not 0 */
- c = l2cap_chan_hold_unless_zero(c);
- if (c)
- l2cap_chan_lock(c);
- }
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
u8 src_type)
{
@@ -435,7 +411,10 @@ static void l2cap_chan_timeout(struct work_struct *work)
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
- mutex_lock(&conn->chan_lock);
+ if (!conn)
+ return;
+
+ mutex_lock(&conn->lock);
/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
* this work. No need to call l2cap_chan_hold(chan) here again.
*/
@@ -456,7 +435,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
}
struct l2cap_chan *l2cap_chan_create(void)
@@ -474,6 +453,9 @@ struct l2cap_chan *l2cap_chan_create(void)
/* Set default lock nesting level */
atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+ /* Available receive buffer space is initially unknown */
+ chan->rx_avail = -1;
+
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -555,6 +537,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
+static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
+{
+ size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
+
+ if (chan->mps == 0)
+ return 0;
+
+ /* If we don't know the available space in the receiver buffer, give
+ * enough credits for a full packet.
+ */
+ if (chan->rx_avail == -1)
+ return (chan->imtu / chan->mps) + 1;
+
+ /* If we know how much space is available in the receive buffer, give
+ * out as many credits as would fill the buffer.
+ */
+ if (chan->rx_avail <= sdu_len)
+ return 0;
+
+ return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
+}
+
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
chan->sdu = NULL;
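
For a rough feel of the new credit formula (the numbers here are made up): with imtu = 512 and mps = 64, an unknown rx_avail (-1) still yields 512/64 + 1 = 9 credits, enough for one full SDU; once the owner reports rx_avail = 1000 while a partial SDU of 100 bytes is queued, the grant becomes DIV_ROUND_UP(1000 - 100, 64) = 15 credits, i.e. exactly what fits in the remaining receive buffer space.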
@@ -563,8 +567,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
chan->tx_credits = tx_credits;
/* Derive MPS from connection MTU to stop HCI fragmentation */
chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
- /* Give enough credits for a full packet */
- chan->rx_credits = (chan->imtu / chan->mps) + 1;
+ chan->rx_credits = l2cap_le_rx_credits(chan);
skb_queue_head_init(&chan->tx_q);
}
@@ -576,7 +579,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
/* L2CAP implementations shall support a minimum MPS of 64 octets */
if (chan->mps < L2CAP_ECRED_MIN_MPS) {
chan->mps = L2CAP_ECRED_MIN_MPS;
- chan->rx_credits = (chan->imtu / chan->mps) + 1;
+ chan->rx_credits = l2cap_le_rx_credits(chan);
}
}
@@ -629,14 +632,15 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_hold(conn->hcon);
- list_add(&chan->list, &conn->chan_l);
+ /* Append to the list since the order matters for ECRED */
+ list_add_tail(&chan->list, &conn->chan_l);
}
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
__l2cap_chan_add(conn, chan);
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
}
void l2cap_chan_del(struct l2cap_chan *chan, int err)
@@ -651,7 +655,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->ops->teardown(chan, err);
if (conn) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
/* Delete from channel list */
list_del(&chan->list);
@@ -666,16 +669,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (chan->chan_type != L2CAP_CHAN_FIXED ||
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_drop(conn->hcon);
-
- if (mgr && mgr->bredr_chan == chan)
- mgr->bredr_chan = NULL;
- }
-
- if (chan->hs_hchan) {
- struct hci_chan *hs_hchan = chan->hs_hchan;
-
- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
- amp_disconnect_logical_link(hs_hchan);
}
if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
@@ -735,9 +728,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
if (!conn)
return;
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
__l2cap_chan_list(conn, func, data);
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
@@ -749,7 +742,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -758,7 +751,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
l2cap_chan_unlock(chan);
}
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
}
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
@@ -952,6 +945,16 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
+static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
+ u8 flags)
+{
+ /* Check if the hcon still valid before attempting to send */
+ if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
+ hci_send_acl(conn->hchan, skb, flags);
+ else
+ kfree_skb(skb);
+}
+
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data)
{
@@ -974,13 +977,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
skb->priority = HCI_PRIO_MAX;
- hci_send_acl(conn->hchan, skb, flags);
-}
-
-static bool __chan_is_moving(struct l2cap_chan *chan)
-{
- return chan->move_state != L2CAP_MOVE_STABLE &&
- chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
+ l2cap_send_acl(conn, skb, flags);
}
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -991,15 +988,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
skb->priority);
- if (chan->hs_hcon && !__chan_is_moving(chan)) {
- if (chan->hs_hchan)
- hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
- else
- kfree_skb(skb);
-
- return;
- }
-
/* Use NO_FLUSH for LE links (where this is the only option) or
* if the BR/EDR link supports it and flushing has not been
* explicitly requested (through FLAG_FLUSHABLE).
@@ -1180,9 +1168,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
if (!control->sframe)
return;
- if (__chan_is_moving(chan))
- return;
-
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
!control->poll)
control->final = 1;
@@ -1237,40 +1222,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
-static bool __amp_capable(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct hci_dev *hdev;
- bool amp_available = false;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->amp_type != AMP_TYPE_BREDR &&
- test_bit(HCI_UP, &hdev->flags)) {
- amp_available = true;
- break;
- }
- }
- read_unlock(&hci_dev_list_lock);
-
- if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
- return amp_available;
-
- return false;
-}
-
-static bool l2cap_check_efs(struct l2cap_chan *chan)
-{
- /* Check EFS parameters */
- return true;
-}
-
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1286,76 +1237,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
-{
- struct l2cap_create_chan_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
- req.amp_id = amp_id;
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
- sizeof(req), &req);
-}
-
-static void l2cap_move_setup(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
-
- BT_DBG("chan %p", chan);
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- chan->retry_count = 0;
- skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->l2cap.retries)
- bt_cb(skb)->l2cap.retries = 1;
- else
- break;
- }
-
- chan->expected_tx_seq = chan->buffer_seq;
-
- clear_bit(CONN_REJ_ACT, &chan->conn_state);
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- l2cap_seq_list_clear(&chan->retrans_list);
- l2cap_seq_list_clear(&chan->srej_list);
- skb_queue_purge(&chan->srej_q);
-
- chan->tx_state = L2CAP_TX_STATE_XMIT;
- chan->rx_state = L2CAP_RX_STATE_MOVE;
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-}
-
-static void l2cap_move_done(struct l2cap_chan *chan)
-{
- u8 move_role = chan->move_role;
- BT_DBG("chan %p", chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- switch (move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
- chan->rx_state = L2CAP_RX_STATE_WAIT_F;
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- chan->rx_state = L2CAP_RX_STATE_WAIT_P;
- break;
- }
-}
-
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
/* The channel may have already been flagged as connected in
@@ -1410,7 +1291,7 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
struct l2cap_ecred_conn_data {
struct {
- struct l2cap_ecred_conn_req req;
+ struct l2cap_ecred_conn_req_hdr req;
__le16 scid[5];
} __packed pdu;
struct l2cap_chan *chan;
@@ -1505,10 +1386,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
static void l2cap_start_connection(struct l2cap_chan *chan)
{
- if (__amp_capable(chan)) {
- BT_DBG("chan %p AMP capable: discover AMPs", chan);
- a2mp_discover_amp(chan);
- } else if (chan->conn->hcon->type == LE_LINK) {
+ if (chan->conn->hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else {
l2cap_send_conn_req(chan);
@@ -1611,11 +1489,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
__clear_ack_timer(chan);
}
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_state_change(chan, BT_DISCONN);
- return;
- }
-
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
@@ -1631,8 +1504,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
-
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -1701,8 +1572,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
l2cap_chan_unlock(chan);
}
-
- mutex_unlock(&conn->chan_lock);
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
@@ -1748,17 +1617,12 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (hcon->type == ACL_LINK)
l2cap_request_info(conn);
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1771,7 +1635,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_unlock(chan);
}
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
if (hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
@@ -1786,14 +1650,10 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
-
list_for_each_entry(chan, &conn->chan_l, list) {
if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
l2cap_chan_set_err(chan, err);
}
-
- mutex_unlock(&conn->chan_lock);
}
static void l2cap_info_timeout(struct work_struct *work)
@@ -1804,7 +1664,9 @@ static void l2cap_info_timeout(struct work_struct *work)
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
+ mutex_lock(&conn->lock);
l2cap_conn_start(conn);
+ mutex_unlock(&conn->lock);
}
/*
@@ -1896,6 +1758,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+ mutex_lock(&conn->lock);
+
kfree_skb(conn->rx_skb);
skb_queue_purge(&conn->pending_rx);
@@ -1914,8 +1778,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Force the connection to be immediately dropped */
hcon->disc_timeout = 0;
- mutex_lock(&conn->chan_lock);
-
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
l2cap_chan_hold(chan);
@@ -1929,15 +1791,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_put(chan);
}
- mutex_unlock(&conn->chan_lock);
-
- hci_chan_del(conn->hchan);
-
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
cancel_delayed_work_sync(&conn->info_timer);
- hcon->l2cap_data = NULL;
+ hci_chan_del(conn->hchan);
conn->hchan = NULL;
+
+ hcon->l2cap_data = NULL;
+ mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
}
@@ -2067,9 +1928,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
BT_DBG("chan %p, skbs %p", chan, skbs);
- if (__chan_is_moving(chan))
- return;
-
skb_queue_splice_tail_init(skbs, &chan->tx_q);
while (!skb_queue_empty(&chan->tx_q)) {
@@ -2112,9 +1970,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return 0;
- if (__chan_is_moving(chan))
- return 0;
-
while (chan->tx_send_head &&
chan->unacked_frames < chan->remote_tx_win &&
chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -2180,9 +2035,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- if (__chan_is_moving(chan))
- return;
-
while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
seq = l2cap_seq_list_pop(&chan->retrans_list);
@@ -2522,8 +2374,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = chan->conn->mtu;
/* Constrain PDU size for BR/EDR connections */
- if (!chan->hs_hcon)
- pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
if (chan->fcs)
@@ -3065,8 +2916,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
- mutex_lock(&conn->chan_lock);
-
list_for_each_entry(chan, &conn->chan_l, list) {
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@@ -3081,8 +2930,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
-
- mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
@@ -3287,11 +3134,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->tx_q);
- chan->local_amp_id = AMP_ID_BREDR;
- chan->move_id = AMP_ID_BREDR;
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
if (chan->mode != L2CAP_MODE_ERTM)
return 0;
@@ -3326,52 +3168,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
struct l2cap_conf_rfc *rfc)
{
- if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
- u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
-
- /* Class 1 devices have must have ERTM timeouts
- * exceeding the Link Supervision Timeout. The
- * default Link Supervision Timeout for AMP
- * controllers is 10 seconds.
- *
- * Class 1 devices use 0xffffffff for their
- * best-effort flush timeout, so the clamping logic
- * will result in a timeout that meets the above
- * requirement. ERTM timeouts are 16-bit values, so
- * the maximum timeout is 65.535 seconds.
- */
-
- /* Convert timeout to milliseconds and round */
- ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
-
- /* This is the recommended formula for class 2 devices
- * that start ERTM timers when packets are sent to the
- * controller.
- */
- ertm_to = 3 * ertm_to + 500;
-
- if (ertm_to > 0xffff)
- ertm_to = 0xffff;
-
- rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
- rfc->monitor_timeout = rfc->retrans_timeout;
- } else {
- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- }
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
@@ -3623,13 +3432,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
case L2CAP_CONF_EWS:
if (olen != 2)
break;
- if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -ECONNREFUSED;
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- set_bit(CONF_EWS_RECV, &chan->conf_state);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
- chan->remote_tx_win = val;
- break;
+ return -ECONNREFUSED;
default:
if (hint)
@@ -3957,7 +3760,7 @@ static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
struct l2cap_ecred_rsp_data {
struct {
- struct l2cap_ecred_conn_rsp rsp;
+ struct l2cap_ecred_conn_rsp_hdr rsp;
__le16 scid[L2CAP_ECRED_MAX_CID];
} __packed pdu;
int count;
@@ -3966,8 +3769,14 @@ struct l2cap_ecred_rsp_data {
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
struct l2cap_ecred_rsp_data *rsp = data;
+ struct l2cap_ecred_conn_rsp *rsp_flex =
+ container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
- if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ /* Check if the channel is for an outgoing connection or if it wasn't
+ * deferred, since in those cases it must be skipped.
+ */
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
+ !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
/* Reset ident so only one response is sent */
@@ -3975,7 +3784,7 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
/* Include all channels pending with the same ident */
if (!rsp->pdu.rsp.result)
- rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
+ rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
else
l2cap_chan_del(chan, ECONNRESET);
}
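
The rsp_flex step above exists because the on-stack PDU declares only the fixed response header plus a local CID array, while the flexible-array view is recovered through container_of() so that the dcid[] writes are bounds-checked against the right type. A compilable toy of the same trick; the struct names and the local container_of definition are illustrative, not the kernel's:

	#include <stddef.h>
	#include <stdio.h>

	/* Invented types that only mirror the hdr-plus-flex-array split. */
	struct rsp_hdr {
		unsigned short mtu, mps, credits, result;
	};

	struct rsp {			/* header followed by a flexible array */
		struct rsp_hdr hdr;
		unsigned short dcid[];
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct {
			struct rsp_hdr hdr;
			unsigned short dcid[5];	/* real storage for the flex view */
		} pdu = { .hdr = { .result = 0 } };

		/* Recover the flex-array view from a pointer to the embedded
		 * header, so the write below indexes dcid[] instead of being
		 * treated as an overflow of rsp_hdr.
		 */
		struct rsp *r = container_of(&pdu.hdr, struct rsp, hdr);

		r->dcid[0] = 0x0040;
		printf("dcid[0] = 0x%04x\n", pdu.dcid[0]);
		return 0;
	}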
@@ -4027,11 +3836,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- if (chan->hs_hcon)
- rsp_code = L2CAP_CREATE_CHAN_RSP;
- else
- rsp_code = L2CAP_CONN_RSP;
+ rsp_code = L2CAP_CONN_RSP;
BT_DBG("chan %p rsp_code %u", chan, rsp_code);
@@ -4126,13 +3931,12 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn,
return 0;
}
-static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u8 *data, u8 rsp_code, u8 amp_id)
+static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code)
{
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
- struct l2cap_chan *chan = NULL, *pchan;
+ struct l2cap_chan *chan = NULL, *pchan = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -4145,10 +3949,9 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
&conn->hcon->dst, ACL_LINK);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
- goto sendresp;
+ goto response;
}
- mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
/* Check if the ACL is secure enough (if not SDP) */
@@ -4190,7 +3993,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
- chan->local_amp_id = amp_id;
__l2cap_chan_add(conn, chan);
@@ -4208,17 +4010,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
} else {
- /* Force pending result for AMP controllers.
- * The connection will succeed after the
- * physical link is up.
- */
- if (amp_id == AMP_ID_BREDR) {
- l2cap_state_change(chan, BT_CONFIG);
- result = L2CAP_CR_SUCCESS;
- } else {
- l2cap_state_change(chan, BT_CONNECT2);
- result = L2CAP_CR_PEND;
- }
+ l2cap_state_change(chan, BT_CONFIG);
+ result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
}
} else {
@@ -4233,17 +4026,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
}
response:
- l2cap_chan_unlock(pchan);
- mutex_unlock(&conn->chan_lock);
- l2cap_chan_put(pchan);
-
-sendresp:
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
+ if (!pchan)
+ return;
+
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -4266,25 +4057,17 @@ sendresp:
chan->num_conf_req++;
}
- return chan;
+ l2cap_chan_unlock(pchan);
+ l2cap_chan_put(pchan);
}
static int l2cap_connect_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
- struct hci_dev *hdev = conn->hcon->hdev;
- struct hci_conn *hcon = conn->hcon;
-
if (cmd_len < sizeof(struct l2cap_conn_req))
return -EPROTO;
- hci_dev_lock(hdev);
- if (hci_dev_test_flag(hdev, HCI_MGMT) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
- mgmt_device_connected(hdev, hcon, NULL, 0);
- hci_dev_unlock(hdev);
-
- l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
+ l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
return 0;
}
@@ -4313,27 +4096,19 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
dcid, scid, result, status);
- mutex_lock(&conn->chan_lock);
-
if (scid) {
chan = __l2cap_get_chan_by_scid(conn, scid);
- if (!chan) {
- err = -EBADSLT;
- goto unlock;
- }
+ if (!chan)
+ return -EBADSLT;
} else {
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
- if (!chan) {
- err = -EBADSLT;
- goto unlock;
- }
+ if (!chan)
+ return -EBADSLT;
}
chan = l2cap_chan_hold_unless_zero(chan);
- if (!chan) {
- err = -EBADSLT;
- goto unlock;
- }
+ if (!chan)
+ return -EBADSLT;
err = 0;
@@ -4371,9 +4146,6 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
-unlock:
- mutex_unlock(&conn->chan_lock);
-
return err;
}
@@ -4516,10 +4288,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
/* check compatibility */
/* Send rsp for BR/EDR channel */
- if (!chan->hs_hcon)
- l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
- else
- chan->ident = cmd->ident;
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
}
unlock:
@@ -4571,15 +4340,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
}
- if (!chan->hs_hcon) {
- l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
- 0);
- } else {
- if (l2cap_check_efs(chan)) {
- amp_create_logical_link(chan);
- chan->ident = cmd->ident;
- }
- }
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
}
goto done;
@@ -4672,11 +4433,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan->ops->set_shutdown(chan);
- l2cap_chan_unlock(chan);
- mutex_lock(&conn->chan_lock);
- l2cap_chan_lock(chan);
l2cap_chan_del(chan, ECONNRESET);
- mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
@@ -4713,11 +4470,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
return 0;
}
- l2cap_chan_unlock(chan);
- mutex_lock(&conn->chan_lock);
- l2cap_chan_lock(chan);
l2cap_chan_del(chan, 0);
- mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
@@ -4750,9 +4503,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (conn->local_fixed_chan & L2CAP_FC_A2MP)
- feat_mask |= L2CAP_FEAT_EXT_FLOW
- | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
@@ -4841,751 +4591,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
return 0;
}
-static int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_create_chan_req *req = data;
- struct l2cap_create_chan_rsp rsp;
- struct l2cap_chan *chan;
- struct hci_dev *hdev;
- u16 psm, scid;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
-
- BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
-
- /* For controller id 0 make BR/EDR connection */
- if (req->amp_id == AMP_ID_BREDR) {
- l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- return 0;
- }
-
- /* Validate AMP controller id */
- hdev = hci_dev_get(req->amp_id);
- if (!hdev)
- goto error;
-
- if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
- hci_dev_put(hdev);
- goto error;
- }
-
- chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- if (chan) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct hci_conn *hs_hcon;
-
- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &conn->hcon->dst);
- if (!hs_hcon) {
- hci_dev_put(hdev);
- cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
- chan->dcid);
- return 0;
- }
-
- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
-
- mgr->bredr_chan = chan;
- chan->hs_hcon = hs_hcon;
- chan->fcs = L2CAP_FCS_NONE;
- conn->mtu = hdev->block_mtu;
- }
-
- hci_dev_put(hdev);
-
- return 0;
-
-error:
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- return 0;
-}
-
-static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
-{
- struct l2cap_move_chan_req req;
- u8 ident;
-
- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
-
- ident = l2cap_get_ident(chan->conn);
- chan->ident = ident;
-
- req.icid = cpu_to_le16(chan->scid);
- req.dest_amp_id = dest_amp_id;
-
- l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
- &req);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_rsp rsp;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- rsp.icid = cpu_to_le16(chan->dcid);
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
-}
-
-static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- cfm.icid = cpu_to_le16(chan->scid);
- cfm.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
-
- cfm.icid = cpu_to_le16(icid);
- cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-}
-
-static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
-{
- struct l2cap_move_chan_cfm_rsp rsp;
-
- BT_DBG("icid 0x%4.4x", icid);
-
- rsp.icid = cpu_to_le16(icid);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
-}
-
-static void __release_logical_link(struct l2cap_chan *chan)
-{
- chan->hs_hchan = NULL;
- chan->hs_hcon = NULL;
-
- /* Placeholder - release the logical link */
-}
-
-static void l2cap_logical_fail(struct l2cap_chan *chan)
-{
- /* Logical link setup failed */
- if (chan->state != BT_CONNECTED) {
- /* Create channel failure, disconnect */
- l2cap_send_disconn_req(chan, ECONNRESET);
- return;
- }
-
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_move_done(chan);
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
- break;
- case L2CAP_MOVE_ROLE_INITIATOR:
- if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
- chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
- /* Remote has only sent pending or
- * success responses, clean up
- */
- l2cap_move_done(chan);
- }
-
- /* Other amp move states imply that the move
- * has already aborted
- */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-}
-
-static void l2cap_logical_finish_create(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- struct l2cap_conf_rsp rsp;
-
- chan->hs_hchan = hchan;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
-
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- int err;
-
- set_default_fcs(chan);
-
- err = l2cap_ertm_init(chan);
- if (err < 0)
- l2cap_send_disconn_req(chan, -err);
- else
- l2cap_chan_ready(chan);
- }
-}
-
-static void l2cap_logical_finish_move(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- BT_DBG("move_state %d", chan->move_state);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent after a success
- * response is received
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- break;
- case L2CAP_MOVE_WAIT_LOGICAL_CFM:
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
- }
- break;
- default:
- /* Move was not in expected state, free the channel */
- __release_logical_link(chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- }
-}
-
-/* Call with chan locked */
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status)
-{
- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
-
- if (status) {
- l2cap_logical_fail(chan);
- __release_logical_link(chan);
- return;
- }
-
- if (chan->state != BT_CONNECTED) {
- /* Ignore logical link if channel is on BR/EDR */
- if (chan->local_amp_id != AMP_ID_BREDR)
- l2cap_logical_finish_create(chan, hchan);
- } else {
- l2cap_logical_finish_move(chan, hchan);
- }
-}
-
-void l2cap_move_start(struct l2cap_chan *chan)
-{
- BT_DBG("chan %p", chan);
-
- if (chan->local_amp_id == AMP_ID_BREDR) {
- if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
- return;
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - start physical link setup */
- } else {
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- chan->move_id = 0;
- l2cap_move_setup(chan);
- l2cap_send_move_chan_req(chan, 0);
- }
-}
-
-static void l2cap_do_create(struct l2cap_chan *chan, int result,
- u8 local_amp_id, u8 remote_amp_id)
-{
- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
- local_amp_id, remote_amp_id);
-
- chan->fcs = L2CAP_FCS_NONE;
-
- /* Outgoing channel on AMP */
- if (chan->state == BT_CONNECT) {
- if (result == L2CAP_CR_SUCCESS) {
- chan->local_amp_id = local_amp_id;
- l2cap_send_create_chan_req(chan, remote_amp_id);
- } else {
- /* Revert to BR/EDR connect */
- l2cap_send_conn_req(chan);
- }
-
- return;
- }
-
- /* Incoming channel on AMP */
- if (__l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
-
- if (result == L2CAP_CR_SUCCESS) {
- /* Send successful response */
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- } else {
- /* Send negative response */
- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (result == L2CAP_CR_SUCCESS) {
- l2cap_state_change(chan, BT_CONFIG);
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
- }
-}
-
-static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
- u8 remote_amp_id)
-{
- l2cap_move_setup(chan);
- chan->move_id = local_amp_id;
- chan->move_state = L2CAP_MOVE_WAIT_RSP;
-
- l2cap_send_move_chan_req(chan, remote_amp_id);
-}
-
-static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
-{
- struct hci_chan *hchan = NULL;
-
- /* Placeholder - get hci_chan for logical link */
-
- if (hchan) {
- if (hchan->state == BT_CONNECTED) {
- /* Logical link is ready to go */
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- } else {
- /* Wait for logical link to be ready */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- }
- } else {
- /* Logical link not available */
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
- }
-}
-
-static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
-{
- if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- u8 rsp_result;
- if (result == -EINVAL)
- rsp_result = L2CAP_MR_BAD_ID;
- else
- rsp_result = L2CAP_MR_NOT_ALLOWED;
-
- l2cap_send_move_chan_rsp(chan, rsp_result);
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
- chan->move_state = L2CAP_MOVE_STABLE;
-
- /* Restart data transmission */
- l2cap_ertm_send(chan);
-}
-
-/* Invoke with locked chan */
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
-{
- u8 local_amp_id = chan->local_amp_id;
- u8 remote_amp_id = chan->remote_amp_id;
-
- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
- chan, result, local_amp_id, remote_amp_id);
-
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
- return;
-
- if (chan->state != BT_CONNECTED) {
- l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
- } else if (result != L2CAP_MR_SUCCESS) {
- l2cap_do_move_cancel(chan, result);
- } else {
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_do_move_initiate(chan, local_amp_id,
- remote_amp_id);
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_do_move_respond(chan, result);
- break;
- default:
- l2cap_do_move_cancel(chan, result);
- break;
- }
- }
-}
-
-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_req *req = data;
- struct l2cap_move_chan_rsp rsp;
- struct l2cap_chan *chan;
- u16 icid = 0;
- u16 result = L2CAP_MR_NOT_ALLOWED;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- icid = le16_to_cpu(req->icid);
-
- BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- rsp.icid = cpu_to_le16(icid);
- rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
- return 0;
- }
-
- chan->ident = cmd->ident;
-
- if (chan->scid < L2CAP_CID_DYN_START ||
- chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
- (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING)) {
- result = L2CAP_MR_NOT_ALLOWED;
- goto send_move_response;
- }
-
- if (chan->local_amp_id == req->dest_amp_id) {
- result = L2CAP_MR_SAME_ID;
- goto send_move_response;
- }
-
- if (req->dest_amp_id != AMP_ID_BREDR) {
- struct hci_dev *hdev;
- hdev = hci_dev_get(req->dest_amp_id);
- if (!hdev || hdev->dev_type != HCI_AMP ||
- !test_bit(HCI_UP, &hdev->flags)) {
- if (hdev)
- hci_dev_put(hdev);
-
- result = L2CAP_MR_BAD_ID;
- goto send_move_response;
- }
- hci_dev_put(hdev);
- }
-
- /* Detect a move collision. Only send a collision response
- * if this side has "lost", otherwise proceed with the move.
- * The winner has the larger bd_addr.
- */
- if ((__chan_is_moving(chan) ||
- chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
- bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
- result = L2CAP_MR_COLLISION;
- goto send_move_response;
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- l2cap_move_setup(chan);
- chan->move_id = req->dest_amp_id;
-
- if (req->dest_amp_id == AMP_ID_BREDR) {
- /* Moving to BR/EDR */
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- result = L2CAP_MR_PEND;
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- result = L2CAP_MR_SUCCESS;
- }
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - uncomment when amp functions are available */
- /*amp_accept_physical(chan, req->dest_amp_id);*/
- result = L2CAP_MR_PEND;
- }
-
-send_move_response:
- l2cap_send_move_chan_rsp(chan, result);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
-{
- struct l2cap_chan *chan;
- struct hci_chan *hchan = NULL;
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan) {
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
- if (result == L2CAP_MR_PEND)
- __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent when logical link
- * is complete.
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- break;
- case L2CAP_MOVE_WAIT_RSP_SUCCESS:
- if (result == L2CAP_MR_PEND) {
- break;
- } else if (test_bit(CONN_LOCAL_BUSY,
- &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else {
- /* Logical link is up or moving to BR/EDR,
- * proceed with move
- */
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- }
- break;
- case L2CAP_MOVE_WAIT_RSP:
- /* Moving to AMP */
- if (result == L2CAP_MR_SUCCESS) {
- /* Remote is ready, send confirm immediately
- * after logical link is ready
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- } else {
- /* Both logical link and move success
- * are required to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
- }
-
- /* Placeholder - get hci_chan for logical link */
- if (!hchan) {
- /* Logical link not available */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-
- /* If the logical link is not yet connected, do not
- * send confirmation.
- */
- if (hchan->state != BT_CONNECTED)
- break;
-
- /* Logical link is already ready to go */
-
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- if (result == L2CAP_MR_SUCCESS) {
- /* Can confirm now */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else {
- /* Now only need move success
- * to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- }
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- break;
- default:
- /* Any other amp move state means the move failed. */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
- u16 result)
-{
- struct l2cap_chan *chan;
-
- chan = l2cap_get_chan_by_ident(conn, ident);
- if (!chan) {
- /* Could not locate channel, icid is best guess */
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
-
- if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- if (result == L2CAP_MR_COLLISION) {
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- } else {
- /* Cleanup - cancel move */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- }
- }
-
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_rsp *rsp = data;
- u16 icid, result;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
- result = le16_to_cpu(rsp->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
- l2cap_move_continue(conn, icid, result);
- else
- l2cap_move_fail(conn, cmd->ident, icid, result);
-
- return 0;
-}
-
-static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm *cfm = data;
- struct l2cap_chan *chan;
- u16 icid, result;
-
- if (cmd_len != sizeof(*cfm))
- return -EPROTO;
-
- icid = le16_to_cpu(cfm->icid);
- result = le16_to_cpu(cfm->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- /* Spec requires a response even if the icid was not found */
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
- return 0;
- }
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
- if (result == L2CAP_MC_CONFIRMED) {
- chan->local_amp_id = chan->move_id;
- if (chan->local_amp_id == AMP_ID_BREDR)
- __release_logical_link(chan);
- } else {
- chan->move_id = chan->local_amp_id;
- }
-
- l2cap_move_done(chan);
- }
-
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm_rsp *rsp = data;
- struct l2cap_chan *chan;
- u16 icid;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
-
- BT_DBG("icid 0x%4.4x", icid);
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan)
- return 0;
-
- __clear_chan_timer(chan);
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
- chan->local_amp_id = chan->move_id;
-
- if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
- __release_logical_link(chan);
-
- l2cap_move_done(chan);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd,
u16 cmd_len, u8 *data)
@@ -5613,13 +4618,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- if (max > hcon->le_conn_max_interval) {
- BT_DBG("requested connection interval exceeds current bounds.");
- err = -EINVAL;
- } else {
- err = hci_check_conn_params(min, max, latency, to_multiplier);
- }
-
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
@@ -5669,13 +4668,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
dcid, mtu, mps, credits, result);
- mutex_lock(&conn->chan_lock);
-
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
- if (!chan) {
- err = -EBADSLT;
- goto unlock;
- }
+ if (!chan)
+ return -EBADSLT;
err = 0;
@@ -5723,9 +4718,6 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
-unlock:
- mutex_unlock(&conn->chan_lock);
-
return err;
}
@@ -5745,7 +4737,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_CONN_RSP:
- case L2CAP_CREATE_CHAN_RSP:
l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
@@ -5780,26 +4771,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
- case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_RSP:
- l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM_RSP:
- l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
- break;
-
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -5858,7 +4829,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
goto response;
}
- mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
@@ -5924,7 +4894,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
response_unlock:
l2cap_chan_unlock(pchan);
- mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
if (result == L2CAP_CR_PEND)
@@ -6000,10 +4969,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
u8 *data)
{
struct l2cap_ecred_conn_req *req = (void *) data;
- struct {
- struct l2cap_ecred_conn_rsp rsp;
- __le16 dcid[L2CAP_ECRED_MAX_CID];
- } __packed pdu;
+ DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
struct l2cap_chan *chan, *pchan;
u16 mtu, mps;
__le16 psm;
@@ -6022,7 +4988,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
cmd_len -= sizeof(*req);
num_scid = cmd_len / sizeof(u16);
- if (num_scid > ARRAY_SIZE(pdu.dcid)) {
+ if (num_scid > L2CAP_ECRED_MAX_CID) {
result = L2CAP_CR_LE_INVALID_PARAMS;
goto response;
}
@@ -6051,7 +5017,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
- memset(&pdu, 0, sizeof(pdu));
+ memset(pdu, 0, sizeof(*pdu));
/* Check if we have socket listening on psm */
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
@@ -6061,7 +5027,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
goto response;
}
- mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
@@ -6077,8 +5042,8 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
BT_DBG("scid[%d] 0x%4.4x", i, scid);
- pdu.dcid[i] = 0x0000;
- len += sizeof(*pdu.dcid);
+ pdu->dcid[i] = 0x0000;
+ len += sizeof(*pdu->dcid);
/* Check for valid dynamic CID range */
if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
@@ -6112,13 +5077,13 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Init response */
- if (!pdu.rsp.credits) {
- pdu.rsp.mtu = cpu_to_le16(chan->imtu);
- pdu.rsp.mps = cpu_to_le16(chan->mps);
- pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+ if (!pdu->credits) {
+ pdu->mtu = cpu_to_le16(chan->imtu);
+ pdu->mps = cpu_to_le16(chan->mps);
+ pdu->credits = cpu_to_le16(chan->rx_credits);
}
- pdu.dcid[i] = cpu_to_le16(chan->scid);
+ pdu->dcid[i] = cpu_to_le16(chan->scid);
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
@@ -6136,17 +5101,16 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
unlock:
l2cap_chan_unlock(pchan);
- mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
response:
- pdu.rsp.result = cpu_to_le16(result);
+ pdu->result = cpu_to_le16(result);
if (defer)
return 0;
l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
- sizeof(pdu.rsp) + len, &pdu);
+ sizeof(*pdu) + len, pdu);
return 0;
}
@@ -6173,8 +5137,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
result);
- mutex_lock(&conn->chan_lock);
-
cmd_len -= sizeof(*rsp);
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
@@ -6260,8 +5222,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
}
- mutex_unlock(&conn->chan_lock);
-
return err;
}
@@ -6374,8 +5334,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
if (cmd_len < sizeof(*rej))
return -EPROTO;
- mutex_lock(&conn->chan_lock);
-
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
if (!chan)
goto done;
@@ -6390,7 +5348,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
l2cap_chan_put(chan);
done:
- mutex_unlock(&conn->chan_lock);
return 0;
}
@@ -7051,8 +6008,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
- !__chan_is_moving(chan)) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
l2cap_retransmit_all(chan, control);
}
@@ -7245,11 +6202,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->mtu;
return l2cap_resegment(chan);
}
@@ -7316,11 +6269,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
*/
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->mtu;
err = l2cap_resegment(chan);
@@ -7525,9 +6474,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
struct l2cap_le_credits pkt;
- u16 return_credits;
-
- return_credits = (chan->imtu / chan->mps) + 1;
+ u16 return_credits = l2cap_le_rx_credits(chan);
if (chan->rx_credits >= return_credits)
return;
@@ -7546,6 +6493,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
+void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
+{
+ if (chan->rx_avail == rx_avail)
+ return;
+
+ BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
+
+ chan->rx_avail = rx_avail;
+
+ if (chan->state == BT_CONNECTED)
+ l2cap_chan_le_send_credits(chan);
+}
+
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
@@ -7555,6 +6515,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
	/* Wait for recv to confirm reception before updating the credits */
err = chan->ops->recv(chan, skb);
+ if (err < 0 && chan->rx_avail != -1) {
+ BT_ERR("Queueing received LE L2CAP data failed");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return err;
+ }
+
/* Update credits whenever an SDU is received */
l2cap_chan_le_send_credits(chan);
@@ -7577,7 +6543,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
}
chan->rx_credits--;
- BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
+ BT_DBG("chan %p: rx_credits %u -> %u",
+ chan, chan->rx_credits + 1, chan->rx_credits);
	/* Update if the remote had run out of credits; this should only happen
* if the remote is not using the entire MPS.
@@ -7672,21 +6639,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- if (cid == L2CAP_CID_A2MP) {
- chan = a2mp_channel_create(conn, skb);
- if (!chan) {
- kfree_skb(skb);
- return;
- }
-
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
- } else {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return;
- }
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -7758,6 +6714,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
BT_DBG("chan %p, len %d", chan, skb->len);
+ l2cap_chan_lock(chan);
+
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
@@ -7769,11 +6727,13 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
bt_cb(skb)->l2cap.psm = psm;
if (!chan->ops->recv(chan, skb)) {
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return;
}
drop:
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
free_skb:
kfree_skb(skb);
@@ -7842,8 +6802,12 @@ static void process_pending_rx(struct work_struct *work)
BT_DBG("");
+ mutex_lock(&conn->lock);
+
while ((skb = skb_dequeue(&conn->pending_rx)))
l2cap_recv_frame(conn, skb);
+
+ mutex_unlock(&conn->lock);
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
@@ -7871,33 +6835,18 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
- switch (hcon->type) {
- case LE_LINK:
- if (hcon->hdev->le_mtu) {
- conn->mtu = hcon->hdev->le_mtu;
- break;
- }
- fallthrough;
- default:
- conn->mtu = hcon->hdev->acl_mtu;
- break;
- }
-
+ conn->mtu = hcon->mtu;
conn->feat_mask = 0;
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
- if (hcon->type == ACL_LINK &&
- hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
- conn->local_fixed_chan |= L2CAP_FC_A2MP;
-
if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
- mutex_init(&conn->chan_lock);
+ mutex_init(&conn->lock);
INIT_LIST_HEAD(&conn->chan_l);
INIT_LIST_HEAD(&conn->users);
@@ -7953,7 +6902,7 @@ static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
}
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type)
+ bdaddr_t *dst, u8 dst_type, u16 timeout)
{
struct l2cap_conn *conn;
struct hci_conn *hcon;
@@ -8046,19 +6995,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
hcon = hci_connect_le(hdev, dst, dst_type, false,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ chan->sec_level, timeout,
+ HCI_ROLE_SLAVE, 0, 0);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
+ chan->sec_level, timeout,
CONN_REASON_L2CAP_CHAN);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
- CONN_REASON_L2CAP_CHAN);
+ CONN_REASON_L2CAP_CHAN, timeout);
}
if (IS_ERR(hcon)) {
@@ -8090,7 +7037,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
}
}
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
l2cap_chan_lock(chan);
if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
@@ -8131,7 +7078,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
chan_unlock:
l2cap_chan_unlock(chan);
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
done:
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -8142,14 +7089,11 @@ EXPORT_SYMBOL_GPL(l2cap_chan_connect);
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
- struct {
- struct l2cap_ecred_reconf_req req;
- __le16 scid;
- } pdu;
+ DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
- pdu.req.mtu = cpu_to_le16(chan->imtu);
- pdu.req.mps = cpu_to_le16(chan->mps);
- pdu.scid = cpu_to_le16(chan->scid);
+ pdu->mtu = cpu_to_le16(chan->imtu);
+ pdu->mps = cpu_to_le16(chan->mps);
+ pdu->scid[0] = cpu_to_le16(chan->scid);
chan->ident = l2cap_get_ident(conn);
@@ -8347,7 +7291,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -8355,11 +7299,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (!status && encrypt)
chan->sec_level = hcon->sec_level;
@@ -8426,7 +7365,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
l2cap_chan_unlock(chan);
}
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
}
/* Append fragment into frame respecting the maximum len of rx_skb */
@@ -8493,23 +7432,45 @@ static void l2cap_recv_reset(struct l2cap_conn *conn)
conn->rx_len = 0;
}
+struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
+{
+ if (!c)
+ return NULL;
+
+ BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
+
+ if (!kref_get_unless_zero(&c->ref))
+ return NULL;
+
+ return c;
+}
+
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_conn *conn;
int len;
- /* For AMP controller do not create l2cap conn */
- if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
- goto drop;
+ /* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
+ hci_dev_lock(hcon->hdev);
+
+ conn = hcon->l2cap_data;
if (!conn)
conn = l2cap_conn_add(hcon);
- if (!conn)
- goto drop;
+ conn = l2cap_conn_hold_unless_zero(conn);
+
+ hci_dev_unlock(hcon->hdev);
+
+ if (!conn) {
+ kfree_skb(skb);
+ return;
+ }
BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
+ mutex_lock(&conn->lock);
+
switch (flags) {
case ACL_START:
case ACL_START_NO_FLUSH:
@@ -8534,7 +7495,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
- return;
+ goto unlock;
}
BT_DBG("Start: total len %d, frag len %u", len, skb->len);
@@ -8598,6 +7559,9 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
drop:
kfree_skb(skb);
+unlock:
+ mutex_unlock(&conn->lock);
+ l2cap_conn_put(conn);
}
static struct hci_cb l2cap_cb = {
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e50d3d102078..acd11b268b98 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -254,7 +254,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
chan->mode = L2CAP_MODE_LE_FLOWCTL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr, la.l2_bdaddr_type);
+ &la.l2_bdaddr, la.l2_bdaddr_type,
+ sk->sk_sndtimeo);
if (err)
return err;
@@ -326,7 +327,7 @@ done:
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
@@ -335,7 +336,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
lock_sock_nested(sk, L2CAP_NESTING_PARENT);
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
@@ -438,7 +439,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -485,7 +487,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mode 0x%2.2x", chan->mode);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
@@ -535,7 +537,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
@@ -708,12 +710,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
{
switch (chan->scid) {
case L2CAP_CID_ATT:
- if (mtu < L2CAP_LE_MIN_MTU)
+ if (mtu && mtu < L2CAP_LE_MIN_MTU)
return false;
break;
default:
- if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU)
return false;
}
@@ -726,7 +728,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, err = 0;
+ int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
@@ -753,11 +755,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
opts.max_tx = chan->max_tx;
opts.txwin_size = chan->tx_win;
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_sockptr(&opts, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opts, sizeof(opts), optval,
+ optlen);
+ if (err)
break;
- }
if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
@@ -800,10 +801,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
break;
case L2CAP_LM:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt & L2CAP_LM_FIPS) {
err = -EINVAL;
@@ -884,7 +884,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
struct bt_security sec;
struct bt_power pwr;
struct l2cap_conn *conn;
- int len, err = 0;
+ int err = 0;
u32 opt;
u16 mtu;
u8 mode;
@@ -910,11 +910,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_sockptr(&sec, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ if (err)
break;
- }
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_FIPS) {
@@ -959,10 +957,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt) {
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -974,10 +971,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_FLUSHABLE:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt > BT_FLUSHABLE_ON) {
err = -EINVAL;
@@ -1009,11 +1005,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
- len = min_t(unsigned int, sizeof(pwr), optlen);
- if (copy_from_sockptr(&pwr, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
+ if (err)
break;
- }
if (pwr.force_active)
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -1022,28 +1016,11 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
- break;
- }
-
- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
- err = -EINVAL;
- break;
- }
-
- if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
- err = -EOPNOTSUPP;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
-
- chan->chan_policy = (u8) opt;
-
- if (sk->sk_state == BT_CONNECTED &&
- chan->move_role == L2CAP_MOVE_ROLE_NONE)
- l2cap_move_start(chan);
+ err = -EOPNOTSUPP;
break;
case BT_SNDMTU:
@@ -1070,10 +1047,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
+ if (err)
break;
- }
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
sk->sk_state == BT_CONNECTED)
@@ -1101,10 +1077,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&mode, sizeof(mode), optval,
+ optlen);
+ if (err)
break;
- }
BT_DBG("mode %u", mode);
@@ -1157,6 +1133,34 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
return err;
}
+static void l2cap_publish_rx_avail(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc);
+ int expected_skbs, skb_overhead;
+
+ if (avail <= 0) {
+ l2cap_chan_rx_avail(chan, 0);
+ return;
+ }
+
+ if (!chan->mps) {
+ l2cap_chan_rx_avail(chan, -1);
+ return;
+ }
+
+ /* Correct available memory by estimated sk_buff overhead.
+ * This is significant due to small transfer sizes. However, accept
+ * at least one full packet if receive space is non-zero.
+ */
+ expected_skbs = DIV_ROUND_UP(avail, chan->mps);
+ skb_overhead = expected_skbs * sizeof(struct sk_buff);
+ if (skb_overhead < avail)
+ l2cap_chan_rx_avail(chan, avail - skb_overhead);
+ else
+ l2cap_chan_rx_avail(chan, -1);
+}
+
static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
@@ -1193,28 +1197,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
else
err = bt_sock_recvmsg(sock, msg, len, flags);
- if (pi->chan->mode != L2CAP_MODE_ERTM)
+ if (pi->chan->mode != L2CAP_MODE_ERTM &&
+ pi->chan->mode != L2CAP_MODE_LE_FLOWCTL &&
+ pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL)
return err;
- /* Attempt to put pending rx data in the socket buffer */
-
lock_sock(sk);
- if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
- goto done;
+ l2cap_publish_rx_avail(pi->chan);
- if (pi->rx_busy_skb) {
- if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
- pi->rx_busy_skb = NULL;
- else
+ /* Attempt to put pending rx data in the socket buffer */
+ while (!list_empty(&pi->rx_busy)) {
+ struct l2cap_rx_busy *rx_busy =
+ list_first_entry(&pi->rx_busy,
+ struct l2cap_rx_busy,
+ list);
+ if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0)
goto done;
+ list_del(&rx_busy->list);
+ kfree(rx_busy);
}
/* Restore data flow when half of the receive buffer is
* available. This avoids resending large numbers of
* frames.
*/
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
l2cap_chan_busy(pi->chan, 0);
done:
@@ -1232,6 +1241,10 @@ static void l2cap_sock_kill(struct sock *sk)
BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+	/* Sock is dead, so set chan data to NULL to prevent other tasks from
+	 * using an invalid sock pointer.
+ */
+ l2cap_pi(sk)->chan->data = NULL;
/* Kill poor orphan */
l2cap_chan_put(l2cap_pi(sk)->chan);
@@ -1313,9 +1326,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
/* prevent sk structure from being freed whilst unlocked */
sock_hold(sk);
- chan = l2cap_pi(sk)->chan;
/* prevent chan structure from being freed whilst unlocked */
- l2cap_chan_hold(chan);
+ chan = l2cap_chan_hold_unless_zero(l2cap_pi(sk)->chan);
+ if (!chan)
+ goto shutdown_already;
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1345,22 +1359,20 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
release_sock(sk);
l2cap_chan_lock(chan);
- conn = chan->conn;
- if (conn)
- /* prevent conn structure from being freed */
- l2cap_conn_get(conn);
+ /* prevent conn structure from being freed */
+ conn = l2cap_conn_hold_unless_zero(chan->conn);
l2cap_chan_unlock(chan);
if (conn)
/* mutex lock must be taken before l2cap_chan_lock() */
- mutex_lock(&conn->chan_lock);
+ mutex_lock(&conn->lock);
l2cap_chan_lock(chan);
l2cap_chan_close(chan, 0);
l2cap_chan_unlock(chan);
if (conn) {
- mutex_unlock(&conn->chan_lock);
+ mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
}
@@ -1474,18 +1486,25 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
- struct sock *sk = chan->data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
int err;
- lock_sock(sk);
+ sk = chan->data;
+ if (!sk)
+ return -ENXIO;
- if (l2cap_pi(sk)->rx_busy_skb) {
+ pi = l2cap_pi(sk);
+ lock_sock(sk);
+ if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
err = -ENOMEM;
goto done;
}
if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
+ chan->mode != L2CAP_MODE_STREAMING &&
+ chan->mode != L2CAP_MODE_LE_FLOWCTL &&
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL) {
/* Even if no filter is attached, we could potentially
* get errors from security modules, etc.
*/
@@ -1496,7 +1515,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
err = __sock_queue_rcv_skb(sk, skb);
- /* For ERTM, handle one skb that doesn't fit into the recv
+ l2cap_publish_rx_avail(chan);
+
+ /* For ERTM and LE, handle a skb that doesn't fit into the recv
* buffer. This is important to do because the data frames
* have already been acked, so the skb cannot be discarded.
*
@@ -1505,8 +1526,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
* acked and reassembled until there is buffer space
* available.
*/
- if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
- l2cap_pi(sk)->rx_busy_skb = skb;
+ if (err < 0 &&
+ (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_LE_FLOWCTL ||
+ chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
+ struct l2cap_rx_busy *rx_busy =
+ kmalloc(sizeof(*rx_busy), GFP_KERNEL);
+ if (!rx_busy) {
+ err = -ENOMEM;
+ goto done;
+ }
+ rx_busy->skb = skb;
+ list_add_tail(&rx_busy->list, &pi->rx_busy);
l2cap_chan_busy(chan, 1);
err = 0;
}
@@ -1732,6 +1763,8 @@ static const struct l2cap_ops l2cap_chan_ops = {
static void l2cap_sock_destruct(struct sock *sk)
{
+ struct l2cap_rx_busy *rx_busy, *next;
+
BT_DBG("sk %p", sk);
if (l2cap_pi(sk)->chan) {
@@ -1739,9 +1772,10 @@ static void l2cap_sock_destruct(struct sock *sk)
l2cap_chan_put(l2cap_pi(sk)->chan);
}
- if (l2cap_pi(sk)->rx_busy_skb) {
- kfree_skb(l2cap_pi(sk)->rx_busy_skb);
- l2cap_pi(sk)->rx_busy_skb = NULL;
+ list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) {
+ kfree_skb(rx_busy->skb);
+ list_del(&rx_busy->list);
+ kfree(rx_busy);
}
skb_queue_purge(&sk->sk_receive_queue);
@@ -1825,6 +1859,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->data = sk;
chan->ops = &l2cap_chan_ops;
+
+ l2cap_publish_rx_avail(chan);
}
static struct proto l2cap_proto = {
@@ -1846,9 +1882,13 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy);
+
chan = l2cap_chan_create();
if (!chan) {
sk_free(sk);
+ if (sock)
+ sock->sk = NULL;
return NULL;
}
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
index f46847632ffa..6e349704efe4 100644
--- a/net/bluetooth/leds.c
+++ b/net/bluetooth/leds.c
@@ -48,7 +48,7 @@ static int power_activate(struct led_classdev *led_cdev)
htrig = to_hci_basic_led_trigger(led_cdev->trigger);
powered = test_bit(HCI_UP, &htrig->hdev->flags);
- led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+ led_set_brightness(led_cdev, powered ? LED_FULL : LED_OFF);
return 0;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ee3b4aad8bd8..621c555f639b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -25,7 +25,7 @@
/* Bluetooth HCI Management interface */
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -33,7 +33,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
@@ -42,7 +41,7 @@
#include "aosp.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 22
+#define MGMT_REVISION 23
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -133,6 +132,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_MESH_READ_FEATURES,
MGMT_OP_MESH_SEND,
MGMT_OP_MESH_SEND_CANCEL,
+ MGMT_OP_HCI_CMD_SYNC,
};
static const u16 mgmt_events[] = {
@@ -210,7 +210,7 @@ static const u16 mgmt_untrusted_events[] = {
MGMT_EV_EXP_FEATURE_CHANGED,
};
-#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+#define CACHE_TIMEOUT secs_to_jiffies(2)
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -443,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -468,8 +467,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id);
}
@@ -503,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY &&
- hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -528,8 +525,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY &&
- hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id);
}
@@ -561,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
read_lock(&hci_dev_list_lock);
count = 0;
- list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
- count++;
- }
+ list_for_each_entry(d, &hci_dev_list, list)
+ count++;
rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
if (!rp) {
@@ -585,16 +579,10 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY) {
- if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
- rp->entry[count].type = 0x01;
- else
- rp->entry[count].type = 0x00;
- } else if (d->dev_type == HCI_AMP) {
- rp->entry[count].type = 0x02;
- } else {
- continue;
- }
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ rp->entry[count].type = 0x01;
+ else
+ rp->entry[count].type = 0x00;
rp->entry[count].bus = d->bus;
rp->entry[count++].index = cpu_to_le16(d->id);
@@ -835,8 +823,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- if (IS_ENABLED(CONFIG_BT_HS))
- settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -901,9 +887,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
settings |= MGMT_SETTING_SSP;
- if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
- settings |= MGMT_SETTING_HS;
-
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
settings |= MGMT_SETTING_ADVERTISING;
@@ -1335,7 +1318,8 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
struct mgmt_mode *cp;
/* Make sure cmd still outstanding. */
- if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
return;
cp = cmd->param;
@@ -1368,7 +1352,13 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
+ struct mgmt_mode *cp;
+
+ /* Make sure cmd still outstanding. */
+ if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ return -ECANCELED;
+
+ cp = cmd->param;
BT_DBG("%s", hdev->name);
@@ -1390,6 +1380,14 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ if (!cp->val) {
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+ }
+
if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
MGMT_STATUS_BUSY);
@@ -1409,7 +1407,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
/* Cancel potentially blocking sync operation before power off */
if (cp->val == 0x00) {
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
mgmt_set_powered_complete);
} else {
@@ -1463,10 +1461,15 @@ static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
- if (cmd->cmd_complete) {
- u8 *status = data;
+ struct cmd_lookup *match = data;
- cmd->cmd_complete(cmd, *status);
+	/* Dequeue cmd_sync entries that use cmd as data, since cmd is about to
+	 * be removed/freed.
+ */
+ hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
+
+ if (cmd->cmd_complete) {
+ cmd->cmd_complete(cmd, match->mgmt_status);
mgmt_pending_remove(cmd);
return;
@@ -1515,7 +1518,8 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
/* Make sure cmd still outstanding. */
- if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
return;
hci_dev_lock(hdev);
@@ -1689,7 +1693,8 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
/* Make sure cmd still outstanding. */
- if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
return;
hci_dev_lock(hdev);
@@ -1704,8 +1709,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- if (cmd)
- mgmt_pending_remove(cmd);
+ mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
}
@@ -1922,7 +1926,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
bool changed;
/* Make sure cmd still outstanding. */
- if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
+ if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
return;
if (err) {
@@ -1930,7 +1934,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
if (enable && hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
new_settings(hdev, NULL);
}
@@ -1943,12 +1946,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
} else {
changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
-
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -2012,11 +2009,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
changed = hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2062,63 +2054,10 @@ failed:
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
- struct mgmt_mode *cp = data;
- bool changed;
- u8 status;
- int err;
-
bt_dev_dbg(hdev, "sock %p", sk);
- if (!IS_ENABLED(CONFIG_BT_HS))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
-
- status = mgmt_bredr_support(hdev);
- if (status)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
-
- if (!lmp_ssp_capable(hdev))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_NOT_SUPPORTED);
-
- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
-
- if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hci_dev_lock(hdev);
-
- if (pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- if (cp->val) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
- } else {
- if (hdev_is_powered(hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
- goto unlock;
- }
-
- changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
- if (err < 0)
- goto unlock;
-
- if (changed)
- err = new_settings(hdev, sk);
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
@@ -2586,6 +2525,64 @@ unlock:
return err;
}
+static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
+ le16_to_cpu(cp->params_len), cp->params,
+ cp->event, cp->timeout ?
+ msecs_to_jiffies(cp->timeout * 1000) :
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
+ mgmt_status(PTR_ERR(skb)));
+ goto done;
+ }
+
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
+ skb->data, skb->len);
+
+ kfree_skb(skb);
+
+done:
+ mgmt_pending_free(cmd);
+
+ return 0;
+}
+
+static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_hci_cmd_sync *cp = data;
+ struct mgmt_pending_cmd *cmd;
+ int err;
+
+ if (len < sizeof(*cp))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+ cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
+ if (!cmd)
+ err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
+
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
+ }
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
/* This is a helper function to test for pending mgmt commands that can
* cause CoD or EIR HCI commands. We can only allow one such pending
* mgmt command at a time since otherwise we cannot easily track what
@@ -2686,7 +2683,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so use
+	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0) {
mgmt_pending_free(cmd);
goto failed;
@@ -2780,8 +2781,11 @@ update_class:
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
- mgmt_class_complete);
+	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so use
+	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -2847,8 +2851,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
- mgmt_class_complete);
+	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so use
+	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -2896,16 +2903,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_link_key_info *key = &cp->keys[i];
-
- /* Considering SMP over BREDR/LE, there is no need to check addr_type */
- if (key->type > 0x08)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -2930,6 +2927,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
+ if (key->addr.type != BDADDR_BREDR) {
+ bt_dev_warn(hdev,
+ "Invalid link address type %u for %pMR",
+ key->addr.type, &key->addr.bdaddr);
+ continue;
+ }
+
+ if (key->type > 0x08) {
+ bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
+ key->type, &key->addr.bdaddr);
+ continue;
+ }
+
/* Always ignore debug keys and require a new pairing if
* the user wants to use them.
*/
@@ -2987,7 +2997,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data)
if (!conn)
return 0;
- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
}
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3119,13 +3134,44 @@ unlock:
return err;
}
+static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ cmd->cmd_complete(cmd, mgmt_status(err));
+ mgmt_pending_free(cmd);
+}
+
+static int disconnect_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_disconnect *cp = cmd->param;
+ struct hci_conn *conn;
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+ le_addr_type(cp->addr.type));
+
+ if (!conn)
+ return -ENOTCONN;
+
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
+}
+
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_disconnect *cp = data;
struct mgmt_rp_disconnect rp;
struct mgmt_pending_cmd *cmd;
- struct hci_conn *conn;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3148,27 +3194,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
- goto failed;
- }
-
- if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
-
- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED, &rp,
- sizeof(rp));
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -3176,9 +3202,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->cmd_complete = generic_cmd_complete;
- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
+ disconnect_complete);
if (err < 0)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
failed:
hci_dev_unlock(hdev);
@@ -3188,6 +3215,7 @@ failed:
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
+ case ISO_LINK:
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
@@ -3505,7 +3533,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->addr.type == BDADDR_BREDR) {
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
- auth_type, CONN_REASON_PAIR_DEVICE);
+ auth_type, CONN_REASON_PAIR_DEVICE,
+ HCI_ACL_CONN_TIMEOUT);
} else {
u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p;
@@ -3520,6 +3549,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
* will be kept and this function does nothing.
*/
p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+ if (!p) {
+ err = -EIO;
+ goto unlock;
+ }
if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
p->auto_connect = HCI_AUTO_CONN_DISABLED;
@@ -3817,7 +3850,8 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
bt_dev_dbg(hdev, "err %d", err);
- if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
return;
if (status) {
@@ -3992,7 +4026,8 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
struct sk_buff *skb = cmd->skb;
u8 status = mgmt_status(err);
- if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
return;
if (!status) {
@@ -4382,12 +4417,6 @@ static const u8 le_simultaneous_roles_uuid[16] = {
0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};
-/* 15c0a148-c273-11ea-b3de-0242ac130004 */
-static const u8 rpa_resolution_uuid[16] = {
- 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
- 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
-};
-
/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
@@ -4438,17 +4467,6 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
- if (hdev && ll_privacy_capable(hdev)) {
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- flags = BIT(0) | BIT(1);
- else
- flags = BIT(1);
-
- memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
- rp->features[idx].flags = cpu_to_le32(flags);
- idx++;
- }
-
if (hdev && (aosp_has_quality_report(hdev) ||
hdev->set_quality_report)) {
if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
@@ -4505,27 +4523,6 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
return status;
}
-static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
- struct sock *skip)
-{
- struct mgmt_ev_exp_feature_changed ev;
-
- memset(&ev, 0, sizeof(ev));
- memcpy(ev.uuid, rpa_resolution_uuid, 16);
- ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
-
- // Do we need to be atomic with the conn_flags?
- if (enabled && privacy_mode_capable(hdev))
- hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
- else
- hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
-
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
- &ev, sizeof(ev),
- HCI_MGMT_EXP_FEATURE_EVENTS, skip);
-
-}
-
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
bool enabled, struct sock *skip)
{
@@ -4566,16 +4563,6 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
}
#endif
- if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
- bool changed;
-
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
- if (changed)
- exp_feature_changed(hdev, rpa_resolution_uuid, false,
- sk);
- }
-
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
@@ -4681,71 +4668,6 @@ static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
- struct mgmt_cp_set_exp_feature *cp,
- u16 data_len)
-{
- struct mgmt_rp_set_exp_feature rp;
- bool val, changed;
- int err;
- u32 flags;
-
- /* Command requires to use the controller index */
- if (!hdev)
- return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_INDEX);
-
- /* Changes can only be made when controller is powered down */
- if (hdev_is_powered(hdev))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_REJECTED);
-
- /* Parameters are limited to a single octet */
- if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
-
- /* Only boolean on/off is supported */
- if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
-
- val = !!cp->param[0];
-
- if (val) {
- changed = !hci_dev_test_and_set_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
- hci_dev_clear_flag(hdev, HCI_ADVERTISING);
-
- /* Enable LL privacy + supported settings changed */
- flags = BIT(0) | BIT(1);
- } else {
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
-
- /* Disable LL privacy + supported settings changed */
- flags = BIT(1);
- }
-
- memcpy(rp.uuid, rpa_resolution_uuid, 16);
- rp.flags = cpu_to_le32(flags);
-
- hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
-
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE, 0,
- &rp, sizeof(rp));
-
- if (changed)
- exp_ll_privacy_feature_changed(val, hdev, sk);
-
- return err;
-}
-
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_set_exp_feature *cp,
u16 data_len)
@@ -4997,7 +4919,6 @@ static const struct mgmt_exp_feature {
EXP_FEAT(debug_uuid, set_debug_func),
#endif
EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
- EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
EXP_FEAT(quality_report_uuid, set_quality_report_func),
EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
@@ -5027,22 +4948,6 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_NOT_SUPPORTED);
}
-static u32 get_params_flags(struct hci_dev *hdev,
- struct hci_conn_params *params)
-{
- u32 flags = hdev->conn_flags;
-
- /* Devices using RPAs can only be programmed in the acceptlist if
- * LL Privacy has been enable otherwise they cannot mark
- * HCI_CONN_FLAG_REMOTE_WAKEUP.
- */
- if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
- hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
- flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
-
- return flags;
-}
-
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -5077,7 +4982,6 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
if (!params)
goto done;
- supported_flags = get_params_flags(hdev, params);
current_flags = params->flags;
}
@@ -5157,7 +5061,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- supported_flags = get_params_flags(hdev, params);
+ supported_flags = hdev->conn_flags;
if ((supported_flags | current_flags) != supported_flags) {
bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
@@ -5484,10 +5388,16 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
{
struct mgmt_rp_remove_adv_monitor rp;
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
+ struct mgmt_cp_remove_adv_monitor *cp;
+
+ if (status == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
+ return;
hci_dev_lock(hdev);
+ cp = cmd->param;
+
rp.monitor_handle = cp->monitor_handle;
if (!status)
@@ -5505,6 +5415,10 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
+
+ if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
+ return -ECANCELED;
+
struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
u16 handle = __le16_to_cpu(cp->monitor_handle);
@@ -5536,8 +5450,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
- mgmt_remove_adv_monitor_complete);
+ err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
+ mgmt_remove_adv_monitor_complete);
if (err) {
mgmt_pending_remove(cmd);
@@ -5883,13 +5797,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
return;
- bt_dev_dbg(hdev, "err %d", err);
-
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
cmd->param, 1);
mgmt_pending_remove(cmd);
@@ -6122,7 +6039,8 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
- if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
return;
bt_dev_dbg(hdev, "err %d", err);
@@ -6766,7 +6684,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
@@ -7133,7 +7050,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
- u8 addr_type = le_addr_type(irk->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_IRK,
@@ -7143,12 +7059,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (irk->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_irk(hdev, &irk->addr.bdaddr,
- addr_type, irk->val,
+ le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY);
}
@@ -7213,15 +7125,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "key_count %u", key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_ltk_info *key = &cp->keys[i];
-
- if (!ltk_is_valid(key))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_smp_ltks_clear(hdev);
@@ -7229,7 +7132,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, authenticated;
- u8 addr_type = le_addr_type(key->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_LTK,
@@ -7239,6 +7141,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
+ if (!ltk_is_valid(key)) {
+ bt_dev_warn(hdev, "Invalid LTK for %pMR",
+ &key->addr.bdaddr);
+ continue;
+ }
+
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
@@ -7264,12 +7172,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (key->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_ltk(hdev, &key->addr.bdaddr,
- addr_type, type, authenticated,
+ le_addr_type(key->addr.type), type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}
@@ -7630,6 +7534,24 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
+static void add_device_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_device *cp = cmd->param;
+
+ if (!err) {
+ device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ cp->action);
+ device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
+ cp->addr.type, hdev->conn_flags,
+ PTR_UINT(cmd->user_data));
+ }
+
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ mgmt_status(err), &cp->addr, sizeof(cp->addr));
+ mgmt_pending_free(cmd);
+}
+
static int add_device_sync(struct hci_dev *hdev, void *data)
{
return hci_update_passive_scan_sync(hdev);
@@ -7638,6 +7560,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data)
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
+ struct mgmt_pending_cmd *cmd;
struct mgmt_cp_add_device *cp = data;
u8 auto_conn, addr_type;
struct hci_conn_params *params;
@@ -7718,9 +7641,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
current_flags = params->flags;
}
- err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
- if (err < 0)
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
goto unlock;
+ }
+
+ cmd->user_data = UINT_PTR(current_flags);
+
+ err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
+ add_device_complete);
+ if (err < 0) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_FAILED, &cp->addr,
+ sizeof(cp->addr));
+ mgmt_pending_free(cmd);
+ }
+
+ goto unlock;
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
@@ -7877,6 +7815,18 @@ unlock:
return err;
}
+static int conn_update_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn_params *params = data;
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
+ if (!conn)
+ return -ECANCELED;
+
+ return hci_le_conn_update_sync(hdev, conn, params);
+}
+
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -7910,12 +7860,14 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- hci_conn_params_clear_disabled(hdev);
+ if (param_count > 1)
+ hci_conn_params_clear_disabled(hdev);
for (i = 0; i < param_count; i++) {
struct mgmt_conn_param *param = &cp->params[i];
struct hci_conn_params *hci_param;
u16 min, max, latency, timeout;
+ bool update = false;
u8 addr_type;
bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
@@ -7943,6 +7895,19 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
+ /* Detect when the loading is for an existing parameter then
+ * attempt to trigger the connection update procedure.
+ */
+ if (!i && param_count == 1) {
+ hci_param = hci_conn_params_lookup(hdev,
+ &param->addr.bdaddr,
+ addr_type);
+ if (hci_param)
+ update = true;
+ else
+ hci_conn_params_clear_disabled(hdev);
+ }
+
hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
addr_type);
if (!hci_param) {
@@ -7954,6 +7919,25 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_param->conn_max_interval = max;
hci_param->conn_latency = latency;
hci_param->supervision_timeout = timeout;
+
+ /* Check if we need to trigger a connection update */
+ if (update) {
+ struct hci_conn *conn;
+
+ /* Lookup for existing connection as central and check
+ * if parameters match and if they don't then trigger
+ * a connection update.
+ */
+ conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
+ addr_type);
+ if (conn && conn->role == HCI_ROLE_MASTER &&
+ (conn->le_conn_min_interval != min ||
+ conn->le_conn_max_interval != max ||
+ conn->le_conn_latency != latency ||
+ conn->le_supv_timeout != timeout))
+ hci_cmd_sync_queue(hdev, conn_update_sync,
+ hci_param, NULL);
+ }
}
hci_dev_unlock(hdev);
@@ -8081,7 +8065,8 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
u8 status = mgmt_status(err);
u16 eir_len;
- if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
+ if (err == -ECANCELED ||
+ cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
return;
if (!status) {
@@ -8470,7 +8455,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
static u8 calculate_name_len(struct hci_dev *hdev)
{
- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
return eir_append_local_name(hdev, buf, 0);
}
@@ -8829,8 +8814,7 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
}
unlock:
- if (cmd)
- mgmt_pending_free(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -9375,6 +9359,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ mesh_send, MGMT_MESH_SEND_SIZE,
HCI_MGMT_VAR_LEN },
{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
+ { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
void mgmt_index_added(struct hci_dev *hdev)
@@ -9384,23 +9369,14 @@ void mgmt_index_added(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
- ev.type = 0x01;
- } else {
- mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
- HCI_MGMT_INDEX_EVENTS);
- ev.type = 0x00;
- }
- break;
- case HCI_AMP:
- ev.type = 0x02;
- break;
- default:
- return;
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
+ HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
}
ev.bus = hdev->bus;
@@ -9412,30 +9388,21 @@ void mgmt_index_added(struct hci_dev *hdev)
void mgmt_index_removed(struct hci_dev *hdev)
{
struct mgmt_ev_ext_index ev;
- u8 status = MGMT_STATUS_INVALID_INDEX;
+ struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
- ev.type = 0x01;
- } else {
- mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
- HCI_MGMT_INDEX_EVENTS);
- ev.type = 0x00;
- }
- break;
- case HCI_AMP:
- ev.type = 0x02;
- break;
- default:
- return;
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
+ HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
}
ev.bus = hdev->bus;
@@ -9477,7 +9444,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err)
void __mgmt_power_off(struct hci_dev *hdev)
{
struct cmd_lookup match = { NULL, hdev };
- u8 status, zero_cod[] = { 0, 0, 0 };
+ u8 zero_cod[] = { 0, 0, 0 };
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
@@ -9489,11 +9456,11 @@ void __mgmt_power_off(struct hci_dev *hdev)
* status responses.
*/
if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
- status = MGMT_STATUS_INVALID_INDEX;
+ match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
else
- status = MGMT_STATUS_NOT_POWERED;
+ match.mgmt_status = MGMT_STATUS_NOT_POWERED;
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
@@ -9536,7 +9503,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
@@ -9587,7 +9554,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -9616,7 +9583,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
- ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
+ ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
memcpy(ev.irk.val, irk->val, sizeof(irk->val));
mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
@@ -9645,7 +9612,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
- ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
@@ -9681,6 +9648,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u16 eir_len = 0;
u32 flags = 0;
+ if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ return;
+
/* allocate buff for LE or BR/EDR adv */
if (conn->le_adv_data_len > 0)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
@@ -9690,6 +9660,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
eir_precalc_len(sizeof(conn->dev_class)));
+ if (!skb)
+ return;
+
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, &conn->dst);
ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
@@ -9720,18 +9693,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
mgmt_event_skb(skb, NULL);
}
-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
-{
- struct sock **sk = data;
-
- cmd->cmd_complete(cmd, 0);
-
- *sk = cmd->sk;
- sock_hold(*sk);
-
- mgmt_pending_remove(cmd);
-}
-
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct hci_dev *hdev = data;
@@ -9748,6 +9709,9 @@ bool mgmt_powering_down(struct hci_dev *hdev)
struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return true;
+
cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
return false;
@@ -9766,22 +9730,12 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
if (!mgmt_connected)
return;
if (link_type != ACL_LINK && link_type != LE_LINK)
return;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
@@ -9794,9 +9748,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (sk)
sock_put(sk);
-
- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -9825,21 +9776,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_remove(cmd);
}
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
struct mgmt_ev_connect_failed ev;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
+ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type, status, true);
+ return;
}
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ bacpy(&ev.addr.bdaddr, &conn->dst);
+ ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
ev.status = mgmt_status(status);
mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -10071,6 +10019,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
/* If this is a HCI command related to powering on the
* HCI dev don't send any mgmt signals.
*/
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return;
+
if (pending_find(MGMT_OP_SET_POWERED, hdev))
return;
}
@@ -10465,6 +10416,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
+ if (!skb)
+ return;
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, bdaddr);
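
Many of the mgmt.c hunks above add the same guard to command-completion handlers: return early when the request was cancelled (err == -ECANCELED) or when the command is no longer pending, before touching cmd->param. A minimal userspace sketch of that ordering is below; the hci_cmd_sync machinery and pending_find() are the kernel's, while the lookup and the names here are hypothetical stand-ins.

#include <errno.h>
#include <stdio.h>

struct pending_cmd {
	int opcode;
	const char *param;
};

/* Hypothetical stand-in for pending_find(): returns the command still
 * registered for this opcode, or NULL once it has been removed.
 */
static struct pending_cmd *pending_lookup(struct pending_cmd *pending, int opcode)
{
	return (pending && pending->opcode == opcode) ? pending : NULL;
}

/* Completion callback pattern: check for cancellation and for a stale
 * pending entry before dereferencing the command's payload.
 */
static void cmd_complete(struct pending_cmd *pending, struct pending_cmd *cmd,
			 int err)
{
	if (err == -ECANCELED || cmd != pending_lookup(pending, cmd->opcode))
		return;	/* cancelled or already torn down: nothing to report */

	printf("opcode %d finished: err=%d param=%s\n",
	       cmd->opcode, err, cmd->param);
}

int main(void)
{
	struct pending_cmd cmd = { .opcode = 1, .param = "disconnect" };

	cmd_complete(&cmd, &cmd, 0);		/* reports completion */
	cmd_complete(&cmd, &cmd, -ECANCELED);	/* skipped: cancelled */
	cmd_complete(NULL, &cmd, 0);		/* skipped: no longer pending */
	return 0;
}
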
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index 0115f783bde8..17ab909a7c07 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -21,7 +21,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 630e3023273b..5a8ccc491b14 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -7,7 +7,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "mgmt_util.h"
#include "msft.h"
@@ -769,7 +768,7 @@ void msft_register(struct hci_dev *hdev)
mutex_init(&msft->filter_lock);
}
-void msft_unregister(struct hci_dev *hdev)
+void msft_release(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
@@ -875,6 +874,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
remove = true;
goto done;
}
+
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
cp->rssi_high = address_filter->rssi_high;
cp->rssi_low = address_filter->rssi_low;
@@ -887,6 +887,8 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
HCI_CMD_TIMEOUT);
+ kfree(cp);
+
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to enable address %pMR filter",
&address_filter->bdaddr);
diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
index 2a63205b377b..fe538e9c91c0 100644
--- a/net/bluetooth/msft.h
+++ b/net/bluetooth/msft.h
@@ -14,7 +14,7 @@
bool msft_monitor_supported(struct hci_dev *hdev);
void msft_register(struct hci_dev *hdev);
-void msft_unregister(struct hci_dev *hdev);
+void msft_release(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
@@ -35,7 +35,7 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev)
}
static inline void msft_register(struct hci_dev *hdev) {}
-static inline void msft_unregister(struct hci_dev *hdev) {}
+static inline void msft_release(struct hci_dev *hdev) {}
static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1d34d8497033..ad5177e3a69b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index b54e8a530f55..913402806fa0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -274,13 +274,13 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock,
struct rfcomm_dlc *d;
struct sock *sk;
- sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
- if (!sk)
+ d = rfcomm_dlc_alloc(prio);
+ if (!d)
return NULL;
- d = rfcomm_dlc_alloc(prio);
- if (!d) {
- sk_free(sk);
+ sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
+ if (!sk) {
+ rfcomm_dlc_free(d);
return NULL;
}
@@ -468,8 +468,8 @@ done:
return err;
}
-static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
@@ -483,7 +483,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
goto done;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
@@ -629,10 +629,9 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
switch (optname) {
case RFCOMM_LM:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt & RFCOMM_LM_FIPS) {
err = -EINVAL;
@@ -664,7 +663,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
- size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -686,11 +684,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_sockptr(&sec, optval, len)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ if (err)
break;
- }
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
@@ -706,10 +702,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -733,7 +728,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
struct sock *l2cap_sk;
struct l2cap_conn *conn;
struct rfcomm_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -787,7 +783,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
@@ -806,7 +802,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
{
struct sock *sk = sock->sk;
struct bt_security sec;
- int len, err = 0;
+ int err = 0;
+ size_t len;
BT_DBG("sk %p", sk);
@@ -831,7 +828,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
- len = min_t(unsigned int, len, sizeof(sec));
+ len = min(len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
@@ -869,9 +866,7 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
- lock_sock(sk);
err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
- release_sock(sk);
#else
err = -EOPNOTSUPP;
#endif
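
The setsockopt changes above (and the matching ones in sco.c further down) replace open-coded copy_from_sockptr() calls with copy_safe_from_sockptr(), which rejects an optlen shorter than the kernel-side structure instead of silently copying a truncated value. A rough userspace analogue of that validation rule follows; the struct and helper names are hypothetical, and only the "too short means -EINVAL, otherwise copy exactly the kernel-side size" behaviour is being illustrated.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct opt_voice {		/* hypothetical fixed-size option struct */
	uint32_t setting;
};

/* Userspace analogue of the bounded copy: reject a user buffer shorter
 * than the kernel-side struct and never read past what the caller
 * declared as optlen.
 */
static int copy_opt_checked(void *dst, size_t ksize,
			    const void *optval, size_t optlen)
{
	if (optlen < ksize)
		return -EINVAL;
	memcpy(dst, optval, ksize);	/* copy exactly ksize bytes */
	return 0;
}

int main(void)
{
	struct opt_voice voice = { 0 };
	uint32_t full = 0x0060;
	uint8_t truncated = 0x01;

	printf("full buffer:      %d\n",
	       copy_opt_checked(&voice, sizeof(voice), &full, sizeof(full)));
	printf("truncated buffer: %d\n",
	       copy_opt_checked(&voice, sizeof(voice), &truncated, sizeof(truncated)));
	return 0;
}
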
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 69c75c041fe1..21a5b5535ebc 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -201,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev,
struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%pMR\n", &dev->dst);
+ return sysfs_emit(buf, "%pMR\n", &dev->dst);
}
static ssize_t channel_show(struct device *tty_dev,
struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%d\n", dev->channel);
+ return sysfs_emit(buf, "%d\n", dev->channel);
}
static DEVICE_ATTR_RO(address);
@@ -504,7 +504,7 @@ static int rfcomm_get_dev_list(void __user *arg)
struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- int n = 0, size, err;
+ int n = 0, err;
u16 dev_num;
BT_DBG("");
@@ -515,12 +515,11 @@ static int rfcomm_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di))
return -EINVAL;
- size = sizeof(*dl) + dev_num * sizeof(*di);
-
- dl = kzalloc(size, GFP_KERNEL);
+ dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
+ dl->dev_num = dev_num;
di = dl->dev_info;
mutex_lock(&rfcomm_dev_lock);
@@ -528,12 +527,12 @@ static int rfcomm_get_dev_list(void __user *arg)
list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (!tty_port_get(&dev->port))
continue;
- (di + n)->id = dev->id;
- (di + n)->flags = dev->flags;
- (di + n)->state = dev->dlc->state;
- (di + n)->channel = dev->channel;
- bacpy(&(di + n)->src, &dev->src);
- bacpy(&(di + n)->dst, &dev->dst);
+ di[n].id = dev->id;
+ di[n].flags = dev->flags;
+ di[n].state = dev->dlc->state;
+ di[n].channel = dev->channel;
+ bacpy(&di[n].src, &dev->src);
+ bacpy(&di[n].dst, &dev->dst);
tty_port_put(&dev->port);
if (++n >= dev_num)
break;
@@ -542,9 +541,7 @@ static int rfcomm_get_dev_list(void __user *arg)
mutex_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
- size = sizeof(*dl) + n * sizeof(*di);
-
- err = copy_to_user(arg, dl, size);
+ err = copy_to_user(arg, dl, struct_size(dl, dev_info, n));
kfree(dl);
return err ? -EFAULT : 0;
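
The rfcomm_get_dev_list() rework above sizes the header-plus-flexible-array allocation with struct_size() and indexes dev_info[] directly instead of doing pointer arithmetic by hand. Below is a small self-contained sketch of the same sizing idea; struct_size() itself is the kernel macro, so the local stand-in here is only illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	uint16_t id;
	uint16_t channel;
};

struct dev_list {
	uint16_t dev_num;
	struct dev_info dev_info[];	/* flexible array member */
};

/* Illustrative stand-in for struct_size(): header + n elements, with an
 * overflow check so an oversized count makes the allocation fail rather
 * than come back undersized.
 */
static size_t dev_list_size(size_t n)
{
	if (n > (SIZE_MAX - sizeof(struct dev_list)) / sizeof(struct dev_info))
		return SIZE_MAX;
	return sizeof(struct dev_list) + n * sizeof(struct dev_info);
}

int main(void)
{
	size_t n = 3;
	struct dev_list *dl = malloc(dev_list_size(n));

	if (!dl)
		return 1;
	dl->dev_num = (uint16_t)n;
	for (size_t i = 0; i < n; i++) {
		dl->dev_info[i].id = (uint16_t)i;
		dl->dev_info[i].channel = 1;
	}
	printf("allocated %zu bytes for %u entries\n",
	       dev_list_size(n), (unsigned int)dl->dev_num);
	free(dl);
	return 0;
}
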
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index c736186aba26..5d1bc0d6aee0 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -51,6 +51,7 @@ struct sco_conn {
struct delayed_work timeout_work;
unsigned int mtu;
+ struct kref ref;
};
#define sco_conn_lock(c) spin_lock(&c->lock)
@@ -76,17 +77,86 @@ struct sco_pinfo {
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
+static void sco_conn_free(struct kref *ref)
+{
+ struct sco_conn *conn = container_of(ref, struct sco_conn, ref);
+
+ BT_DBG("conn %p", conn);
+
+ if (conn->sk)
+ sco_pi(conn->sk)->conn = NULL;
+
+ if (conn->hcon) {
+ conn->hcon->sco_data = NULL;
+ hci_conn_drop(conn->hcon);
+ }
+
+ /* Ensure no more work items will run since hci_conn has been dropped */
+ disable_delayed_work_sync(&conn->timeout_work);
+
+ kfree(conn);
+}
+
+static void sco_conn_put(struct sco_conn *conn)
+{
+ if (!conn)
+ return;
+
+ BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref));
+
+ kref_put(&conn->ref, sco_conn_free);
+}
+
+static struct sco_conn *sco_conn_hold(struct sco_conn *conn)
+{
+ BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
+
+ kref_get(&conn->ref);
+ return conn;
+}
+
+static struct sco_conn *sco_conn_hold_unless_zero(struct sco_conn *conn)
+{
+ if (!conn)
+ return NULL;
+
+ BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
+
+ if (!kref_get_unless_zero(&conn->ref))
+ return NULL;
+
+ return conn;
+}
+
+static struct sock *sco_sock_hold(struct sco_conn *conn)
+{
+ if (!conn || !bt_sock_linked(&sco_sk_list, conn->sk))
+ return NULL;
+
+ sock_hold(conn->sk);
+
+ return conn->sk;
+}
+
static void sco_sock_timeout(struct work_struct *work)
{
struct sco_conn *conn = container_of(work, struct sco_conn,
timeout_work.work);
struct sock *sk;
+ conn = sco_conn_hold_unless_zero(conn);
+ if (!conn)
+ return;
+
sco_conn_lock(conn);
- sk = conn->sk;
- if (sk)
- sock_hold(sk);
+ if (!conn->hcon) {
+ sco_conn_unlock(conn);
+ sco_conn_put(conn);
+ return;
+ }
+ sk = sco_sock_hold(conn);
sco_conn_unlock(conn);
+ sco_conn_put(conn);
if (!sk)
return;
@@ -122,12 +192,15 @@ static void sco_sock_clear_timer(struct sock *sk)
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
- struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
+ conn = sco_conn_hold_unless_zero(conn);
if (conn) {
- if (!conn->hcon)
+ if (!conn->hcon) {
+ sco_conn_lock(conn);
conn->hcon = hcon;
+ sco_conn_unlock(conn);
+ }
return conn;
}
@@ -135,14 +208,16 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
if (!conn)
return NULL;
+ kref_init(&conn->ref);
spin_lock_init(&conn->lock);
INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
hcon->sco_data = conn;
conn->hcon = hcon;
+ conn->mtu = hcon->mtu;
- if (hdev->sco_mtu > 0)
- conn->mtu = hdev->sco_mtu;
+ if (hcon->mtu > 0)
+ conn->mtu = hcon->mtu;
else
conn->mtu = 60;
@@ -158,17 +233,15 @@ static void sco_chan_del(struct sock *sk, int err)
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
+ sco_pi(sk)->conn = NULL;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
- sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
-
- if (conn->hcon)
- hci_conn_drop(conn->hcon);
+ sco_conn_put(conn);
}
sk->sk_state = BT_CLOSED;
@@ -183,31 +256,28 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
+ conn = sco_conn_hold_unless_zero(conn);
if (!conn)
return;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
- /* Kill socket */
sco_conn_lock(conn);
- sk = conn->sk;
- if (sk)
- sock_hold(sk);
+ sk = sco_sock_hold(conn);
sco_conn_unlock(conn);
+ sco_conn_put(conn);
- if (sk) {
- lock_sock(sk);
- sco_sock_clear_timer(sk);
- sco_chan_del(sk, err);
- release_sock(sk);
- sock_put(sk);
+ if (!sk) {
+ sco_conn_put(conn);
+ return;
}
- /* Ensure no more work items will run before freeing conn. */
- cancel_delayed_work_sync(&conn->timeout_work);
-
- hcon->sco_data = NULL;
- kfree(conn);
+ /* Kill socket */
+ lock_sock(sk);
+ sco_sock_clear_timer(sk);
+ sco_chan_del(sk, err);
+ release_sock(sk);
+ sock_put(sk);
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
@@ -257,14 +327,18 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
- err = -EOPNOTSUPP;
- goto unlock;
+ switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ break;
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
- sco_pi(sk)->setting, &sco_pi(sk)->codec);
+ sco_pi(sk)->setting, &sco_pi(sk)->codec,
+ sk->sk_sndtimeo);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -390,6 +464,8 @@ static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ sco_conn_put(sco_pi(sk)->conn);
+
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
@@ -437,17 +513,6 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn->hcon) {
- sk->sk_state = BT_DISCONN;
- sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
- sco_conn_lock(sco_pi(sk)->conn);
- hci_conn_drop(sco_pi(sk)->conn->hcon);
- sco_pi(sk)->conn->hcon = NULL;
- sco_conn_unlock(sco_pi(sk)->conn);
- } else
- sco_chan_del(sk, ECONNRESET);
- break;
-
case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN:
@@ -642,7 +707,7 @@ done:
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
@@ -651,7 +716,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock,
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
@@ -823,7 +888,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int len, err = 0;
+ int err = 0;
struct bt_voice voice;
u32 opt;
struct bt_codecs *codecs;
@@ -842,10 +907,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -862,18 +926,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
voice.setting = sco_pi(sk)->setting;
- len = min_t(unsigned int, sizeof(voice), optlen);
- if (copy_from_sockptr(&voice, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- /* Explicitly check for these values */
- if (voice.setting != BT_VOICE_TRANSPARENT &&
- voice.setting != BT_VOICE_CVSD_16BIT) {
- err = -EINVAL;
+ err = copy_safe_from_sockptr(&voice, sizeof(voice), optval,
+ optlen);
+ if (err)
break;
- }
sco_pi(sk)->setting = voice.setting;
hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
@@ -882,17 +938,21 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
err = -EBADFD;
break;
}
- if (enhanced_sync_conn_capable(hdev) &&
- voice.setting == BT_VOICE_TRANSPARENT)
- sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+
+ switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (enhanced_sync_conn_capable(hdev))
+ sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+ break;
+ }
+
hci_dev_put(hdev);
break;
case BT_PKT_STATUS:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
@@ -933,9 +993,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(buffer, optval, optlen)) {
+ err = copy_struct_from_sockptr(buffer, sizeof(buffer), optval,
+ optlen);
+ if (err) {
hci_dev_put(hdev);
- err = -EFAULT;
break;
}
@@ -966,7 +1027,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
BT_DBG("sk %p", sk);
@@ -988,7 +1050,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mtu %u", opts.mtu);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -1006,7 +1068,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
@@ -1299,6 +1361,7 @@ static void sco_conn_ready(struct sco_conn *conn)
bacpy(&sco_pi(sk)->src, &conn->hcon->src);
bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
+ sco_conn_hold(conn);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
@@ -1355,8 +1418,10 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
struct sco_conn *conn;
conn = sco_conn_add(hcon);
- if (conn)
+ if (conn) {
sco_conn_ready(conn);
+ sco_conn_put(conn);
+ }
} else
sco_conn_del(hcon, bt_to_errno(status));
}
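
The sco.c changes above move sco_conn to reference counting (kref) so the timeout worker, socket teardown and hci_conn teardown can each drop the object safely. A rough userspace analogue of the hold-unless-zero / put pattern those hunks follow is sketched below; the kernel uses kref and its own locking, so the names and atomics here are only an illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int ref;		/* starts at 1 for the creating owner */
	int handle;
};

static struct conn *conn_new(int handle)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->ref, 1);
	c->handle = handle;
	return c;
}

/* Take a reference only if the object is still live (ref > 0). */
static struct conn *conn_hold_unless_zero(struct conn *c)
{
	int old;

	if (!c)
		return NULL;
	old = atomic_load(&c->ref);
	do {
		if (old == 0)
			return NULL;	/* already on its way to being freed */
	} while (!atomic_compare_exchange_weak(&c->ref, &old, old + 1));
	return c;
}

/* Drop a reference; the last put frees the object. */
static void conn_put(struct conn *c)
{
	if (!c)
		return;
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing conn %d\n", c->handle);
		free(c);
	}
}

int main(void)
{
	struct conn *c = conn_new(42);
	struct conn *w;

	if (!c)
		return 1;
	w = conn_hold_unless_zero(c);	/* e.g. the timeout worker */
	conn_put(c);			/* original owner drops its reference */
	if (w) {
		printf("worker still sees conn %d\n", w->handle);
		conn_put(w);		/* last put frees the object */
	}
	return 0;
}
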
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1e7ea3a4b7ef..8b9724fd752a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -914,7 +914,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
* Confirms and the responder Enters the passkey.
*/
if (smp->method == OVERLAP) {
- if (hcon->role == HCI_ROLE_MASTER)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp->method = CFM_PASSKEY;
else
smp->method = REQ_PASSKEY;
@@ -964,7 +964,7 @@ static u8 smp_confirm(struct smp_chan *smp)
smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
- if (conn->hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
else
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -980,7 +980,8 @@ static u8 smp_random(struct smp_chan *smp)
int ret;
bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
- conn->hcon->out ? "initiator" : "responder");
+ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+ "responder");
ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
hcon->init_addr_type, &hcon->init_addr,
@@ -994,7 +995,7 @@ static u8 smp_random(struct smp_chan *smp)
return SMP_CONFIRM_FAILED;
}
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
u8 stk[16];
__le64 rand = 0;
__le16 ediv = 0;
@@ -1059,7 +1060,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->remote_irk) {
- smp->remote_irk->link_type = hcon->type;
mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the
@@ -1079,28 +1079,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->csrk) {
- smp->csrk->link_type = hcon->type;
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
if (smp->responder_csrk) {
- smp->responder_csrk->link_type = hcon->type;
smp->responder_csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
- smp->ltk->link_type = hcon->type;
smp->ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
if (smp->responder_ltk) {
- smp->responder_ltk->link_type = hcon->type;
smp->responder_ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
@@ -1120,8 +1116,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
smp->link_key, type, 0, &persistent);
if (key) {
- key->link_type = hcon->type;
- key->bdaddr_type = hcon->dst_type;
mgmt_new_link_key(hdev, key, persistent);
/* Don't keep debug keys around if the relevant
@@ -1256,14 +1250,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
rsp = (void *) &smp->prsp[1];
/* The responder sends its keys first */
- if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) &&
+ (smp->remote_key_dist & KEY_DIST_MASK)) {
smp_allow_key_dist(smp);
return;
}
req = (void *) &smp->preq[1];
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
keydist = &rsp->init_key_dist;
*keydist &= req->init_key_dist;
} else {
@@ -1432,7 +1427,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
struct hci_conn *hcon = smp->conn->hcon;
u8 *na, *nb, a[7], b[7];
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
na = smp->prnd;
nb = smp->rrnd;
} else {
@@ -1460,7 +1455,7 @@ static void sc_dhkey_check(struct smp_chan *smp)
a[6] = hcon->init_addr_type;
b[6] = hcon->resp_addr_type;
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local_addr = a;
remote_addr = b;
memcpy(io_cap, &smp->preq[1], 3);
@@ -1539,7 +1534,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
/* The round is only complete when the initiator
* receives pairing random.
*/
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
if (smp->passkey_round == 20)
@@ -1567,7 +1562,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
return 0;
@@ -1578,7 +1573,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
case SMP_CMD_PUBLIC_KEY:
default:
/* Initiating device starts the round */
- if (!hcon->out)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return 0;
bt_dev_dbg(hdev, "Starting passkey round %u",
@@ -1623,7 +1618,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
}
/* Initiator sends DHKey check first */
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
sc_dhkey_check(smp);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
} else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
@@ -1746,7 +1741,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct l2cap_chan *chan = conn->smp;
struct hci_dev *hdev = conn->hcon->hdev;
- struct smp_chan *smp;
+ struct smp_chan *smp = chan->data;
u8 key_size, auth, sec_level;
int ret;
@@ -1755,16 +1750,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*req))
return SMP_INVALID_PARAMS;
- if (conn->hcon->role != HCI_ROLE_SLAVE)
+ if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_CMD_NOTSUPP;
- if (!chan->data)
+ if (!smp) {
smp = smp_chan_create(conn);
- else
- smp = chan->data;
-
- if (!smp)
- return SMP_UNSPECIFIED;
+ if (!smp)
+ return SMP_UNSPECIFIED;
+ }
/* We didn't start the pairing, so match remote */
auth = req->auth_req & AUTH_REQ_MASK(hdev);
@@ -1946,7 +1939,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*rsp))
return SMP_INVALID_PARAMS;
- if (conn->hcon->role != HCI_ROLE_MASTER)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_CMD_NOTSUPP;
skb_pull(skb, sizeof(*rsp));
@@ -2041,7 +2034,7 @@ static u8 sc_check_confirm(struct smp_chan *smp)
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
- if (conn->hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2063,7 +2056,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
u8 auth;
/* The issue is only observed when we're in responder role */
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_UNSPECIFIED;
if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
@@ -2099,7 +2092,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
struct hci_dev *hdev = hcon->hdev;
bt_dev_dbg(hdev, "conn %p %s", conn,
- hcon->out ? "initiator" : "responder");
+ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+ "responder");
if (skb->len < sizeof(smp->pcnf))
return SMP_INVALID_PARAMS;
@@ -2121,7 +2115,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
return ret;
}
- if (conn->hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2156,7 +2150,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
if (!test_bit(SMP_FLAG_SC, &smp->flags))
return smp_random(smp);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
pkax = smp->local_pk;
pkbx = smp->remote_pk;
na = smp->prnd;
@@ -2169,7 +2163,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
}
if (smp->method == REQ_OOB) {
- if (!hcon->out)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
@@ -2180,7 +2174,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
u8 cfm[16];
err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
@@ -2221,7 +2215,7 @@ mackey_and_ltk:
return SMP_UNSPECIFIED;
if (smp->method == REQ_OOB) {
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
sc_dhkey_check(smp);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
}
@@ -2295,10 +2289,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
return false;
}
+static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth)
+{
+ struct smp_cmd_pairing cp;
+
+ if (smp->conn->hcon->type == ACL_LINK)
+ build_bredr_pairing_cmd(smp, &cp, NULL);
+ else
+ build_pairing_cmd(smp->conn, &cp, NULL, auth);
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
+
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+
+ set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+}
+
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_security_req *rp = (void *) skb->data;
- struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
struct smp_chan *smp;
@@ -2347,16 +2358,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
- memset(&cp, 0, sizeof(cp));
- build_pairing_cmd(conn, &cp, NULL, auth);
+ smp_send_pairing_req(smp, auth);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
+ return 0;
+}
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+static void smp_send_security_req(struct smp_chan *smp, __u8 auth)
+{
+ struct smp_cmd_security_req cp;
- return 0;
+ cp.auth_req = auth;
+ smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
+
+ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
}
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
@@ -2427,23 +2442,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq |= SMP_AUTH_MITM;
}
- if (hcon->role == HCI_ROLE_MASTER) {
- struct smp_cmd_pairing cp;
-
- build_pairing_cmd(conn, &cp, NULL, authreq);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
- } else {
- struct smp_cmd_security_req cp;
- cp.auth_req = authreq;
- smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
- }
+ if (hcon->role == HCI_ROLE_MASTER)
+ smp_send_pairing_req(smp, authreq);
+ else
+ smp_send_security_req(smp, authreq);
- set_bit(SMP_FLAG_INITIATOR, &smp->flags);
ret = 0;
unlock:
@@ -2694,8 +2697,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 sc_select_method(struct smp_chan *smp)
{
- struct l2cap_conn *conn = smp->conn;
- struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing *local, *remote;
u8 local_mitm, remote_mitm, local_io, remote_io, method;
@@ -2708,7 +2709,7 @@ static u8 sc_select_method(struct smp_chan *smp)
* the "struct smp_cmd_pairing" from them we need to skip the
* first byte which contains the opcode.
*/
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local = (void *) &smp->preq[1];
remote = (void *) &smp->prsp[1];
} else {
@@ -2777,7 +2778,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
/* Non-initiating device sends its public key after receiving
* the key from the initiating device.
*/
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
err = sc_send_public_key(smp);
if (err)
return err;
@@ -2839,7 +2840,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
}
if (smp->method == REQ_OOB) {
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
@@ -2848,7 +2849,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
if (smp->method == REQ_PASSKEY) {
@@ -2863,7 +2864,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
/* The Initiating device waits for the non-initiating device to
* send the confirm value.
*/
- if (conn->hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return 0;
err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
@@ -2897,7 +2898,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
a[6] = hcon->init_addr_type;
b[6] = hcon->resp_addr_type;
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local_addr = a;
remote_addr = b;
memcpy(io_cap, &smp->prsp[1], 3);
@@ -2922,7 +2923,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
return 0;
@@ -2934,7 +2935,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
sc_add_ltk(smp);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
hcon->enc_key_size = smp->enc_key_size;
}
@@ -3083,7 +3084,6 @@ static void bredr_pairing(struct l2cap_chan *chan)
struct l2cap_conn *conn = chan->conn;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- struct smp_cmd_pairing req;
struct smp_chan *smp;
bt_dev_dbg(hdev, "chan %p", chan);
@@ -3135,14 +3135,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
bt_dev_dbg(hdev, "starting SMP over BR/EDR");
- /* Prepare and send the BR/EDR SMP Pairing Request */
- build_bredr_pairing_cmd(smp, &req, NULL);
-
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &req, sizeof(req));
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+ smp_send_pairing_req(smp, 0x00);
}
static void smp_resume_cb(struct l2cap_chan *chan)
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 8906f7bdf4a9..f71f67c6896b 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -7,7 +7,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
-extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+static struct bpf_struct_ops bpf_bpf_dummy_ops;
/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
@@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args {
struct bpf_dummy_ops_state state;
};
+static struct btf *bpf_dummy_ops_btf;
+
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
@@ -77,6 +79,51 @@ static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
args->args[3], args->args[4]);
}
+static const struct bpf_ctx_arg_aux *find_ctx_arg_info(struct bpf_prog_aux *aux, int offset)
+{
+ int i;
+
+ for (i = 0; i < aux->ctx_arg_info_size; i++)
+ if (aux->ctx_arg_info[i].offset == offset)
+ return &aux->ctx_arg_info[i];
+
+ return NULL;
+}
+
+/* There is only one check at the moment:
+ * - zero should not be passed for pointer parameters not marked as nullable.
+ */
+static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_args *args)
+{
+ const struct btf_type *func_proto = prog->aux->attach_func_proto;
+
+ for (u32 arg_no = 0; arg_no < btf_type_vlen(func_proto); ++arg_no) {
+ const struct btf_param *param = &btf_params(func_proto)[arg_no];
+ const struct bpf_ctx_arg_aux *info;
+ const struct btf_type *t;
+ int offset;
+
+ if (args->args[arg_no] != 0)
+ continue;
+
+ /* Program is validated already, so there is no need
+ * to check if t is NULL.
+ */
+ t = btf_type_skip_modifiers(bpf_dummy_ops_btf, param->type, NULL);
+ if (!btf_type_is_ptr(t))
+ continue;
+
+ offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
+ info = find_ctx_arg_info(prog->aux, offset);
+ if (info && type_may_be_null(info->reg_type))
+ continue;
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
extern const struct bpf_link_ops bpf_struct_ops_link_lops;
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
@@ -85,14 +132,21 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
const struct btf_type *func_proto;
struct bpf_dummy_ops_test_args *args;
- struct bpf_tramp_links *tlinks;
+ struct bpf_tramp_links *tlinks = NULL;
struct bpf_tramp_link *link = NULL;
void *image = NULL;
unsigned int op_idx;
+ u32 image_off = 0;
int prog_ret;
+ s32 type_id;
int err;
- if (prog->aux->attach_btf_id != st_ops->type_id)
+ type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
+ bpf_bpf_dummy_ops.name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ if (prog->aux->attach_btf_id != type_id)
return -EOPNOTSUPP;
func_proto = prog->aux->attach_func_proto;
@@ -100,14 +154,12 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
if (IS_ERR(args))
return PTR_ERR(args);
- tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
- if (!tlinks) {
- err = -ENOMEM;
+ err = check_test_run_args(prog, args);
+ if (err)
goto out;
- }
- image = arch_alloc_bpf_trampoline(PAGE_SIZE);
- if (!image) {
+ tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+ if (!tlinks) {
err = -ENOMEM;
goto out;
}
@@ -125,11 +177,14 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
&st_ops->func_models[op_idx],
&dummy_ops_test_ret_function,
- image, image + PAGE_SIZE);
+ &image, &image_off,
+ true);
if (err < 0)
goto out;
- arch_protect_bpf_trampoline(image, PAGE_SIZE);
+ err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
+ if (err)
+ goto out;
prog_ret = dummy_ops_call_op(image, args);
err = dummy_ops_copy_args(args);
@@ -139,7 +194,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = -EFAULT;
out:
kfree(args);
- arch_free_bpf_trampoline(image, PAGE_SIZE);
+ bpf_struct_ops_image_free(image);
if (link)
bpf_link_put(&link->link);
kfree(tlinks);
@@ -148,6 +203,7 @@ out:
static int bpf_dummy_init(struct btf *btf)
{
+ bpf_dummy_ops_btf = btf;
return 0;
}
@@ -169,7 +225,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
case offsetof(struct bpf_dummy_ops, test_sleepable):
break;
default:
- if (prog->aux->sleepable)
+ if (prog->sleepable)
return -EINVAL;
}
@@ -216,16 +272,16 @@ static int bpf_dummy_init_member(const struct btf_type *t,
return -EOPNOTSUPP;
}
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
return -EOPNOTSUPP;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
-static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
+static int bpf_dummy_ops__test_1(struct bpf_dummy_ops_state *cb__nullable)
{
return 0;
}
@@ -242,12 +298,12 @@ static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
}
static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
- .test_1 = bpf_dummy_test_1,
+ .test_1 = bpf_dummy_ops__test_1,
.test_2 = bpf_dummy_test_2,
.test_sleepable = bpf_dummy_test_sleepable,
};
-struct bpf_struct_ops bpf_bpf_dummy_ops = {
+static struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.check_member = bpf_dummy_ops_check_member,
@@ -256,4 +312,11 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = {
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
.cfi_stubs = &__bpf_bpf_dummy_ops,
+ .owner = THIS_MODULE,
};
+
+static int __init bpf_dummy_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
+}
+late_initcall(bpf_dummy_struct_ops_init);
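
The check_test_run_args() helper added above only allows a zero (NULL) test argument when the corresponding struct_ops parameter is a pointer marked nullable in BTF (the __nullable suffix on bpf_dummy_ops__test_1's parameter); everything else yields -EINVAL. A standalone sketch of that rule, with plain structs standing in for the BTF and ctx_arg_info lookups:

#include <stdbool.h>
#include <stdio.h>

struct param_info {
	bool is_pointer;
	bool nullable;
};

static int check_args(const struct param_info *params, const unsigned long *args, int n)
{
	for (int i = 0; i < n; i++) {
		if (args[i] != 0)
			continue;
		if (params[i].is_pointer && !params[i].nullable)
			return -1;	/* -EINVAL stand-in: NULL for non-nullable pointer */
	}
	return 0;
}

int main(void)
{
	struct param_info params[] = { { true, true }, { true, false } };
	unsigned long ok[]  = { 0, 0xdeadbeef };	/* NULL only for nullable arg */
	unsigned long bad[] = { 0, 0 };			/* NULL for non-nullable arg */

	printf("%d %d\n", check_args(params, ok, 2), check_args(params, bad, 2));
	return 0;
}
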
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index dfd919374017..7cb192cbd65f 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -12,6 +12,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
+#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
@@ -126,9 +127,10 @@ struct xdp_test_data {
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256
-static void xdp_test_run_init_page(struct page *page, void *arg)
+static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
{
- struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
+ struct xdp_page_head *head =
+ phys_to_virt(page_to_phys(netmem_to_page(netmem)));
struct xdp_buff *new_ctx, *orig_ctx;
u32 headroom = XDP_PACKET_HEADROOM;
struct xdp_test_data *xdp = arg;
@@ -151,7 +153,7 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
new_ctx->data = new_ctx->data_meta + meta_len;
xdp_update_frame_from_buff(new_ctx, frm);
- frm->mem = new_ctx->rxq->mem;
+ frm->mem_type = new_ctx->rxq->mem.type;
memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}
@@ -244,6 +246,7 @@ static void reset_ctx(struct xdp_page_head *head)
head->ctx.data_meta = head->orig_ctx.data_meta;
head->ctx.data_end = head->orig_ctx.data_end;
xdp_update_frame_from_buff(&head->ctx, head->frame);
+ head->frame->mem_type = head->orig_ctx.rxq->mem.type;
}
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
@@ -254,7 +257,8 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
- n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
+ n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
+ (void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);
@@ -281,9 +285,10 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
u32 repeat)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int err = 0, act, ret, i, nframes = 0, batch_sz;
struct xdp_frame **frames = xdp->frames;
+ struct bpf_redirect_info *ri;
struct xdp_page_head *head;
struct xdp_frame *frm;
bool redirect = false;
@@ -293,6 +298,8 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
batch_sz = min_t(u32, repeat, xdp->batch_size);
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ ri = bpf_net_ctx_get_ri();
xdp_set_return_frame_no_direct();
for (i = 0; i < batch_sz; i++) {
@@ -357,6 +364,7 @@ out:
}
xdp_clear_return_frame_no_direct();
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
return err;
}
@@ -392,6 +400,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
u32 *retval, u32 *time, bool xdp)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct bpf_prog_array_item item = {.prog = prog};
struct bpf_run_ctx *old_ctx;
struct bpf_cg_run_ctx run_ctx;
@@ -417,10 +426,14 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
do {
run_ctx.prog_item = &item;
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
if (xdp)
*retval = bpf_prog_run_xdp(prog, ctx);
else
*retval = bpf_prog_run(prog, ctx);
+
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
bpf_reset_run_ctx(old_ctx);
@@ -573,6 +586,13 @@ __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
return a + *b + c + d + (long)e + f + g;
}
+__bpf_kfunc int bpf_modify_return_test_tp(int nonce)
+{
+ trace_bpf_trigger_tp(nonce);
+
+ return nonce;
+}
+
int noinline bpf_fentry_shadow_test(int a)
{
return a + 1;
@@ -617,21 +637,22 @@ CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_test_modify_return_ids)
+BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
+BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
-BTF_SET8_END(bpf_test_modify_return_ids)
+BTF_KFUNCS_END(bpf_test_modify_return_ids)
static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
.owner = THIS_MODULE,
.set = &bpf_test_modify_return_ids,
};
-BTF_SET8_START(test_sk_check_kfunc_ids)
+BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
-BTF_SET8_END(test_sk_check_kfunc_ids)
+BTF_KFUNCS_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
@@ -639,12 +660,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
void *data;
- if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
+ if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
return ERR_PTR(-EINVAL);
- if (user_size > size)
- return ERR_PTR(-EMSGSIZE);
-
size = SKB_DATA_ALIGN(size);
data = kzalloc(size + headroom + tailroom, GFP_USER);
if (!data)
@@ -717,10 +735,16 @@ static void
__bpf_prog_test_run_raw_tp(void *data)
{
struct bpf_raw_tp_test_run_info *info = data;
+ struct bpf_trace_run_ctx run_ctx = {};
+ struct bpf_run_ctx *old_run_ctx;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
rcu_read_lock();
info->retval = bpf_prog_run(info->prog, info->ctx);
rcu_read_unlock();
+
+ bpf_reset_run_ctx(old_run_ctx);
}
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
@@ -967,7 +991,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
void *data;
int ret;
- if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
+ if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
+ kattr->test.cpu || kattr->test.batch_size)
return -EINVAL;
data = bpf_test_init(kattr, kattr->test.data_size_in,
@@ -990,6 +1015,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
is_direct_pkt_access = true;
break;
default:
@@ -1015,6 +1041,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
__skb_put(skb, size);
+
if (ctx && ctx->ifindex > 1) {
dev = dev_get_by_index(net, ctx->ifindex);
if (!dev) {
@@ -1050,9 +1077,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
__skb_push(skb, hh_len);
if (is_direct_pkt_access)
bpf_compute_data_pointers(skb);
+
ret = convert___skb_to_skb(skb, ctx);
if (ret)
goto out;
+
+ if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
+ const int off = skb_network_offset(skb);
+ int len = skb->len - off;
+
+ skb->csum = skb_checksum(skb, off, len, 0);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
if (ret)
goto out;
@@ -1067,6 +1104,20 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
}
memset(__skb_push(skb, hh_len), 0, hh_len);
}
+
+ if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
+ const int off = skb_network_offset(skb);
+ int len = skb->len - off;
+ __wsum csum;
+
+ csum = skb_checksum(skb, off, len, 0);
+
+ if (csum_fold(skb->csum) != csum_fold(csum)) {
+ ret = -EBADMSG;
+ goto out;
+ }
+ }
+
convert_skb_to___skb(skb, ctx);
size = skb->len;
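
The test_run.c changes above add a BPF_F_TEST_SKB_CHECKSUM_COMPLETE mode: a checksum over the packet from the network offset is recorded and set as CHECKSUM_COMPLETE before the program runs, then recomputed and compared afterwards, with a mismatch returned as -EBADMSG. A standalone sketch of the verify step; the simple one's-complement sum below only models skb_checksum()/csum_fold() and is not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += (i & 1) ? data[i] : (uint32_t)data[i] << 8;
	while (sum >> 16)			/* fold carries into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[32] = { 0x45, 0x00, 0x20 };
	uint16_t before = csum(pkt, sizeof(pkt));	/* recorded before the run */

	pkt[3] ^= 0xff;			/* a test program corrupting the payload */
	if (csum(pkt, sizeof(pkt)) != before)
		puts("checksum mismatch -> -EBADMSG");
	return 0;
}
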
diff --git a/net/bridge/br.c b/net/bridge/br.c
index ac19b797dbec..183fcb362f9e 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -51,6 +51,13 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
}
}
+ if (is_vlan_dev(dev)) {
+ struct net_device *real_dev = vlan_dev_real_dev(dev);
+
+ if (netif_is_bridge_master(real_dev))
+ br_vlan_vlan_upper_event(real_dev, dev, event);
+ }
+
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
@@ -356,26 +363,21 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
clear_bit(opt, &br->options);
}
-static void __net_exit br_net_exit_batch(struct list_head *net_list)
+static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net_device *dev;
struct net *net;
- LIST_HEAD(list);
-
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
- br_dev_delete(dev, &list);
-
- unregister_netdevice_many(&list);
-
- rtnl_unlock();
+ br_dev_delete(dev, dev_to_kill);
}
static struct pernet_operations br_net_ops = {
- .exit_batch = br_net_exit_batch,
+ .exit_batch_rtnl = br_net_exit_batch_rtnl,
};
static const struct stp_proto br_stp_proto = {
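
The br.c hunk above converts bridge cleanup to the exit_batch_rtnl pernet hook: RTNL is taken once by the core, each exiting namespace only appends its bridge devices to the shared dev_to_kill list, and a single unregister pass handles them all afterwards. A standalone sketch of batching teardown into a caller-owned list (illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct dev {
	char name[16];
	struct dev *next;
};

/* per-namespace exit: appends only, no locking or freeing of its own */
static void ns_exit(const char *prefix, int count, struct dev **kill_list)
{
	for (int i = 0; i < count; i++) {
		struct dev *d = malloc(sizeof(*d));

		if (!d)
			return;
		snprintf(d->name, sizeof(d->name), "%s-br%d", prefix, i);
		d->next = *kill_list;
		*kill_list = d;
	}
}

int main(void)
{
	struct dev *kill_list = NULL;

	/* "RTNL" is notionally held across both exits and the final pass */
	ns_exit("ns1", 2, &kill_list);
	ns_exit("ns2", 1, &kill_list);

	while (kill_list) {		/* single unregister pass */
		struct dev *d = kill_list;

		kill_list = d->next;
		printf("unregister %s\n", d->name);
		free(d);
	}
	return 0;
}
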
diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
index c7869a286df4..115a23054a58 100644
--- a/net/bridge/br_arp_nd_proxy.c
+++ b/net/bridge/br_arp_nd_proxy.c
@@ -229,7 +229,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
#endif
#if IS_ENABLED(CONFIG_IPV6)
-struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *msg)
+struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *msg)
{
struct nd_msg *m;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 65cee0ad3c1b..0ab4613aa07a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops);
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
struct net_bridge_mcast_port *pmctx_null = NULL;
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mcast *brmctx = &br->multicast_ctx;
@@ -38,6 +39,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest;
u16 vid = 0;
+ if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
+ kfree_skb_reason(skb, reason);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
br_tc_skb_miss_set(skb, false);
@@ -108,38 +114,23 @@ out:
return NETDEV_TX_OK;
}
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
-static void br_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = br_fdb_hash_init(br);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = br_mdb_hash_init(br);
if (err) {
- free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
- free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
@@ -147,14 +138,14 @@ static int br_dev_init(struct net_device *dev)
err = br_multicast_init_stats(br);
if (err) {
- free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
+ return err;
}
- br_set_lockdep_class(dev);
- return err;
+ netdev_lockdep_set_classes(dev);
+ return 0;
}
static void br_dev_uninit(struct net_device *dev)
@@ -166,7 +157,6 @@ static void br_dev_uninit(struct net_device *dev)
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
- free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
@@ -213,7 +203,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* this flag will be cleared if the MTU was automatically adjusted */
br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
@@ -338,7 +328,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
return __br_netpoll_enable(p);
}
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int br_netpoll_setup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
@@ -405,7 +395,7 @@ static int br_fill_forward_path(struct net_device_path_ctx *ctx,
br_vlan_fill_forward_path_pvid(br, ctx, path);
f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
- if (!f || !f->dst)
+ if (!f)
return -1;
dst = READ_ONCE(f->dst);
@@ -481,7 +471,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fill_forward_path = br_fill_forward_path,
};
-static struct device_type br_type = {
+static const struct device_type br_type = {
.name = "bridge",
};
@@ -497,12 +487,15 @@ void br_dev_setup(struct net_device *dev)
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
- dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
br->dev = dev;
spin_lock_init(&br->lock);
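
In br_device.c above, the driver stops allocating and freeing dev->tstats on every error path and instead sets dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS, so the core owns the per-CPU counters around the driver's init/uninit. A standalone sketch of that ownership split, with calloc() standing in for the per-CPU allocation and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>

enum pcpu_stat_type { PCPU_STAT_NONE, PCPU_STAT_TSTATS };

struct device {
	enum pcpu_stat_type pcpu_stat_type;
	unsigned long *tstats;		/* core-owned when a stat type is declared */
};

static int core_register(struct device *dev, int (*drv_init)(struct device *))
{
	if (dev->pcpu_stat_type == PCPU_STAT_TSTATS) {
		dev->tstats = calloc(4, sizeof(*dev->tstats));	/* "per-CPU" slots */
		if (!dev->tstats)
			return -1;
	}
	if (drv_init(dev)) {		/* driver no longer frees tstats here */
		free(dev->tstats);
		dev->tstats = NULL;
		return -1;
	}
	return 0;
}

static int br_init(struct device *dev)
{
	(void)dev;
	return 0;			/* hash tables, vlans, multicast stats, ... */
}

int main(void)
{
	struct device br = { .pcpu_stat_type = PCPU_STAT_TSTATS };

	printf("register: %d\n", core_register(&br, br_init));
	free(br.tstats);
	return 0;
}
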
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c622de5eccd0..902694c0ce64 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -18,7 +18,7 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
@@ -35,10 +35,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly;
int __init br_fdb_init(void)
{
- br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
- sizeof(struct net_bridge_fdb_entry),
- 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
if (!br_fdb_cache)
return -ENOMEM;
@@ -76,13 +73,6 @@ static inline int has_expired(const struct net_bridge *br,
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
-static void fdb_rcu_free(struct rcu_head *head)
-{
- struct net_bridge_fdb_entry *ent
- = container_of(head, struct net_bridge_fdb_entry, rcu);
- kmem_cache_free(br_fdb_cache, ent);
-}
-
static int fdb_to_nud(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
@@ -332,7 +322,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags))
atomic_dec(&br->fdb_n_learned);
fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
- call_rcu(&f->rcu, fdb_rcu_free);
+ kfree_rcu(f, rcu);
}
/* Delete a local entry if no other port had the same address.
@@ -965,6 +955,7 @@ int br_fdb_dump(struct sk_buff *skb,
struct net_device *filter_dev,
int *idx)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_fdb_entry *f;
int err = 0;
@@ -980,7 +971,7 @@ int br_fdb_dump(struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip;
if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
if (filter_dev != dev)
@@ -1162,7 +1153,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
struct net_bridge_port *p, const unsigned char *addr,
u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
int err = 0;
@@ -1193,6 +1184,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
spin_unlock_bh(&br->hash_lock);
}
+ if (!err)
+ *notified = true;
return err;
}
@@ -1205,7 +1198,7 @@ static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
struct net_bridge_vlan_group *vg;
@@ -1268,10 +1261,10 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
- extack);
+ notified, extack);
} else {
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
- extack);
+ notified, extack);
if (err || !vg || !vg->num_vlans)
goto out;
@@ -1283,7 +1276,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!br_vlan_should_use(v))
continue;
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
- nfea_tb, extack);
+ nfea_tb, notified, extack);
if (err)
goto out;
}
@@ -1295,7 +1288,7 @@ out:
static int fdb_delete_by_addr_and_port(struct net_bridge *br,
const struct net_bridge_port *p,
- const u8 *addr, u16 vlan)
+ const u8 *addr, u16 vlan, bool *notified)
{
struct net_bridge_fdb_entry *fdb;
@@ -1304,18 +1297,19 @@ static int fdb_delete_by_addr_and_port(struct net_bridge *br,
return -ENOENT;
fdb_delete(br, fdb, true);
+ *notified = true;
return 0;
}
static int __br_fdb_delete(struct net_bridge *br,
const struct net_bridge_port *p,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid, bool *notified)
{
int err;
spin_lock_bh(&br->hash_lock);
- err = fdb_delete_by_addr_and_port(br, p, addr, vid);
+ err = fdb_delete_by_addr_and_port(br, p, addr, vid, notified);
spin_unlock_bh(&br->hash_lock);
return err;
@@ -1324,12 +1318,11 @@ static int __br_fdb_delete(struct net_bridge *br,
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
- struct net_bridge_vlan *v;
struct net_bridge *br;
int err;
@@ -1348,23 +1341,19 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
}
if (vid) {
- v = br_vlan_find(vg, vid);
- if (!v) {
- pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
- return -EINVAL;
- }
-
- err = __br_fdb_delete(br, p, addr, vid);
+ err = __br_fdb_delete(br, p, addr, vid, notified);
} else {
+ struct net_bridge_vlan *v;
+
err = -ENOENT;
- err &= __br_fdb_delete(br, p, addr, 0);
+ err &= __br_fdb_delete(br, p, addr, 0, notified);
if (!vg || !vg->num_vlans)
return err;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
- err &= __br_fdb_delete(br, p, addr, v->vid);
+ err &= __br_fdb_delete(br, p, addr, v->vid, notified);
}
}
@@ -1472,12 +1461,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
- /* Take over SW learned entry */
- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+ } else {
modified = true;
}
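
The br_fdb.c changes above thread a bool *notified out-parameter through fdb add and delete, so the helper that actually sent the RTM_NEWNEIGH/RTM_DELNEIGH notification records that fact, presumably letting the caller avoid generating a duplicate. A standalone sketch of the out-parameter pattern (illustrative names and error codes):

#include <stdbool.h>
#include <stdio.h>

static int fdb_delete(bool entry_exists, bool *notified)
{
	if (!entry_exists)
		return -2;		/* -ENOENT stand-in, nothing was notified */

	/* ... remove the entry and send the delete notification ... */
	*notified = true;
	return 0;
}

int main(void)
{
	bool notified = false;
	int err = fdb_delete(true, &notified);

	printf("err=%d notified=%d\n", err, notified);
	if (!err && !notified)
		puts("caller would send its own notification here");
	return 0;
}
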
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7431f89e897b..29097e984b4f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
- p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
- nbp_switchdev_allowed_egress(p, skb) &&
+ (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
+ br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
!br_skb_isolated(p, skb);
}
@@ -201,6 +201,7 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
enum br_pkt_type pkt_type, bool local_rcv, bool local_orig,
u16 vid)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET;
struct net_bridge_port *prev = NULL;
struct net_bridge_port *p;
@@ -234,8 +235,11 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
continue;
prev = maybe_deliver(prev, p, skb, local_orig);
- if (IS_ERR(prev))
+ if (IS_ERR(prev)) {
+ reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM :
+ SKB_DROP_REASON_NOT_SPECIFIED;
goto out;
+ }
}
if (!prev)
@@ -249,7 +253,7 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
out:
if (!local_rcv)
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -258,6 +262,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
const unsigned char *src = eth_hdr(skb)->h_source;
+ struct sk_buff *nskb;
if (!should_deliver(p, skb))
return;
@@ -266,12 +271,16 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
if (skb->dev == p->dev && ether_addr_equal(src, addr))
return;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb) {
+ __skb_push(skb, ETH_HLEN);
+ nskb = pskb_copy(skb, GFP_ATOMIC);
+ __skb_pull(skb, ETH_HLEN);
+ if (!nskb) {
DEV_STATS_INC(dev, tx_dropped);
return;
}
+ skb = nskb;
+ __skb_pull(skb, ETH_HLEN);
if (!is_broadcast_ether_addr(addr))
memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
@@ -284,6 +293,7 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct net_bridge_mcast *brmctx,
bool local_rcv, bool local_orig)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET;
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
bool allow_mode_include = true;
@@ -324,8 +334,11 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
}
prev = maybe_deliver(prev, port, skb, local_orig);
- if (IS_ERR(prev))
+ if (IS_ERR(prev)) {
+ reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM :
+ SKB_DROP_REASON_NOT_SPECIFIED;
goto out;
+ }
delivered:
if ((unsigned long)lport >= (unsigned long)port)
p = rcu_dereference(p->next);
@@ -344,6 +357,6 @@ delivered:
out:
if (!local_rcv)
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
}
#endif
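
br_forward.c above replaces bare kfree_skb() with kfree_skb_reason(): the flood paths start from SKB_DROP_REASON_NO_TX_TARGET and refine the reason (for example to NOMEM) when a delivery attempt fails. A standalone sketch of the reason mapping, with an illustrative enum standing in for skb_drop_reason:

#include <stdio.h>

enum drop_reason { NOT_SPECIFIED, NO_TX_TARGET, NOMEM };

static const char *reason_str[] = { "not specified", "no tx target", "nomem" };

static enum drop_reason classify(int deliver_err, int had_target)
{
	if (!had_target)
		return NO_TX_TARGET;		/* default before any delivery */
	return deliver_err == -12 /* -ENOMEM */ ? NOMEM : NOT_SPECIFIED;
}

int main(void)
{
	printf("%s\n", reason_str[classify(0, 0)]);	/* no tx target */
	printf("%s\n", reason_str[classify(-12, 1)]);	/* nomem */
	return 0;
}
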
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f21097e73482..232133a0fd21 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
BR_MCAST_DIR_TX);
+ BR_INPUT_SKB_CB(skb)->promisc = promisc;
+
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
@@ -73,6 +75,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
enum br_pkt_type pkt_type = BR_PKT_UNICAST;
struct net_bridge_fdb_entry *dst = NULL;
@@ -82,6 +85,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
+ bool promisc;
u16 vid = 0;
u8 state;
@@ -93,8 +97,10 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (br_mst_is_enabled(br)) {
state = BR_STATE_FORWARDING;
} else {
- if (p->state == BR_STATE_DISABLED)
+ if (p->state == BR_STATE_DISABLED) {
+ reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE;
goto drop;
+ }
state = p->state;
}
@@ -137,7 +143,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
- local_rcv = !!(br->dev->flags & IFF_PROMISC);
+ promisc = !!(br->dev->flags & IFF_PROMISC);
+ local_rcv = promisc;
+
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
/* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -150,8 +158,10 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
}
}
- if (state == BR_STATE_LEARNING)
+ if (state == BR_STATE_LEARNING) {
+ reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE;
goto drop;
+ }
BR_INPUT_SKB_CB(skb)->brdev = br->dev;
BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);
@@ -200,7 +210,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
unsigned long now = jiffies;
if (test_bit(BR_FDB_LOCAL, &dst->flags))
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, false);
if (now != dst->used)
dst->used = now;
@@ -213,12 +223,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
}
if (local_rcv)
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, promisc);
out:
return 0;
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
@@ -319,6 +329,7 @@ static int br_process_frame_type(struct net_bridge_port *p,
*/
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct net_bridge_port *p;
struct sk_buff *skb = *pskb;
const unsigned char *dest = eth_hdr(skb)->h_dest;
@@ -326,8 +337,10 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
- if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) {
+ reason = SKB_DROP_REASON_MAC_INVALID_SOURCE;
goto drop;
+ }
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
@@ -369,6 +382,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
+ reason = SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL;
goto drop;
case 0x0E: /* 802.1AB LLDP */
@@ -386,6 +400,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
+ BR_INPUT_SKB_CB(skb)->promisc = false;
+
/* The else clause should be hit when nf_hook():
* - returns < 0 (drop/error)
* - returns = 0 (stolen/nf_queue)
@@ -416,8 +432,9 @@ defer_stp_filtering:
return nf_hook_bridge_pre(skb, pskb);
default:
+ reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE;
drop:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
}
return RX_HANDLER_CONSUMED;
}
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index bc37e47ad829..1a52a0bca086 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -1674,7 +1674,7 @@ int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
spin_lock_bh(&br->multicast_lock);
mp = br_mdb_ip_get(br, &group);
- if (!mp) {
+ if (!mp || (!mp->ports && !mp->host_joined)) {
NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
err = -ENOENT;
goto unlock;
diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
index ee680adcee17..1820f09ff59c 100644
--- a/net/bridge/br_mst.c
+++ b/net/bridge/br_mst.c
@@ -73,12 +73,11 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
}
EXPORT_SYMBOL_GPL(br_mst_get_state);
-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+ struct net_bridge_vlan *v,
u8 state)
{
- struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
-
- if (v->state == state)
+ if (br_vlan_get_state(v) == state)
return;
br_vlan_set_state(v, state);
@@ -100,11 +99,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
};
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
- int err;
+ int err = 0;
- vg = nbp_vlan_group(p);
+ rcu_read_lock();
+ vg = nbp_vlan_group_rcu(p);
if (!vg)
- return 0;
+ goto out;
/* MSTI 0 (CST) state changes are notified via the regular
* SWITCHDEV_ATTR_ID_PORT_STP_STATE.
@@ -112,17 +112,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
if (msti) {
err = switchdev_port_attr_set(p->dev, &attr, extack);
if (err && err != -EOPNOTSUPP)
- return err;
+ goto out;
}
- list_for_each_entry(v, &vg->vlan_list, vlist) {
+ err = 0;
+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (v->brvlan->msti != msti)
continue;
- br_mst_vlan_set_state(p, v, state);
+ br_mst_vlan_set_state(vg, v, state);
}
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
@@ -136,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
* it.
*/
if (v != pv && v->brvlan->msti == msti) {
- br_mst_vlan_set_state(pv->port, pv, v->state);
+ br_mst_vlan_set_state(vg, pv, v->state);
return;
}
}
/* Otherwise, start out in a new MSTI with all ports disabled. */
- return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+ return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
}
int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2d7b73242958..b2ae0d2434d2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2045,16 +2045,14 @@ void br_multicast_del_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
- HLIST_HEAD(deleted_head);
struct hlist_node *n;
/* Take care of the remaining groups, only perm ones should be left */
spin_lock_bh(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
br_multicast_find_del_pg(br, pg);
- hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
- br_multicast_gc(&deleted_head);
+ flush_work(&br->mcast_gc_work);
br_multicast_port_ctx_deinit(&port->multicast_ctx);
free_percpu(port->mcast_stats);
}
@@ -5053,7 +5051,7 @@ void br_multicast_uninit_stats(struct net_bridge *br)
free_percpu(br->mcast_stats);
}
-/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
+/* noinline for https://llvm.org/pr45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 35e10c5a766d..94cbe967d1c1 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -33,9 +33,11 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/dst_metadata.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>
+#include <net/inet_dscp.h>
#include <linux/uaccess.h>
#include "br_private.h"
@@ -137,6 +139,7 @@ static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
struct brnf_frag_data {
+ local_lock_t bh_lock;
char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
u8 encap_size;
u8 size;
@@ -144,7 +147,9 @@ struct brnf_frag_data {
__be16 vlan_proto;
};
-static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
+static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
static void nf_bridge_info_free(struct sk_buff *skb)
{
@@ -365,11 +370,11 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
*/
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb->dev, *br_indev;
- struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ struct net_device *dev = skb->dev, *br_indev;
+ const struct iphdr *iph = ip_hdr(skb);
+ enum skb_drop_reason reason;
struct rtable *rt;
- int err;
br_indev = nf_bridge_get_physindev(skb, net);
if (!br_indev) {
@@ -385,37 +390,13 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
}
nf_bridge->in_prerouting = 0;
if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
- if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
- struct in_device *in_dev = __in_dev_get_rcu(dev);
-
- /* If err equals -EHOSTUNREACH the error is due to a
- * martian destination or due to the fact that
- * forwarding is disabled. For most martian packets,
- * ip_route_output_key() will fail. It won't fail for 2 types of
- * martian destinations: loopback destinations and destination
- * 0.0.0.0. In both cases the packet will be dropped because the
- * destination is the loopback device and not the bridge. */
- if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
- goto free_skb;
-
- rt = ip_route_output(net, iph->daddr, 0,
- RT_TOS(iph->tos), 0);
- if (!IS_ERR(rt)) {
- /* - Bridged-and-DNAT'ed traffic doesn't
- * require ip_forwarding. */
- if (rt->dst.dev == dev) {
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- goto bridged_dnat;
- }
- ip_rt_put(rt);
- }
-free_skb:
- kfree_skb(skb);
+ reason = ip_route_input(skb, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), dev);
+ if (reason) {
+ kfree_skb_reason(skb, reason);
return 0;
} else {
if (skb_dst(skb)->dev == dev) {
-bridged_dnat:
skb->dev = br_indev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
@@ -600,11 +581,17 @@ static unsigned int br_nf_local_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
struct nf_conntrack *nfct = skb_nfct(skb);
const struct nf_ct_hook *ct_hook;
struct nf_conn *ct;
int ret;
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
@@ -612,8 +599,12 @@ static unsigned int br_nf_local_in(void *priv,
if (likely(nf_ct_is_confirmed(ct)))
return NF_ACCEPT;
+ if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
WARN_ON_ONCE(skb_shared(skb));
- WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
/* We can't call nf_confirm here, it would create a dependency
* on nf_conntrack module.
@@ -843,6 +834,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
unsigned int mtu, mtu_reserved;
+ int ret;
mtu_reserved = nf_bridge_mtu_reduction(skb);
mtu = skb->dev->mtu;
@@ -863,6 +855,10 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
return br_dev_queue_push_xmit(net, sk, skb);
}
+ /* Fragmentation on metadata/template dst is not supported */
+ if (unlikely(!skb_valid_dst(skb)))
+ goto drop;
+
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
@@ -875,6 +871,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
data = this_cpu_ptr(&brnf_frag_data_storage);
if (skb_vlan_tag_present(skb)) {
@@ -890,7 +887,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
+ ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ return ret;
}
if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6)) {
@@ -902,6 +901,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
+ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
data = this_cpu_ptr(&brnf_frag_data_storage);
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
@@ -909,8 +909,12 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- if (v6ops)
- return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
+ if (v6ops) {
+ ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ return ret;
+ }
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
kfree_skb(skb);
return -EMSGSIZE;
@@ -1170,7 +1174,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
#ifdef CONFIG_SYSCTL
static
-int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
+int brnf_sysctl_call_tables(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -1219,7 +1223,6 @@ static struct ctl_table brnf_table[] = {
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
- { }
};
static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
@@ -1268,7 +1271,7 @@ static int br_netfilter_sysctl_init_net(struct net *net)
static void br_netfilter_sysctl_exit_net(struct net *net,
struct brnf_net *brnet)
{
- struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
+ const struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(brnet->ctl_hdr);
if (!net_eq(net, &init_net))
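
In br_netfilter_hooks.c above, the per-CPU brnf_frag_data scratch buffer gains a local_lock_t and the fragmentation result is captured before the lock is released instead of returning while the buffer is still borrowed. A standalone sketch of the "lock, fill scratch, use, unlock, then return" shape; a pthread mutex stands in for local_lock_nested_bh on per-CPU data (build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static struct {
	pthread_mutex_t lock;
	char mac[32];
	unsigned char size;
} frag_scratch = { .lock = PTHREAD_MUTEX_INITIALIZER };

static int fragment_and_send(const char *hdr, unsigned char len)
{
	int ret;

	pthread_mutex_lock(&frag_scratch.lock);
	frag_scratch.size = len;
	memcpy(frag_scratch.mac, hdr, len);	/* scratch only valid under the lock */
	ret = 0;				/* result of the fragmentation call */
	pthread_mutex_unlock(&frag_scratch.lock);
	return ret;
}

int main(void)
{
	printf("ret=%d\n", fragment_and_send("\x01\x02\x03\x04\x05\x06", 6));
	return 0;
}
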
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5ad4abfcb7ba..3e0f47203f2a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
u32 filter_mask, const struct net_device *dev,
bool getlink)
{
- u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
+ IF_OPER_DOWN;
struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
@@ -666,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
{
u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
- return br_info_notify(event, br, port, filter);
+ br_info_notify(event, br, port, filter);
}
/*
@@ -1919,8 +1920,13 @@ int __init br_netlink_init(void)
{
int err;
- br_vlan_rtnl_init();
- rtnl_af_register(&br_af_ops);
+ err = br_vlan_rtnl_init();
+ if (err)
+ goto out;
+
+ err = rtnl_af_register(&br_af_ops);
+ if (err)
+ goto out_vlan;
err = rtnl_link_register(&br_link_ops);
if (err)
@@ -1930,6 +1936,9 @@ int __init br_netlink_init(void)
out_af:
rtnl_af_unregister(&br_af_ops);
+out_vlan:
+ br_vlan_rtnl_uninit();
+out:
return err;
}
diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
index 17abf092f7ca..71a12da30004 100644
--- a/net/bridge/br_netlink_tunnel.c
+++ b/net/bridge/br_netlink_tunnel.c
@@ -315,8 +315,8 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
if (curr_change)
*changed = curr_change;
- __vlan_tunnel_handle_range(p, &v_start, &v_end, v,
- curr_change);
+ __vlan_tunnel_handle_range(p, &v_start, &v_end, v,
+ curr_change);
}
if (v_start && v_end)
br_vlan_notify(br, p, v_start->vid, v_end->vid,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 86ea5e6689b5..1054b8a88edc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -589,6 +589,7 @@ struct br_input_skb_cb {
#endif
u8 proxyarp_replied:1;
u8 src_port_isolated:1;
+ u8 promisc:1;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_filtered:1;
#endif
@@ -852,12 +853,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack);
+ bool *notified, struct netlink_ext_ack *extack);
int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev,
struct netlink_ext_ack *extack);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags,
- struct netlink_ext_ack *extack);
+ bool *notified, struct netlink_ext_ack *extack);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int *idx);
int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
@@ -1570,7 +1571,10 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
void *ptr);
-void br_vlan_rtnl_init(void);
+void br_vlan_vlan_upper_event(struct net_device *br_dev,
+ struct net_device *vlan_dev,
+ unsigned long event);
+int br_vlan_rtnl_init(void);
void br_vlan_rtnl_uninit(void);
void br_vlan_notify(const struct net_bridge *br,
const struct net_bridge_port *p,
@@ -1801,10 +1805,17 @@ static inline int br_vlan_bridge_event(struct net_device *dev,
return 0;
}
-static inline void br_vlan_rtnl_init(void)
+static inline void br_vlan_vlan_upper_event(struct net_device *br_dev,
+ struct net_device *vlan_dev,
+ unsigned long event)
{
}
+static inline int br_vlan_rtnl_init(void)
+{
+ return 0;
+}
+
static inline void br_vlan_rtnl_uninit(void)
{
}
@@ -2288,6 +2299,6 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p);
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
-struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
+struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *m);
bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid);
#endif
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 0e4572f31330..7895489ac6fe 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -17,7 +17,7 @@
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "br_private.h"
#include "br_private_stp.h"
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index ea733542244c..c1176a5e02c4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1002,7 +1002,7 @@ static const struct attribute_group bridge_group = {
* Returns the number of bytes read.
*/
static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -1023,10 +1023,10 @@ static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
return n;
}
-static struct bin_attribute bridge_forward = {
+static const struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
.mode = 0444, },
- .read = brforward_read,
+ .read_new = brforward_read,
};
/*
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 15f44d026e75..d9a69ec9affe 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br)
vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
@@ -1664,6 +1664,18 @@ static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
}
}
+static void br_vlan_toggle_bridge_binding(struct net_device *br_dev,
+ bool enable)
+{
+ struct net_bridge *br = netdev_priv(br_dev);
+
+ if (enable)
+ br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
+ else
+ br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
+ br_vlan_has_upper_bind_vlan_dev(br_dev));
+}
+
static void br_vlan_upper_change(struct net_device *dev,
struct net_device *upper_dev,
bool linking)
@@ -1673,13 +1685,9 @@ static void br_vlan_upper_change(struct net_device *dev,
if (!br_vlan_is_bind_vlan_dev(upper_dev))
return;
- if (linking) {
+ br_vlan_toggle_bridge_binding(dev, linking);
+ if (linking)
br_vlan_set_vlan_dev_state(br, upper_dev);
- br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
- } else {
- br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
- br_vlan_has_upper_bind_vlan_dev(dev));
- }
}
struct br_vlan_link_state_walk_data {
@@ -1764,6 +1772,30 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
return ret;
}
+void br_vlan_vlan_upper_event(struct net_device *br_dev,
+ struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlan_dev);
+ struct net_bridge *br = netdev_priv(br_dev);
+ bool bridge_binding;
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ case NETDEV_UP:
+ break;
+ default:
+ return;
+ }
+
+ bridge_binding = vlan->flags & VLAN_FLAG_BRIDGE_BINDING;
+ br_vlan_toggle_bridge_binding(br_dev, bridge_binding);
+ if (bridge_binding)
+ br_vlan_set_vlan_dev_state(br, vlan_dev);
+ else if (!bridge_binding && netif_carrier_ok(br_dev))
+ netif_carrier_on(vlan_dev);
+}
+
/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
@@ -2296,19 +2328,18 @@ static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-void br_vlan_rtnl_init(void)
+static const struct rtnl_msg_handler br_vlan_rtnl_msg_handlers[] = {
+ {THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, br_vlan_rtm_process, NULL, 0},
+ {THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, br_vlan_rtm_process, NULL, 0},
+ {THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, br_vlan_rtm_dump, 0},
+};
+
+int br_vlan_rtnl_init(void)
{
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
- br_vlan_rtm_dump, 0);
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
- br_vlan_rtm_process, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
- br_vlan_rtm_process, NULL, 0);
+ return rtnl_register_many(br_vlan_rtnl_msg_handlers);
}
void br_vlan_rtnl_uninit(void)
{
- rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
- rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
- rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
+ rtnl_unregister_many(br_vlan_rtnl_msg_handlers);
}
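
br_vlan.c above switches to a table of rtnl_msg_handler entries registered with rtnl_register_many() and unregistered from the same table, so a registration failure can be unwound in one place. A standalone sketch of table-driven register/unregister with rollback (illustrative types and names):

#include <stdio.h>

struct msg_handler {
	int msgtype;
	int (*doit)(void);
};

static int process_vlan(void) { return 0; }

static int do_register(int msgtype, int (*doit)(void))
{
	printf("register msgtype %d%s\n", msgtype, doit ? "" : " (dump only)");
	return 0;
}

static void do_unregister(int msgtype)
{
	printf("unregister msgtype %d\n", msgtype);
}

static const struct msg_handler handlers[] = {
	{ 1 /* RTM_NEWVLAN */, process_vlan },
	{ 2 /* RTM_DELVLAN */, process_vlan },
	{ 3 /* RTM_GETVLAN */, NULL },
};

static int register_many(const struct msg_handler *tbl, int n)
{
	for (int i = 0; i < n; i++) {
		if (do_register(tbl[i].msgtype, tbl[i].doit)) {
			while (i--)		/* roll back what already succeeded */
				do_unregister(tbl[i].msgtype);
			return -1;
		}
	}
	return 0;
}

static void unregister_many(const struct msg_handler *tbl, int n)
{
	for (int i = 0; i < n; i++)
		do_unregister(tbl[i].msgtype);
}

int main(void)
{
	if (register_many(handlers, 3) == 0)
		unregister_many(handlers, 3);
	return 0;
}
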
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 81833ca7a2c7..a966a6ec8263 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -65,13 +65,14 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
{
struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
int err;
if (metadata)
return -EEXIST;
- metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
- key, 0);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, key, 0);
if (!metadata)
return -EINVAL;
@@ -185,6 +186,7 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tunnel_dst;
__be64 tunnel_id;
int err;
@@ -202,7 +204,8 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
return err;
if (BR_INPUT_SKB_CB(skb)->backup_nhid) {
- tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, flags,
tunnel_id, 0);
if (!tunnel_dst)
return -ENOMEM;
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7f304a19ac1b..f16bbbbb9481 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -39,6 +39,16 @@ config NF_CONNTRACK_BRIDGE
To compile it as a module, choose M here. If unsure, say N.
+# old sockopt interface and eval loop
+config BRIDGE_NF_EBTABLES_LEGACY
+ tristate "Legacy EBTABLES support"
+ depends on BRIDGE && NETFILTER_XTABLES
+ default n
+ help
+ Legacy ebtables packet/frame classifier.
+ This is not needed if you are using ebtables over nftables
+ (iptables-nft).
+
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
@@ -55,6 +65,7 @@ if BRIDGE_NF_EBTABLES
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
@@ -65,6 +76,7 @@ config BRIDGE_EBT_BROUTE
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
@@ -74,6 +86,7 @@ config BRIDGE_EBT_T_FILTER
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 1c9ce49ab651..b9a1303da977 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_BRIDGE) += nf_conntrack_bridge.o
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
+obj-$(CONFIG_BRIDGE_NF_EBTABLES_LEGACY) += ebtables.o
# tables
obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 99d82676f780..3e67d4aff419 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
struct ebt_table_info *newinfo;
struct ebt_replace tmp;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1254,7 +1256,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
goto free_unlock;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
if (newinfo->nentries)
@@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
{
struct ebt_replace hlp;
+ if (len < sizeof(hlp))
+ return -EINVAL;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
@@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg,
{
struct compat_ebt_replace hlp;
+ if (len < sizeof(hlp))
+ return -EINVAL;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
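
The ebtables.c hunks above add explicit len < sizeof() checks before copy_from_sockptr() in do_replace() and the counter-update paths, rejecting requests too short to contain the fixed header. A standalone sketch of the same guard, with memcpy() standing in for copy_from_sockptr() and an illustrative header layout:

#include <stdio.h>
#include <string.h>

struct replace_hdr {
	unsigned int num_counters;
	unsigned int entries_size;
};

static int do_replace(const void *uarg, size_t len)
{
	struct replace_hdr tmp;

	if (len < sizeof(tmp))
		return -22;		/* -EINVAL: too short for the fixed header */
	memcpy(&tmp, uarg, sizeof(tmp));
	return 0;
}

int main(void)
{
	struct replace_hdr h = { 1, 64 };

	printf("%d\n", do_replace(&h, sizeof(h)));	/* 0 */
	printf("%d\n", do_replace(&h, 3));		/* -22 */
	return 0;
}
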
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 6f877e31709b..816bb0fde718 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
unsigned int hlen, ll_rs, mtu;
ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
@@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
if (iter.frag)
ip_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -113,7 +113,7 @@ slow_path:
goto blackhole;
}
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- enum ip_conntrack_info ctinfo;
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+ struct nf_conntrack *nfct = skb_nfct(skb);
struct nf_conn *ct;
- if (skb->pkt_type == PACKET_HOST)
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
+ if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
/* nf_conntrack_confirm() cannot handle concurrent clones,
* this happens for broad/multicast frames with e.g. macvlan on top
* of the bridge device.
*/
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+ ct = container_of(nfct, struct nf_conn, ct_general);
+ if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
return NF_ACCEPT;
/* let inet prerouting call conntrack again */
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index bd4d1b4d745f..5adced1e7d0c 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -63,7 +63,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
return nft_meta_get_eval(expr, regs, pkt);
}
- strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
+ strscpy_pad((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
return;
err:
regs->verdict.code = NFT_BREAK;
@@ -142,7 +142,7 @@ static int nft_meta_bridge_set_init(const struct nft_ctx *ctx,
}
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
+ err = nft_parse_register_load(ctx, tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
@@ -168,8 +168,7 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
}
static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
@@ -179,7 +178,7 @@ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
hooks = 1 << NF_BR_PRE_ROUTING;
break;
default:
- return nft_meta_set_validate(ctx, expr, data);
+ return nft_meta_set_validate(ctx, expr);
}
return nft_chain_validate_hooks(ctx->chain, hooks);
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 71b54fed7263..1cb5c16e97b7 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -170,8 +170,7 @@ out:
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN));
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 8480684f2762..20139fa1be1f 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -201,14 +201,14 @@ int cfctrl_linkup_request(struct cflayer *layer,
struct cflayer *user_layer)
{
struct cfctrl *cfctrl = container_obj(layer);
+ struct cflayer *dn = cfctrl->serv.layer.dn;
+ char utility_name[UTILITY_NAME_LENGTH];
+ struct cfctrl_request_info *req;
+ struct cfpkt *pkt;
u32 tmp32;
u16 tmp16;
u8 tmp8;
- struct cfctrl_request_info *req;
int ret;
- char utility_name[16];
- struct cfpkt *pkt;
- struct cflayer *dn = cfctrl->serv.layer.dn;
if (!dn) {
pr_debug("not able to send linkup request\n");
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 7796414d47e5..96236d21b18e 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -21,13 +21,6 @@ do { \
pr_warn(errmsg); \
} while (0)
-struct cfpktq {
- struct sk_buff_head head;
- atomic_t count;
- /* Lock protects count updates */
- spinlock_t lock;
-};
-
/*
* net/caif/ is generic and does not
* understand SKB, so we do this typecast
@@ -305,10 +298,8 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
return dstpkt;
}
- if (expectlen > addlen)
- neededtailspace = expectlen;
- else
- neededtailspace = addlen;
+
+ neededtailspace = max(expectlen, addlen);
if (dst->tail + neededtailspace > dst->end) {
/* Create a duplicate of 'dst' with more tail space */
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 7b0af33bdb97..3c335057f255 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -9,7 +9,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 9cef9496a707..171fa32ada85 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -183,12 +183,6 @@ bool cfsrvl_ready(struct cfsrvl *service, int *err)
return true;
}
-u8 cfsrvl_getphyid(struct cflayer *layer)
-{
- struct cfsrvl *servl = container_obj(layer);
- return servl->dev_info.id;
-}
-
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
{
struct cfsrvl *servl = container_obj(layer);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 47901bd4def1..94ad09e36df2 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -47,7 +47,6 @@ struct chnl_net {
struct caif_connect_request conn_req;
struct list_head list_field;
struct net_device *netdev;
- char name[256];
wait_queue_head_t netmgmt_wq;
/* Flow status to remember and control the transmission. */
bool flowenabled;
@@ -347,7 +346,6 @@ static int chnl_net_init(struct net_device *dev)
struct chnl_net *priv;
ASSERT_RTNL();
priv = netdev_priv(dev);
- strncpy(priv->name, dev->name, sizeof(priv->name));
INIT_LIST_HEAD(&priv->list_field);
return 0;
}
diff --git a/net/can/Kconfig b/net/can/Kconfig
index cb56be8e3862..af64a6f76458 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -56,18 +56,17 @@ config CAN_GW
source "net/can/j1939/Kconfig"
config CAN_ISOTP
- tristate "ISO 15765-2:2016 CAN transport protocol"
+ tristate "ISO 15765-2 CAN transport protocol"
help
CAN Transport Protocols offer support for segmented Point-to-Point
communication between CAN nodes via two defined CAN Identifiers.
+ This protocol driver implements segmented data transfers for CAN CC
+ (aka Classical CAN, CAN 2.0B) and CAN FD frame types which were
+ introduced with ISO 15765-2:2016.
As CAN frames can only transport a small amount of data bytes
- (max. 8 bytes for 'classic' CAN and max. 64 bytes for CAN FD) this
+ (max. 8 bytes for CAN CC and max. 64 bytes for CAN FD) this
segmentation is needed to transport longer Protocol Data Units (PDU)
as needed e.g. for vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN
traffic.
- This protocol driver implements data transfers according to
- ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types.
- If you want to perform automotive vehicle diagnostic services (UDS),
- say 'y'.
endif
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7343fd487dbe..65230e81fa08 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -171,6 +171,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
/* release sk on errors */
sock_orphan(sk);
sock_put(sk);
+ sock->sk = NULL;
}
errout:
@@ -286,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop)
netif_rx(newskb);
/* update statistics */
- pkg_stats->tx_frames++;
- pkg_stats->tx_frames_delta++;
+ atomic_long_inc(&pkg_stats->tx_frames);
+ atomic_long_inc(&pkg_stats->tx_frames_delta);
return 0;
@@ -646,8 +647,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
int matches;
/* update statistics */
- pkg_stats->rx_frames++;
- pkg_stats->rx_frames_delta++;
+ atomic_long_inc(&pkg_stats->rx_frames);
+ atomic_long_inc(&pkg_stats->rx_frames_delta);
/* create non-zero unique skb identifier together with *skb */
while (!(can_skb_prv(skb)->skbcnt))
@@ -668,8 +669,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
if (matches > 0) {
- pkg_stats->matches++;
- pkg_stats->matches_delta++;
+ atomic_long_inc(&pkg_stats->matches);
+ atomic_long_inc(&pkg_stats->matches_delta);
}
}
@@ -865,6 +866,8 @@ static __init int can_init(void)
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, len) !=
+ offsetof(struct canxl_frame, flags) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 7c2d9161e224..22f3352c77fe 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -66,9 +66,9 @@ struct receiver {
struct can_pkg_stats {
unsigned long jiffies_init;
- unsigned long rx_frames;
- unsigned long tx_frames;
- unsigned long matches;
+ atomic_long_t rx_frames;
+ atomic_long_t tx_frames;
+ atomic_long_t matches;
unsigned long total_rx_rate;
unsigned long total_tx_rate;
@@ -82,9 +82,9 @@ struct can_pkg_stats {
unsigned long max_tx_rate;
unsigned long max_rx_match_ratio;
- unsigned long rx_frames_delta;
- unsigned long tx_frames_delta;
- unsigned long matches_delta;
+ atomic_long_t rx_frames_delta;
+ atomic_long_t tx_frames_delta;
+ atomic_long_t matches_delta;
};
/* persistent statistics */
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9168114fc87f..217049fa496e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -72,9 +72,11 @@
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
+#define RX_LOCAL 0x10 /* frame was created on the local host */
+#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
-#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
+#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
@@ -138,6 +140,16 @@ static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
+/* Return pointer to store the extra msg flags for bcm_recvmsg().
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+ * in skb->cb.
+ */
+static inline unsigned int *bcm_flags(struct sk_buff *skb)
+{
+ /* return pointer after struct sockaddr_can */
+ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
+}
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -325,6 +337,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
+ unsigned int *pflags;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -332,6 +345,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
skb_put_data(skb, head, sizeof(*head));
+ /* ensure space for sockaddr_can and msg flags */
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
+
+ /* initialize msg flags */
+ pflags = bcm_flags(skb);
+ *pflags = 0;
+
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
@@ -344,8 +365,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
- if (head->nframes == 1)
+ if (head->nframes == 1) {
+ if (firstframe->flags & RX_LOCAL)
+ *pflags |= MSG_DONTROUTE;
+ if (firstframe->flags & RX_OWN)
+ *pflags |= MSG_CONFIRM;
+
firstframe->flags &= BCM_CAN_FLAGS_MASK;
+ }
}
if (has_timestamp) {
@@ -360,7 +387,6 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -444,7 +470,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
- data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ data->flags &= ~RX_THR;
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
@@ -465,13 +491,17 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
+ /* add own/local/remote traffic flags */
+ lastdata->flags |= traffic_flags;
+
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
@@ -508,7 +538,8 @@ rx_changed_settime:
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
@@ -521,7 +552,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
@@ -529,7 +560,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -537,7 +568,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -644,6 +675,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
+ unsigned char traffic_flags;
if (op->can_id != rxframe->can_id)
return;
@@ -673,15 +705,24 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
return;
}
+ /* compute flags to distinguish between own/local/remote CAN traffic */
+ traffic_flags = 0;
+ if (skb->sk) {
+ traffic_flags |= RX_LOCAL;
+ if (skb->sk == op->sk)
+ traffic_flags |= RX_OWN;
+ }
+
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, op->last_frames, rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe,
+ traffic_flags);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, rxframe);
+ bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
goto rx_starttimer;
}
@@ -698,7 +739,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
- bcm_rx_cmp_to_index(op, i, rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe,
+ traffic_flags);
break;
}
}
@@ -1428,6 +1470,12 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
+ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+ bo->bcm_proc_read = NULL;
+ }
+#endif
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
@@ -1675,6 +1723,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
+ /* assign the flags that have been recorded in bcm_send_to_user() */
+ msg->msg_flags |= *(bcm_flags(skb));
+
skb_free_datagram(sk, skb);
return size;
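Taken together, the bcm.c hunks let CAN_BCM receivers tell own, local and remote traffic apart: bcm_rx_handler() records RX_OWN/RX_LOCAL in the cached frame flags, bcm_send_to_user() translates them into MSG_CONFIRM/MSG_DONTROUTE stored behind the sockaddr_can in skb->cb, and bcm_recvmsg() ORs them into msg_flags. A hedged user-space sketch of how an application could consume the new flags follows; the RX_SETUP configuration is assumed to have happened already and error handling is trimmed.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

struct bcm_rx_msg {
	struct bcm_msg_head head;
	struct can_frame frame;
};

static void classify_rx(int s)
{
	struct bcm_rx_msg msg;
	struct iovec iov = { .iov_base = &msg, .iov_len = sizeof(msg) };
	struct msghdr mh = { 0 };

	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;

	/* BCM socket is assumed to be connected and set up with RX_SETUP */
	if (recvmsg(s, &mh, 0) < 0)
		return;

	if (mh.msg_flags & MSG_CONFIRM)
		printf("frame was sent via this very socket\n");
	else if (mh.msg_flags & MSG_DONTROUTE)
		printf("frame originated on this host\n");
	else
		printf("frame came from a remote node\n");
}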
diff --git a/net/can/gw.c b/net/can/gw.c
index 37528826935e..ef93293c1fae 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1265,6 +1265,15 @@ static struct pernet_operations cangw_pernet_ops = {
.exit_batch = cangw_pernet_exit_batch,
};
+static const struct rtnl_msg_handler cgw_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_NEWROUTE,
+ .doit = cgw_create_job},
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_DELROUTE,
+ .doit = cgw_remove_job},
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE,
+ .dumpit = cgw_dump_jobs},
+};
+
static __init int cgw_module_init(void)
{
int ret;
@@ -1290,27 +1299,13 @@ static __init int cgw_module_init(void)
if (ret)
goto out_register_notifier;
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
- NULL, cgw_dump_jobs, 0);
- if (ret)
- goto out_rtnl_register1;
-
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
- cgw_create_job, NULL, 0);
- if (ret)
- goto out_rtnl_register2;
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
- cgw_remove_job, NULL, 0);
+ ret = rtnl_register_many(cgw_rtnl_msg_handlers);
if (ret)
- goto out_rtnl_register3;
+ goto out_rtnl_register;
return 0;
-out_rtnl_register3:
- rtnl_unregister(PF_CAN, RTM_NEWROUTE);
-out_rtnl_register2:
- rtnl_unregister(PF_CAN, RTM_GETROUTE);
-out_rtnl_register1:
+out_rtnl_register:
unregister_netdevice_notifier(&notifier);
out_register_notifier:
kmem_cache_destroy(cgw_cache);
diff --git a/net/can/isotp.c b/net/can/isotp.c
index d1c6f206f429..16046931542a 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -72,7 +72,7 @@
#include <net/sock.h>
#include <net/net_namespace.h>
-MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
+MODULE_DESCRIPTION("PF_CAN ISO 15765-2 transport protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS("can-proto-6");
@@ -83,10 +83,11 @@ MODULE_ALIAS("can-proto-6");
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-/* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can
- * take full 32 bit values (4 Gbyte). We would need some good concept to handle
- * this between user space and kernel space. For now set the static buffer to
- * something about 8 kbyte to be able to test this new functionality.
+/* Since ISO 15765-2:2016 the CAN isotp protocol supports more than 4095
+ * bytes per ISO PDU as the FF_DL can take full 32 bit values (4 Gbyte).
+ * We would need some good concept to handle this between user space and
+ * kernel space. For now set the static buffer to something about 8 kbyte
+ * to be able to test this new functionality.
*/
#define DEFAULT_MAX_PDU_SIZE 8300
@@ -381,8 +382,9 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
return 1;
}
- /* get communication parameters only from the first FC frame */
- if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ /* get static/dynamic communication params from first/every FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC ||
+ so->opt.flags & CAN_ISOTP_DYN_FC_PARMS) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
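The isotp.c hunk above makes the flow-control parameters dynamic: with CAN_ISOTP_DYN_FC_PARMS set, block size and STmin are refreshed from every FC frame instead of only the first one. A hedged user-space sketch of enabling the flag is shown below; the interface index and addressing are placeholders and only the option handling follows the patch.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

static int open_isotp_dyn_fc(int ifindex, canid_t tx_id, canid_t rx_id)
{
	struct can_isotp_options opts;
	struct sockaddr_can addr = { 0 };
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

	if (s < 0)
		return -1;

	memset(&opts, 0, sizeof(opts));
	opts.flags = CAN_ISOTP_DYN_FC_PARMS;	/* honor every FC frame */
	if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
		       &opts, sizeof(opts)) < 0)
		goto err;

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifindex;
	addr.can_addr.tp.tx_id = tx_id;
	addr.can_addr.tp.rx_id = rx_id;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	return s;
err:
	close(s);
	return -1;
}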
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index a6fb89fa6278..7e8a20f2fc42 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
-/* CAN_FTR: #bytes beyond data part */
-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
- sizeof(((struct can_frame *)0)->data))
-
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
memset(cf, 0, J1939_CAN_HDR);
/* make it a full can frame again */
- skb_put(skb, J1939_CAN_FTR + (8 - dlc));
+ skb_put_zero(skb, 8 - dlc);
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 305dd72c844c..17226b2341d0 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -1132,7 +1132,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
todo_size = size;
- while (todo_size) {
+ do {
struct j1939_sk_buff_cb *skcb;
segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
@@ -1177,7 +1177,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
todo_size -= segment_size;
session->total_queued_size += segment_size;
- }
+ } while (todo_size);
switch (ret) {
case 0: /* OK */
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe3df23a2595..9b72d118d756 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
skb_queue_walk(&session->skb_queue, do_skb) {
do_skcb = j1939_skb_to_cb(do_skb);
- if (offset_start >= do_skcb->offset &&
- offset_start < (do_skcb->offset + do_skb->len)) {
+ if ((offset_start >= do_skcb->offset &&
+ offset_start < (do_skcb->offset + do_skb->len)) ||
+ (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) {
skb = do_skb;
}
}
@@ -1179,10 +1180,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
break;
case -ENETDOWN:
/* In this case we should get a netdev_event(), all active
- * sessions will be cleared by
- * j1939_cancel_all_active_sessions(). So handle this as an
- * error, but let j1939_cancel_all_active_sessions() do the
- * cleanup including propagation of the error to user space.
+ * sessions will be cleared by j1939_cancel_active_session().
+ * So handle this as an error, but let
+ * j1939_cancel_active_session() do the cleanup including
+ * propagation of the error to user space.
*/
break;
case -EOVERFLOW:
@@ -1505,7 +1506,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
session->state = J1939_SESSION_NEW;
skb_queue_head_init(&session->skb_queue);
- skb_queue_tail(&session->skb_queue, skb);
+ skb_queue_tail(&session->skb_queue, skb_get(skb));
skcb = j1939_skb_to_cb(skb);
memcpy(&session->skcb, skcb, sizeof(session->skcb));
@@ -1593,8 +1594,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
struct j1939_session *session;
const u8 *dat;
+ int len, ret;
pgn_t pgn;
- int len;
netdev_dbg(priv->ndev, "%s\n", __func__);
@@ -1653,7 +1654,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
session->tskey = priv->rx_tskey++;
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
- WARN_ON_ONCE(j1939_session_activate(session));
+ ret = j1939_session_activate(session);
+ if (ret) {
+ /* Entering this scope indicates an issue with the J1939 bus.
+ * Possible scenarios include:
+ * - A time lapse occurred, and a new session was initiated
+ * due to another packet being sent correctly. This could
+ * have been caused by too long interrupt, debugger, or being
+ * out-scheduled by another task.
+ * - The bus is receiving numerous erroneous packets, either
+ * from a malfunctioning device or during a test scenario.
+ */
+ netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
+ __func__, session, skcb.addr.sa, skcb.addr.da);
+ j1939_session_put(session);
+ return NULL;
+ }
return session;
}
@@ -1681,6 +1697,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
+ if (session->transmission)
+ j1939_session_deactivate_activate_next(session);
return -EBUSY;
}
diff --git a/net/can/proc.c b/net/can/proc.c
index bbce97825f13..25fdf060e30d 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t)
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
unsigned long j = jiffies; /* snapshot */
+ long rx_frames = atomic_long_read(&pkg_stats->rx_frames);
+ long tx_frames = atomic_long_read(&pkg_stats->tx_frames);
+ long matches = atomic_long_read(&pkg_stats->matches);
+ long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta);
+ long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta);
+ long matches_delta = atomic_long_read(&pkg_stats->matches_delta);
+
/* restart counting in timer context on user request */
if (user_reset)
can_init_stats(net);
@@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t)
can_init_stats(net);
/* prevent overflow in calc_rate() */
- if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
+ if (rx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* prevent overflow in calc_rate() */
- if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
+ if (tx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* matches overflow - very improbable */
- if (pkg_stats->matches > (ULONG_MAX / 100))
+ if (matches > (LONG_MAX / 100))
can_init_stats(net);
/* calc total values */
- if (pkg_stats->rx_frames)
- pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
- pkg_stats->rx_frames;
+ if (rx_frames)
+ pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames;
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
- pkg_stats->tx_frames);
+ tx_frames);
pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
- pkg_stats->rx_frames);
+ rx_frames);
/* calc current values */
- if (pkg_stats->rx_frames_delta)
+ if (rx_frames_delta)
pkg_stats->current_rx_match_ratio =
- (pkg_stats->matches_delta * 100) /
- pkg_stats->rx_frames_delta;
+ (matches_delta * 100) / rx_frames_delta;
- pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
- pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
+ pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta);
+ pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta);
/* check / update maximum values */
if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
@@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t)
pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */
- pkg_stats->tx_frames_delta = 0;
- pkg_stats->rx_frames_delta = 0;
- pkg_stats->matches_delta = 0;
+ atomic_long_set(&pkg_stats->tx_frames_delta, 0);
+ atomic_long_set(&pkg_stats->rx_frames_delta, 0);
+ atomic_long_set(&pkg_stats->matches_delta, 0);
/* restart timer (one second) */
mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
@@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n');
- seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
- seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
- seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
+ seq_printf(m, " %8ld transmitted frames (TXF)\n",
+ atomic_long_read(&pkg_stats->tx_frames));
+ seq_printf(m, " %8ld received frames (RXF)\n",
+ atomic_long_read(&pkg_stats->rx_frames));
+ seq_printf(m, " %8ld matched frames (RXMF)\n",
+ atomic_long_read(&pkg_stats->matches));
seq_putc(m, '\n');
diff --git a/net/can/raw.c b/net/can/raw.c
index e6b822624ba2..46e8ed9d64da 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -91,6 +91,10 @@ struct raw_sock {
int recv_own_msgs;
int fd_frames;
int xl_frames;
+ struct can_raw_vcid_options raw_vcid_opts;
+ canid_t tx_vcid_shifted;
+ canid_t rx_vcid_shifted;
+ canid_t rx_vcid_mask_shifted;
int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
@@ -134,10 +138,29 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
- (!ro->xl_frames && can_is_canxl_skb(oskb)))
+ if (!ro->fd_frames && can_is_canfd_skb(oskb))
return;
+ if (can_is_canxl_skb(oskb)) {
+ struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;
+
+ /* make sure to not pass oversized frames to the socket */
+ if (!ro->xl_frames)
+ return;
+
+ /* filter CAN XL VCID content */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
+ /* apply VCID filter if user enabled the filter */
+ if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
+ (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
+ return;
+ } else {
+ /* no filter => do not forward VCID tagged frames */
+ if (cxl->prio & CANXL_VCID_MASK)
+ return;
+ }
+ }
+
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
@@ -698,6 +721,19 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->fd_frames = ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS:
+ if (optlen != sizeof(ro->raw_vcid_opts))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
+ return -EFAULT;
+
+ /* prepare 32 bit values for handling in hot path */
+ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
+ break;
+
case CAN_RAW_JOIN_FILTERS:
if (optlen != sizeof(ro->join_filters))
return -EINVAL;
@@ -720,7 +756,6 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
struct raw_sock *ro = raw_sk(sk);
int len;
void *val;
- int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
@@ -730,7 +765,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
switch (optname) {
- case CAN_RAW_FILTER:
+ case CAN_RAW_FILTER: {
+ int err = 0;
+
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
@@ -755,7 +792,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
if (!err)
err = put_user(len, optlen);
return err;
-
+ }
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
@@ -786,6 +823,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS: {
+ int err = 0;
+
+ /* user space buffer too small for VCID opts? */
+ if (len < sizeof(ro->raw_vcid_opts)) {
+ /* return -ERANGE and needed space in optlen */
+ err = -ERANGE;
+ if (put_user(sizeof(ro->raw_vcid_opts), optlen))
+ err = -EFAULT;
+ } else {
+ if (len > sizeof(ro->raw_vcid_opts))
+ len = sizeof(ro->raw_vcid_opts);
+ if (copy_to_user(optval, &ro->raw_vcid_opts, len))
+ err = -EFAULT;
+ }
+ if (!err)
+ err = put_user(len, optlen);
+ return err;
+ }
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
@@ -803,23 +859,41 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
+{
+ struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ /* sanitize non CAN XL bits */
+ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);
+
+ /* clear VCID in CAN XL frame if pass through is disabled */
+ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
+ cxl->prio &= CANXL_PRIO_MASK;
+
+ /* set VCID in CAN XL frame if enabled */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
+ cxl->prio &= CANXL_PRIO_MASK;
+ cxl->prio |= ro->tx_vcid_shifted;
+ }
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
/* Classical CAN -> no checks for flags and device capabilities */
if (can_is_can_skb(skb))
- return false;
+ return CAN_MTU;
/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
(mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
- return false;
+ return CANFD_MTU;
/* CAN XL -> needs to be enabled and a CAN XL device */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
can_is_canxl_dev_mtu(mtu))
- return false;
+ return CANXL_MTU;
- return true;
+ return 0;
}
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -829,6 +903,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int txmtu;
int ifindex;
int err = -EINVAL;
@@ -869,9 +944,16 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (raw_bad_txframe(ro, skb, dev->mtu))
+
+ /* check for valid CAN (CC/FD/XL) frame content */
+ txmtu = raw_check_txframe(ro, skb, dev->mtu);
+ if (!txmtu)
goto free_skb;
+ /* only CANXL: clear/forward/set VCID value */
+ if (txmtu == CANXL_MTU)
+ raw_put_canxl_vcid(ro, skb);
+
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
@@ -880,11 +962,11 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
skb->dev = dev;
- skb->priority = READ_ONCE(sk->sk_priority);
+ skb->priority = sockc.priority;
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
- skb_setup_tx_timestamp(skb, sockc.tsflags);
+ skb_setup_tx_timestamp(skb, &sockc);
err = can_send(skb, ro->loopback);
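The raw.c changes above wire the CAN XL virtual CAN network identifier (VCID) into CAN_RAW sockets: CAN_RAW_XL_VCID_OPTS configures per-socket TX tagging/pass-through and an RX filter, raw_put_canxl_vcid() applies the TX policy on send, and raw_rcv() drops VCID-tagged frames unless the RX filter admits them. A hedged user-space sketch of configuring the option follows; flag and field names are taken from the patch, the VCID values are arbitrary examples, and socket/interface setup is omitted.

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int enable_vcid(int s)
{
	struct can_raw_vcid_options vcid;
	int enable = 1;

	/* CAN XL frames must be enabled before VCID handling applies */
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_FRAMES,
		       &enable, sizeof(enable)) < 0)
		return -1;

	memset(&vcid, 0, sizeof(vcid));
	vcid.flags = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER;
	vcid.tx_vcid = 0x23;		/* tag outgoing CAN XL frames */
	vcid.rx_vcid = 0x45;		/* accept only this VCID ... */
	vcid.rx_vcid_mask = 0xff;	/* ... compared under this mask */

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid, sizeof(vcid));
}

Without CAN_RAW_XL_VCID_RX_FILTER the socket keeps the previous behavior for untagged traffic, but VCID-tagged frames are no longer delivered, matching the "no filter => do not forward VCID tagged frames" branch in raw_rcv().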
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 1daf95e17d67..3a5bd1cd1e99 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -429,7 +429,10 @@ static int is_out(const struct crush_map *map,
/**
* crush_choose_firstn - choose numrep distinct items of given type
* @map: the crush_map
+ * @work: working space initialized by crush_init_workspace()
* @bucket: the bucket we are choose an item from
+ * @weight: weight vector (for map leaves)
+ * @weight_max: size of weight vector
* @x: crush input value
* @numrep: the number of items to choose
* @type: the type of item to choose
@@ -445,6 +448,7 @@ static int is_out(const struct crush_map *map,
* @vary_r: pass r to recursive calls
* @out2: second output vector for leaf items (if @recurse_to_leaf)
* @parent_r: r value passed from the parent
+ * @choose_args: weights and ids for each known bucket
*/
static int crush_choose_firstn(const struct crush_map *map,
struct crush_work *work,
@@ -636,9 +640,8 @@ reject:
}
-/**
+/*
* crush_choose_indep: alternative breadth-first positionally stable mapping
- *
*/
static void crush_choose_indep(const struct crush_map *map,
struct crush_work *work,
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 051d22c0e4ad..01b2ce1e8fc0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -74,18 +74,6 @@ int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
return set_secret(dst, src->key);
}
-int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
-{
- if (*p + sizeof(u16) + sizeof(key->created) +
- sizeof(u16) + key->len > end)
- return -ERANGE;
- ceph_encode_16(p, key->type);
- ceph_encode_copy(p, &key->created, sizeof(key->created));
- ceph_encode_16(p, key->len);
- ceph_encode_copy(p, key->key, key->len);
- return 0;
-}
-
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
int ret;
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 13bd526349fa..23de29fc613c 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -21,7 +21,6 @@ struct ceph_crypto_key {
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src);
-int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3c8b78d9c4d1..d1b5705dc0c6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1254,7 +1254,7 @@ static int ceph_dns_resolve_name(const char *name, size_t namelen,
colon_p = memchr(name, ':', namelen);
if (delim_p && colon_p)
- end = delim_p < colon_p ? delim_p : colon_p;
+ end = min(delim_p, colon_p);
else if (!delim_p && colon_p)
end = colon_p;
else {
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index f263f7e91a21..ab66b599ac47 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
struct ceph_mon_client *monc =
container_of(work, struct ceph_mon_client, delayed_work.work);
- dout("monc delayed_work\n");
mutex_lock(&monc->mutex);
+ dout("%s mon%d\n", __func__, monc->cur_mon);
+ if (monc->cur_mon < 0) {
+ goto out;
+ }
+
if (monc->hunting) {
dout("%s continuing hunt\n", __func__);
reopen_session(monc);
} else {
int is_auth = ceph_auth_is_authenticated(monc->auth);
+
+ dout("%s is_authed %d\n", __func__, is_auth);
if (ceph_con_keepalive_expired(&monc->con,
CEPH_MONC_PING_TIMEOUT)) {
dout("monc keepalive timeout\n");
@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
}
}
__schedule_delayed(monc);
+
+out:
mutex_unlock(&monc->mutex);
}
@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
{
dout("stop\n");
- cancel_delayed_work_sync(&monc->delayed_work);
mutex_lock(&monc->mutex);
__close_session(monc);
+ monc->hunting = false;
monc->cur_mon = -1;
mutex_unlock(&monc->mutex);
+ cancel_delayed_work_sync(&monc->delayed_work);
+
/*
* flush msgr queue before we destroy ourselves to ensure that:
* - any work that references our embedded con is finished.
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9d078b37fe0b..b24afec24138 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request);
int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
+ WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ);
+
op->extent.sparse_ext_cnt = cnt;
op->extent.sparse_ext = kmalloc_array(cnt,
sizeof(*op->extent.sparse_ext),
@@ -4999,40 +5001,6 @@ out_put_lreq:
}
EXPORT_SYMBOL(ceph_osdc_notify);
-/*
- * Return the number of milliseconds since the watch was last
- * confirmed, or an error. If there is an error, the watch is no
- * longer valid, and should be destroyed with ceph_osdc_unwatch().
- */
-int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
- struct ceph_osd_linger_request *lreq)
-{
- unsigned long stamp, age;
- int ret;
-
- down_read(&osdc->lock);
- mutex_lock(&lreq->lock);
- stamp = lreq->watch_valid_thru;
- if (!list_empty(&lreq->pending_lworks)) {
- struct linger_work *lwork =
- list_first_entry(&lreq->pending_lworks,
- struct linger_work,
- pending_item);
-
- if (time_before(lwork->queued_stamp, stamp))
- stamp = lwork->queued_stamp;
- }
- age = jiffies - stamp;
- dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
- lreq, lreq->linger_id, age, lreq->last_error);
- /* we are truncating to msecs, so return a safe upper bound */
- ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
-
- mutex_unlock(&lreq->lock);
- up_read(&osdc->lock);
- return ret;
-}
-
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
u8 struct_v;
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 74622b278d57..5a9c4be5f222 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -131,41 +131,3 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
-
-/* Create a truncation point. */
-void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c)
-{
- c->pl = pl;
- c->page_lru = pl->head.prev;
- c->room = pl->room;
-}
-EXPORT_SYMBOL(ceph_pagelist_set_cursor);
-
-/* Truncate a pagelist to the given point. Move extra pages to reserve.
- * This won't sleep.
- * Returns: 0 on success,
- * -EINVAL if the pagelist doesn't match the trunc point pagelist
- */
-int ceph_pagelist_truncate(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c)
-{
- struct page *page;
-
- if (pl != c->pl)
- return -EINVAL;
- ceph_pagelist_unmap_tail(pl);
- while (pl->head.prev != c->page_lru) {
- page = list_entry(pl->head.prev, struct page, lru);
- /* move from pagelist to reserve */
- list_move_tail(&page->lru, &pl->free_list);
- ++pl->num_pages_free;
- }
- pl->room = c->room;
- if (!list_empty(&pl->head)) {
- page = list_entry(pl->head.prev, struct page, lru);
- pl->mapped_tail = kmap(page);
- }
- return 0;
-}
-EXPORT_SYMBOL(ceph_pagelist_truncate);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 64305e7056a1..4509757d8b3b 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -55,58 +55,6 @@ struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
-/*
- * copy user data into a page vector
- */
-int ceph_copy_user_to_page_vector(struct page **pages,
- const void __user *data,
- loff_t off, size_t len)
-{
- int i = 0;
- int po = off & ~PAGE_MASK;
- int left = len;
- int l, bad;
-
- while (left > 0) {
- l = min_t(int, PAGE_SIZE-po, left);
- bad = copy_from_user(page_address(pages[i]) + po, data, l);
- if (bad == l)
- return -EFAULT;
- data += l - bad;
- left -= l - bad;
- po += l - bad;
- if (po == PAGE_SIZE) {
- po = 0;
- i++;
- }
- }
- return len;
-}
-EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
-
-void ceph_copy_to_page_vector(struct page **pages,
- const void *data,
- loff_t off, size_t len)
-{
- int i = 0;
- size_t po = off & ~PAGE_MASK;
- size_t left = len;
-
- while (left > 0) {
- size_t l = min_t(size_t, PAGE_SIZE-po, left);
-
- memcpy(page_address(pages[i]) + po, data, l);
- data += l;
- left -= l;
- po += l;
- if (po == PAGE_SIZE) {
- po = 0;
- i++;
- }
- }
-}
-EXPORT_SYMBOL(ceph_copy_to_page_vector);
-
void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len)
diff --git a/net/core/Makefile b/net/core/Makefile
index 821aec06abf1..d9326600e289 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -18,6 +18,8 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
+obj-y += hotdata.o
+obj-y += netdev_rx_queue.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
@@ -25,6 +27,7 @@ obj-$(CONFIG_NETPOLL) += netpoll.o
obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
+obj-$(CONFIG_NET_IEEE8021Q_HELPERS) += ieee8021q_helpers.o
obj-$(CONFIG_NET_SELFTESTS) += selftests.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
@@ -40,4 +43,7 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
-obj-$(CONFIG_NET_TEST) += gso_test.o
+obj-$(CONFIG_NET_TEST) += net_test.o
+obj-$(CONFIG_NET_DEVMEM) += devmem.o
+obj-$(CONFIG_DEBUG_NET_SMALL_RTNL) += rtnl_net_debug.o
+obj-$(CONFIG_FAIL_SKB_REALLOC) += skb_fault_injection.o
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 6c4d90b24d46..2e538399757f 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -50,15 +50,16 @@ void bpf_sk_storage_free(struct sock *sk)
{
struct bpf_local_storage *sk_storage;
+ migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
- if (!sk_storage) {
- rcu_read_unlock();
- return;
- }
+ if (!sk_storage)
+ goto out;
bpf_local_storage_destroy(sk_storage);
+out:
rcu_read_unlock();
+ migrate_enable();
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
@@ -106,7 +107,7 @@ static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
if (sock) {
sdata = bpf_local_storage_update(
sock->sk, (struct bpf_local_storage_map *)map, value,
- map_flags, GFP_ATOMIC);
+ map_flags, false, GFP_ATOMIC);
sockfd_put(sock);
return PTR_ERR_OR_ZERO(sdata);
}
@@ -137,7 +138,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk,
{
struct bpf_local_storage_elem *copy_selem;
- copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC);
+ copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, false, GFP_ATOMIC);
if (!copy_selem)
return NULL;
@@ -160,6 +161,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
+ migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
@@ -212,6 +214,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
out:
rcu_read_unlock();
+ migrate_enable();
/* In case of an error, don't free anything explicitly here, the
* caller is responsible to call bpf_sk_storage_free.
@@ -243,7 +246,7 @@ BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
refcount_inc_not_zero(&sk->sk_refcnt)) {
sdata = bpf_local_storage_update(
sk, (struct bpf_local_storage_map *)map, value,
- BPF_NOEXIST, gfp_flags);
+ BPF_NOEXIST, false, gfp_flags);
/* sk must be a fullsock (guaranteed by verifier),
* so sock_gen_put() is unnecessary.
*/
@@ -352,11 +355,6 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
- const struct btf *btf_vmlinux;
- const struct btf_type *t;
- const char *tname;
- u32 btf_id;
-
if (prog->aux->dst_prog)
return false;
@@ -371,13 +369,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
return true;
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
- btf_vmlinux = bpf_get_btf_vmlinux();
- if (IS_ERR_OR_NULL(btf_vmlinux))
- return false;
- btf_id = prog->aux->attach_btf_id;
- t = btf_type_by_id(btf_vmlinux, btf_id);
- tname = btf_name_by_offset(btf_vmlinux, t->name_off);
- return !!strncmp(tname, "bpf_sk_storage",
+ return !!strncmp(prog->aux->attach_func_name, "bpf_sk_storage",
strlen("bpf_sk_storage"));
default:
return false;
@@ -496,27 +488,22 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
if (!bpf_capable())
return ERR_PTR(-EPERM);
- nla_for_each_nested(nla, nla_stgs, rem) {
- if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
- if (nla_len(nla) != sizeof(u32))
- return ERR_PTR(-EINVAL);
- nr_maps++;
- }
+ nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+ nla_stgs, rem) {
+ if (nla_len(nla) != sizeof(u32))
+ return ERR_PTR(-EINVAL);
+ nr_maps++;
}
diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
if (!diag)
return ERR_PTR(-ENOMEM);
- nla_for_each_nested(nla, nla_stgs, rem) {
- struct bpf_map *map;
- int map_fd;
-
- if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
- continue;
+ nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+ nla_stgs, rem) {
+ int map_fd = nla_get_u32(nla);
+ struct bpf_map *map = bpf_map_get(map_fd);
- map_fd = nla_get_u32(nla);
- map = bpf_map_get(map_fd);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_free;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a8b625abe242..f0693707aece 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,25 +324,6 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_free_datagram);
-void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
-{
- bool slow;
-
- if (!skb_unref(skb)) {
- sk_peek_offset_bwd(sk, len);
- return;
- }
-
- slow = lock_sock_fast(sk);
- sk_peek_offset_bwd(sk, len);
- skb_orphan(skb);
- unlock_sock_fast(sk, slow);
-
- /* skb is now orphaned, can be freed outside of locked section */
- __kfree_skb(skb);
-}
-EXPORT_SYMBOL(__skb_free_datagram_locked);
-
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
@@ -426,6 +407,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
return 0;
}
+ if (!skb_frags_readable(skb))
+ goto short_copy;
+
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -435,15 +419,23 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
- struct page *page = skb_frag_page(frag);
- u8 *vaddr = kmap(page);
+ u32 p_off, p_len, copied;
+ struct page *p;
+ u8 *vaddr;
if (copy > len)
copy = len;
- n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
- vaddr + skb_frag_off(frag) + offset - start,
- copy, data, to);
- kunmap(page);
+
+ n = 0;
+ skb_frag_foreach_page(frag,
+ skb_frag_off(frag) + offset - start,
+ copy, p, p_off, p_len, copied) {
+ vaddr = kmap_local_page(p);
+ n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ vaddr + p_off, p_len, data, to);
+ kunmap_local(vaddr);
+ }
+
offset += n;
if (n != copy)
goto short_copy;
@@ -629,16 +621,13 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
-int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb, struct iov_iter *from,
- size_t length)
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length)
{
- int frag;
-
- if (msg && msg->msg_ubuf && msg->sg_from_iter)
- return msg->sg_from_iter(sk, skb, from, length);
+ int frag = skb_shinfo(skb)->nr_frags;
- frag = skb_shinfo(skb)->nr_frags;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
while (length && iov_iter_count(from)) {
struct page *head, *last_head = NULL;
@@ -646,7 +635,6 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
int refs, order, n = 0;
size_t start;
ssize_t copied;
- unsigned long truesize;
if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
@@ -658,17 +646,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
length -= copied;
- truesize = PAGE_ALIGN(copied + start);
skb->data_len += copied;
skb->len += copied;
- skb->truesize += truesize;
- if (sk && sk->sk_type == SOCK_STREAM) {
- sk_wmem_queued_add(sk, truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_charge(sk, truesize);
- } else {
- refcount_add(truesize, &skb->sk->sk_wmem_alloc);
- }
+ skb->truesize += PAGE_ALIGN(copied + start);
head = compound_head(pages[n]);
order = compound_order(head);
@@ -711,6 +691,30 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
}
return 0;
}
+
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length)
+{
+ unsigned long orig_size = skb->truesize;
+ unsigned long truesize;
+ int ret;
+
+ if (msg && msg->msg_ubuf && msg->sg_from_iter)
+ ret = msg->sg_from_iter(skb, from, length);
+ else
+ ret = zerocopy_fill_skb_from_iter(skb, from, length);
+
+ truesize = skb->truesize - orig_size;
+ if (sk && sk->sk_type == SOCK_STREAM) {
+ sk_wmem_queued_add(sk, truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_charge(sk, truesize);
+ } else {
+ refcount_add(truesize, &skb->sk->sk_wmem_alloc);
+ }
+ return ret;
+}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);
/**
diff --git a/net/core/dev.c b/net/core/dev.c
index 76e6438f4858..2f7f5fd9ffec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -77,7 +77,9 @@
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
+#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
@@ -90,6 +92,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
@@ -103,6 +106,7 @@
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
+#include <net/netdev_queues.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
@@ -153,41 +157,23 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
+#include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
+#include <net/rps.h>
+#include <linux/phy_link_topology.h>
#include "dev.h"
+#include "devmem.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-struct list_head ptype_all __read_mostly; /* Taps */
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
-/*
- * The @dev_base_head list is protected by @dev_base_lock and the rtnl
- * semaphore.
- *
- * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
- *
- * Writers must hold the rtnl semaphore while they loop through the
- * dev_base_head list, and hold dev_base_lock for writing when they do the
- * actual updates. This allows pure readers to access the list even
- * while a writer is preparing to update it.
- *
- * To put it another way, dev_base_lock is held for writing only to
- * protect against pure readers; the rtnl semaphore provides the
- * protection against other writers.
- *
- * See, for example usages, register_netdevice() and
- * unregister_netdevice(), which must be called with the rtnl
- * semaphore held.
- */
-DEFINE_RWLOCK(dev_base_lock);
-EXPORT_SYMBOL(dev_base_lock);
-
static DEFINE_MUTEX(ifalias_mutex);
/* protects napi_hash addition/deletion and napi_gen_id */
@@ -196,12 +182,11 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
-static DECLARE_RWSEM(devnet_rename_sem);
-
static inline void dev_base_seq_inc(struct net *net)
{
- while (++net->dev_base_seq == 0)
- ;
+ unsigned int val = net->dev_base_seq + 1;
+
+ WRITE_ONCE(net->dev_base_seq, val ?: 1);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -216,37 +201,62 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
-static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
+#ifndef CONFIG_PREEMPT_RT
+
+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
+
+static int __init setup_backlog_napi_threads(char *arg)
+{
+ static_branch_enable(&use_backlog_threads_key);
+ return 0;
+}
+early_param("thread_backlog_napi", setup_backlog_napi_threads);
+
+static bool use_backlog_threads(void)
+{
+ return static_branch_unlikely(&use_backlog_threads_key);
+}
+
+#else
+
+static bool use_backlog_threads(void)
+{
+ return true;
+}
+
+#endif
+
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+ unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_save(*flags);
}
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_disable();
}
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+ unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_restore(*flags);
}
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_enable();
}
@@ -341,13 +351,22 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
return 0;
}
-static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+static void netdev_name_node_alt_free(struct rcu_head *head)
{
- list_del(&name_node->list);
+ struct netdev_name_node *name_node =
+ container_of(head, struct netdev_name_node, rcu);
+
kfree(name_node->name);
netdev_name_node_free(name_node);
}
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+ netdev_name_node_del(name_node);
+ list_del(&name_node->list);
+ call_rcu(&name_node->rcu, netdev_name_node_alt_free);
+}
+
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@ -362,10 +381,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
- netdev_name_node_del(name_node);
- synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
-
return 0;
}
@@ -373,8 +389,10 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
{
struct netdev_name_node *name_node, *tmp;
- list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
- __netdev_name_node_alt_destroy(name_node);
+ list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
+ list_del(&name_node->list);
+ netdev_name_node_alt_free(&name_node->rcu);
+ }
}
/* Device list insertion */
@@ -385,12 +403,10 @@ static void list_netdevice(struct net_device *dev)
ASSERT_RTNL();
- write_lock(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
- write_unlock(&dev_base_lock);
netdev_for_each_altname(dev, name_node)
netdev_name_node_add(net, name_node);
@@ -404,7 +420,7 @@ static void list_netdevice(struct net_device *dev)
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
-static void unlist_netdevice(struct net_device *dev, bool lock)
+static void unlist_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
@@ -417,13 +433,9 @@ static void unlist_netdevice(struct net_device *dev, bool lock)
netdev_name_node_del(name_node);
/* Unlink dev from the device chain */
- if (lock)
- write_lock(&dev_base_lock);
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
- if (lock)
- write_unlock(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
@@ -439,9 +451,17 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
* queue in the local softnet handler.
*/
-DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
+ .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
+};
EXPORT_PER_CPU_SYMBOL(softnet_data);
+/* Page_pool has a lockless array/stack to alloc/recycle pages.
+ * PP consumers must take care to run its APIs in the appropriate context
+ * (e.g. NAPI context).
+ */
+DEFINE_PER_CPU(struct page_pool *, system_page_pool);
+
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
@@ -551,7 +571,7 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
- return pt->dev ? &pt->dev->ptype_all : &ptype_all;
+ return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
else
return pt->dev ? &pt->dev->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
@@ -653,7 +673,7 @@ int dev_get_iflink(const struct net_device *dev)
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);
@@ -733,14 +753,88 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
+/* must be called under rcu_read_lock(), as we don't take a reference */
+static struct napi_struct *napi_by_id(unsigned int napi_id)
+{
+ unsigned int hash = napi_id % HASH_SIZE(napi_hash);
+ struct napi_struct *napi;
+
+ hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
+ if (napi->napi_id == napi_id)
+ return napi;
+
+ return NULL;
+}
+
+/* must be called under rcu_read_lock(), as we don't take a reference */
+static struct napi_struct *
+netdev_napi_by_id(struct net *net, unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ napi = napi_by_id(napi_id);
+ if (!napi)
+ return NULL;
+
+ if (WARN_ON_ONCE(!napi->dev))
+ return NULL;
+ if (!net_eq(net, dev_net(napi->dev)))
+ return NULL;
+
+ return napi;
+}
+
+/**
+ * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
+ * @net: the applicable net namespace
+ * @napi_id: ID of a NAPI of a target device
+ *
+ * Find a NAPI instance with @napi_id. Lock its device.
+ * The device must be in %NETREG_REGISTERED state for lookup to succeed.
+ * netdev_unlock() must be called to release it.
+ *
+ * Return: pointer to the NAPI instance, with its device lock held; NULL if not found.
+ */
+struct napi_struct *
+netdev_napi_by_id_lock(struct net *net, unsigned int napi_id)
+{
+ struct napi_struct *napi;
+ struct net_device *dev;
+
+ rcu_read_lock();
+ napi = netdev_napi_by_id(net, napi_id);
+ if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ dev = napi->dev;
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ dev = __netdev_put_lock(dev);
+ if (!dev)
+ return NULL;
+
+ rcu_read_lock();
+ napi = netdev_napi_by_id(net, napi_id);
+ if (napi && napi->dev != dev)
+ napi = NULL;
+ rcu_read_unlock();
+
+ if (!napi)
+ netdev_unlock(dev);
+ return napi;
+}
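/* Illustrative sketch (not part of this patch): typical use of
 * netdev_napi_by_id_lock() from request context. The function name is
 * hypothetical.
 */
static int example_inspect_napi(struct net *net, unsigned int napi_id)
{
	struct napi_struct *napi;

	napi = netdev_napi_by_id_lock(net, napi_id);
	if (!napi)
		return -ENOENT;

	/* ... inspect napi / napi->dev under the instance lock ... */

	netdev_unlock(napi->dev);
	return 0;
}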
+
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
- * Find an interface by name. Must be called under RTNL semaphore
- * or @dev_base_lock. If the name is found a pointer to the device
- * is returned. If the name is not found then %NULL is returned. The
+ * Find an interface by name. Must be called under RTNL semaphore.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
@@ -821,8 +915,7 @@ EXPORT_SYMBOL(netdev_get_by_name);
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
- * about locking. The caller must hold either the RTNL semaphore
- * or @dev_base_lock.
+ * about locking. The caller must hold the RTNL semaphore.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
@@ -908,7 +1001,6 @@ EXPORT_SYMBOL(netdev_get_by_index);
* its reference counter increased so the caller must be careful
* about locking. The caller must hold RCU lock.
*/
-
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
struct napi_struct *napi;
@@ -922,7 +1014,85 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id)
return napi ? napi->dev : NULL;
}
-EXPORT_SYMBOL(dev_get_by_napi_id);
+
+/* Release the held reference on the net_device and, if the net_device is
+ * still registered, take its instance lock. If the device is being
+ * unregistered, NULL is returned (but the reference has been released
+ * either way!).
+ *
+ * This helper is intended for locking a net_device after it has been looked
+ * up with a lockless lookup helper. The lock prevents the instance from
+ * going away.
+ */
+struct net_device *__netdev_put_lock(struct net_device *dev)
+{
+ netdev_lock(dev);
+ if (dev->reg_state > NETREG_REGISTERED) {
+ netdev_unlock(dev);
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ return dev;
+}
+
+/**
+ * netdev_get_by_index_lock() - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. If a valid device
+ * with @ifindex is found it will be returned with netdev->lock held.
+ * netdev_unlock() must be called to release it.
+ *
+ * Return: pointer to a device with lock held, NULL if not found.
+ */
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev)
+ return NULL;
+
+ return __netdev_put_lock(dev);
+}
+
+struct net_device *
+netdev_xa_find_lock(struct net *net, struct net_device *dev,
+ unsigned long *index)
+{
+ if (dev)
+ netdev_unlock(dev);
+
+ do {
+ rcu_read_lock();
+ dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);
+ if (!dev) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ dev = __netdev_put_lock(dev);
+ if (dev)
+ return dev;
+
+ (*index)++;
+ } while (true);
+}
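/* Illustrative sketch (not part of this patch): walking every device in a
 * netns with the instance lock held, using the helper above. The caller
 * bumps @index between iterations so xa_find() moves past the entry it just
 * returned. The function name is hypothetical.
 */
static void example_walk_netdevs(struct net *net)
{
	struct net_device *dev = NULL;
	unsigned long index = 0;

	while ((dev = netdev_xa_find_lock(net, dev, &index)) != NULL) {
		/* ... dev is locked here ... */
		index++;
	}
}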
+
+static DEFINE_SEQLOCK(netdev_rename_lock);
+
+void netdev_copy_name(struct net_device *dev, char *name)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&netdev_rename_lock);
+ strscpy(name, dev->name, IFNAMSIZ);
+ } while (read_seqretry(&netdev_rename_lock, seq));
+}
/**
* netdev_get_name - get a netdevice name, knowing its ifindex.
@@ -935,7 +1105,6 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
struct net_device *dev;
int ret;
- down_read(&devnet_rename_sem);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
@@ -944,15 +1113,20 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
goto out;
}
- strcpy(name, dev->name);
+ netdev_copy_name(dev, name);
ret = 0;
out:
rcu_read_unlock();
- up_read(&devnet_rename_sem);
return ret;
}
+static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
+ const char *ha)
+{
+ return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
+}
+
/**
* dev_getbyhwaddr_rcu - find a device by its hardware address
* @net: the applicable net namespace
@@ -961,7 +1135,7 @@ out:
*
* Search for an interface by MAC address. Returns NULL if the device
* is not found or a pointer to the device.
- * The caller must hold RCU or RTNL.
+ * The caller must hold RCU.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
@@ -973,14 +1147,39 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
struct net_device *dev;
for_each_netdev_rcu(net, dev)
- if (dev->type == type &&
- !memcmp(dev->dev_addr, ha, dev->addr_len))
+ if (dev_addr_cmp(dev, type, ha))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
+/**
+ * dev_getbyhwaddr() - find a device by its hardware address
+ * @net: the applicable net namespace
+ * @type: media type of device
+ * @ha: hardware address
+ *
+ * Similar to dev_getbyhwaddr_rcu(), but the caller needs to hold
+ * rtnl_lock.
+ *
+ * Context: rtnl_lock() must be held.
+ * Return: pointer to the net_device, or NULL if not found
+ */
+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
+ const char *ha)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+ for_each_netdev(net, dev)
+ if (dev_addr_cmp(dev, type, ha))
+ return dev;
+
+ return NULL;
+}
+EXPORT_SYMBOL(dev_getbyhwaddr);
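/* Illustrative sketch (not part of this patch): looking a device up by MAC
 * address under RTNL, as opposed to the RCU variant above. The function name
 * is hypothetical.
 */
static struct net_device *example_find_by_mac(struct net *net, const char *ha)
{
	struct net_device *dev;

	rtnl_lock();
	dev = dev_getbyhwaddr(net, ARPHRD_ETHER, ha);
	if (dev)
		dev_hold(dev);	/* keep it alive past rtnl_unlock() */
	rtnl_unlock();

	return dev;
}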
+
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev, *ret = NULL;
@@ -1181,61 +1380,50 @@ static int dev_get_valid_name(struct net *net, struct net_device *dev,
*/
int dev_change_name(struct net_device *dev, const char *newname)
{
+ struct net *net = dev_net(dev);
unsigned char old_assign_type;
char oldname[IFNAMSIZ];
int err = 0;
int ret;
- struct net *net;
- ASSERT_RTNL();
- BUG_ON(!dev_net(dev));
+ ASSERT_RTNL_NET(net);
- net = dev_net(dev);
-
- down_write(&devnet_rename_sem);
-
- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- up_write(&devnet_rename_sem);
+ if (!strncmp(newname, dev->name, IFNAMSIZ))
return 0;
- }
memcpy(oldname, dev->name, IFNAMSIZ);
+ write_seqlock_bh(&netdev_rename_lock);
err = dev_get_valid_name(net, dev, newname);
- if (err < 0) {
- up_write(&devnet_rename_sem);
+ write_sequnlock_bh(&netdev_rename_lock);
+
+ if (err < 0)
return err;
- }
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s%s\n", oldname,
dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
- dev->name_assign_type = NET_NAME_RENAMED;
+ WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
+ write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
- up_write(&devnet_rename_sem);
+ write_sequnlock_bh(&netdev_rename_lock);
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
return ret;
}
- up_write(&devnet_rename_sem);
-
netdev_adjacent_rename_links(dev, oldname);
- write_lock(&dev_base_lock);
netdev_name_node_del(dev->name_node);
- write_unlock(&dev_base_lock);
- synchronize_rcu();
+ synchronize_net();
- write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node);
- write_unlock(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
@@ -1244,10 +1432,11 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- down_write(&devnet_rename_sem);
+ write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
+ write_sequnlock_bh(&netdev_rename_lock);
memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
@@ -1450,7 +1639,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
- dev->flags |= IFF_UP;
+ netif_set_up(dev, true);
dev_set_rx_mode(dev);
dev_activate(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -1529,7 +1718,7 @@ static void __dev_close_many(struct list_head *head)
if (ops->ndo_stop)
ops->ndo_stop(dev);
- dev->flags &= ~IFF_UP;
+ netif_set_up(dev, false);
netpoll_poll_enable(dev);
}
}
@@ -1739,14 +1928,19 @@ int register_netdevice_notifier(struct notifier_block *nb)
/* Close race with setup_net() and cleanup_net() */
down_write(&pernet_ops_rwsem);
+
+ /* When RTNL is removed, we need protection for netdev_chain. */
rtnl_lock();
+
err = raw_notifier_chain_register(&netdev_chain, nb);
if (err)
goto unlock;
if (dev_boot_phase)
goto unlock;
for_each_net(net) {
+ __rtnl_net_lock(net);
err = call_netdevice_register_net_notifiers(nb, net);
+ __rtnl_net_unlock(net);
if (err)
goto rollback;
}
@@ -1757,8 +1951,11 @@ unlock:
return err;
rollback:
- for_each_net_continue_reverse(net)
+ for_each_net_continue_reverse(net) {
+ __rtnl_net_lock(net);
call_netdevice_unregister_net_notifiers(nb, net);
+ __rtnl_net_unlock(net);
+ }
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
@@ -1791,8 +1988,11 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
if (err)
goto unlock;
- for_each_net(net)
+ for_each_net(net) {
+ __rtnl_net_lock(net);
call_netdevice_unregister_net_notifiers(nb, net);
+ __rtnl_net_unlock(net);
+ }
unlock:
rtnl_unlock();
@@ -1856,9 +2056,10 @@ int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
int err;
- rtnl_lock();
+ rtnl_net_lock(net);
err = __register_netdevice_notifier_net(net, nb, false);
- rtnl_unlock();
+ rtnl_net_unlock(net);
+
return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);
@@ -1884,9 +2085,10 @@ int unregister_netdevice_notifier_net(struct net *net,
{
int err;
- rtnl_lock();
+ rtnl_net_lock(net);
err = __unregister_netdevice_notifier_net(net, nb);
- rtnl_unlock();
+ rtnl_net_unlock(net);
+
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
@@ -1899,19 +2101,56 @@ static void __move_netdevice_notifier_net(struct net *src_net,
__register_netdevice_notifier_net(dst_net, nb, true);
}
+static void rtnl_net_dev_lock(struct net_device *dev)
+{
+ bool again;
+
+ do {
+ struct net *net;
+
+ again = false;
+
+ /* The netns might be getting dismantled. */
+ rcu_read_lock();
+ net = dev_net_rcu(dev);
+ net_passive_inc(net);
+ rcu_read_unlock();
+
+ rtnl_net_lock(net);
+
+#ifdef CONFIG_NET_NS
+ /* dev might have been moved to another netns. */
+ if (!net_eq(net, rcu_access_pointer(dev->nd_net.net))) {
+ rtnl_net_unlock(net);
+ net_passive_dec(net);
+ again = true;
+ }
+#endif
+ } while (again);
+}
+
+static void rtnl_net_dev_unlock(struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+
+ rtnl_net_unlock(net);
+ net_passive_dec(net);
+}
+
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{
int err;
- rtnl_lock();
+ rtnl_net_dev_lock(dev);
err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
if (!err) {
nn->nb = nb;
list_add(&nn->list, &dev->net_notifier_list);
}
- rtnl_unlock();
+ rtnl_net_dev_unlock(dev);
+
return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
@@ -1922,10 +2161,11 @@ int unregister_netdevice_notifier_dev_net(struct net_device *dev,
{
int err;
- rtnl_lock();
+ rtnl_net_dev_lock(dev);
list_del(&nn->list);
err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
- rtnl_unlock();
+ rtnl_net_dev_unlock(dev);
+
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
@@ -2073,6 +2313,11 @@ void net_dec_egress_queue(void)
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
+#ifdef CONFIG_NET_CLS_ACT
+DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
+EXPORT_SYMBOL(tcf_sw_enabled_key);
+#endif
+
DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
@@ -2129,7 +2374,7 @@ EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
skb->tstamp = 0;
- skb->mono_delivery_time = 0;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
}
@@ -2242,7 +2487,8 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
*/
bool dev_nit_active(struct net_device *dev)
{
- return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+ return !list_empty(&net_hotdata.ptype_all) ||
+ !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
@@ -2253,15 +2499,14 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
- struct packet_type *ptype;
+ struct list_head *ptype_list = &net_hotdata.ptype_all;
+ struct packet_type *ptype, *pt_prev = NULL;
struct sk_buff *skb2 = NULL;
- struct packet_type *pt_prev = NULL;
- struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
- if (ptype->ignore_outgoing)
+ if (READ_ONCE(ptype->ignore_outgoing))
continue;
/* Never send packets back to the socket
@@ -2302,7 +2547,7 @@ again:
pt_prev = ptype;
}
- if (ptype_list == &ptype_all) {
+ if (ptype_list == &net_hotdata.ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@@ -2914,6 +3159,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->num_tc)
netif_setup_tc(dev, txq);
+ net_shaper_set_real_num_tx_queues(dev, txq);
+
dev_qdisc_change_real_num_tx(dev, txq);
dev->real_num_tx_queues = txq;
@@ -3196,7 +3443,7 @@ void netif_device_attach(struct net_device *dev)
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_wake_all_queues(dev);
- __netdev_watchdog_up(dev);
+ netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_device_attach);
@@ -3277,6 +3524,10 @@ int skb_checksum_help(struct sk_buff *skb)
return -EINVAL;
}
+ if (!skb_frags_readable(skb)) {
+ return -EFAULT;
+ }
+
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
@@ -3353,6 +3604,7 @@ int skb_crc32c_csum_help(struct sk_buff *skb)
out:
return ret;
}
+EXPORT_SYMBOL(skb_crc32c_csum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
@@ -3398,8 +3650,9 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
- if (PageHighMem(skb_frag_page(frag)))
+ if (page && PageHighMem(page))
return 1;
}
}
@@ -3471,7 +3724,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
if (gso_segs > READ_ONCE(dev->gso_max_segs))
return features & ~NETIF_F_GSO_MASK;
- if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
+ if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
return features & ~NETIF_F_GSO_MASK;
if (!skb_shinfo(skb)->gso_type) {
@@ -3598,6 +3851,11 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
return 0;
if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+ !ipv6_has_hopopt_jumbo(skb))
+ goto sw_checksum;
+
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
case offsetof(struct udphdr, check):
@@ -3605,6 +3863,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
}
}
+sw_checksum:
return skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
@@ -3613,6 +3872,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
{
netdev_features_t features;
+ if (!skb_frags_readable(skb))
+ goto out_kfree_skb;
+
features = netif_skb_features(skb);
skb = validate_xmit_vlan(skb, features);
if (unlikely(!skb))
@@ -3672,7 +3934,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
next = skb->next;
skb_mark_not_on_list(skb);
- /* in case skb wont be segmented, point to itself */
+ /* in case skb won't be segmented, point to itself */
skb->prev = skb;
skb = validate_xmit_skb(skb, dev, again);
@@ -3717,7 +3979,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
sizeof(_tcphdr), &_tcphdr);
if (likely(th))
hdr_len += __tcp_hdrlen(th);
- } else {
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
struct udphdr _udphdr;
if (skb_header_pointer(skb, hdr_len,
@@ -3725,10 +3987,14 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
hdr_len += sizeof(struct udphdr);
}
- if (shinfo->gso_type & SKB_GSO_DODGY)
- gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
- shinfo->gso_size);
+ if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
+ int payload = skb->len - hdr_len;
+ /* Malicious packet. */
+ if (payload <= 0)
+ return;
+ gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
+ }
qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
}
}
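/* Illustrative arithmetic (not part of this patch): for a dodgy GSO skb with
 * skb->len = 65066, hdr_len = 66 and gso_size = 1448, payload is 65000,
 * gso_segs = DIV_ROUND_UP(65000, 1448) = 45, and pkt_len grows by
 * (45 - 1) * 66 = 2904 bytes of replicated headers. A non-positive payload
 * is now rejected as malicious instead of being fed to DIV_ROUND_UP().
 */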
@@ -3791,6 +4057,10 @@ no_lock_out:
return rc;
}
+ if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
+ return NET_XMIT_DROP;
+ }
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
@@ -3830,7 +4100,9 @@ no_lock_out:
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
+ WRITE_ONCE(q->owner, smp_processor_id());
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+ WRITE_ONCE(q->owner, -1);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -3903,6 +4175,7 @@ netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
}
+#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{
return __this_cpu_read(softnet_data.xmit.skip_txqueue);
@@ -3913,6 +4186,19 @@ void netdev_xmit_skip_txqueue(bool skip)
__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
+
+#else
+static bool netdev_xmit_txqueue_skipped(void)
+{
+ return current->net_xmit.skip_txqueue;
+}
+
+void netdev_xmit_skip_txqueue(bool skip)
+{
+ current->net_xmit.skip_txqueue = skip;
+}
+EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
+#endif
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS
@@ -3927,6 +4213,14 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
if (!miniq)
return ret;
+ /* Global bypass */
+ if (!static_branch_likely(&tcf_sw_enabled_key))
+ return ret;
+
+ /* Block-wise bypass */
+ if (tcf_block_bypass_sw(miniq->block))
+ return ret;
+
tc_skb_cb(skb)->mru = 0;
tc_skb_cb(skb)->post_ct = false;
tcf_set_drop_reason(skb, *drop_reason);
@@ -3987,10 +4281,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
+
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
@@ -4019,10 +4316,12 @@ ingress_verdict:
break;
}
*ret = NET_RX_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_RX_DROP;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
@@ -4032,8 +4331,10 @@ ingress_verdict:
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_RX_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
@@ -4043,11 +4344,14 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
* already set by the caller.
*/
@@ -4063,10 +4367,12 @@ egress_verdict:
/* No need to push/pop skb's mac_header here on egress! */
skb_do_redirect(skb);
*ret = NET_XMIT_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_XMIT_DROP;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
@@ -4076,8 +4382,10 @@ egress_verdict:
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_XMIT_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
@@ -4173,13 +4481,6 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_zero);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
-}
-EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4420,20 +4721,11 @@ EXPORT_SYMBOL(__dev_direct_xmit);
/*************************************************************************
* Receiver routines
*************************************************************************/
+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
-int netdev_max_backlog __read_mostly = 1000;
-EXPORT_SYMBOL(netdev_max_backlog);
-
-int netdev_tstamp_prequeue __read_mostly = 1;
-unsigned int sysctl_skb_defer_max __read_mostly = 64;
-int netdev_budget __read_mostly = 300;
-/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
-unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
-int dev_rx_weight __read_mostly = 64;
-int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -4452,35 +4744,27 @@ static inline void ____napi_schedule(struct softnet_data *sd,
*/
thread = READ_ONCE(napi->thread);
if (thread) {
- /* Avoid doing set_bit() if the thread is in
- * INTERRUPTIBLE state, cause napi_thread_wait()
- * makes sure to proceed with napi polling
- * if the thread is explicitly woken from here.
- */
- if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
+ goto use_local_napi;
+
+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
}
+use_local_napi:
list_add_tail(&napi->poll_list, &sd->poll_list);
WRITE_ONCE(napi->list_owner, smp_processor_id());
/* If not called from net_rx_action()
* we have to raise NET_RX_SOFTIRQ.
*/
if (!sd->in_net_rx_action)
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-u32 rps_cpu_mask __read_mostly;
-EXPORT_SYMBOL(rps_cpu_mask);
-
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
@@ -4491,12 +4775,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
if (next_cpu < nr_cpu_ids) {
+ u32 head;
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
- u32 flow_id;
u16 rxq_index;
+ u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -4518,16 +4803,16 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
goto out;
old_rflow = rflow;
rflow = &flow_table->flows[flow_id];
- rflow->filter = rc;
- if (old_rflow->filter == rflow->filter)
- old_rflow->filter = RPS_NO_FILTER;
+ WRITE_ONCE(rflow->filter, rc);
+ if (old_rflow->filter == rc)
+ WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
out:
#endif
- rflow->last_qtail =
- per_cpu(softnet_data, next_cpu).input_queue_head;
+ head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
+ rps_input_queue_tail_save(&rflow->last_qtail, head);
}
- rflow->cpu = next_cpu;
+ WRITE_ONCE(rflow->cpu, next_cpu);
return rflow;
}
@@ -4572,7 +4857,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!hash)
goto done;
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
@@ -4582,10 +4867,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
- if ((ident ^ hash) & ~rps_cpu_mask)
+ if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
- next_cpu = ident & rps_cpu_mask;
+ next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
@@ -4606,7 +4891,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
- ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+ ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
@@ -4660,9 +4945,9 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = READ_ONCE(rflow->cpu);
- if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
- ((int)(per_cpu(softnet_data, cpu).input_queue_head -
- rflow->last_qtail) <
+ if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
+ ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
+ READ_ONCE(rflow->last_qtail)) <
(int)(10 * flow_table->mask)))
expire = false;
}
@@ -4709,6 +4994,11 @@ static void napi_schedule_rps(struct softnet_data *sd)
#ifdef CONFIG_RPS
if (sd != mysd) {
+ if (use_backlog_threads()) {
+ __napi_schedule_irqoff(&sd->backlog);
+ return;
+ }
+
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
@@ -4723,6 +5013,23 @@ static void napi_schedule_rps(struct softnet_data *sd)
__napi_schedule_irqoff(&mysd->backlog);
}
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+{
+ unsigned long flags;
+
+ if (use_backlog_threads()) {
+ backlog_lock_irq_save(sd, &flags);
+
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ __napi_schedule_irqoff(&sd->backlog);
+
+ backlog_unlock_irq_restore(sd, &flags);
+
+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+ }
+}
+
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif
@@ -4734,7 +5041,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
- if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+ if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@@ -4774,36 +5081,45 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
struct softnet_data *sd;
unsigned long flags;
unsigned int qlen;
+ int max_backlog;
+ u32 tail;
+
+ reason = SKB_DROP_REASON_DEV_READY;
+ if (!netif_running(skb->dev))
+ goto bad_dev;
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ reason = SKB_DROP_REASON_CPU_BACKLOG;
sd = &per_cpu(softnet_data, cpu);
- rps_lock_irqsave(sd, &flags);
- if (!netif_running(skb->dev))
- goto drop;
+ qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
+ max_backlog = READ_ONCE(net_hotdata.max_backlog);
+ if (unlikely(qlen > max_backlog))
+ goto cpu_backlog_drop;
+ backlog_lock_irq_save(sd, &flags);
qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
- if (qlen) {
-enqueue:
- __skb_queue_tail(&sd->input_pkt_queue, skb);
- input_queue_tail_incr_save(sd, qtail);
- rps_unlock_irq_restore(sd, &flags);
- return NET_RX_SUCCESS;
+ if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
+ if (!qlen) {
+ /* Schedule NAPI for backlog device. We can use a
+ * non-atomic operation as we own the queue lock.
+ */
+ if (!__test_and_set_bit(NAPI_STATE_SCHED,
+ &sd->backlog.state))
+ napi_schedule_rps(sd);
}
+ __skb_queue_tail(&sd->input_pkt_queue, skb);
+ tail = rps_input_queue_tail_incr(sd);
+ backlog_unlock_irq_restore(sd, &flags);
- /* Schedule NAPI for backlog device
- * We can use non atomic operation since we own the queue lock
- */
- if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
- napi_schedule_rps(sd);
- goto enqueue;
+ /* save the tail outside of the critical section */
+ rps_input_queue_tail_save(qtail, tail);
+ return NET_RX_SUCCESS;
}
- reason = SKB_DROP_REASON_CPU_BACKLOG;
-drop:
- sd->dropped++;
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
+cpu_backlog_drop:
+ atomic_inc(&sd->dropped);
+bad_dev:
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb_reason(skb, reason);
return NET_RX_DROP;
@@ -4833,7 +5149,7 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
void *orig_data, *orig_data_end, *hard_start;
struct netdev_rx_queue *rxqueue;
@@ -4858,6 +5174,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4887,6 +5209,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed eth hdr such SKB needs update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4920,11 +5250,35 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static int
+netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
+{
+ struct sk_buff *skb = *pskb;
+ int err, hroom, troom;
+
+ if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
+ return 0;
+
+ /* In case we have to go down the path and also linearize,
+ * let's do the pskb_expand_head() work just once here.
+ */
+ hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ troom = skb->tail + skb->data_len - skb->end;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ return skb_linearize(skb);
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
- u32 act = XDP_DROP;
+ struct sk_buff *skb = *pskb;
+ u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
@@ -4932,41 +5286,36 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
if (skb_is_redirected(skb))
return XDP_PASS;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
*/
+ mac_len = skb->data - skb_mac_header(skb);
+ __skb_push(skb, mac_len);
+
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
- int troom = skb->tail + skb->data_len - skb->end;
-
- /* In case we have to go down the path and also linearize,
- * then lets do the pskb_expand_head() work just once here.
- */
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
+ if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ __skb_pull(*pskb, mac_len);
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
@@ -4979,7 +5328,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
* and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
* queues, so they do not have this starvation issue.
*/
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -5004,32 +5353,38 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ bpf_net_ctx_clear(bpf_net_ctx);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5038,7 +5393,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -5122,7 +5477,7 @@ int netif_rx(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx);
-static __latent_entropy void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -5144,7 +5499,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
trace_consume_skb(skb, net_tx_action);
else
trace_kfree_skb(skb, net_tx_action,
- get_kfree_skb_cb(skb)->reason);
+ get_kfree_skb_cb(skb)->reason, NULL);
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
__kfree_skb(skb);
@@ -5330,15 +5685,21 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
orig_dev = skb->dev;
skb_reset_network_header(skb);
+#if !defined(CONFIG_DEBUG_NET)
+ /* We plan to no longer reset the transport header here.
+ * Give some time to fuzzers and dev builds to catch bugs
+ * in network stacks.
+ */
if (!skb_transport_header_was_set(skb))
skb_reset_transport_header(skb);
+#endif
skb_reset_mac_len(skb);
pt_prev = NULL;
@@ -5352,7 +5713,8 @@ another_round:
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
@@ -5373,7 +5735,7 @@ another_round:
if (pfmemalloc)
goto skip_taps;
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -5598,10 +5960,9 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
struct packet_type *pt_curr = NULL;
/* Current (common) orig_dev of sublist */
struct net_device *od_curr = NULL;
- struct list_head sublist;
struct sk_buff *skb, *next;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
@@ -5713,7 +6074,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5739,11 +6100,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
+ skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5822,8 +6183,6 @@ void netif_receive_skb_list(struct list_head *head)
}
EXPORT_SYMBOL(netif_receive_skb_list);
-static DEFINE_PER_CPU(struct work_struct, flush_works);
-
/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
@@ -5833,23 +6192,25 @@ static void flush_backlog(struct work_struct *work)
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
dev_kfree_skb_irq(skb);
- input_queue_head_incr(sd);
+ rps_input_queue_head_incr(sd);
}
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
kfree_skb(skb);
- input_queue_head_incr(sd);
+ rps_input_queue_head_incr(sd);
}
}
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
local_bh_enable();
}
@@ -5859,14 +6220,14 @@ static bool flush_required(int cpu)
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
/* as insertion into process_queue happens with the rps lock held,
* process_queue access may race only with dequeue
*/
do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
!skb_queue_empty_lockless(&sd->process_queue);
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
return do_flush;
#endif
@@ -5878,36 +6239,54 @@ static bool flush_required(int cpu)
return true;
}
+struct flush_backlogs {
+ cpumask_t flush_cpus;
+ struct work_struct w[];
+};
+
+static struct flush_backlogs *flush_backlogs_alloc(void)
+{
+ return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
+ GFP_KERNEL);
+}
+
+static struct flush_backlogs *flush_backlogs_fallback;
+static DEFINE_MUTEX(flush_backlogs_mutex);
+
static void flush_all_backlogs(void)
{
- static cpumask_t flush_cpus;
+ struct flush_backlogs *ptr = flush_backlogs_alloc();
unsigned int cpu;
- /* since we are under rtnl lock protection we can use static data
- * for the cpumask and avoid allocating on stack the possibly
- * large mask
- */
- ASSERT_RTNL();
+ if (!ptr) {
+ mutex_lock(&flush_backlogs_mutex);
+ ptr = flush_backlogs_fallback;
+ }
+ cpumask_clear(&ptr->flush_cpus);
cpus_read_lock();
- cpumask_clear(&flush_cpus);
for_each_online_cpu(cpu) {
if (flush_required(cpu)) {
- queue_work_on(cpu, system_highpri_wq,
- per_cpu_ptr(&flush_works, cpu));
- cpumask_set_cpu(cpu, &flush_cpus);
+ INIT_WORK(&ptr->w[cpu], flush_backlog);
+ queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
+ __cpumask_set_cpu(cpu, &ptr->flush_cpus);
}
}
/* we can have in flight packet[s] on the cpus we are not flushing,
* synchronize_net() in unregister_netdevice_many() will take care of
- * them
+ * them.
*/
- for_each_cpu(cpu, &flush_cpus)
- flush_work(per_cpu_ptr(&flush_works, cpu));
+ for_each_cpu(cpu, &ptr->flush_cpus)
+ flush_work(&ptr->w[cpu]);
cpus_read_unlock();
+
+ if (ptr != flush_backlogs_fallback)
+ kfree(ptr);
+ else
+ mutex_unlock(&flush_backlogs_mutex);
}
static void net_rps_send_ipi(struct softnet_data *remsd)
@@ -5932,7 +6311,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
#ifdef CONFIG_RPS
struct softnet_data *remsd = sd->rps_ipi_list;
- if (remsd) {
+ if (!use_backlog_threads() && remsd) {
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -5947,7 +6326,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- return sd->rps_ipi_list != NULL;
+ return !use_backlog_threads() && sd->rps_ipi_list;
#else
return false;
#endif
@@ -5967,21 +6346,26 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = READ_ONCE(dev_rx_weight);
+ napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
while ((skb = __skb_dequeue(&sd->process_queue))) {
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
- input_queue_head_incr(sd);
- if (++work >= quota)
+ if (++work >= quota) {
+ rps_input_queue_head_add(sd, work);
return work;
+ }
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
}
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
@@ -5991,15 +6375,19 @@ static int process_backlog(struct napi_struct *napi, int quota)
* We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
again = false;
} else {
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
}
+ if (work)
+ rps_input_queue_head_add(sd, work);
return work;
}
@@ -6088,12 +6476,12 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
if (work_done) {
if (n->gro_bitmask)
- timeout = READ_ONCE(n->dev->gro_flush_timeout);
- n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
+ timeout = napi_get_gro_flush_timeout(n);
+ n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n);
}
if (n->defer_hard_irqs_count > 0) {
n->defer_hard_irqs_count--;
- timeout = READ_ONCE(n->dev->gro_flush_timeout);
+ timeout = napi_get_gro_flush_timeout(n);
if (timeout)
ret = false;
}
@@ -6143,17 +6531,25 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
}
EXPORT_SYMBOL(napi_complete_done);
-/* must be called under rcu_read_lock(), as we dont take a reference */
-struct napi_struct *napi_by_id(unsigned int napi_id)
+static void skb_defer_free_flush(struct softnet_data *sd)
{
- unsigned int hash = napi_id % HASH_SIZE(napi_hash);
- struct napi_struct *napi;
+ struct sk_buff *skb, *next;
- hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
- if (napi->napi_id == napi_id)
- return napi;
+ /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+ if (!READ_ONCE(sd->defer_list))
+ return;
- return NULL;
+ spin_lock(&sd->defer_lock);
+ skb = sd->defer_list;
+ sd->defer_list = NULL;
+ sd->defer_count = 0;
+ spin_unlock(&sd->defer_lock);
+
+ while (skb != NULL) {
+ next = skb->next;
+ napi_consume_skb(skb, 1);
+ skb = next;
+ }
}
#if defined(CONFIG_NET_RX_BUSY_POLL)
@@ -6177,9 +6573,15 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
clear_bit(NAPI_STATE_SCHED, &napi->state);
}
-static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
- u16 budget)
+enum {
+ NAPI_F_PREFER_BUSY_POLL = 1,
+ NAPI_F_END_ON_RESCHED = 2,
+};
+
+static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
+ unsigned flags, u16 budget)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
bool skip_schedule = false;
unsigned long timeout;
int rc;
@@ -6197,10 +6599,11 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
- if (prefer_busy_poll) {
- napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
- timeout = READ_ONCE(napi->dev->gro_flush_timeout);
+ if (flags & NAPI_F_PREFER_BUSY_POLL) {
+ napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi);
+ timeout = napi_get_gro_flush_timeout(napi);
if (napi->defer_hard_irqs_count && timeout) {
hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
skip_schedule = true;
@@ -6219,26 +6622,28 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
netpoll_poll_unlock(have_poll_lock);
if (rc == budget)
__busy_poll_stop(napi, skip_schedule);
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
}
-void napi_busy_loop(unsigned int napi_id,
- bool (*loop_end)(void *, unsigned long),
- void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+static void __napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, unsigned flags, u16 budget)
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
int (*napi_poll)(struct napi_struct *napi, int budget);
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
void *have_poll_lock = NULL;
struct napi_struct *napi;
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
restart:
napi_poll = NULL;
- rcu_read_lock();
-
napi = napi_by_id(napi_id);
if (!napi)
- goto out;
+ return;
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
@@ -6246,6 +6651,7 @@ restart:
int work = 0;
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (!napi_poll) {
unsigned long val = READ_ONCE(napi->state);
@@ -6254,14 +6660,14 @@ restart:
*/
if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
NAPIF_STATE_IN_BUSY_POLL)) {
- if (prefer_busy_poll)
+ if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
if (cmpxchg(&napi->state, val,
val | NAPIF_STATE_IN_BUSY_POLL |
NAPIF_STATE_SCHED) != val) {
- if (prefer_busy_poll)
+ if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
@@ -6275,18 +6681,23 @@ count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
break;
if (unlikely(need_resched())) {
+ if (flags & NAPI_F_END_ON_RESCHED)
+ break;
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
rcu_read_unlock();
cond_resched();
+ rcu_read_lock();
if (loop_end(loop_end_arg, start_time))
return;
goto restart;
@@ -6294,34 +6705,111 @@ count:
cpu_relax();
}
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
-out:
+}
+
+void napi_busy_loop_rcu(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+{
+ unsigned flags = NAPI_F_END_ON_RESCHED;
+
+ if (prefer_busy_poll)
+ flags |= NAPI_F_PREFER_BUSY_POLL;
+
+ __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
+}
+
+void napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+{
+ unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
+
+ rcu_read_lock();
+ __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
+void napi_suspend_irqs(unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ rcu_read_lock();
+ napi = napi_by_id(napi_id);
+ if (napi) {
+ unsigned long timeout = napi_get_irq_suspend_timeout(napi);
+
+ if (timeout)
+ hrtimer_start(&napi->timer, ns_to_ktime(timeout),
+ HRTIMER_MODE_REL_PINNED);
+ }
+ rcu_read_unlock();
+}
+
+void napi_resume_irqs(unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ rcu_read_lock();
+ napi = napi_by_id(napi_id);
+ if (napi) {
+ /* If irq_suspend_timeout is set to 0 between the call to
+ * napi_suspend_irqs and now, the original value still
+ * determines the safety timeout as intended and napi_watchdog
+ * will resume irq processing.
+ */
+ if (napi_get_irq_suspend_timeout(napi)) {
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+ }
+ }
+ rcu_read_unlock();
+}
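/* Illustrative sketch (not part of this patch): the two helpers above are
 * intended to bracket an application-driven busy-polling window (e.g. from
 * the epoll busy-poll code). The function name is hypothetical.
 */
static void example_irq_suspend_window(unsigned int napi_id)
{
	napi_suspend_irqs(napi_id);	/* arm the safety timeout, defer IRQs */
	/* ... application busy-polls the receive queue ... */
	napi_resume_irqs(napi_id);	/* reschedule NAPI if still suspended */
}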
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
+static void __napi_hash_add_with_id(struct napi_struct *napi,
+ unsigned int napi_id)
+{
+ WRITE_ONCE(napi->napi_id, napi_id);
+ hlist_add_head_rcu(&napi->napi_hash_node,
+ &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+}
+
+static void napi_hash_add_with_id(struct napi_struct *napi,
+ unsigned int napi_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&napi_hash_lock, flags);
+ WARN_ON_ONCE(napi_by_id(napi_id));
+ __napi_hash_add_with_id(napi, napi_id);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
+}
+
static void napi_hash_add(struct napi_struct *napi)
{
+ unsigned long flags;
+
if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
return;
- spin_lock(&napi_hash_lock);
+ spin_lock_irqsave(&napi_hash_lock, flags);
/* 0..NR_CPUS range is reserved for sender_cpu use */
do {
if (unlikely(++napi_gen_id < MIN_NAPI_ID))
napi_gen_id = MIN_NAPI_ID;
} while (napi_by_id(napi_gen_id));
- napi->napi_id = napi_gen_id;
- hlist_add_head_rcu(&napi->napi_hash_node,
- &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+ __napi_hash_add_with_id(napi, napi_gen_id);
- spin_unlock(&napi_hash_lock);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
}
/* Warning : caller is responsible to make sure rcu grace period
@@ -6329,11 +6817,13 @@ static void napi_hash_add(struct napi_struct *napi)
*/
static void napi_hash_del(struct napi_struct *napi)
{
- spin_lock(&napi_hash_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&napi_hash_lock, flags);
hlist_del_init_rcu(&napi->napi_hash_node);
- spin_unlock(&napi_hash_lock);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
}
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
@@ -6370,6 +6860,8 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
struct napi_struct *napi;
int err = 0;
+ netdev_assert_locked_or_invisible(dev);
+
if (dev->threaded == threaded)
return 0;
@@ -6385,7 +6877,7 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
}
- dev->threaded = threaded;
+ WRITE_ONCE(dev->threaded, threaded);
/* Make sure kthread is created before THREADED bit
* is set.
@@ -6442,9 +6934,83 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
}
EXPORT_SYMBOL(netif_queue_set_napi);
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
+static void napi_restore_config(struct napi_struct *n)
+{
+ n->defer_hard_irqs = n->config->defer_hard_irqs;
+ n->gro_flush_timeout = n->config->gro_flush_timeout;
+ n->irq_suspend_timeout = n->config->irq_suspend_timeout;
+	/* A NAPI ID might be stored in the config; if so, use it. If not, use
+	 * napi_hash_add() to generate one for us.
+ */
+ if (n->config->napi_id) {
+ napi_hash_add_with_id(n, n->config->napi_id);
+ } else {
+ napi_hash_add(n);
+ n->config->napi_id = n->napi_id;
+ }
+}
+
+static void napi_save_config(struct napi_struct *n)
+{
+ n->config->defer_hard_irqs = n->defer_hard_irqs;
+ n->config->gro_flush_timeout = n->gro_flush_timeout;
+ n->config->irq_suspend_timeout = n->irq_suspend_timeout;
+ napi_hash_del(n);
+}
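
With per-NAPI config in place, napi_disable() parks the tunables (defer_hard_irqs, gro_flush_timeout, irq_suspend_timeout) in dev->napi_config and drops the hash entry, while napi_enable() restores them and re-registers the NAPI, reusing a previously stored NAPI ID if one exists. A small userspace model of that round trip; the struct layout, field names and toy ID allocator are illustrative only:

    #include <stdio.h>

    struct cfg  { unsigned int defer_hard_irqs; unsigned long gro_flush_timeout; unsigned int id; };
    struct napi { unsigned int defer_hard_irqs; unsigned long gro_flush_timeout; unsigned int id; struct cfg *config; };

    static unsigned int next_id = 0x101;        /* pretend ID allocator */

    static void restore_config(struct napi *n)
    {
        n->defer_hard_irqs = n->config->defer_hard_irqs;
        n->gro_flush_timeout = n->config->gro_flush_timeout;
        if (n->config->id)
            n->id = n->config->id;              /* reuse the stored ID */
        else
            n->config->id = n->id = next_id++;  /* first enable: allocate and remember */
    }

    static void save_config(struct napi *n)
    {
        n->config->defer_hard_irqs = n->defer_hard_irqs;
        n->config->gro_flush_timeout = n->gro_flush_timeout;
        n->id = 0;                              /* "unhashed" while disabled */
    }

    int main(void)
    {
        struct cfg c = { .defer_hard_irqs = 2, .gro_flush_timeout = 20000 };
        struct napi n = { .config = &c };

        restore_config(&n);                     /* enable */
        printf("enabled with id %#x\n", n.id);
        save_config(&n);                        /* disable */
        restore_config(&n);                     /* re-enable: the same id comes back */
        printf("re-enabled with id %#x\n", n.id);
        return 0;
    }
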
+
+/* Netlink wants the NAPI list to be sorted by ID; if the NAPI being added will
+ * inherit an existing ID, try to insert it at the right position.
+ */
+static void
+netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
{
+ unsigned int new_id, pos_id;
+ struct list_head *higher;
+ struct napi_struct *pos;
+
+ new_id = UINT_MAX;
+ if (napi->config && napi->config->napi_id)
+ new_id = napi->config->napi_id;
+
+ higher = &dev->napi_list;
+ list_for_each_entry(pos, &dev->napi_list, dev_list) {
+ if (pos->napi_id >= MIN_NAPI_ID)
+ pos_id = pos->napi_id;
+ else if (pos->config)
+ pos_id = pos->config->napi_id;
+ else
+ pos_id = UINT_MAX;
+
+ if (pos_id <= new_id)
+ break;
+ higher = &pos->dev_list;
+ }
+ list_add_rcu(&napi->dev_list, higher); /* adds after higher */
+}
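
The helper walks dev->napi_list and inserts the new NAPI after the last entry whose effective ID is larger, keeping the list ordered from higher to lower IDs; entries without a usable ID are treated as UINT_MAX and land at the front. A minimal userspace version of that descending-order insert on a singly linked list (0 plays the role of "no ID yet" here, purely for illustration):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { unsigned int id; struct node *next; };

    /* Insert @id so that the list stays sorted in descending order of id;
     * ids of 0 mean "no id yet" and sort first, like UINT_MAX in the kernel. */
    static void insert_sorted(struct node **head, unsigned int id)
    {
        unsigned int key = id ? id : UINT_MAX;
        struct node *n = malloc(sizeof(*n));
        struct node **link = head;

        n->id = id;
        while (*link && ((*link)->id ? (*link)->id : UINT_MAX) > key)
            link = &(*link)->next;
        n->next = *link;
        *link = n;
    }

    int main(void)
    {
        struct node *head = NULL;
        unsigned int ids[] = { 0x104, 0x101, 0, 0x103 };

        for (unsigned int i = 0; i < 4; i++)
            insert_sorted(&head, ids[i]);
        for (struct node *n = head; n; n = n->next)
            printf("%#x\n", n->id);     /* prints 0, 0x104, 0x103, 0x101 */
        return 0;
    }
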
+
+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+static void napi_get_frags_check(struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ skb = napi_get_frags(napi);
+ WARN_ON_ONCE(skb && skb->head_frag);
+ napi_free_frags(napi);
+ local_bh_enable();
+}
+
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ netdev_assert_locked(dev);
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return;
@@ -6468,24 +7034,32 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
napi->list_owner = -1;
set_bit(NAPI_STATE_SCHED, &napi->state);
set_bit(NAPI_STATE_NPSVC, &napi->state);
- list_add_rcu(&napi->dev_list, &dev->napi_list);
- napi_hash_add(napi);
+ netif_napi_dev_list_add(dev, napi);
+
+ /* default settings from sysfs are applied to all NAPIs. any per-NAPI
+ * configuration will be loaded in napi_enable
+ */
+ napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs));
+ napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout));
+
napi_get_frags_check(napi);
/* Create kthread for this napi if dev->threaded is set.
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
*/
if (dev->threaded && napi_kthread_create(napi))
- dev->threaded = 0;
- netif_napi_set_irq(napi, -1);
+ dev->threaded = false;
+ netif_napi_set_irq_locked(napi, -1);
}
-EXPORT_SYMBOL(netif_napi_add_weight);
+EXPORT_SYMBOL(netif_napi_add_weight_locked);
-void napi_disable(struct napi_struct *n)
+void napi_disable_locked(struct napi_struct *n)
{
unsigned long val, new;
might_sleep();
+ netdev_assert_locked(n->dev);
+
set_bit(NAPI_STATE_DISABLE, &n->state);
val = READ_ONCE(n->state);
@@ -6501,21 +7075,40 @@ void napi_disable(struct napi_struct *n)
hrtimer_cancel(&n->timer);
+ if (n->config)
+ napi_save_config(n);
+ else
+ napi_hash_del(n);
+
clear_bit(NAPI_STATE_DISABLE, &n->state);
}
-EXPORT_SYMBOL(napi_disable);
+EXPORT_SYMBOL(napi_disable_locked);
/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
+ * napi_disable() - prevent NAPI from scheduling
+ * @n: NAPI context
*
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
+ * Stop NAPI from being scheduled on this context.
+ * Waits till any outstanding processing completes.
+ * Takes netdev_lock() for associated net_device.
*/
-void napi_enable(struct napi_struct *n)
+void napi_disable(struct napi_struct *n)
+{
+ netdev_lock(n->dev);
+ napi_disable_locked(n);
+ netdev_unlock(n->dev);
+}
+EXPORT_SYMBOL(napi_disable);
+
+void napi_enable_locked(struct napi_struct *n)
{
unsigned long new, val = READ_ONCE(n->state);
+ if (n->config)
+ napi_restore_config(n);
+ else
+ napi_hash_add(n);
+
do {
BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
@@ -6524,6 +7117,22 @@ void napi_enable(struct napi_struct *n)
new |= NAPIF_STATE_THREADED;
} while (!try_cmpxchg(&n->state, &val, new));
}
+EXPORT_SYMBOL(napi_enable_locked);
+
+/**
+ * napi_enable() - enable NAPI scheduling
+ * @n: NAPI context
+ *
+ * Enable scheduling of a NAPI instance.
+ * Must be paired with napi_disable().
+ * Takes netdev_lock() for associated net_device.
+ */
+void napi_enable(struct napi_struct *n)
+{
+ netdev_lock(n->dev);
+ napi_enable_locked(n);
+ netdev_unlock(n->dev);
+}
EXPORT_SYMBOL(napi_enable);
static void flush_gro_hash(struct napi_struct *napi)
@@ -6540,12 +7149,18 @@ static void flush_gro_hash(struct napi_struct *napi)
}
/* Must be called in process context */
-void __netif_napi_del(struct napi_struct *napi)
+void __netif_napi_del_locked(struct napi_struct *napi)
{
+ netdev_assert_locked(napi->dev);
+
if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
return;
- napi_hash_del(napi);
+ if (napi->config) {
+ napi->index = -1;
+ napi->config = NULL;
+ }
+
list_del_rcu(&napi->dev_list);
napi_free_frags(napi);
@@ -6557,7 +7172,7 @@ void __netif_napi_del(struct napi_struct *napi)
napi->thread = NULL;
}
}
-EXPORT_SYMBOL(__netif_napi_del);
+EXPORT_SYMBOL(__netif_napi_del_locked);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
@@ -6654,8 +7269,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
static int napi_thread_wait(struct napi_struct *napi)
{
- bool woken = false;
-
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -6664,15 +7277,13 @@ static int napi_thread_wait(struct napi_struct *napi)
* Testing SCHED bit is not enough because SCHED bit might be
* set by some other busy poll thread or by napi_disable().
*/
- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
WARN_ON(!list_empty(&napi->poll_list));
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
- /* woken being true indicates this thread owns this napi. */
- woken = true;
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
@@ -6680,73 +7291,66 @@ static int napi_thread_wait(struct napi_struct *napi)
return -1;
}
-static void skb_defer_free_flush(struct softnet_data *sd)
+static void napi_threaded_poll_loop(struct napi_struct *napi)
{
- struct sk_buff *skb, *next;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct softnet_data *sd;
+ unsigned long last_qs = jiffies;
- /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
- if (!READ_ONCE(sd->defer_list))
- return;
+ for (;;) {
+ bool repoll = false;
+ void *have;
- spin_lock(&sd->defer_lock);
- skb = sd->defer_list;
- sd->defer_list = NULL;
- sd->defer_count = 0;
- spin_unlock(&sd->defer_lock);
+ local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
- while (skb != NULL) {
- next = skb->next;
- napi_consume_skb(skb, 1);
- skb = next;
- }
-}
+ sd = this_cpu_ptr(&softnet_data);
+ sd->in_napi_threaded_poll = true;
-static int napi_threaded_poll(void *data)
-{
- struct napi_struct *napi = data;
- struct softnet_data *sd;
- void *have;
+ have = netpoll_poll_lock(napi);
+ __napi_poll(napi, &repoll);
+ netpoll_poll_unlock(have);
- while (!napi_thread_wait(napi)) {
- for (;;) {
- bool repoll = false;
+ sd->in_napi_threaded_poll = false;
+ barrier();
- local_bh_disable();
- sd = this_cpu_ptr(&softnet_data);
- sd->in_napi_threaded_poll = true;
+ if (sd_has_rps_ipi_waiting(sd)) {
+ local_irq_disable();
+ net_rps_action_and_irq_enable(sd);
+ }
+ skb_defer_free_flush(sd);
+ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
- have = netpoll_poll_lock(napi);
- __napi_poll(napi, &repoll);
- netpoll_poll_unlock(have);
+ if (!repoll)
+ break;
- sd->in_napi_threaded_poll = false;
- barrier();
+ rcu_softirq_qs_periodic(last_qs);
+ cond_resched();
+ }
+}
- if (sd_has_rps_ipi_waiting(sd)) {
- local_irq_disable();
- net_rps_action_and_irq_enable(sd);
- }
- skb_defer_free_flush(sd);
- local_bh_enable();
+static int napi_threaded_poll(void *data)
+{
+ struct napi_struct *napi = data;
- if (!repoll)
- break;
+ while (!napi_thread_wait(napi))
+ napi_threaded_poll_loop(napi);
- cond_resched();
- }
- }
return 0;
}
-static __latent_entropy void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
- int budget = READ_ONCE(netdev_budget);
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
start:
sd->in_net_rx_action = true;
local_irq_disable();
@@ -6799,7 +7403,8 @@ start:
sd->in_net_rx_action = false;
net_rps_action_and_irq_enable(sd);
-end:;
+end:
+ bpf_net_ctx_clear(bpf_net_ctx);
}
struct netdev_adjacent {
@@ -8415,27 +9020,29 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags;
+ unsigned int promiscuity, flags;
kuid_t uid;
kgid_t gid;
ASSERT_RTNL();
- dev->flags |= IFF_PROMISC;
- dev->promiscuity += inc;
- if (dev->promiscuity == 0) {
+ promiscuity = dev->promiscuity + inc;
+ if (promiscuity == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch promisc and return error.
*/
- if (inc < 0)
- dev->flags &= ~IFF_PROMISC;
- else {
- dev->promiscuity -= inc;
+ if (unlikely(inc > 0)) {
netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
+ flags = old_flags & ~IFF_PROMISC;
+ } else {
+ flags = old_flags | IFF_PROMISC;
}
- if (dev->flags != old_flags) {
+ WRITE_ONCE(dev->promiscuity, promiscuity);
+ if (flags != old_flags) {
+ WRITE_ONCE(dev->flags, flags);
netdev_info(dev, "%s promiscuous mode\n",
dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
@@ -8486,25 +9093,27 @@ EXPORT_SYMBOL(dev_set_promiscuity);
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
+ unsigned int allmulti, flags;
ASSERT_RTNL();
- dev->flags |= IFF_ALLMULTI;
- dev->allmulti += inc;
- if (dev->allmulti == 0) {
+ allmulti = dev->allmulti + inc;
+ if (allmulti == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch allmulti and return error.
*/
- if (inc < 0)
- dev->flags &= ~IFF_ALLMULTI;
- else {
- dev->allmulti -= inc;
+ if (unlikely(inc > 0)) {
netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
+ flags = old_flags & ~IFF_ALLMULTI;
+ } else {
+ flags = old_flags | IFF_ALLMULTI;
}
- if (dev->flags ^ old_flags) {
+ WRITE_ONCE(dev->allmulti, allmulti);
+ if (flags != old_flags) {
+ WRITE_ONCE(dev->flags, flags);
netdev_info(dev, "%s allmulticast mode\n",
dev->flags & IFF_ALLMULTI ? "entered" : "left");
dev_change_rx_flags(dev, IFF_ALLMULTI);
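
The rewritten __dev_set_promiscuity() and __dev_set_allmulti() compute the new counter and flag word into locals first and only publish them, via WRITE_ONCE(), once the overflow check has passed, so lockless readers of dev->flags and the counters never observe a transiently bumped value that is about to be rolled back. A standalone sketch of that overflow-safe counted-flag update; the struct and plain stores are illustrative, standing in for net_device and WRITE_ONCE():

    #include <errno.h>
    #include <stdio.h>

    #define IFF_PROMISC 0x100

    struct dev { unsigned int flags; unsigned int promiscuity; };

    static int set_promiscuity(struct dev *dev, int inc)
    {
        unsigned int promiscuity = dev->promiscuity + inc;
        unsigned int flags;

        if (promiscuity == 0) {
            /* inc > 0 wrapping the counter to zero means overflow: refuse it */
            if (inc > 0)
                return -EOVERFLOW;
            flags = dev->flags & ~IFF_PROMISC;
        } else {
            flags = dev->flags | IFF_PROMISC;
        }

        dev->promiscuity = promiscuity;   /* WRITE_ONCE() in the kernel version */
        dev->flags = flags;
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0, 0 };

        set_promiscuity(&d, 1);
        printf("flags %#x count %u\n", d.flags, d.promiscuity);
        set_promiscuity(&d, -1);
        printf("flags %#x count %u\n", d.flags, d.promiscuity);
        return 0;
    }
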
@@ -8586,12 +9195,12 @@ unsigned int dev_get_flags(const struct net_device *dev)
{
unsigned int flags;
- flags = (dev->flags & ~(IFF_PROMISC |
+ flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
- (dev->gflags & (IFF_PROMISC |
+ (READ_ONCE(dev->gflags) & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
@@ -8830,7 +9439,7 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
return -ERANGE;
if (new_len != orig_len) {
- dev->tx_queue_len = new_len;
+ WRITE_ONCE(dev->tx_queue_len, new_len);
res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
res = notifier_to_errno(res);
if (res)
@@ -8844,7 +9453,7 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
err_rollback:
netdev_err(dev, "refused to change device tx_queue_len\n");
- dev->tx_queue_len = orig_len;
+ WRITE_ONCE(dev->tx_queue_len, orig_len);
return res;
}
@@ -8914,7 +9523,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
}
EXPORT_SYMBOL(dev_set_mac_address);
-static DECLARE_RWSEM(dev_addr_sem);
+DECLARE_RWSEM(dev_addr_sem);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
@@ -9082,7 +9691,7 @@ EXPORT_SYMBOL(netdev_port_same_parent_id);
*/
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
- if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
+ if (!dev->change_proto_down)
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
@@ -9090,7 +9699,7 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
- dev->proto_down = proto_down;
+ WRITE_ONCE(dev->proto_down, proto_down);
return 0;
}
@@ -9104,18 +9713,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
u32 value)
{
+ u32 proto_down_reason;
int b;
if (!mask) {
- dev->proto_down_reason = value;
+ proto_down_reason = value;
} else {
+ proto_down_reason = dev->proto_down_reason;
for_each_set_bit(b, &mask, 32) {
if (value & (1 << b))
- dev->proto_down_reason |= BIT(b);
+ proto_down_reason |= BIT(b);
else
- dev->proto_down_reason &= ~BIT(b);
+ proto_down_reason &= ~BIT(b);
}
}
+ WRITE_ONCE(dev->proto_down_reason, proto_down_reason);
}
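
dev_change_proto_down_reason() now builds the new reason bitmap in a local and stores it once at the end, again so a concurrent lockless reader sees either the old or the new mask but never an intermediate bit-by-bit state. A small userspace version of the masked bit update (plain return value instead of WRITE_ONCE()):

    #include <stdio.h>

    /* Update only the bits selected by @mask to the values given in @value;
     * with mask == 0 the whole word is replaced, mirroring the kernel helper. */
    static unsigned int update_reason(unsigned int cur, unsigned int mask, unsigned int value)
    {
        unsigned int reason;

        if (!mask)
            return value;

        reason = cur;
        for (int b = 0; b < 32; b++) {
            if (!(mask & (1u << b)))
                continue;
            if (value & (1u << b))
                reason |= 1u << b;
            else
                reason &= ~(1u << b);
        }
        return reason;
    }

    int main(void)
    {
        unsigned int reason = 0x0f;

        reason = update_reason(reason, 0x06, 0x02);  /* clear bit 2, keep bit 1 set */
        printf("%#x\n", reason);                     /* 0x0b */
        return 0;
    }
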
struct bpf_xdp_link {
@@ -9176,6 +9788,40 @@ u8 dev_xdp_prog_count(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
+u8 dev_xdp_sb_prog_count(struct net_device *dev)
+{
+ u8 count = 0;
+ int i;
+
+ for (i = 0; i < __MAX_XDP_MODE; i++)
+ if (dev->xdp_state[i].prog &&
+ !dev->xdp_state[i].prog->aux->xdp_has_frags)
+ count++;
+ return count;
+}
+
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ if (!dev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+
+ if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ bpf->command == XDP_SETUP_PROG &&
+ bpf->prog && !bpf->prog->aux->xdp_has_frags) {
+ NL_SET_ERR_MSG(bpf->extack,
+ "unable to propagate XDP to device using tcp-data-split");
+ return -EBUSY;
+ }
+
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
+ return -EBUSY;
+ }
+
+ return dev->netdev_ops->ndo_bpf(dev, bpf);
+}
+EXPORT_SYMBOL_GPL(dev_xdp_propagate);
+
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
@@ -9204,6 +9850,17 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
struct netdev_bpf xdp;
int err;
+ if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ prog && !prog->aux->xdp_has_frags) {
+ NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split");
+ return -EBUSY;
+ }
+
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
+ return -EBUSY;
+ }
+
memset(&xdp, 0, sizeof(xdp));
xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
xdp.extack = extack;
@@ -9352,6 +10009,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Program bound to different device");
return -EINVAL;
}
+ if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
+ NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
+ return -EINVAL;
+ }
if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
return -EINVAL;
@@ -9628,6 +10289,20 @@ err_out:
return err;
}
+u32 dev_get_min_mp_channel_count(const struct net_device *dev)
+{
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
+ if (dev->_rx[i].mp_params.mp_priv)
+ /* The channel count is the idx plus 1. */
+ return i + 1;
+
+ return 0;
+}
+
/**
* dev_index_reserve() - allocate an ifindex in a namespace
* @net: the applicable net namespace
@@ -9665,14 +10340,23 @@ static void dev_index_release(struct net *net, int ifindex)
WARN_ON(xa_erase(&net->dev_by_index, ifindex));
}
+static bool from_cleanup_net(void)
+{
+#ifdef CONFIG_NET_NS
+ return current == cleanup_net_task;
+#else
+ return false;
+#endif
+}
+
/* Delayed registration/unregisteration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
- atomic_inc(&dev_net(dev)->dev_unreg_count);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -9719,6 +10403,15 @@ static void netdev_sync_lower_features(struct net_device *upper,
}
}
+static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
+{
+ netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
+ bool hw_csum = features & NETIF_F_HW_CSUM;
+
+ return ip_csum || hw_csum;
+}
+
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -9800,15 +10493,9 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_LRO;
}
- if (features & NETIF_F_HW_TLS_TX) {
- bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- bool hw_csum = features & NETIF_F_HW_CSUM;
-
- if (!ip_csum && !hw_csum) {
- netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
- features &= ~NETIF_F_HW_TLS_TX;
- }
+ if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
@@ -9816,6 +10503,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_HW_TLS_RX;
}
+ if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
+ features &= ~NETIF_F_GSO_UDP_L4;
+ }
+
return features;
}
@@ -10120,6 +10812,17 @@ static void netdev_do_free_pcpu_stats(struct net_device *dev)
}
}
+static void netdev_free_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+
+ if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
+ xa_destroy(&topo->phys);
+ kfree(topo);
+ dev->link_topo = NULL;
+ }
+}
+
/**
* register_netdevice() - register a network device
* @dev: device to register
@@ -10149,6 +10852,10 @@ int register_netdevice(struct net_device *dev)
if (ret)
return ret;
+ /* rss ctx ID 0 is reserved for the default context, start from 1 */
+ xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
+ mutex_init(&dev->ethtool->rss_lock);
+
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
@@ -10237,9 +10944,11 @@ int register_netdevice(struct net_device *dev)
goto err_ifindex_release;
ret = netdev_register_kobject(dev);
- write_lock(&dev_base_lock);
- dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
- write_unlock(&dev_base_lock);
+
+ netdev_lock(dev);
+ WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
+ netdev_unlock(dev);
+
if (ret)
goto err_uninit_notify;
@@ -10305,49 +11014,26 @@ err_free_name:
}
EXPORT_SYMBOL(register_netdevice);
-/**
- * init_dummy_netdev - init a dummy network device for NAPI
- * @dev: device to init
- *
- * This takes a network device structure and initialize the minimum
- * amount of fields so it can be used to schedule NAPI polls without
- * registering a full blown interface. This is to be used by drivers
- * that need to tie several hardware interfaces to a single NAPI
- * poll scheduler due to HW limitations.
+/* Initialize the core of a dummy net device.
+ * This performs the setup steps that dummy netdevs need, which normal
+ * netdevs get by going through register_netdevice().
*/
-int init_dummy_netdev(struct net_device *dev)
+static void init_dummy_netdev(struct net_device *dev)
{
- /* Clear everything. Note we don't initialize spinlocks
- * are they aren't supposed to be taken by any of the
- * NAPI code and this dummy netdev is supposed to be
- * only ever used for NAPI polls
- */
- memset(dev, 0, sizeof(struct net_device));
-
/* make sure we BUG if trying to hit standard
* register/unregister code path
*/
dev->reg_state = NETREG_DUMMY;
- /* NAPI wants this */
- INIT_LIST_HEAD(&dev->napi_list);
-
/* a dummy interface is started by default */
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
- /* napi_busy_loop stats accounting wants this */
- dev_net_set(dev, &init_net);
-
/* Note : We dont allocate pcpu_refcnt for dummy devices,
* because users of this 'device' dont need to change
* its refcount.
*/
-
- return 0;
}
-EXPORT_SYMBOL_GPL(init_dummy_netdev);
-
/**
* register_netdev - register a network device
@@ -10364,12 +11050,16 @@ EXPORT_SYMBOL_GPL(init_dummy_netdev);
*/
int register_netdev(struct net_device *dev)
{
+ struct net *net = dev_net(dev);
int err;
- if (rtnl_lock_killable())
+ if (rtnl_net_lock_killable(net))
return -EINTR;
+
err = register_netdevice(dev);
- rtnl_unlock();
+
+ rtnl_net_unlock(net);
+
return err;
}
EXPORT_SYMBOL(register_netdev);
@@ -10446,8 +11136,9 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
rebroadcast_time = jiffies;
}
+ rcu_barrier();
+
if (!wait) {
- rcu_barrier();
wait = WAIT_REFS_MIN_MSECS;
} else {
msleep(wait);
@@ -10499,6 +11190,7 @@ void netdev_run_todo(void)
{
struct net_device *dev, *tmp;
struct list_head list;
+ int cnt;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
@@ -10529,12 +11221,13 @@ void netdev_run_todo(void)
continue;
}
- write_lock(&dev_base_lock);
- dev->reg_state = NETREG_UNREGISTERED;
- write_unlock(&dev_base_lock);
+ netdev_lock(dev);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
+ netdev_unlock(dev);
linkwatch_sync_dev(dev);
}
+ cnt = 0;
while (!list_empty(&list)) {
dev = netdev_wait_allrefs_any(&list);
list_del(&dev->todo_list);
@@ -10552,12 +11245,61 @@ void netdev_run_todo(void)
if (dev->needs_free_netdev)
free_netdev(dev);
- if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
- wake_up(&netdev_unregistering_wq);
+ cnt++;
/* Free network device */
kobject_put(&dev->dev.kobj);
}
+ if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
+ wake_up(&netdev_unregistering_wq);
+}
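
Unregistration bookkeeping moves from a per-netns counter to the global dev_unreg_count: unregister_netdevice_many_notify() adds the batch size once, and netdev_run_todo() subtracts the number of devices it actually freed, waking waiters only when the count reaches zero. A standalone sketch of that batched counter using C11 atomics; the waiter is simulated with a printout rather than a wait queue, and the names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int unreg_count;

    static void queue_unregister(int batch)
    {
        atomic_fetch_add(&unreg_count, batch);      /* like atomic_add(cnt, &dev_unreg_count) */
    }

    static bool run_todo(int freed)
    {
        /* atomic_sub_and_test() in the kernel returns true when the counter hits zero */
        if (freed && atomic_fetch_sub(&unreg_count, freed) - freed == 0) {
            puts("all unregistrations done, wake waiters");
            return true;
        }
        return false;
    }

    int main(void)
    {
        queue_unregister(3);
        run_todo(2);        /* still one device pending */
        run_todo(1);        /* now zero: wake */
        return 0;
    }
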
+
+/* Collate per-cpu network dstats statistics
+ *
+ * Read per-cpu network statistics from dev->dstats and populate the related
+ * fields in @s.
+ */
+static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_dstats __percpu *dstats)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ u64 rx_packets, rx_bytes, rx_drops;
+ u64 tx_packets, tx_bytes, tx_drops;
+ const struct pcpu_dstats *stats;
+ unsigned int start;
+
+ stats = per_cpu_ptr(dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_packets = u64_stats_read(&stats->rx_packets);
+ rx_bytes = u64_stats_read(&stats->rx_bytes);
+ rx_drops = u64_stats_read(&stats->rx_drops);
+ tx_packets = u64_stats_read(&stats->tx_packets);
+ tx_bytes = u64_stats_read(&stats->tx_bytes);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ s->rx_packets += rx_packets;
+ s->rx_bytes += rx_bytes;
+ s->rx_dropped += rx_drops;
+ s->tx_packets += tx_packets;
+ s->tx_bytes += tx_bytes;
+ s->tx_dropped += tx_drops;
+ }
+}
+
+/* ndo_get_stats64 implementation for dstats-based accounting.
+ *
+ * Populate @s from dev->stats and dev->dstats. This is used internally by the
+ * core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
+ */
+static void dev_get_dstats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ netdev_stats_to_stats64(s, &dev->stats);
+ dev_fetch_dstats(s, dev->dstats);
}
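
dev_fetch_dstats() folds the per-CPU pcpu_dstats counters into one rtnl_link_stats64, rereading each CPU's snapshot under the u64_stats_fetch_begin()/retry() loop so a concurrent writer cannot tear a 64-bit counter on 32-bit hosts. A standalone userspace analogue of that aggregation; the seqcount retry is modelled with a plain sequence field and never fires in this single-threaded demo, but the shape matches:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct dstats  { unsigned int seq; uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes; };
    struct stats64 { uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes; };

    static struct dstats percpu[NR_CPUS];

    static void fetch_dstats(struct stats64 *s)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            const struct dstats *st = &percpu[cpu];
            uint64_t rxp, rxb, txp, txb;
            unsigned int start;

            do {                    /* reader side of a seqcount-style retry loop */
                start = st->seq;
                rxp = st->rx_packets;
                rxb = st->rx_bytes;
                txp = st->tx_packets;
                txb = st->tx_bytes;
            } while (start != st->seq);

            s->rx_packets += rxp;
            s->rx_bytes   += rxb;
            s->tx_packets += txp;
            s->tx_bytes   += txb;
        }
    }

    int main(void)
    {
        struct stats64 total = { 0 };

        percpu[0].rx_packets = 10; percpu[0].rx_bytes = 1500;
        percpu[2].tx_packets = 4;  percpu[2].tx_bytes = 256;
        fetch_dstats(&total);
        printf("rx %llu/%llu tx %llu/%llu\n",
               (unsigned long long)total.rx_packets, (unsigned long long)total.rx_bytes,
               (unsigned long long)total.tx_packets, (unsigned long long)total.tx_bytes);
        return 0;
    }
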
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
@@ -10608,7 +11350,7 @@ noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
return;
}
- field = (__force unsigned long __percpu *)((__force void *)p + offset);
+ field = (unsigned long __percpu *)((void __percpu *)p + offset);
this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
@@ -10629,11 +11371,29 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
const struct net_device_core_stats __percpu *p;
+ /*
+ * IPv{4,6} and UDP tunnels share common stat helpers and use
+ * different stat types (NETDEV_PCPU_STAT_TSTATS vs
+ * NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent.
+ */
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) !=
+ offsetof(struct pcpu_dstats, rx_bytes));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) !=
+ offsetof(struct pcpu_dstats, rx_packets));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) !=
+ offsetof(struct pcpu_dstats, tx_bytes));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) !=
+ offsetof(struct pcpu_dstats, tx_packets));
+
if (ops->ndo_get_stats64) {
memset(storage, 0, sizeof(*storage));
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ dev_get_tstats64(dev, storage);
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
+ dev_get_dstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -10745,19 +11505,12 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
WARN_ON(dev->reg_state == NETREG_REGISTERED);
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
- dev->gro_flush_timeout = 20000;
- dev->napi_defer_hard_irqs = 1;
+ netdev_set_gro_flush_timeout(dev, 20000);
+ netdev_set_defer_hard_irqs(dev, 1);
}
}
EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
-void netdev_freemem(struct net_device *dev)
-{
- char *addr = (char *)dev - dev->padded;
-
- kvfree(addr);
-}
-
/**
* alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
@@ -10777,8 +11530,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
- unsigned int alloc_size;
- struct net_device *p;
+ size_t napi_config_sz;
+ unsigned int maxqs;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -10792,21 +11545,14 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
return NULL;
}
- alloc_size = sizeof(struct net_device);
- if (sizeof_priv) {
- /* ensure 32-byte alignment of private area */
- alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
- alloc_size += sizeof_priv;
- }
- /* ensure 32-byte alignment of whole construct */
- alloc_size += NETDEV_ALIGN - 1;
+ maxqs = max(txqs, rxqs);
- p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
- if (!p)
+ dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!dev)
return NULL;
- dev = PTR_ALIGN(p, NETDEV_ALIGN);
- dev->padded = (char *)dev - (char *)p;
+ dev->priv_len = sizeof_priv;
ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
#ifdef CONFIG_PCPU_DEV_REFCNT
@@ -10853,6 +11599,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
#ifdef CONFIG_NET_SCHED
hash_init(dev->qdisc_hash);
#endif
+
+ mutex_init(&dev->lock);
+
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
@@ -10870,8 +11619,21 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
+ dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
+ if (!dev->ethtool)
+ goto free_all;
- strcpy(dev->name, name);
+ dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT);
+ if (!dev->cfg)
+ goto free_all;
+ dev->cfg_pending = dev->cfg;
+
+ napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
+ dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
+ if (!dev->napi_config)
+ goto free_all;
+
+ strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
dev->group = INIT_NETDEV_GROUP;
if (!dev->ethtool_ops)
@@ -10890,11 +11652,27 @@ free_pcpu:
free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
- netdev_freemem(dev);
+ kvfree(dev);
return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
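
alloc_netdev_mqs() drops the manual NETDEV_ALIGN padding and the dev->padded offset: the private area is now a flexible array member sized with struct_size(), and its length is recorded in dev->priv_len. A toy userspace equivalent of that layout; the struct, names and calloc() call are illustrative, and unlike struct_size() this sketch does not add overflow checking on the size sum:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy equivalent of the netdev + private-area layout: one allocation,
     * private data follows the struct as a flexible array member. */
    struct toy_netdev {
        char name[16];
        unsigned int priv_len;
        unsigned char priv[];
    };

    static struct toy_netdev *toy_alloc(size_t sizeof_priv)
    {
        struct toy_netdev *dev = calloc(1, sizeof(*dev) + sizeof_priv);

        if (!dev)
            return NULL;
        dev->priv_len = sizeof_priv;
        return dev;
    }

    static void *toy_priv(struct toy_netdev *dev)
    {
        return dev->priv;            /* no padded/alignment bookkeeping needed */
    }

    int main(void)
    {
        struct toy_netdev *dev = toy_alloc(64);

        if (!dev)
            return 1;
        strcpy(dev->name, "toy0");
        memset(toy_priv(dev), 0xab, dev->priv_len);
        printf("%s: priv at %p, %u bytes\n", dev->name, toy_priv(dev), dev->priv_len);
        free(dev);
        return 0;
    }
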
+static void netdev_napi_exit(struct net_device *dev)
+{
+ if (!list_empty(&dev->napi_list)) {
+ struct napi_struct *p, *n;
+
+ netdev_lock(dev);
+ list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
+ __netif_napi_del_locked(p);
+ netdev_unlock(dev);
+
+ synchronize_net();
+ }
+
+ kvfree(dev->napi_config);
+}
+
/**
* free_netdev - free network device
* @dev: device
@@ -10906,8 +11684,6 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
*/
void free_netdev(struct net_device *dev)
{
- struct napi_struct *p, *n;
-
might_sleep();
/* When called immediately after register_netdevice() failed the unwind
@@ -10920,6 +11696,9 @@ void free_netdev(struct net_device *dev)
return;
}
+ WARN_ON(dev->cfg != dev->cfg_pending);
+ kfree(dev->cfg);
+ kfree(dev->ethtool);
netif_free_tx_queues(dev);
netif_free_rx_queues(dev);
@@ -10928,8 +11707,7 @@ void free_netdev(struct net_device *dev)
/* Flush device addresses */
dev_addr_flush(dev);
- list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
- netif_napi_del(p);
+ netdev_napi_exit(dev);
ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
@@ -10941,14 +11719,19 @@ void free_netdev(struct net_device *dev)
free_percpu(dev->xdp_bulkq);
dev->xdp_bulkq = NULL;
+ netdev_free_phy_link_topology(dev);
+
+ mutex_destroy(&dev->lock);
+
/* Compatibility with error handling in drivers */
- if (dev->reg_state == NETREG_UNINITIALIZED) {
- netdev_freemem(dev);
+ if (dev->reg_state == NETREG_UNINITIALIZED ||
+ dev->reg_state == NETREG_DUMMY) {
+ kvfree(dev);
return;
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
- dev->reg_state = NETREG_RELEASED;
+ WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
/* will free via device release */
put_device(&dev->dev);
@@ -10956,6 +11739,19 @@ void free_netdev(struct net_device *dev)
EXPORT_SYMBOL(free_netdev);
/**
+ * alloc_netdev_dummy - Allocate and initialize a dummy net device.
+ * @sizeof_priv: size of private data to allocate space for
+ *
+ * Return: the allocated net_device on success, NULL otherwise
+ */
+struct net_device *alloc_netdev_dummy(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
+ init_dummy_netdev);
+}
+EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
+
+/**
* synchronize_net - Synchronize with packet receive processing
*
* Wait for packets currently being received to be done.
@@ -10964,13 +11760,41 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
- if (rtnl_is_locked())
+ if (from_cleanup_net() || rtnl_is_locked())
synchronize_rcu_expedited();
else
synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
+static void netdev_rss_contexts_free(struct net_device *dev)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
+ struct ethtool_rxfh_param rxfh;
+
+ rxfh.indir = ethtool_rxfh_context_indir(ctx);
+ rxfh.key = ethtool_rxfh_context_key(ctx);
+ rxfh.hfunc = ctx->hfunc;
+ rxfh.input_xfrm = ctx->input_xfrm;
+ rxfh.rss_context = context;
+ rxfh.rss_delete = true;
+
+ xa_erase(&dev->ethtool->rss_ctx, context);
+ if (dev->ethtool_ops->create_rxfh_context)
+ dev->ethtool_ops->remove_rxfh_context(dev, ctx,
+ context, NULL);
+ else
+ dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
+ kfree(ctx);
+ }
+ xa_destroy(&dev->ethtool->rss_ctx);
+ mutex_unlock(&dev->ethtool->rss_lock);
+}
+
/**
* unregister_netdevice_queue - remove device from the kernel
* @dev: device
@@ -11004,6 +11828,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
+ int cnt = 0;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -11035,10 +11860,10 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
- write_lock(&dev_base_lock);
- unlist_netdevice(dev, false);
- dev->reg_state = NETREG_UNREGISTERING;
- write_unlock(&dev_base_lock);
+ unlist_netdevice(dev);
+ netdev_lock(dev);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
+ netdev_unlock(dev);
}
flush_all_backlogs();
@@ -11052,6 +11877,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
dev_tcx_uninstall(dev);
dev_xdp_uninstall(dev);
bpf_dev_bound_netdev_unregister(dev);
+ dev_dmabuf_uninstall(dev);
netdev_offload_xstats_disable_all(dev);
@@ -11075,11 +11901,17 @@ void unregister_netdevice_many_notify(struct list_head *head,
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
+ netdev_rss_contexts_free(dev);
+
call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
+ mutex_destroy(&dev->ethtool->rss_lock);
+
+ net_shaper_flush_netdev(dev);
+
if (skb)
rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
@@ -11100,7 +11932,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
+ cnt++;
}
+ atomic_add(cnt, &dev_unreg_count);
list_del(head);
}
@@ -11110,7 +11944,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
* @head: list of devices
*
* Note: As most callers use a stack allocated list_head,
- * we force a list_del() to make sure stack wont be corrupted later.
+ * we force a list_del() to make sure stack won't be corrupted later.
*/
void unregister_netdevice_many(struct list_head *head)
{
@@ -11131,9 +11965,9 @@ EXPORT_SYMBOL(unregister_netdevice_many);
*/
void unregister_netdev(struct net_device *dev)
{
- rtnl_lock();
+ rtnl_net_dev_lock(dev);
unregister_netdevice(dev);
- rtnl_unlock();
+ rtnl_net_dev_unlock(dev);
}
EXPORT_SYMBOL(unregister_netdev);
@@ -11165,10 +11999,10 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* Don't allow namespace local devices to be moved. */
err = -EINVAL;
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
goto out;
- /* Ensure the device has been registrered */
+ /* Ensure the device has been registered */
if (dev->reg_state != NETREG_REGISTERED)
goto out;
@@ -11218,7 +12052,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
dev_close(dev);
/* And unlink it from device chain */
- unlist_netdevice(dev, true);
+ unlist_netdevice(dev);
synchronize_net();
@@ -11257,8 +12091,12 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
dev_net_set(dev, net);
dev->ifindex = new_ifindex;
- if (new_name[0]) /* Rename the netdev to prepared name */
+ if (new_name[0]) {
+ /* Rename the netdev to prepared name */
+ write_seqlock_bh(&netdev_rename_lock);
strscpy(dev->name, new_name, IFNAMSIZ);
+ write_sequnlock_bh(&netdev_rename_lock);
+ }
/* Fixup kobjects */
dev_set_uevent_suppress(&dev->dev, 1);
@@ -11333,7 +12171,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
list_del_init(&napi->poll_list);
if (napi->poll == process_backlog)
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
else
____napi_schedule(sd, napi);
}
@@ -11341,21 +12179,23 @@ static int dev_cpu_dead(unsigned int oldcpu)
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
- remsd = oldsd->rps_ipi_list;
- oldsd->rps_ipi_list = NULL;
+ remsd = oldsd->rps_ipi_list;
+ oldsd->rps_ipi_list = NULL;
#endif
- /* send out pending IPI's on offline CPU */
- net_rps_send_ipi(remsd);
+ /* send out pending IPI's on offline CPU */
+ net_rps_send_ipi(remsd);
+ }
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
netif_rx(skb);
- input_queue_head_incr(oldsd);
+ rps_input_queue_head_incr(oldsd);
}
while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx(skb);
- input_queue_head_incr(oldsd);
+ rps_input_queue_head_incr(oldsd);
}
return 0;
@@ -11541,7 +12381,7 @@ static void __net_exit default_device_exit_net(struct net *net)
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
continue;
/* Leave virtual devices for the generic cleanup */
@@ -11554,11 +12394,8 @@ static void __net_exit default_device_exit_net(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%%d");
netdev_for_each_altname_safe(dev, name_node, tmp)
- if (netdev_name_in_use(&init_net, name_node->name)) {
- netdev_name_node_del(name_node);
- synchronize_rcu();
+ if (netdev_name_in_use(&init_net, name_node->name))
__netdev_name_node_alt_destroy(name_node);
- }
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
@@ -11605,7 +12442,7 @@ static struct pernet_operations __net_initdata default_device_ops = {
static void __init net_dev_struct_check(void)
{
/* TX read-mostly hotpath */
- CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
@@ -11631,19 +12468,18 @@ static void __init net_dev_struct_check(void)
/* TXRX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
- CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 38);
+ CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);
/* RX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
- CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
- CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
@@ -11655,7 +12491,7 @@ static void __init net_dev_struct_check(void)
#ifdef CONFIG_NET_XGRESS
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
- CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104);
+ CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92);
}
/*
@@ -11665,6 +12501,67 @@ static void __init net_dev_struct_check(void)
*
*/
+/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
+#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
+
+static int net_page_pool_create(int cpuid)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ struct page_pool_params page_pool_params = {
+ .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+ .flags = PP_FLAG_SYSTEM_POOL,
+ .nid = cpu_to_mem(cpuid),
+ };
+ struct page_pool *pp_ptr;
+ int err;
+
+ pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
+ if (IS_ERR(pp_ptr))
+ return -ENOMEM;
+
+ err = xdp_reg_page_pool(pp_ptr);
+ if (err) {
+ page_pool_destroy(pp_ptr);
+ return err;
+ }
+
+ per_cpu(system_page_pool, cpuid) = pp_ptr;
+#endif
+ return 0;
+}
+
+static int backlog_napi_should_run(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+}
+
+static void run_backlog_napi(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+
+ napi_threaded_poll_loop(&sd->backlog);
+}
+
+static void backlog_napi_setup(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ napi->thread = this_cpu_read(backlog_napi);
+ set_bit(NAPI_STATE_THREADED, &napi->state);
+}
+
+static struct smp_hotplug_thread backlog_threads = {
+ .store = &backlog_napi,
+ .thread_should_run = backlog_napi_should_run,
+ .thread_fn = run_backlog_napi,
+ .thread_comm = "backlog_napi/%u",
+ .setup = backlog_napi_setup,
+};
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11683,7 +12580,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;
- INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
@@ -11694,12 +12590,13 @@ static int __init net_dev_init(void)
* Initialise the packet receive queues.
*/
+ flush_backlogs_fallback = flush_backlogs_alloc();
+ if (!flush_backlogs_fallback)
+ goto out;
+
for_each_possible_cpu(i) {
- struct work_struct *flush = per_cpu_ptr(&flush_works, i);
struct softnet_data *sd = &per_cpu(softnet_data, i);
- INIT_WORK(flush, flush_backlog);
-
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
@@ -11717,7 +12614,13 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+ INIT_LIST_HEAD(&sd->backlog.poll_list);
+
+ if (net_page_pool_create(i))
+ goto out;
}
+ if (use_backlog_threads())
+ smpboot_register_percpu_thread(&backlog_threads);
dev_boot_phase = 0;
@@ -11743,7 +12646,25 @@ static int __init net_dev_init(void)
NULL, dev_cpu_dead);
WARN_ON(rc < 0);
rc = 0;
+
+ /* avoid static key IPIs to isolated CPUs */
+ if (housekeeping_enabled(HK_TYPE_MISC))
+ net_enable_timestamp();
out:
+ if (rc < 0) {
+ for_each_possible_cpu(i) {
+ struct page_pool *pp_ptr;
+
+ pp_ptr = per_cpu(system_page_pool, i);
+ if (!pp_ptr)
+ continue;
+
+ xdp_unreg_page_pool(pp_ptr);
+ page_pool_destroy(pp_ptr);
+ per_cpu(system_page_pool, i) = NULL;
+ }
+ }
+
return rc;
}
diff --git a/net/core/dev.h b/net/core/dev.h
index 7480b4c84298..a5b166bbd169 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -2,12 +2,12 @@
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H
+#include <linux/cleanup.h>
#include <linux/types.h>
+#include <linux/rwsem.h>
+#include <linux/netdevice.h>
struct net;
-struct net_device;
-struct netdev_bpf;
-struct netdev_phys_item_id;
struct netlink_ext_ack;
struct cpumask;
@@ -23,6 +23,23 @@ struct sd_flow_limit {
extern int netdev_flow_limit_table_len;
+struct napi_struct *
+netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
+struct net_device *dev_get_by_napi_id(unsigned int napi_id);
+
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
+struct net_device *__netdev_put_lock(struct net_device *dev);
+struct net_device *
+netdev_xa_find_lock(struct net *net, struct net_device *dev,
+ unsigned long *index);
+
+DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));
+
+#define for_each_netdev_lock_scoped(net, var_name, ifindex) \
+ for (struct net_device *var_name __free(netdev_unlock) = NULL; \
+ (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \
+ ifindex++)
+
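
DEFINE_FREE(netdev_unlock, ...) together with __free() from linux/cleanup.h gives the iterator macro scope-based unlocking: whenever the loop variable goes out of scope, netdev_unlock() runs automatically. The underlying mechanism is the compiler's cleanup attribute; a standalone illustration of that pattern, where lock and unlock are just printouts standing in for the netdev instance lock:

    #include <stdio.h>

    struct dev { const char *name; };

    static void dev_unlock(struct dev **d)
    {
        if (*d)
            printf("unlock %s\n", (*d)->name);   /* netdev_unlock() in the kernel */
    }

    /* __free(netdev_unlock) expands to roughly this attribute under the hood. */
    #define guard_unlock __attribute__((cleanup(dev_unlock)))

    int main(void)
    {
        struct dev eth0 = { "eth0" };

        {
            struct dev *d guard_unlock = &eth0;
            printf("locked %s, doing work\n", d->name);
        }   /* dev_unlock() runs here, including on early-exit paths */

        return 0;
    }
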
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
@@ -36,16 +53,24 @@ void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+void net_shaper_flush_netdev(struct net_device *dev);
+void net_shaper_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq);
+#else
+static inline void net_shaper_flush_netdev(struct net_device *dev) {}
+static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq) {}
+#endif
+
/* sysctls not referred to from outside net/core/ */
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
-extern unsigned int sysctl_skb_defer_max;
-extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
+extern struct rw_semaphore dev_addr_sem;
+
/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
@@ -56,6 +81,7 @@ struct netdev_name_node {
struct list_head list;
struct net_device *dev;
const char *name;
+ struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
@@ -101,6 +127,18 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh);
+static inline void netif_set_up(struct net_device *dev, bool value)
+{
+ if (value)
+ dev->flags |= IFF_UP;
+ else
+ dev->flags &= ~IFF_UP;
+
+ netdev_lock(dev);
+ dev->up = value;
+ netdev_unlock(dev);
+}
+
static inline void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
@@ -140,6 +178,119 @@ static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
+/**
+ * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
+ * @n: napi struct to get the defer_hard_irqs field from
+ *
+ * Return: the per-NAPI value of the defer_hard_irqs field.
+ */
+static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
+{
+ return READ_ONCE(n->defer_hard_irqs);
+}
+
+/**
+ * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
+ * @n: napi_struct to set the defer_hard_irqs field
+ * @defer: the value the field should be set to
+ */
+static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
+{
+ WRITE_ONCE(n->defer_hard_irqs, defer);
+}
+
+/**
+ * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
+ * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
+ * @defer: the defer_hard_irqs value to set
+ */
+static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
+ u32 defer)
+{
+ unsigned int count = max(netdev->num_rx_queues,
+ netdev->num_tx_queues);
+ struct napi_struct *napi;
+ int i;
+
+ WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
+ list_for_each_entry(napi, &netdev->napi_list, dev_list)
+ napi_set_defer_hard_irqs(napi, defer);
+
+ for (i = 0; i < count; i++)
+ netdev->napi_config[i].defer_hard_irqs = defer;
+}
+
+/**
+ * napi_get_gro_flush_timeout - get the gro_flush_timeout
+ * @n: napi struct to get the gro_flush_timeout from
+ *
+ * Return: the per-NAPI value of the gro_flush_timeout field.
+ */
+static inline unsigned long
+napi_get_gro_flush_timeout(const struct napi_struct *n)
+{
+ return READ_ONCE(n->gro_flush_timeout);
+}
+
+/**
+ * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
+ * @n: napi struct to set the gro_flush_timeout
+ * @timeout: timeout value to set
+ *
+ * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
+ */
+static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
+ unsigned long timeout)
+{
+ WRITE_ONCE(n->gro_flush_timeout, timeout);
+}
+
+/**
+ * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
+ * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
+ * @timeout: the timeout value to set
+ */
+static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
+ unsigned long timeout)
+{
+ unsigned int count = max(netdev->num_rx_queues,
+ netdev->num_tx_queues);
+ struct napi_struct *napi;
+ int i;
+
+ WRITE_ONCE(netdev->gro_flush_timeout, timeout);
+ list_for_each_entry(napi, &netdev->napi_list, dev_list)
+ napi_set_gro_flush_timeout(napi, timeout);
+
+ for (i = 0; i < count; i++)
+ netdev->napi_config[i].gro_flush_timeout = timeout;
+}
+
+/**
+ * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
+ * @n: napi struct to get the irq_suspend_timeout from
+ *
+ * Return: the per-NAPI value of the irq_suspend_timeout field.
+ */
+static inline unsigned long
+napi_get_irq_suspend_timeout(const struct napi_struct *n)
+{
+ return READ_ONCE(n->irq_suspend_timeout);
+}
+
+/**
+ * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
+ * @n: napi struct to set the irq_suspend_timeout
+ * @timeout: timeout value to set
+ *
+ * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
+ */
+static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
+ unsigned long timeout)
+{
+ WRITE_ONCE(n->irq_suspend_timeout, timeout);
+}
+
int rps_cpumask_housekeeping(struct cpumask *mask);
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
@@ -148,5 +299,48 @@ void xdp_do_check_flushed(struct napi_struct *napi);
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
-struct napi_struct *napi_by_id(unsigned int napi_id);
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+
+#define XMIT_RECURSION_LIMIT 8
+
+#ifndef CONFIG_PREEMPT_RT
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
+#else
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->net_xmit.recursion--;
+}
+#endif
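
On PREEMPT_RT the xmit recursion counter lives in the task (current->net_xmit.recursion) rather than in per-CPU softnet_data, because the transmitting task may be preempted and migrate CPUs mid-transmit. A standalone sketch of the guard itself, with a thread-local counter playing the role of either storage location; the recursive transmit() is only a stand-in for stacked devices such as tunnels:

    #include <stdbool.h>
    #include <stdio.h>

    #define XMIT_RECURSION_LIMIT 8

    static _Thread_local int xmit_recursion;   /* per-CPU or per-task in the kernel */

    static bool xmit_recursion_exceeded(void)
    {
        return xmit_recursion > XMIT_RECURSION_LIMIT;
    }

    static void transmit(int depth)
    {
        if (xmit_recursion_exceeded()) {
            printf("dropping at depth %d: recursion limit hit\n", depth);
            return;
        }
        xmit_recursion++;
        transmit(depth + 1);     /* e.g. a tunnel device transmitting via another dev */
        xmit_recursion--;
    }

    int main(void)
    {
        transmit(0);
        return 0;
    }
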
+
+int dev_set_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+int dev_get_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);
+
#endif
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index baa63dee2829..90716bd736f3 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -242,9 +242,9 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
__hw_addr_del_entry(from_list, ha, false, false);
}
-static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len)
+int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
{
int err = 0;
struct netdev_hw_addr *ha, *tmp;
@@ -260,9 +260,10 @@ static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
}
return err;
}
+EXPORT_SYMBOL(__hw_addr_sync_multiple);
/* This function only works where there is a strict 1-1 relationship
- * between source and destionation of they synch. If you ever need to
+ * between source and destination of the sync. If you ever need to
 * sync addresses to more than 1 destination, you need to use
* __hw_addr_sync_multiple().
*/
@@ -299,8 +300,8 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
EXPORT_SYMBOL(__hw_addr_unsync);
/**
- * __hw_addr_sync_dev - Synchonize device's multicast list
- * @list: address list to syncronize
+ * __hw_addr_sync_dev - Synchronize device's multicast list
+ * @list: address list to synchronize
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c
index 4dbd0dc6aea2..8e1dba825e94 100644
--- a/net/core/dev_addr_lists_test.c
+++ b/net/core/dev_addr_lists_test.c
@@ -49,7 +49,6 @@ static int dev_addr_test_init(struct kunit *test)
KUNIT_FAIL(test, "Can't register netdev %d", err);
}
- rtnl_lock();
return 0;
}
@@ -57,7 +56,6 @@ static void dev_addr_test_exit(struct kunit *test)
{
struct net_device *netdev = test->priv;
- rtnl_unlock();
unregister_netdev(netdev);
free_netdev(netdev);
}
@@ -67,6 +65,7 @@ static void dev_addr_test_basic(struct kunit *test)
struct net_device *netdev = test->priv;
u8 addr[ETH_ALEN];
+ rtnl_lock();
KUNIT_EXPECT_TRUE(test, !!netdev->dev_addr);
memset(addr, 2, sizeof(addr));
@@ -76,6 +75,7 @@ static void dev_addr_test_basic(struct kunit *test)
memset(addr, 3, sizeof(addr));
dev_addr_set(netdev, addr);
KUNIT_EXPECT_MEMEQ(test, netdev->dev_addr, addr, sizeof(addr));
+ rtnl_unlock();
}
static void dev_addr_test_sync_one(struct kunit *test)
@@ -86,6 +86,7 @@ static void dev_addr_test_sync_one(struct kunit *test)
datp = netdev_priv(netdev);
+ rtnl_lock();
memset(addr, 1, sizeof(addr));
eth_hw_addr_set(netdev, addr);
@@ -103,6 +104,7 @@ static void dev_addr_test_sync_one(struct kunit *test)
* considered synced and we overwrite in place.
*/
KUNIT_EXPECT_EQ(test, 0, datp->addr_seen);
+ rtnl_unlock();
}
static void dev_addr_test_add_del(struct kunit *test)
@@ -114,6 +116,7 @@ static void dev_addr_test_add_del(struct kunit *test)
datp = netdev_priv(netdev);
+ rtnl_lock();
for (i = 1; i < 4; i++) {
memset(addr, i, sizeof(addr));
KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
@@ -143,6 +146,7 @@ static void dev_addr_test_add_del(struct kunit *test)
__hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
dev_addr_test_unsync);
KUNIT_EXPECT_EQ(test, 1, datp->addr_seen);
+ rtnl_unlock();
}
static void dev_addr_test_del_main(struct kunit *test)
@@ -150,6 +154,7 @@ static void dev_addr_test_del_main(struct kunit *test)
struct net_device *netdev = test->priv;
u8 addr[ETH_ALEN];
+ rtnl_lock();
memset(addr, 1, sizeof(addr));
eth_hw_addr_set(netdev, addr);
@@ -161,6 +166,7 @@ static void dev_addr_test_del_main(struct kunit *test)
NETDEV_HW_ADDR_T_LAN));
KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr,
NETDEV_HW_ADDR_T_LAN));
+ rtnl_unlock();
}
static void dev_addr_test_add_set(struct kunit *test)
@@ -172,6 +178,7 @@ static void dev_addr_test_add_set(struct kunit *test)
datp = netdev_priv(netdev);
+ rtnl_lock();
/* There is no external API like dev_addr_add_excl(),
* so shuffle the tree a little bit and exploit aliasing.
*/
@@ -191,6 +198,7 @@ static void dev_addr_test_add_set(struct kunit *test)
__hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
dev_addr_test_unsync);
KUNIT_EXPECT_EQ(test, 0xffff, datp->addr_seen);
+ rtnl_unlock();
}
static void dev_addr_test_add_excl(struct kunit *test)
@@ -199,6 +207,7 @@ static void dev_addr_test_add_excl(struct kunit *test)
u8 addr[ETH_ALEN];
int i;
+ rtnl_lock();
for (i = 0; i < 10; i++) {
memset(addr, i, sizeof(addr));
KUNIT_EXPECT_EQ(test, 0, dev_uc_add_excl(netdev, addr));
@@ -213,6 +222,7 @@ static void dev_addr_test_add_excl(struct kunit *test)
memset(addr, i, sizeof(addr));
KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr));
}
+ rtnl_unlock();
}
static struct kunit_case dev_addr_test_cases[] = {
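
The test hunks above move RTNL acquisition out of the suite's init/exit callbacks and into each case, since the address helpers exercised here expect RTNL to be held only around the operations themselves. A minimal sketch of the resulting per-test pattern, using a hypothetical bar_case() (illustrative only):

static void bar_case(struct kunit *test)
{
	struct net_device *netdev = test->priv;
	u8 addr[ETH_ALEN];

	rtnl_lock();			/* taken per test, not in init/exit */
	memset(addr, 1, sizeof(addr));
	dev_addr_set(netdev, addr);
	KUNIT_EXPECT_MEMEQ(test, netdev->dev_addr, addr, sizeof(addr));
	rtnl_unlock();
}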
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 9a66cf5015f2..4c2098ac9d72 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
#include <linux/phylib_stubs.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/wireless.h>
#include <linux/if_bridge.h>
#include <net/dsa_stubs.h>
@@ -64,7 +65,7 @@ int dev_ifconf(struct net *net, struct ifconf __user *uifc)
}
/* Loop over the interfaces, and write an info block for each. */
- rtnl_lock();
+ rtnl_net_lock(net);
for_each_netdev(net, dev) {
if (!pos)
done = inet_gifconf(dev, NULL, 0, size);
@@ -72,12 +73,12 @@ int dev_ifconf(struct net *net, struct ifconf __user *uifc)
done = inet_gifconf(dev, pos + total,
len - total, size);
if (done < 0) {
- rtnl_unlock();
+ rtnl_net_unlock(net);
return -EFAULT;
}
total += done;
}
- rtnl_unlock();
+ rtnl_net_unlock(net);
return put_user(total, &uifc->ifc_len);
}
@@ -184,7 +185,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
return err;
}
-static int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg)
+int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg)
{
enum hwtstamp_tx_types tx_type;
enum hwtstamp_rx_filters rx_filter;
@@ -259,19 +260,32 @@ static int dev_eth_ioctl(struct net_device *dev,
* @dev: Network device
* @cfg: Timestamping configuration structure
*
- * Helper for enforcing a common policy that phylib timestamping, if available,
- * should take precedence in front of hardware timestamping provided by the
- * netdev.
+ * Helper for calling the default hardware timestamping provider.
*
* Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and
* there only exists a phydev->mii_ts->hwtstamp() method. So this will return
* -EOPNOTSUPP for phylib for now, which is still more accurate than letting
* the netdev handle the GET request.
*/
-static int dev_get_hwtstamp_phylib(struct net_device *dev,
- struct kernel_hwtstamp_config *cfg)
+int dev_get_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
- if (phy_has_hwtstamp(dev->phydev))
+ struct hwtstamp_provider *hwprov;
+
+ hwprov = rtnl_dereference(dev->hwprov);
+ if (hwprov) {
+ cfg->qualifier = hwprov->desc.qualifier;
+ if (hwprov->source == HWTSTAMP_SOURCE_PHYLIB &&
+ hwprov->phydev)
+ return phy_hwtstamp_get(hwprov->phydev, cfg);
+
+ if (hwprov->source == HWTSTAMP_SOURCE_NETDEV)
+ return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg);
+
+ return -EOPNOTSUPP;
+ }
+
+ if (phy_is_default_hwtstamp(dev->phydev))
return phy_hwtstamp_get(dev->phydev, cfg);
return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg);
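
The new hwprov branches above encode a selection order: an explicitly chosen hwtstamp provider wins, otherwise the PHY is used when it is the default timestamper, otherwise the request goes to the netdev. A condensed sketch of that precedence, using a hypothetical helper name (not part of the patch):

static bool foo_phy_does_timestamping(struct net_device *dev)	/* hypothetical */
{
	struct hwtstamp_provider *hwprov = rtnl_dereference(dev->hwprov);

	if (hwprov)	/* a provider was selected explicitly */
		return hwprov->source == HWTSTAMP_SOURCE_PHYLIB && hwprov->phydev;

	/* No explicit choice: fall back to the PHY only if it is the default. */
	return phy_is_default_hwtstamp(dev->phydev);
}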
@@ -319,28 +333,48 @@ static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr)
* should take precedence in front of hardware timestamping provided by the
* netdev. If the netdev driver needs to perform specific actions even for PHY
* timestamping to work properly (a switch port must trap the timestamped
- * frames and not forward them), it must set IFF_SEE_ALL_HWTSTAMP_REQUESTS in
- * dev->priv_flags.
+ * frames and not forward them), it must set dev->see_all_hwtstamp_requests.
*/
int dev_set_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
- bool phy_ts = phy_has_hwtstamp(dev->phydev);
struct kernel_hwtstamp_config old_cfg = {};
+ struct hwtstamp_provider *hwprov;
+ struct phy_device *phydev;
bool changed = false;
+ bool phy_ts;
int err;
+ hwprov = rtnl_dereference(dev->hwprov);
+ if (hwprov) {
+ if (hwprov->source == HWTSTAMP_SOURCE_PHYLIB &&
+ hwprov->phydev) {
+ phy_ts = true;
+ phydev = hwprov->phydev;
+ } else if (hwprov->source == HWTSTAMP_SOURCE_NETDEV) {
+ phy_ts = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ cfg->qualifier = hwprov->desc.qualifier;
+ } else {
+ phy_ts = phy_is_default_hwtstamp(dev->phydev);
+ if (phy_ts)
+ phydev = dev->phydev;
+ }
+
cfg->source = phy_ts ? HWTSTAMP_SOURCE_PHYLIB : HWTSTAMP_SOURCE_NETDEV;
- if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) {
+ if (phy_ts && dev->see_all_hwtstamp_requests) {
err = ops->ndo_hwtstamp_get(dev, &old_cfg);
if (err)
return err;
}
- if (!phy_ts || (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) {
+ if (!phy_ts || dev->see_all_hwtstamp_requests) {
err = ops->ndo_hwtstamp_set(dev, cfg, extack);
if (err) {
if (extack->_msg)
@@ -349,11 +383,11 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
}
}
- if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS))
+ if (phy_ts && dev->see_all_hwtstamp_requests)
changed = kernel_hwtstamp_config_changed(&old_cfg, cfg);
if (phy_ts) {
- err = phy_hwtstamp_set(dev->phydev, cfg, extack);
+ err = phy_hwtstamp_set(phydev, cfg, extack);
if (err) {
if (changed)
ops->ndo_hwtstamp_set(dev, &old_cfg, NULL);
@@ -363,7 +397,6 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(dev_set_hwtstamp_phylib);
static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr)
{
@@ -510,7 +543,7 @@ static int dev_siocwandev(struct net_device *dev, struct if_settings *ifs)
}
/*
- * Perform the SIOCxIFxxx calls, inside rtnl_lock()
+ * Perform the SIOCxIFxxx calls, inside rtnl_net_lock()
*/
static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
unsigned int cmd)
@@ -587,11 +620,14 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
return -ENODEV;
if (!netif_is_bridge_master(dev))
return -EOPNOTSUPP;
+
netdev_hold(dev, &dev_tracker, GFP_KERNEL);
- rtnl_unlock();
+ rtnl_net_unlock(net);
+
err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL);
+
netdev_put(dev, &dev_tracker);
- rtnl_lock();
+ rtnl_net_lock(net);
return err;
case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15:
@@ -737,9 +773,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
dev_load(net, ifr->ifr_name);
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- rtnl_lock();
+
+ rtnl_net_lock(net);
ret = dev_ifsioc(net, ifr, data, cmd);
- rtnl_unlock();
+ rtnl_net_unlock(net);
+
if (colon)
*colon = ':';
return ret;
@@ -783,9 +821,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
dev_load(net, ifr->ifr_name);
- rtnl_lock();
+
+ rtnl_net_lock(net);
ret = dev_ifsioc(net, ifr, data, cmd);
- rtnl_unlock();
+ rtnl_net_unlock(net);
+
if (need_copyout)
*need_copyout = false;
return ret;
@@ -808,9 +848,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr->ifr_name);
- rtnl_lock();
+
+ rtnl_net_lock(net);
ret = dev_ifsioc(net, ifr, data, cmd);
- rtnl_unlock();
+ rtnl_net_unlock(net);
return ret;
}
return -ENOTTY;
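
The rtnl_lock()/rtnl_unlock() pairs in dev_ifconf() and dev_ifsioc() callers are converted to the per-netns rtnl_net_lock(net)/rtnl_net_unlock(net) variants, scoping the lock to the namespace the ioctl operates on. A minimal sketch of the resulting pattern, with a hypothetical foo_do_ioctl() wrapper standing in for the call sites above:

static int foo_do_ioctl(struct net *net, struct ifreq *ifr,
			void __user *data, unsigned int cmd)
{
	int ret;

	rtnl_net_lock(net);		/* per-netns RTNL instead of the global lock */
	ret = dev_ifsioc(net, ifr, data, cmd);
	rtnl_net_unlock(net);

	return ret;
}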
diff --git a/net/core/devmem.c b/net/core/devmem.c
new file mode 100644
index 000000000000..0e5a2c672efd
--- /dev/null
+++ b/net/core/devmem.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Devmem TCP
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ * Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+ * Kaiyuan Zhang <kaiyuanz@google.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/genalloc.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/page_pool/helpers.h>
+#include <trace/events/page_pool.h>
+
+#include "devmem.h"
+#include "mp_dmabuf_devmem.h"
+#include "page_pool_priv.h"
+
+/* Device memory support */
+
+/* Protected by rtnl_lock() */
+static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
+
+static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
+ struct gen_pool_chunk *chunk,
+ void *not_used)
+{
+ struct dmabuf_genpool_chunk_owner *owner = chunk->owner;
+
+ kvfree(owner->niovs);
+ kfree(owner);
+}
+
+static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_dma_addr +
+ ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+ size_t size, avail;
+
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+
+ size = gen_pool_size(binding->chunk_pool);
+ avail = gen_pool_avail(binding->chunk_pool);
+
+ if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
+ size, avail))
+ gen_pool_destroy(binding->chunk_pool);
+
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+ dma_buf_detach(binding->dmabuf, binding->attachment);
+ dma_buf_put(binding->dmabuf);
+ xa_destroy(&binding->bound_rxqs);
+ kfree(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct dmabuf_genpool_chunk_owner *owner;
+ unsigned long dma_addr;
+ struct net_iov *niov;
+ ssize_t offset;
+ ssize_t index;
+
+ dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
+ (void **)&owner);
+ if (!dma_addr)
+ return NULL;
+
+ offset = dma_addr - owner->base_dma_addr;
+ index = offset / PAGE_SIZE;
+ niov = &owner->niovs[index];
+
+ niov->pp_magic = 0;
+ niov->pp = NULL;
+ atomic_long_set(&niov->pp_ref_count, 0);
+
+ return niov;
+}
+
+void net_devmem_free_dmabuf(struct net_iov *niov)
+{
+ struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov);
+ unsigned long dma_addr = net_devmem_get_dma_addr(niov);
+
+ if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
+ PAGE_SIZE)))
+ return;
+
+ gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
+}
+
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int rxq_idx;
+ int err;
+
+ if (binding->list.next)
+ list_del(&binding->list);
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
+ WARN_ON(rxq->mp_params.mp_priv != binding);
+
+ rxq->mp_params.mp_priv = NULL;
+
+ rxq_idx = get_netdev_rx_queue_index(rxq);
+
+ err = netdev_rx_queue_restart(binding->dev, rxq_idx);
+ WARN_ON(err && err != -ENETDOWN);
+ }
+
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_rx_queue *rxq;
+ u32 xa_idx;
+ int err;
+
+ if (rxq_idx >= dev->real_num_rx_queues) {
+ NL_SET_ERR_MSG(extack, "rx queue index out of range");
+ return -ERANGE;
+ }
+
+ if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+ NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
+ return -EINVAL;
+ }
+
+ if (dev->cfg->hds_thresh) {
+ NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
+ return -EINVAL;
+ }
+
+ rxq = __netif_get_rx_queue(dev, rxq_idx);
+ if (rxq->mp_params.mp_priv) {
+ NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+ return -EEXIST;
+ }
+
+#ifdef CONFIG_XDP_SOCKETS
+ if (rxq->pool) {
+ NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+ return -EBUSY;
+ }
+#endif
+
+ err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ rxq->mp_params.mp_priv = binding;
+
+ err = netdev_rx_queue_restart(dev, rxq_idx);
+ if (err)
+ goto err_xa_erase;
+
+ return 0;
+
+err_xa_erase:
+ rxq->mp_params.mp_priv = NULL;
+ xa_erase(&binding->bound_rxqs, xa_idx);
+
+ return err;
+}
+
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ static u32 id_alloc_next;
+ struct scatterlist *sg;
+ struct dma_buf *dmabuf;
+ unsigned int sg_idx, i;
+ unsigned long virtual;
+ int err;
+
+ dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_put_dmabuf;
+ }
+
+ binding->dev = dev;
+
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+ binding, xa_limit_32b, &id_alloc_next,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_free_binding;
+
+ xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
+
+ refcount_set(&binding->ref, 1);
+
+ binding->dmabuf = dmabuf;
+
+ binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
+ if (IS_ERR(binding->attachment)) {
+ err = PTR_ERR(binding->attachment);
+ NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
+ goto err_free_id;
+ }
+
+ binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
+ DMA_FROM_DEVICE);
+ if (IS_ERR(binding->sgt)) {
+ err = PTR_ERR(binding->sgt);
+ NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
+ goto err_detach;
+ }
+
+ /* For simplicity we expect to make PAGE_SIZE allocations, but the
+ * binding can be much more flexible than that. We may be able to
+ * allocate MTU sized chunks here. Leave that for future work...
+ */
+ binding->chunk_pool =
+ gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
+ if (!binding->chunk_pool) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ virtual = 0;
+ for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
+ dma_addr_t dma_addr = sg_dma_address(sg);
+ struct dmabuf_genpool_chunk_owner *owner;
+ size_t len = sg_dma_len(sg);
+ struct net_iov *niov;
+
+ owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!owner) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ owner->base_virtual = virtual;
+ owner->base_dma_addr = dma_addr;
+ owner->num_niovs = len / PAGE_SIZE;
+ owner->binding = binding;
+
+ err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
+ dma_addr, len, dev_to_node(&dev->dev),
+ owner);
+ if (err) {
+ kfree(owner);
+ err = -EINVAL;
+ goto err_free_chunks;
+ }
+
+ owner->niovs = kvmalloc_array(owner->num_niovs,
+ sizeof(*owner->niovs),
+ GFP_KERNEL);
+ if (!owner->niovs) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ for (i = 0; i < owner->num_niovs; i++) {
+ niov = &owner->niovs[i];
+ niov->owner = owner;
+ page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
+ net_devmem_get_dma_addr(niov));
+ }
+
+ virtual += len;
+ }
+
+ return binding;
+
+err_free_chunks:
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+ gen_pool_destroy(binding->chunk_pool);
+err_unmap:
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+err_detach:
+ dma_buf_detach(dmabuf, binding->attachment);
+err_free_id:
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+err_free_binding:
+ kfree(binding);
+err_put_dmabuf:
+ dma_buf_put(dmabuf);
+ return ERR_PTR(err);
+}
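
net_devmem_bind_dmabuf() leans on the genalloc "owner" API: every scatterlist entry becomes a gen_pool chunk registered with gen_pool_add_owner(), and gen_pool_alloc_owner() later hands back both a dma address and the owning chunk's metadata. A small, self-contained sketch of that pattern in isolation (hypothetical foo_* names, no dma-buf involved):

struct foo_chunk_meta {			/* hypothetical per-chunk metadata */
	dma_addr_t base;
};

static int foo_genpool_demo(struct gen_pool *pool, dma_addr_t base, size_t len)
{
	struct foo_chunk_meta *meta, *owner;
	unsigned long addr;
	int err;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return -ENOMEM;
	meta->base = base;

	/* Register the chunk together with its metadata. */
	err = gen_pool_add_owner(pool, base, base, len, NUMA_NO_NODE, meta);
	if (err) {
		kfree(meta);
		return err;
	}

	/* An allocation returns the address and the chunk's owner pointer. */
	addr = gen_pool_alloc_owner(pool, PAGE_SIZE, (void **)&owner);
	if (!addr)
		return -ENOMEM;

	/* owner == meta here; (addr - owner->base) >> PAGE_SHIFT is the slot index. */
	gen_pool_free(pool, addr, PAGE_SIZE);
	return 0;
}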
+
+void dev_dmabuf_uninstall(struct net_device *dev)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ binding = dev->_rx[i].mp_params.mp_priv;
+ if (!binding)
+ continue;
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
+ if (rxq == &dev->_rx[i]) {
+ xa_erase(&binding->bound_rxqs, xa_idx);
+ break;
+ }
+ }
+}
+
+/*** "Dmabuf devmem memory provider" ***/
+
+int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ if (!binding)
+ return -EINVAL;
+
+ /* dma-buf dma addresses do not need to be and should not be used with
+ * dma_sync_for_cpu/device. Force disable dma_sync.
+ */
+ pool->dma_sync = false;
+ pool->dma_sync_for_cpu = false;
+
+ if (pool->p.order != 0)
+ return -E2BIG;
+
+ net_devmem_dmabuf_binding_get(binding);
+ return 0;
+}
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+ struct net_iov *niov;
+ netmem_ref netmem;
+
+ niov = net_devmem_alloc_dmabuf(binding);
+ if (!niov)
+ return 0;
+
+ netmem = net_iov_to_netmem(niov);
+
+ page_pool_set_pp_info(pool, netmem);
+
+ pool->pages_state_hold_cnt++;
+ trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
+ return netmem;
+}
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));
+
+ if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+ return false;
+
+ if (WARN_ON_ONCE(refcount != 1))
+ return false;
+
+ page_pool_clear_pp_info(netmem);
+
+ net_devmem_free_dmabuf(netmem_to_net_iov(netmem));
+
+ /* We don't want the page pool put_page()ing our net_iovs. */
+ return false;
+}
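
Taken together, the provider hooks implement a simple lifecycle: mp_dmabuf_devmem_init() pins the binding, allocations carve PAGE_SIZE net_iovs out of the genpool, and mp_dmabuf_devmem_destroy() drops the pin so the final put can unmap the dma-buf. A hedged sketch of that lifecycle from the page pool's point of view (foo_* wrapper is illustrative, not part of the patch):

static void foo_binding_lifecycle(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	/* mp_dmabuf_devmem_init(): the pool pins the binding ... */
	net_devmem_dmabuf_binding_get(binding);

	/*
	 * ... while it is live, mp_dmabuf_devmem_alloc_netmems() hands out
	 * net_iovs and the release path returns them through
	 * net_devmem_free_dmabuf().
	 */

	/* mp_dmabuf_devmem_destroy(): the last put unmaps and frees the
	 * binding via __net_devmem_dmabuf_binding_free().
	 */
	net_devmem_dmabuf_binding_put(binding);
}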
diff --git a/net/core/devmem.h b/net/core/devmem.h
new file mode 100644
index 000000000000..76099ef9c482
--- /dev/null
+++ b/net/core/devmem.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Device memory TCP support
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ * Willem de Bruijn <willemb@google.com>
+ * Kaiyuan Zhang <kaiyuanz@google.com>
+ *
+ */
+#ifndef _NET_DEVMEM_H
+#define _NET_DEVMEM_H
+
+struct netlink_ext_ack;
+
+struct net_devmem_dmabuf_binding {
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct net_device *dev;
+ struct gen_pool *chunk_pool;
+
+ /* The user holds a ref (via the netlink API) for as long as they want
+ * the binding to remain alive. Each page pool using this binding holds
+ * a ref to keep the binding alive. Each allocated net_iov holds a
+ * ref.
+ *
+ * The binding undoes itself and unmaps the underlying dmabuf once all
+ * those refs are dropped and the binding is no longer desired or in
+ * use.
+ */
+ refcount_t ref;
+
+ /* The list of bindings currently active. Used for netlink to notify us
+ * of the user dropping the bind.
+ */
+ struct list_head list;
+
+ /* rxq's this binding is active on. */
+ struct xarray bound_rxqs;
+
+ /* ID of this binding. Globally unique to all bindings currently
+ * active.
+ */
+ u32 id;
+};
+
+#if defined(CONFIG_NET_DEVMEM)
+/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
+ * entry from the dmabuf is inserted into the genpool as a chunk, and needs
+ * this owner struct to keep track of some metadata necessary to create
+ * allocations from this chunk.
+ */
+struct dmabuf_genpool_chunk_owner {
+ /* Offset into the dma-buf where this chunk starts. */
+ unsigned long base_virtual;
+
+ /* dma_addr of the start of the chunk. */
+ dma_addr_t base_dma_addr;
+
+ /* Array of net_iovs for this chunk. */
+ struct net_iov *niovs;
+ size_t num_niovs;
+
+ struct net_devmem_dmabuf_binding *binding;
+};
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack);
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack);
+void dev_dmabuf_uninstall(struct net_device *dev);
+
+static inline struct dmabuf_genpool_chunk_owner *
+net_iov_owner(const struct net_iov *niov)
+{
+ return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+ return niov - net_iov_owner(niov)->niovs;
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_iov_binding(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding;
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_virtual +
+ ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding->id;
+}
+
+static inline void
+net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
+{
+ refcount_inc(&binding->ref);
+}
+
+static inline void
+net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
+{
+ if (!refcount_dec_and_test(&binding->ref))
+ return;
+
+ __net_devmem_dmabuf_binding_free(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
+void net_devmem_free_dmabuf(struct net_iov *ppiov);
+
+#else
+struct net_devmem_dmabuf_binding;
+
+static inline void
+__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline int
+net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_dmabuf_uninstall(struct net_device *dev)
+{
+}
+
+static inline struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ return NULL;
+}
+
+static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
+{
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ return 0;
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return 0;
+}
+#endif
+
+#endif /* _NET_DEVMEM_H */
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b0f221d658be..212f0a048cab 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -37,7 +37,7 @@
#include <trace/events/napi.h>
#include <trace/events/devlink.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define TRACE_ON 1
#define TRACE_OFF 0
@@ -74,7 +74,7 @@ struct net_dm_hw_entries {
};
struct per_cpu_dm_data {
- spinlock_t lock; /* Protects 'skb', 'hw_entries' and
+ raw_spinlock_t lock; /* Protects 'skb', 'hw_entries' and
* 'send_timer'
*/
union {
@@ -109,7 +109,8 @@ static u32 net_dm_queue_len = 1000;
struct net_dm_alert_ops {
void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason);
+ enum skb_drop_reason reason,
+ struct sock *rx_sk);
void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
int work, int budget);
void (*work_item_func)(struct work_struct *work);
@@ -168,9 +169,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
err:
mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
- spin_lock_irqsave(&data->lock, flags);
+ raw_spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
- spin_unlock_irqrestore(&data->lock, flags);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
if (skb) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
@@ -225,7 +226,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
local_irq_save(flags);
data = this_cpu_ptr(&dm_cpu_data);
- spin_lock(&data->lock);
+ raw_spin_lock(&data->lock);
dskb = data->skb;
if (!dskb)
@@ -259,12 +260,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
}
out:
- spin_unlock_irqrestore(&data->lock, flags);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
}
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason)
+ enum skb_drop_reason reason,
+ struct sock *rx_sk)
{
trace_drop_common(skb, location);
}
@@ -314,9 +316,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
}
- spin_lock_irqsave(&hw_data->lock, flags);
+ raw_spin_lock_irqsave(&hw_data->lock, flags);
swap(hw_data->hw_entries, hw_entries);
- spin_unlock_irqrestore(&hw_data->lock, flags);
+ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
return hw_entries;
}
@@ -448,7 +450,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
return;
hw_data = this_cpu_ptr(&dm_hw_cpu_data);
- spin_lock_irqsave(&hw_data->lock, flags);
+ raw_spin_lock_irqsave(&hw_data->lock, flags);
hw_entries = hw_data->hw_entries;
if (!hw_entries)
@@ -477,7 +479,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
}
out:
- spin_unlock_irqrestore(&hw_data->lock, flags);
+ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
}
static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
@@ -491,7 +493,8 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason)
+ enum skb_drop_reason reason,
+ struct sock *rx_sk)
{
ktime_t tstamp = ktime_get_real();
struct per_cpu_dm_data *data;
@@ -1673,7 +1676,7 @@ static struct notifier_block dropmon_net_notifier = {
static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
- spin_lock_init(&data->lock);
+ raw_spin_lock_init(&data->lock);
skb_queue_head_init(&data->drop_queue);
u64_stats_init(&data->stats.syncp);
}
@@ -1731,30 +1734,30 @@ static int __init init_net_drop_monitor(void)
return -ENOSPC;
}
- rc = genl_register_family(&net_drop_monitor_family);
- if (rc) {
- pr_err("Could not create drop monitor netlink family\n");
- return rc;
+ for_each_possible_cpu(cpu) {
+ net_dm_cpu_data_init(cpu);
+ net_dm_hw_cpu_data_init(cpu);
}
- WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
rc = register_netdevice_notifier(&dropmon_net_notifier);
if (rc < 0) {
pr_crit("Failed to register netdevice notifier\n");
+ return rc;
+ }
+
+ rc = genl_register_family(&net_drop_monitor_family);
+ if (rc) {
+ pr_err("Could not create drop monitor netlink family\n");
goto out_unreg;
}
+ WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
rc = 0;
- for_each_possible_cpu(cpu) {
- net_dm_cpu_data_init(cpu);
- net_dm_hw_cpu_data_init(cpu);
- }
-
goto out;
out_unreg:
- genl_unregister_family(&net_drop_monitor_family);
+ WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
out:
return rc;
}
@@ -1763,19 +1766,18 @@ static void exit_net_drop_monitor(void)
{
int cpu;
- BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
-
/*
* Because of the module_get/put we do in the trace state change path
* we are guaranteed not to have any current users when we get here
*/
+ BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+
+ BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
for_each_possible_cpu(cpu) {
net_dm_hw_cpu_data_fini(cpu);
net_dm_cpu_data_fini(cpu);
}
-
- BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}
module_init(init_net_drop_monitor);
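
The reordered init path above initializes per-CPU state first, registers the netdevice notifier next, and registers the generic netlink family last, so no netlink request or notification can observe half-initialized state; exit_net_drop_monitor() now unwinds in the reverse order. A minimal sketch of that ordering discipline with hypothetical foo_* objects:

static int __init foo_monitor_init(void)
{
	int rc;

	foo_percpu_init();				/* hypothetical, cannot fail */

	rc = register_netdevice_notifier(&foo_netdev_nb);
	if (rc < 0)
		return rc;

	/* Register the user-facing interface last. */
	rc = genl_register_family(&foo_genl_family);
	if (rc)
		unregister_netdevice_notifier(&foo_netdev_nb);

	return rc;
}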
diff --git a/net/core/dst.c b/net/core/dst.c
index 6838d3212c37..9552a90d4772 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -96,7 +96,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
}
EXPORT_SYMBOL(dst_alloc);
-struct dst_entry *dst_destroy(struct dst_entry * dst)
+static void dst_destroy(struct dst_entry *dst)
{
struct dst_entry *child = NULL;
@@ -109,9 +109,6 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
child = xdst->child;
}
#endif
- if (!(dst->flags & DST_NOCOUNT))
- dst_entries_add(dst->ops, -1);
-
if (dst->ops->destroy)
dst->ops->destroy(dst);
netdev_put(dst->dev, &dst->dev_tracker);
@@ -126,15 +123,13 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
dst = child;
if (dst)
dst_release_immediate(dst);
- return NULL;
}
-EXPORT_SYMBOL(dst_destroy);
static void dst_destroy_rcu(struct rcu_head *head)
{
struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst = dst_destroy(dst);
+ dst_destroy(dst);
}
/* Operations to mark dst as DEAD and clean up the net device referenced
@@ -161,17 +156,27 @@ void dst_dev_put(struct dst_entry *dst)
}
EXPORT_SYMBOL(dst_dev_put);
+static void dst_count_dec(struct dst_entry *dst)
+{
+ if (!(dst->flags & DST_NOCOUNT))
+ dst_entries_add(dst->ops, -1);
+}
+
void dst_release(struct dst_entry *dst)
{
- if (dst && rcuref_put(&dst->__rcuref))
+ if (dst && rcuref_put(&dst->__rcuref)) {
+ dst_count_dec(dst);
call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
+ }
}
EXPORT_SYMBOL(dst_release);
void dst_release_immediate(struct dst_entry *dst)
{
- if (dst && rcuref_put(&dst->__rcuref))
+ if (dst && rcuref_put(&dst->__rcuref)) {
+ dst_count_dec(dst);
dst_destroy(dst);
+ }
}
EXPORT_SYMBOL(dst_release_immediate);
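
With dst_destroy() now static, the per-ops entries counter is decremented in the two release paths (via dst_count_dec()) rather than in the destructor, so the accounting drops as soon as the last reference is gone instead of after the RCU grace period. The caller-visible contract is unchanged; a trivial sketch (hypothetical foo_user()):

static void foo_user(struct dst_entry *dst)
{
	dst_hold(dst);		/* take our own reference */
	/* ... use the route ... */
	dst_release(dst);	/* last put: counter drops now, free is RCU-deferred */
}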
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index 0ccfd5fa5cb9..70c634b9e7b0 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -27,6 +27,7 @@ struct dst_cache_pcpu {
static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
struct dst_entry *dst, u32 cookie)
{
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst_release(dst_cache->dst);
if (dst)
dst_hold(dst);
@@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
{
struct dst_entry *dst;
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst = idst->dst;
if (!dst)
goto fail;
@@ -47,7 +49,8 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
/* the cache already hold a dst reference; it can't go away */
dst_hold(dst);
- if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) ||
+ if (unlikely(!time_after(idst->refresh_ts,
+ READ_ONCE(dst_cache->reset_ts)) ||
(dst->obsolete && !dst->ops->check(dst, idst->cookie)))) {
dst_cache_per_cpu_dst_set(idst, NULL, 0);
dst_release(dst);
@@ -83,7 +86,7 @@ struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
return NULL;
*saddr = idst->in_saddr.s_addr;
- return container_of(dst, struct rtable, dst);
+ return dst_rtable(dst);
}
EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
@@ -111,8 +114,8 @@ void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
return;
idst = this_cpu_ptr(dst_cache->cache);
- dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,
- rt6_get_cookie((struct rt6_info *)dst));
+ dst_cache_per_cpu_dst_set(idst, dst,
+ rt6_get_cookie(dst_rt6_info(dst)));
idst->in6_saddr = *saddr;
}
EXPORT_SYMBOL_GPL(dst_cache_set_ip6);
@@ -170,7 +173,7 @@ void dst_cache_reset_now(struct dst_cache *dst_cache)
if (!dst_cache->cache)
return;
- dst_cache->reset_ts = jiffies;
+ dst_cache_reset(dst_cache);
for_each_possible_cpu(i) {
struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
struct dst_entry *dst = idst->dst;
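
The new DEBUG_NET_WARN_ON_ONCE(!in_softirq()) checks document the expected calling context: the per-CPU cache is only safe to touch from BH, as tunnel transmit/receive paths do. A hedged sketch of the usual consumer pattern, with the output-route lookup treated as an assumption:

static struct rtable *foo_tnl_get_route(struct net *net, struct dst_cache *cache,
					struct flowi4 *fl4)
{
	struct rtable *rt;

	rt = dst_cache_get_ip4(cache, &fl4->saddr);	/* fast path, BH context */
	if (rt)
		return rt;

	rt = ip_route_output_key(net, fl4);		/* slow path on a miss */
	if (IS_ERR(rt))
		return NULL;

	dst_cache_set_ip4(cache, &rt->dst, fl4->saddr);
	return rt;
}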
diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c
index fc96259807b6..5cdca49b1d7c 100644
--- a/net/core/fib_notifier.c
+++ b/net/core/fib_notifier.c
@@ -43,7 +43,6 @@ static unsigned int fib_seq_sum(struct net *net)
struct fib_notifier_ops *ops;
unsigned int fib_seq = 0;
- rtnl_lock();
rcu_read_lock();
list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) {
if (!try_module_get(ops->owner))
@@ -52,7 +51,6 @@ static unsigned int fib_seq_sum(struct net *net)
module_put(ops->owner);
}
rcu_read_unlock();
- rtnl_unlock();
return fib_seq;
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3f933ffcefc3..94a7872ab231 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
+#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
@@ -36,8 +37,8 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
bool fib_rule_matchall(const struct fib_rule *rule)
{
- if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
- rule->flags)
+ if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
+ rule->mark || rule->tun_id || rule->flags)
return false;
if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
return false;
@@ -72,7 +73,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->suppress_prefixlen = -1;
r->suppress_ifgroup = -1;
- /* The lock is not required here, the list in unreacheable
+ * The lock is not required here, the list is unreachable
* at the moment this function is called */
list_add_tail(&r->list, &ops->rules_list);
return 0;
@@ -100,7 +101,8 @@ static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
u32 pid);
-static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
+static struct fib_rules_ops *lookup_rules_ops(const struct net *net,
+ int family)
{
struct fib_rules_ops *ops;
@@ -259,12 +261,14 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
struct flowi *fl, int flags,
struct fib_lookup_arg *arg)
{
- int ret = 0;
+ int iifindex, oifindex, ret = 0;
- if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
+ iifindex = READ_ONCE(rule->iifindex);
+ if (iifindex && (iifindex != fl->flowi_iif))
goto out;
- if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
+ oifindex = READ_ONCE(rule->oifindex);
+ if (oifindex && (oifindex != fl->flowi_oif))
goto out;
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
@@ -369,7 +373,9 @@ static int call_fib_rule_notifiers(struct net *net,
.rule = rule,
};
- ops->fib_rules_seq++;
+ ASSERT_RTNL();
+ /* Paired with READ_ONCE() in fib_rules_seq() */
+ WRITE_ONCE(ops->fib_rules_seq, ops->fib_rules_seq + 1);
return call_fib_notifiers(net, event_type, &info.info);
}
@@ -396,17 +402,16 @@ int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
}
EXPORT_SYMBOL_GPL(fib_rules_dump);
-unsigned int fib_rules_seq_read(struct net *net, int family)
+unsigned int fib_rules_seq_read(const struct net *net, int family)
{
unsigned int fib_rules_seq;
struct fib_rules_ops *ops;
- ASSERT_RTNL();
-
ops = lookup_rules_ops(net, family);
if (!ops)
return 0;
- fib_rules_seq = ops->fib_rules_seq;
+ /* Paired with WRITE_ONCE() in call_fib_rule_notifiers() */
+ fib_rules_seq = READ_ONCE(ops->fib_rules_seq);
rules_ops_put(ops);
return fib_rules_seq;
@@ -555,8 +560,7 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
nlrule->pref = fib_default_rule_pref(ops);
}
- nlrule->proto = tb[FRA_PROTOCOL] ?
- nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;
+ nlrule->proto = nla_get_u8_default(tb[FRA_PROTOCOL], RTPROT_UNSPEC);
if (tb[FRA_IIFNAME]) {
struct net_device *dev;
@@ -766,7 +770,10 @@ static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
[FRA_PROTOCOL] = { .type = NLA_U8 },
[FRA_IP_PROTO] = { .type = NLA_U8 },
[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
- [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
+ [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
+ [FRA_DSCP] = NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
+ [FRA_FLOWLABEL] = { .type = NLA_BE32 },
+ [FRA_FLOWLABEL_MASK] = { .type = NLA_BE32 },
};
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1036,14 +1043,14 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
if (rule->iifname[0]) {
if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
goto nla_put_failure;
- if (rule->iifindex == -1)
+ if (READ_ONCE(rule->iifindex) == -1)
frh->flags |= FIB_RULE_IIF_DETACHED;
}
if (rule->oifname[0]) {
if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
goto nla_put_failure;
- if (rule->oifindex == -1)
+ if (READ_ONCE(rule->oifindex) == -1)
frh->flags |= FIB_RULE_OIF_DETACHED;
}
@@ -1142,10 +1149,10 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct fib_rules_ops *ops;
- int idx = 0, family;
+ int err, idx = 0, family;
if (cb->strict_check) {
- int err = fib_valid_dumprule_req(nlh, cb->extack);
+ err = fib_valid_dumprule_req(nlh, cb->extack);
if (err < 0)
return err;
@@ -1158,17 +1165,17 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
if (ops == NULL)
return -EAFNOSUPPORT;
- dump_rules(skb, cb, ops);
-
- return skb->len;
+ return dump_rules(skb, cb, ops);
}
+ err = 0;
rcu_read_lock();
list_for_each_entry_rcu(ops, &net->rules_ops, list) {
if (idx < cb->args[0] || !try_module_get(ops->owner))
goto skip;
- if (dump_rules(skb, cb, ops) < 0)
+ err = dump_rules(skb, cb, ops);
+ if (err < 0)
break;
cb->args[1] = 0;
@@ -1178,7 +1185,7 @@ skip:
rcu_read_unlock();
cb->args[0] = idx;
- return skb->len;
+ return err;
}
static void notify_rule_change(int event, struct fib_rule *rule,
@@ -1205,8 +1212,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, ops->nlgroup, err);
+ rtnl_set_sk_err(net, ops->nlgroup, err);
}
static void attach_rules(struct list_head *rules, struct net_device *dev)
@@ -1216,10 +1222,10 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
list_for_each_entry(rule, rules, list) {
if (rule->iifindex == -1 &&
strcmp(dev->name, rule->iifname) == 0)
- rule->iifindex = dev->ifindex;
+ WRITE_ONCE(rule->iifindex, dev->ifindex);
if (rule->oifindex == -1 &&
strcmp(dev->name, rule->oifname) == 0)
- rule->oifindex = dev->ifindex;
+ WRITE_ONCE(rule->oifindex, dev->ifindex);
}
}
@@ -1229,9 +1235,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
list_for_each_entry(rule, rules, list) {
if (rule->iifindex == dev->ifindex)
- rule->iifindex = -1;
+ WRITE_ONCE(rule->iifindex, -1);
if (rule->oifindex == dev->ifindex)
- rule->oifindex = -1;
+ WRITE_ONCE(rule->oifindex, -1);
}
}
@@ -1288,12 +1294,18 @@ static struct pernet_operations fib_rules_net_ops = {
.exit = fib_rules_net_exit,
};
+static const struct rtnl_msg_handler fib_rules_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule},
+ {.msgtype = RTM_DELRULE, .doit = fib_nl_delrule},
+ {.msgtype = RTM_GETRULE, .dumpit = fib_nl_dumprule,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED},
+};
+
static int __init fib_rules_init(void)
{
int err;
- rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);
+
+ rtnl_register_many(fib_rules_rtnl_msg_handlers);
err = register_pernet_subsys(&fib_rules_net_ops);
if (err < 0)
@@ -1308,9 +1320,7 @@ static int __init fib_rules_init(void)
fail_unregister:
unregister_pernet_subsys(&fib_rules_net_ops);
fail:
- rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
- rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
- rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
+ rtnl_unregister_many(fib_rules_rtnl_msg_handlers);
return err;
}
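
Several rule fields (iifindex, oifindex, fib_rules_seq) are now read without RTNL, so the writers above annotate updates with WRITE_ONCE() and the lockless readers pair them with READ_ONCE(). A minimal sketch of that pairing, with hypothetical foo_* wrappers:

static void foo_rule_attach(struct fib_rule *rule, int ifindex)
{
	ASSERT_RTNL();				/* writers still run under RTNL */
	WRITE_ONCE(rule->iifindex, ifindex);
}

static bool foo_rule_matches_iif(const struct fib_rule *rule, int flowi_iif)
{
	int iifindex = READ_ONCE(rule->iifindex);	/* lockless reader */

	return !iifindex || iifindex == flowi_iif;
}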
diff --git a/net/core/filter.c b/net/core/filter.c
index ef3e78b6a39c..2ec162dd83c4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -42,7 +42,7 @@
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
@@ -84,11 +84,15 @@
#include <net/netkit.h>
#include <linux/un.h>
#include <net/xdp_sock_drv.h>
+#include <net/inet_dscp.h>
#include "dev.h"
+/* Keep the struct bpf_fib_lookup small so that it fits into a cacheline */
+static_assert(sizeof(struct bpf_fib_lookup) == 64, "struct bpf_fib_lookup size check");
+
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id);
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
@@ -778,7 +782,7 @@ jmp_rest:
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B: {
struct sock_filter tmp = {
.code = BPF_LD | BPF_ABS | BPF_B,
@@ -804,7 +808,7 @@ jmp_rest:
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
}
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
@@ -1262,8 +1266,8 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
* so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns.
*/
- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
- GFP_KERNEL | __GFP_NOWARN);
+ old_prog = kmemdup_array(fp->insns, old_len, sizeof(struct sock_filter),
+ GFP_KERNEL | __GFP_NOWARN);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
@@ -1650,18 +1654,14 @@ void sk_reuseport_prog_free(struct bpf_prog *prog)
bpf_prog_destroy(prog);
}
-struct bpf_scratchpad {
- union {
- __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
- u8 buff[MAX_BPF_STACK];
- };
-};
-
-static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
-
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
+#ifdef CONFIG_DEBUG_NET
+ /* Avoid a splat in pskb_may_pull_reason() */
+ if (write_len > INT_MAX)
+ return -EINVAL;
+#endif
return skb_ensure_writable(skb, write_len);
}
@@ -2010,10 +2010,6 @@ static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
__be32 *, to, u32, to_size, __wsum, seed)
{
- struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
- u32 diff_size = from_size + to_size;
- int i, j = 0;
-
/* This is quite flexible, some examples:
*
* from_size == 0, to_size > 0, seed := csum --> pushing data
@@ -2022,16 +2018,19 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
*
* Even for diffing, from_size and to_size don't need to be equal.
*/
- if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
- diff_size > sizeof(sp->diff)))
- return -EINVAL;
- for (i = 0; i < from_size / sizeof(__be32); i++, j++)
- sp->diff[j] = ~from[i];
- for (i = 0; i < to_size / sizeof(__be32); i++, j++)
- sp->diff[j] = to[i];
+ __wsum ret = seed;
+
+ if (from_size && to_size)
+ ret = csum_sub(csum_partial(to, to_size, ret),
+ csum_partial(from, from_size, 0));
+ else if (to_size)
+ ret = csum_partial(to, to_size, ret);
- return csum_partial(sp->diff, diff_size, seed);
+ else if (from_size)
+ ret = ~csum_partial(from, from_size, ~ret);
+
+ return csum_from32to16((__force unsigned int)ret);
}
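
The rewrite above only changes how the kernel folds the checksum (csum_partial()/csum_sub() instead of a per-CPU scratch buffer); the helper's BPF-visible semantics are unchanged. A hedged BPF-side sketch of the classic use, rewriting an IPv4 destination address in a TC program (offsets and the new address are illustrative only):

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int foo_rewrite_daddr(struct __sk_buff *skb)
{
	const int dst_off = ETH_HLEN + offsetof(struct iphdr, daddr);
	const int csum_off = ETH_HLEN + offsetof(struct iphdr, check);
	__be32 old_ip, new_ip = bpf_htonl(0x0a000002);	/* 10.0.0.2, made up */
	__s64 diff;

	if (bpf_skb_load_bytes(skb, dst_off, &old_ip, sizeof(old_ip)) < 0)
		return TC_ACT_OK;

	/* from_size > 0 and to_size > 0: diff of the two 4-byte blocks. */
	diff = bpf_csum_diff(&old_ip, sizeof(old_ip), &new_ip, sizeof(new_ip), 0);

	bpf_skb_store_bytes(skb, dst_off, &new_ip, sizeof(new_ip), 0);
	bpf_l3_csum_replace(skb, csum_off, 0, diff, 0);	/* from and size == 0: 'to' is the diff */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";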
static const struct bpf_func_proto bpf_csum_diff_proto = {
@@ -2215,7 +2214,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
rcu_read_lock();
if (!nh) {
dst = skb_dst(skb);
- nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
+ nexthop = rt6_nexthop(dst_rt6_info(dst),
&ipv6_hdr(skb)->daddr);
} else {
nexthop = &nh->ipv6_nh;
@@ -2233,7 +2232,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
rcu_read_unlock();
return ret;
}
- rcu_read_unlock_bh();
+ rcu_read_unlock();
if (dst)
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
out_drop:
@@ -2271,12 +2270,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v6(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
out_xmit:
return ret;
@@ -2314,8 +2313,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
rcu_read_lock();
if (!nh) {
- struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = container_of(dst, struct rtable, dst);
+ struct rtable *rt = skb_rtable(skb);
neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
} else if (nh->nh_family == AF_INET6) {
@@ -2357,7 +2355,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4 = {
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_oif = dev->ifindex,
.flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
@@ -2378,12 +2376,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v4(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
out_xmit:
return ret;
@@ -2423,9 +2421,9 @@ out:
/* Internal, non-exposed redirect flags. */
enum {
- BPF_F_NEIGH = (1ULL << 1),
- BPF_F_PEER = (1ULL << 2),
- BPF_F_NEXTHOP = (1ULL << 3),
+ BPF_F_NEIGH = (1ULL << 16),
+ BPF_F_PEER = (1ULL << 17),
+ BPF_F_NEXTHOP = (1ULL << 18),
#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
};
@@ -2435,6 +2433,8 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
struct sk_buff *clone;
int ret;
+ BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS);
+
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return -EINVAL;
@@ -2469,9 +2469,6 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
.arg3_type = ARG_ANYTHING,
};
-DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
-EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
-
static struct net_device *skb_get_peer_dev(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -2484,7 +2481,7 @@ static struct net_device *skb_get_peer_dev(struct net_device *dev)
int skb_do_redirect(struct sk_buff *skb)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct net *net = dev_net(skb->dev);
struct net_device *dev;
u32 flags = ri->flags;
@@ -2517,7 +2514,7 @@ out_drop:
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return TC_ACT_SHOT;
@@ -2538,7 +2535,7 @@ static const struct bpf_func_proto bpf_redirect_proto = {
BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags))
return TC_ACT_SHOT;
@@ -2560,7 +2557,7 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = {
BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
int, plen, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely((plen && plen < sizeof(*params)) || flags))
return TC_ACT_SHOT;
@@ -2607,18 +2604,16 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
static void sk_msg_reset_curr(struct sk_msg *msg)
{
- u32 i = msg->sg.start;
- u32 len = 0;
-
- do {
- len += sk_msg_elem(msg, i)->length;
- sk_msg_iter_var_next(i);
- if (len >= msg->sg.size)
- break;
- } while (i != msg->sg.end);
+ if (!msg->sg.size) {
+ msg->sg.curr = msg->sg.start;
+ msg->sg.copybreak = 0;
+ } else {
+ u32 i = msg->sg.end;
- msg->sg.curr = i;
- msg->sg.copybreak = 0;
+ sk_msg_iter_var_prev(i);
+ msg->sg.curr = i;
+ msg->sg.copybreak = msg->sg.data[i].length;
+ }
}
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
@@ -2781,7 +2776,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
- if (start >= offset + l)
+ if (start > offset + l)
return -EINVAL;
space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
@@ -2806,6 +2801,8 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
raw = page_address(page);
+ if (i == msg->sg.end)
+ sk_msg_iter_var_prev(i);
psge = sk_msg_elem(msg, i);
front = start - offset;
back = psge->length - front;
@@ -2822,7 +2819,13 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
}
put_page(sg_page(psge));
- } else if (start - offset) {
+ new = i;
+ goto place_new;
+ }
+
+ if (start - offset) {
+ if (i == msg->sg.end)
+ sk_msg_iter_var_prev(i);
psge = sk_msg_elem(msg, i);
rsge = sk_msg_elem_cpy(msg, i);
@@ -2833,39 +2836,44 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
sk_msg_iter_var_next(i);
sg_unmark_end(psge);
sg_unmark_end(&rsge);
- sk_msg_iter_next(msg, end);
}
/* Slot(s) to place newly allocated data */
+ sk_msg_iter_next(msg, end);
new = i;
+ sk_msg_iter_var_next(i);
+
+ if (i == msg->sg.end) {
+ if (!rsge.length)
+ goto place_new;
+ sk_msg_iter_next(msg, end);
+ goto place_new;
+ }
/* Shift one or two slots as needed */
- if (!copy) {
- sge = sk_msg_elem_cpy(msg, i);
+ sge = sk_msg_elem_cpy(msg, new);
+ sg_unmark_end(&sge);
+ nsge = sk_msg_elem_cpy(msg, i);
+ if (rsge.length) {
sk_msg_iter_var_next(i);
- sg_unmark_end(&sge);
+ nnsge = sk_msg_elem_cpy(msg, i);
sk_msg_iter_next(msg, end);
+ }
- nsge = sk_msg_elem_cpy(msg, i);
+ while (i != msg->sg.end) {
+ msg->sg.data[i] = sge;
+ sge = nsge;
+ sk_msg_iter_var_next(i);
if (rsge.length) {
- sk_msg_iter_var_next(i);
+ nsge = nnsge;
nnsge = sk_msg_elem_cpy(msg, i);
- }
-
- while (i != msg->sg.end) {
- msg->sg.data[i] = sge;
- sge = nsge;
- sk_msg_iter_var_next(i);
- if (rsge.length) {
- nsge = nnsge;
- nnsge = sk_msg_elem_cpy(msg, i);
- } else {
- nsge = sk_msg_elem_cpy(msg, i);
- }
+ } else {
+ nsge = sk_msg_elem_cpy(msg, i);
}
}
+place_new:
/* Place newly allocated data buffer */
sk_mem_charge(msg->sk, len);
msg->sg.size += len;
@@ -2894,8 +2902,10 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
+ struct scatterlist *sge = sk_msg_elem(msg, i);
int prev;
+ put_page(sg_page(sge));
do {
prev = i;
sk_msg_iter_var_next(i);
@@ -2932,6 +2942,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
if (unlikely(flags))
return -EINVAL;
+ if (unlikely(len == 0))
+ return 0;
+
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
@@ -2944,7 +2957,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
} while (i != msg->sg.end);
/* Bounds checks: start and pop must be inside message */
- if (start >= offset + l || last >= msg->sg.size)
+ if (start >= offset + l || last > msg->sg.size)
return -EINVAL;
space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
@@ -2968,17 +2981,17 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
*
* Then if B is non-zero AND there is no space allocate space and
* compact A, B regions into page. If there is space shift ring to
- * the rigth free'ing the next element in ring to place B, leaving
+ * the right, freeing the next element in the ring to place B, leaving
* A untouched except to reduce length.
*/
if (start != offset) {
struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
- int a = start;
+ int a = start - offset;
int b = sge->length - pop - a;
sk_msg_iter_var_next(i);
- if (pop < sge->length - a) {
+ if (b > 0) {
if (space) {
sge->length = a;
sk_msg_shift_right(msg, i);
@@ -2997,7 +3010,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
if (unlikely(!page))
return -ENOMEM;
- sge->length = a;
orig = sg_page(sge);
from = sg_virt(sge);
to = page_address(page);
@@ -3007,7 +3019,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
put_page(orig);
}
pop = 0;
- } else if (pop >= sge->length - a) {
+ } else {
pop -= (sge->length - a);
sge->length = a;
}
@@ -3041,7 +3053,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
pop -= sge->length;
sk_msg_shift_left(msg, i);
}
- sk_msg_iter_var_next(i);
}
sk_mem_uncharge(msg->sk, len - pop);
@@ -3178,6 +3189,7 @@ BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
bpf_push_mac_rcsum(skb);
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
bpf_pull_mac_rcsum(skb);
+ skb_reset_mac_len(skb);
bpf_compute_data_pointers(skb);
return ret;
@@ -3537,13 +3549,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* Due to header grow, MSS needs to be downgraded. */
- if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
- skb_decrease_gso_size(shinfo, len_diff);
-
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
+
+ /* Due to header growth, MSS needs to be downgraded.
+ * There is a BUG_ON() when segmenting the frag_list with
+ * head_frag true, so linearize the skb after downgrading
+ * the MSS.
+ */
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+ skb_decrease_gso_size(shinfo, len_diff);
+ if (shinfo->frag_list)
+ return skb_linearize(skb);
+ }
}
return 0;
@@ -3715,13 +3734,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
- u32 min_len = skb_network_offset(skb);
+ int offset = skb_network_offset(skb);
+ u32 min_len = 0;
- if (skb_transport_header_was_set(skb))
- min_len = skb_transport_offset(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- min_len = skb_checksum_start_offset(skb) +
- skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ if (skb_transport_header_was_set(skb)) {
+ offset = skb_transport_offset(skb);
+ if (offset > 0)
+ min_len = offset;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ offset = skb_checksum_start_offset(skb) +
+ skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ }
return min_len;
}
@@ -4100,13 +4128,13 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
}
static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
- struct xdp_mem_info *mem_info, bool release)
+ enum xdp_mem_type mem_type, bool release)
{
struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
if (release) {
xsk_buff_del_tail(zc_frag);
- __xdp_return(NULL, mem_info, false, zc_frag);
+ __xdp_return(0, mem_type, false, zc_frag);
} else {
zc_frag->data_end -= shrink;
}
@@ -4115,19 +4143,16 @@ static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
int shrink)
{
- struct xdp_mem_info *mem_info = &xdp->rxq->mem;
+ enum xdp_mem_type mem_type = xdp->rxq->mem.type;
bool release = skb_frag_size(frag) == shrink;
- if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
- bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
+ if (mem_type == MEM_TYPE_XSK_BUFF_POOL) {
+ bpf_xdp_shrink_data_zc(xdp, shrink, mem_type, release);
goto out;
}
- if (release) {
- struct page *page = skb_frag_page(frag);
-
- __xdp_return(page_address(page), mem_info, false, NULL);
- }
+ if (release)
+ __xdp_return(skb_frag_netmem(frag), mem_type, false, NULL);
out:
return release;
@@ -4266,50 +4291,50 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
*/
void xdp_do_flush(void)
{
- __dev_flush();
- __cpu_map_flush();
- __xsk_map_flush();
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev)
+ __dev_flush(lh_dev);
+ if (lh_map)
+ __cpu_map_flush(lh_map);
+ if (lh_xsk)
+ __xsk_map_flush(lh_xsk);
}
EXPORT_SYMBOL_GPL(xdp_do_flush);
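
xdp_do_flush() now drains per-context flush lists obtained from bpf_net_ctx rather than unconditionally flushing the three per-CPU structures. The driver-side contract is unchanged and is exactly what xdp_do_check_flushed() verifies: any NAPI poll that may have redirected packets must flush before it returns. A hedged sketch of that contract (hypothetical foo_clean_rx()):

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
	int done = foo_clean_rx(napi, budget);	/* may invoke xdp_do_redirect() */

	xdp_do_flush();				/* drain devmap/cpumap/xskmap queues */

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}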
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi)
{
- bool ret;
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+ bool missed = false;
- ret = dev_check_flush();
- ret |= cpu_map_check_flush();
- ret |= xsk_map_check_flush();
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev) {
+ __dev_flush(lh_dev);
+ missed = true;
+ }
+ if (lh_map) {
+ __cpu_map_flush(lh_map);
+ missed = true;
+ }
+ if (lh_xsk) {
+ __xsk_map_flush(lh_xsk);
+ missed = true;
+ }
- WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
napi->poll);
}
#endif
-void bpf_clear_redirect_map(struct bpf_map *map)
-{
- struct bpf_redirect_info *ri;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- ri = per_cpu_ptr(&bpf_redirect_info, cpu);
- /* Avoid polluting remote cacheline due to writes if
- * not needed. Once we pass this test, we need the
- * cmpxchg() to make sure it hasn't been changed in
- * the meantime by remote CPU.
- */
- if (unlikely(READ_ONCE(ri->map) == map))
- cmpxchg(&ri->map, map, NULL);
- }
-}
-
DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
u32 xdp_master_redirect(struct xdp_buff *xdp)
{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct net_device *master, *slave;
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
@@ -4329,9 +4354,9 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
EXPORT_SYMBOL_GPL(xdp_master_redirect);
static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
- struct net_device *dev,
+ const struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
@@ -4352,18 +4377,20 @@ err:
return err;
}
-static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- struct bpf_prog *xdp_prog)
+static __always_inline int
+__xdp_do_redirect_frame(struct bpf_redirect_info *ri, struct net_device *dev,
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
struct bpf_map *map;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (unlikely(!xdpf)) {
@@ -4375,11 +4402,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_enqueue_multi(xdpf, dev, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_enqueue(fwd, xdpf, dev);
}
@@ -4413,9 +4449,9 @@ err:
}
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4427,9 +4463,10 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
- struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4442,11 +4479,11 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog,
- void *fwd,
- enum bpf_map_type map_type, u32 map_id)
+ const struct bpf_prog *xdp_prog,
+ void *fwd, enum bpf_map_type map_type,
+ u32 map_id, u32 flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct bpf_map *map;
int err;
@@ -4454,11 +4491,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_generic_redirect(fwd, skb, xdp_prog);
}
@@ -4489,15 +4535,18 @@ err:
}
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
+ struct xdp_buff *xdp,
+ const struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
@@ -4517,7 +4566,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
return 0;
}
- return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
+ return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
err:
_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
return err;
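
Both redirect paths now consult ri->map only when the program asked for BPF_F_BROADCAST, and they snapshot ri->flags before clearing it, so a devmap being torn down yields -ENOENT instead of silently falling back to a single-target enqueue. On the program side the broadcast case looks roughly like this (the map name is illustrative):

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 32);
        __type(key, __u32);
        __type(value, __u32);
    } fwd_ports SEC(".maps");

    SEC("xdp")
    int broadcast_frame(struct xdp_md *xdp)
    {
        /* Clone the frame to every interface in fwd_ports except the
         * ingress one; the key is ignored when BPF_F_BROADCAST is set.
         */
        return bpf_redirect_map(&fwd_ports, 0,
                                BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
    }

    char _license[] SEC("license") = "GPL";
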
@@ -4525,7 +4574,7 @@ err:
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags))
return XDP_ABORTED;
@@ -4662,7 +4711,7 @@ set_compat:
to->tunnel_tos = info->key.tos;
to->tunnel_ttl = info->key.ttl;
if (flags & BPF_F_TUNINFO_FLAGS)
- to->tunnel_flags = info->key.tun_flags;
+ to->tunnel_flags = ip_tunnel_flags_to_be16(info->key.tun_flags);
else
to->tunnel_ext = 0;
@@ -4705,7 +4754,7 @@ BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
int err;
if (unlikely(!info ||
- !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
+ !ip_tunnel_is_options_present(info->key.tun_flags))) {
err = -ENOENT;
goto err_clear;
}
@@ -4775,15 +4824,15 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
memset(info, 0, sizeof(*info));
info->mode = IP_TUNNEL_INFO_TX;
- info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
- if (flags & BPF_F_DONT_FRAGMENT)
- info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
- if (flags & BPF_F_ZERO_CSUM_TX)
- info->key.tun_flags &= ~TUNNEL_CSUM;
- if (flags & BPF_F_SEQ_NUMBER)
- info->key.tun_flags |= TUNNEL_SEQ;
- if (flags & BPF_F_NO_TUNNEL_KEY)
- info->key.tun_flags &= ~TUNNEL_KEY;
+ __set_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
+ __assign_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags,
+ flags & BPF_F_DONT_FRAGMENT);
+ __assign_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags,
+ !(flags & BPF_F_ZERO_CSUM_TX));
+ __assign_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags,
+ flags & BPF_F_SEQ_NUMBER);
+ __assign_bit(IP_TUNNEL_KEY_BIT, info->key.tun_flags,
+ !(flags & BPF_F_NO_TUNNEL_KEY));
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
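
The helper now drives the tun_flags bitmap through __set_bit()/__assign_bit() instead of OR-ing TUNNEL_* masks, but the BPF-visible flags keep their meaning. A collect-md egress program would exercise it roughly like this (the VNI and address are illustrative):

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int set_vxlan_key(struct __sk_buff *skb)
    {
        struct bpf_tunnel_key key = {
            .tunnel_id   = 42,           /* illustrative VNI, host byte order */
            .remote_ipv4 = 0xc0a80002,   /* 192.168.0.2, host byte order */
            .tunnel_ttl  = 64,
        };

        /* BPF_F_DONT_FRAGMENT sets IP_TUNNEL_DONT_FRAGMENT_BIT and
         * BPF_F_ZERO_CSUM_TX clears IP_TUNNEL_CSUM_BIT, per the
         * __assign_bit() calls above.
         */
        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_DONT_FRAGMENT | BPF_F_ZERO_CSUM_TX))
            return TC_ACT_SHOT;
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
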
@@ -4821,13 +4870,15 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct metadata_dst *md = this_cpu_ptr(md_dst);
+ IP_TUNNEL_DECLARE_FLAGS(present) = { };
if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
return -EINVAL;
if (unlikely(size > IP_TUNNEL_OPTS_MAX))
return -ENOMEM;
- ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
+ ip_tunnel_set_options_present(present);
+ ip_tunnel_info_opts_set(info, from, size, present);
return 0;
}
@@ -5094,6 +5145,17 @@ static u64 __bpf_get_netns_cookie(struct sock *sk)
return net->net_cookie;
}
+BPF_CALL_1(bpf_get_netns_cookie, struct sk_buff *, skb)
+{
+ return __bpf_get_netns_cookie(skb && skb->sk ? skb->sk : NULL);
+}
+
+static const struct bpf_func_proto bpf_get_netns_cookie_proto = {
+ .func = bpf_get_netns_cookie,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
+};
+
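
This adds an skb-context flavour of bpf_get_netns_cookie(); it is wired up for tc programs further down in tc_cls_act_func_proto(). A sketch of how a classifier might key per-namespace state on it (the map and program names are illustrative):

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u64);     /* netns cookie */
        __type(value, __u64);   /* packet counter */
    } per_netns_pkts SEC(".maps");

    SEC("tc")
    int count_per_netns(struct __sk_buff *skb)
    {
        __u64 cookie = bpf_get_netns_cookie(skb);
        __u64 one = 1, *cnt;

        cnt = bpf_map_lookup_elem(&per_netns_pkts, &cookie);
        if (cnt)
            __sync_fetch_and_add(cnt, 1);
        else
            bpf_map_update_elem(&per_netns_pkts, &cookie, &one, BPF_NOEXIST);
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
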
BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
{
return __bpf_get_netns_cookie(ctx);
@@ -5236,6 +5298,11 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
return -EINVAL;
inet_csk(sk)->icsk_rto_min = timeout;
break;
+ case TCP_BPF_SOCK_OPS_CB_FLAGS:
+ if (val & ~(BPF_SOCK_OPS_ALL_CB_FLAGS))
+ return -EINVAL;
+ tp->bpf_sock_ops_cb_flags = val;
+ break;
default:
return -EINVAL;
}
@@ -5324,6 +5391,17 @@ static int sol_tcp_sockopt(struct sock *sk, int optname,
if (*optlen < 1)
return -EINVAL;
break;
+ case TCP_BPF_SOCK_OPS_CB_FLAGS:
+ if (*optlen != sizeof(int))
+ return -EINVAL;
+ if (getopt) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ int cb_flags = tp->bpf_sock_ops_cb_flags;
+
+ memcpy(optval, &cb_flags, *optlen);
+ return 0;
+ }
+ return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen);
default:
if (getopt)
return -EINVAL;
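
TCP_BPF_SOCK_OPS_CB_FLAGS (introduced in this series) exposes tp->bpf_sock_ops_cb_flags through bpf_setsockopt()/bpf_getsockopt(), as an alternative to bpf_sock_ops_cb_flags_set(). A hedged sockops sketch, assuming the new optname is available via linux/bpf.h:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #ifndef SOL_TCP
    #define SOL_TCP 6
    #endif

    SEC("sockops")
    int enable_rto_cb(struct bpf_sock_ops *skops)
    {
        int flags = BPF_SOCK_OPS_RTO_CB_FLAG | BPF_SOCK_OPS_RETRANS_CB_FLAG;

        if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
            /* Rejected with -EINVAL if any bit outside
             * BPF_SOCK_OPS_ALL_CB_FLAGS is set, per the check above.
             */
            bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SOCK_OPS_CB_FLAGS,
                           &flags, sizeof(flags));
        return 1;
    }

    char _license[] SEC("license") = "GPL";
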
@@ -5857,7 +5935,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
fl4.flowi4_iif = params->ifindex;
fl4.flowi4_oif = 0;
}
- fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
+ fl4.flowi4_tos = params->tos & INET_DSCP_MASK;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
@@ -5884,7 +5962,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
} else {
- fl4.flowi4_mark = 0;
+ if (flags & BPF_FIB_LOOKUP_MARK)
+ fl4.flowi4_mark = params->mark;
+ else
+ fl4.flowi4_mark = 0;
fl4.flowi4_secid = 0;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_uid = sock_net_uid(net, NULL);
@@ -5988,7 +6069,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !idev->cnf.forwarding))
+ if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding)))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
@@ -6027,7 +6108,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
strict);
} else {
- fl6.flowi6_mark = 0;
+ if (flags & BPF_FIB_LOOKUP_MARK)
+ fl6.flowi6_mark = params->mark;
+ else
+ fl6.flowi6_mark = 0;
fl6.flowi6_secid = 0;
fl6.flowi6_tun_key.tun_id = 0;
fl6.flowi6_uid = sock_net_uid(net, NULL);
@@ -6105,7 +6189,7 @@ set_fwd_params:
#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID | \
- BPF_FIB_LOOKUP_SRC)
+ BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_MARK)
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
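
With BPF_FIB_LOOKUP_MARK, the IPv4 and IPv6 lookup paths above use params->mark as the flow mark instead of forcing it to zero, making fwmark-based policy routing visible to bpf_fib_lookup(). A hedged XDP sketch; the mark value is illustrative and params->mark is the field added by this series:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #ifndef AF_INET
    #define AF_INET 2
    #endif

    SEC("xdp")
    int fwmark_lookup(struct xdp_md *xdp)
    {
        struct bpf_fib_lookup params = {};
        long rc;

        params.family  = AF_INET;
        params.ifindex = xdp->ingress_ifindex;
        params.mark    = 0x10;          /* illustrative fwmark */
        /* ... fill l4_protocol, tot_len and ipv4_src/ipv4_dst from the
         * parsed headers (elided here) ...
         */

        rc = bpf_fib_lookup(xdp, &params, sizeof(params), BPF_FIB_LOOKUP_MARK);
        if (rc == BPF_FIB_LKUP_RET_SUCCESS)
            return bpf_redirect(params.ifindex, 0);
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
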
@@ -6213,12 +6297,10 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
{
int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
struct net_device *dev = skb->dev;
- int skb_len, dev_len;
- int mtu;
+ int mtu, dev_len, skb_len;
if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
return -EINVAL;
-
if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
return -EINVAL;
@@ -6227,7 +6309,6 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
return -ENODEV;
mtu = READ_ONCE(dev->mtu);
-
dev_len = mtu + dev->hard_header_len;
/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6245,15 +6326,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
*/
if (skb_is_gso(skb)) {
ret = BPF_MTU_CHK_RET_SUCCESS;
-
if (flags & BPF_MTU_CHK_SEGS &&
!skb_gso_validate_network_len(skb, mtu))
ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
}
out:
- /* BPF verifier guarantees valid pointer */
*mtu_len = mtu;
-
return ret;
}
@@ -6274,8 +6352,6 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
return -ENODEV;
mtu = READ_ONCE(dev->mtu);
-
- /* Add L2-header as dev MTU is L3 size */
dev_len = mtu + dev->hard_header_len;
/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6286,9 +6362,7 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
- /* BPF verifier guarantees valid pointer */
*mtu_len = mtu;
-
return ret;
}
@@ -6298,7 +6372,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6309,7 +6384,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
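
The mtu_len argument of bpf_check_mtu() is now described to the verifier as a fixed-size, aligned, writable u32 instead of the legacy ARG_PTR_TO_INT; the helper's usage from a program is unchanged. A hedged tc sketch:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int mtu_gate(struct __sk_buff *skb)
    {
        __u32 mtu_len = 0;  /* in: 0 means "use the skb length"; out: device MTU */
        long rc;

        rc = bpf_check_mtu(skb, 0 /* use skb->dev */, &mtu_len, 0, 0);
        if (rc == BPF_MTU_CHK_RET_FRAG_NEEDED)
            return TC_ACT_SHOT;
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
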
@@ -6418,6 +6494,7 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
void *srh_tlvs, *srh_end, *ptr;
int srhoff = 0;
+ lockdep_assert_held(&srh_state->bh_lock);
if (srh == NULL)
return -EINVAL;
@@ -6474,6 +6551,7 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
int hdroff = 0;
int err;
+ lockdep_assert_held(&srh_state->bh_lock);
switch (action) {
case SEG6_LOCAL_ACTION_END_X:
if (!seg6_bpf_has_valid_srh(skb))
@@ -6550,6 +6628,7 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
int srhoff = 0;
int ret;
+ lockdep_assert_held(&srh_state->bh_lock);
if (unlikely(srh == NULL))
return -EINVAL;
@@ -6705,8 +6784,6 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
* sock refcnt is decremented to prevent a request_sock leak.
*/
- if (!sk_fullsock(sk2))
- sk2 = NULL;
if (sk2 != sk) {
sock_gen_put(sk);
/* Ensure there is no need to bump sk2 refcnt */
@@ -6753,8 +6830,6 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
* sock refcnt is decremented to prevent a request_sock leak.
*/
- if (!sk_fullsock(sk2))
- sk2 = NULL;
if (sk2 != sk) {
sock_gen_put(sk);
/* Ensure there is no need to bump sk2 refcnt */
@@ -6783,7 +6858,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6802,7 +6877,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6821,7 +6896,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6845,7 +6920,7 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6869,7 +6944,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6893,7 +6968,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6931,7 +7006,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6955,7 +7030,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6979,7 +7054,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6999,7 +7074,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7018,7 +7093,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7037,7 +7112,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7203,7 +7278,7 @@ BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
{
sk = sk_to_full_sk(sk);
- if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
+ if (sk && sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
return (unsigned long)sk;
return (unsigned long)NULL;
@@ -7576,7 +7651,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
@@ -7694,17 +7769,21 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
return -EOPNOTSUPP;
switch (tstamp_type) {
- case BPF_SKB_TSTAMP_DELIVERY_MONO:
+ case BPF_SKB_CLOCK_REALTIME:
+ skb->tstamp = tstamp;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+ break;
+ case BPF_SKB_CLOCK_MONOTONIC:
if (!tstamp)
return -EINVAL;
skb->tstamp = tstamp;
- skb->mono_delivery_time = 1;
+ skb->tstamp_type = SKB_CLOCK_MONOTONIC;
break;
- case BPF_SKB_TSTAMP_UNSPEC:
- if (tstamp)
+ case BPF_SKB_CLOCK_TAI:
+ if (!tstamp)
return -EINVAL;
- skb->tstamp = 0;
- skb->mono_delivery_time = 0;
+ skb->tstamp = tstamp;
+ skb->tstamp_type = SKB_CLOCK_TAI;
break;
default:
return -EINVAL;
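
bpf_skb_set_tstamp() now accepts three explicit clock bases: BPF_SKB_CLOCK_REALTIME may carry a zero timestamp, while the MONOTONIC and new TAI bases require a non-zero value. A hedged egress sketch that arms an EDT-style departure time on the TAI clock (the 1 ms offset is illustrative):

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int pace_tai(struct __sk_buff *skb)
    {
        __u64 now = bpf_ktime_get_tai_ns();

        /* Schedule departure 1 ms from now; fq/etf can honour this. */
        if (bpf_skb_set_tstamp(skb, now + 1000000, BPF_SKB_CLOCK_TAI))
            return TC_ACT_SHOT;
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
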
@@ -7828,42 +7907,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
#endif /* CONFIG_INET */
-bool bpf_helper_changes_pkt_data(void *func)
-{
- if (func == bpf_skb_vlan_push ||
- func == bpf_skb_vlan_pop ||
- func == bpf_skb_store_bytes ||
- func == bpf_skb_change_proto ||
- func == bpf_skb_change_head ||
- func == sk_skb_change_head ||
- func == bpf_skb_change_tail ||
- func == sk_skb_change_tail ||
- func == bpf_skb_adjust_room ||
- func == sk_skb_adjust_room ||
- func == bpf_skb_pull_data ||
- func == sk_skb_pull_data ||
- func == bpf_clone_redirect ||
- func == bpf_l3_csum_replace ||
- func == bpf_l4_csum_replace ||
- func == bpf_xdp_adjust_head ||
- func == bpf_xdp_adjust_meta ||
- func == bpf_msg_pull_data ||
- func == bpf_msg_push_data ||
- func == bpf_msg_pop_data ||
- func == bpf_xdp_adjust_tail ||
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
- func == bpf_lwt_seg6_store_bytes ||
- func == bpf_lwt_seg6_adjust_srh ||
- func == bpf_lwt_seg6_action ||
-#endif
-#ifdef CONFIG_INET
- func == bpf_sock_ops_store_hdr_opt ||
-#endif
- func == bpf_lwt_in_push_encap ||
- func == bpf_lwt_xmit_push_encap)
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_clone_redirect:
+ case BPF_FUNC_l3_csum_replace:
+ case BPF_FUNC_l4_csum_replace:
+ case BPF_FUNC_lwt_push_encap:
+ case BPF_FUNC_lwt_seg6_action:
+ case BPF_FUNC_lwt_seg6_adjust_srh:
+ case BPF_FUNC_lwt_seg6_store_bytes:
+ case BPF_FUNC_msg_pop_data:
+ case BPF_FUNC_msg_pull_data:
+ case BPF_FUNC_msg_push_data:
+ case BPF_FUNC_skb_adjust_room:
+ case BPF_FUNC_skb_change_head:
+ case BPF_FUNC_skb_change_proto:
+ case BPF_FUNC_skb_change_tail:
+ case BPF_FUNC_skb_pull_data:
+ case BPF_FUNC_skb_store_bytes:
+ case BPF_FUNC_skb_vlan_pop:
+ case BPF_FUNC_skb_vlan_push:
+ case BPF_FUNC_store_hdr_opt:
+ case BPF_FUNC_xdp_adjust_head:
+ case BPF_FUNC_xdp_adjust_meta:
+ case BPF_FUNC_xdp_adjust_tail:
+ /* a tail-called program could call any of the above */
+ case BPF_FUNC_tail_call:
return true;
-
- return false;
+ default:
+ return false;
+ }
}
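
The rewritten check is keyed on helper IDs and now includes bpf_tail_call, since a tail-called program may itself invoke any data-mutating helper; the verifier uses the result to invalidate previously derived packet pointers. From a program's point of view the consequence is the familiar one: after any of the listed helpers, data/data_end must be re-derived before further direct packet access. A hedged sketch:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int reload_after_pull(struct __sk_buff *skb)
    {
        void *data, *data_end;

        /* bpf_skb_pull_data() is in the list above, so it invalidates
         * any packet pointers obtained before the call.
         */
        if (bpf_skb_pull_data(skb, 64))
            return TC_ACT_SHOT;

        /* Re-derive and re-bound the pointers afterwards. */
        data     = (void *)(long)skb->data;
        data_end = (void *)(long)skb->data_end;
        if (data + 64 > data_end)
            return TC_ACT_SHOT;

        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
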
const struct bpf_func_proto bpf_event_output_data_proto __weak;
@@ -7894,7 +7968,7 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -7987,7 +8061,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8006,7 +8080,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8132,6 +8206,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_under_cgroup_proto;
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_cookie_proto;
+ case BPF_FUNC_get_netns_cookie:
+ return &bpf_get_netns_cookie_proto;
case BPF_FUNC_get_socket_uid:
return &bpf_get_socket_uid_proto;
case BPF_FUNC_fib_lookup:
@@ -8193,7 +8269,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8252,13 +8328,13 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
/* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
* kfuncs are defined in two different modules, and we want to be able
- * to use them interchangably with the same BTF type ID. Because modules
+ * to use them interchangeably with the same BTF type ID. Because modules
* can't de-duplicate BTF IDs between each other, we need the type to be
* referenced in the vmlinux BTF or the verifier will get confused about
* the different types. So we add this dummy type reference which will
@@ -8313,7 +8389,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8342,8 +8418,6 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_event_output_data_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
- case BPF_FUNC_get_current_pid_tgid:
- return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
@@ -8355,7 +8429,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8399,7 +8473,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8410,7 +8484,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8437,7 +8511,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8526,13 +8600,16 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (off + size > offsetofend(struct __sk_buff, cb[4]))
return false;
break;
+ case bpf_ctx_range(struct __sk_buff, data):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ if (info->is_ldsx || size != size_default)
+ return false;
+ break;
case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
- case bpf_ctx_range(struct __sk_buff, data):
- case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, data_end):
if (size != size_default)
return false;
break;
@@ -8612,7 +8689,7 @@ static bool cg_skb_is_valid_access(int off, int size,
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
}
@@ -8624,7 +8701,7 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
case bpf_ctx_range(struct __sk_buff, tstamp):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
default:
@@ -8976,6 +9053,14 @@ static bool xdp_is_valid_access(int off, int size,
}
}
return false;
+ } else {
+ switch (off) {
+ case offsetof(struct xdp_md, data_meta):
+ case offsetof(struct xdp_md, data):
+ case offsetof(struct xdp_md, data_end):
+ if (info->is_ldsx)
+ return false;
+ }
}
switch (off) {
@@ -8993,7 +9078,8 @@ static bool xdp_is_valid_access(int off, int size,
return __is_valid_xdp_access(off, size);
}
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act)
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act)
{
const u32 act_max = XDP_REDIRECT;
@@ -9301,12 +9387,12 @@ static bool flow_dissector_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, data):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET;
return true;
case bpf_ctx_range(struct __sk_buff, data_end):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET_END;
return true;
@@ -9357,16 +9443,17 @@ static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si,
{
__u8 value_reg = si->dst_reg;
__u8 skb_reg = si->src_reg;
- /* AX is needed because src_reg and dst_reg could be the same */
- __u8 tmp_reg = BPF_REG_AX;
-
- *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg,
- SKB_BF_MONO_TC_OFFSET);
- *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg,
- SKB_MONO_DELIVERY_TIME_MASK, 2);
- *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC);
- *insn++ = BPF_JMP_A(1);
- *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO);
+ BUILD_BUG_ON(__SKB_CLOCK_MAX != (int)BPF_SKB_CLOCK_TAI);
+ BUILD_BUG_ON(SKB_CLOCK_REALTIME != (int)BPF_SKB_CLOCK_REALTIME);
+ BUILD_BUG_ON(SKB_CLOCK_MONOTONIC != (int)BPF_SKB_CLOCK_MONOTONIC);
+ BUILD_BUG_ON(SKB_CLOCK_TAI != (int)BPF_SKB_CLOCK_TAI);
+ *insn++ = BPF_LDX_MEM(BPF_B, value_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, value_reg, SKB_TSTAMP_TYPE_MASK);
+#ifdef __BIG_ENDIAN_BITFIELD
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, value_reg, SKB_TSTAMP_TYPE_RSHIFT);
+#else
+ BUILD_BUG_ON(!(SKB_TSTAMP_TYPE_MASK & 0x1));
+#endif
return insn;
}
@@ -9409,11 +9496,12 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
__u8 tmp_reg = BPF_REG_AX;
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
- *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg,
- TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK);
- *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg,
- TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2);
- /* skb->tc_at_ingress && skb->mono_delivery_time,
+ /* check if the ingress mask bit is set */
+ *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
+ *insn++ = BPF_JMP_A(4);
+ *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, SKB_TSTAMP_TYPE_MASK, 1);
+ *insn++ = BPF_JMP_A(2);
+ /* skb->tc_at_ingress && skb->tstamp_type,
* read 0 as the (rcv) timestamp.
*/
*insn++ = BPF_MOV64_IMM(value_reg, 0);
@@ -9438,7 +9526,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
* the bpf prog is aware the tstamp could have delivery time.
* Thus, write skb->tstamp as is if tstamp_type_access is true.
* Otherwise, writing at ingress will have to clear the
- * mono_delivery_time bit also.
+ * skb->tstamp_type bit also.
*/
if (!prog->tstamp_type_access) {
__u8 tmp_reg = BPF_REG_AX;
@@ -9448,8 +9536,8 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
/* goto <store> */
*insn++ = BPF_JMP_A(2);
- /* <clear>: mono_delivery_time */
- *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK);
+ /* <clear>: skb->tstamp_type */
+ *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_TSTAMP_TYPE_MASK);
*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET);
}
#endif
@@ -10153,10 +10241,6 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
} \
} while (0)
-#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
- SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
- S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
-
static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
@@ -11005,7 +11089,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
};
const struct bpf_prog_ops lwt_seg6local_prog_ops = {
- .test_run = bpf_prog_test_run_skb,
};
const struct bpf_verifier_ops cg_sock_verifier_ops = {
@@ -11168,6 +11251,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
struct sock_reuseport *reuse;
struct sock *selected_sk;
+ int err;
selected_sk = map->ops->map_lookup_elem(map, key);
if (!selected_sk)
@@ -11175,10 +11259,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
if (!reuse) {
- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
- if (sk_is_refcounted(selected_sk))
- sock_put(selected_sk);
-
/* reuseport_array has only sk with non NULL sk_reuseport_cb.
* The only (!reuse) case here is - the sk has already been
* unhashed (e.g. by close()), so treat it as -ENOENT.
@@ -11186,24 +11266,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
* Other maps (e.g. sock_map) do not provide this guarantee and
* the sk may never be in the reuseport group to begin with.
*/
- return is_sockarray ? -ENOENT : -EINVAL;
+ err = is_sockarray ? -ENOENT : -EINVAL;
+ goto error;
}
if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
struct sock *sk = reuse_kern->sk;
- if (sk->sk_protocol != selected_sk->sk_protocol)
- return -EPROTOTYPE;
- else if (sk->sk_family != selected_sk->sk_family)
- return -EAFNOSUPPORT;
-
- /* Catch all. Likely bound to a different sockaddr. */
- return -EBADFD;
+ if (sk->sk_protocol != selected_sk->sk_protocol) {
+ err = -EPROTOTYPE;
+ } else if (sk->sk_family != selected_sk->sk_family) {
+ err = -EAFNOSUPPORT;
+ } else {
+ /* Catch all. Likely bound to a different sockaddr. */
+ err = -EBADFD;
+ }
+ goto error;
}
reuse_kern->selected_sk = selected_sk;
return 0;
+error:
+ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+ if (sk_is_refcounted(selected_sk))
+ sock_put(selected_sk);
+
+ return err;
}
static const struct bpf_func_proto sk_select_reuseport_proto = {
@@ -11268,7 +11357,7 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -11450,7 +11539,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -11784,7 +11873,7 @@ const struct bpf_func_proto bpf_sock_from_file_proto = {
};
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id)
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
const struct bpf_func_proto *func;
@@ -11813,38 +11902,44 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
return func;
}
__bpf_kfunc_start_defs();
-__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+ struct sk_buff *skb = (struct sk_buff *)s;
+
if (flags) {
- bpf_dynptr_set_null(ptr__uninit);
+ bpf_dynptr_set_null(ptr);
return -EINVAL;
}
- bpf_dynptr_init(ptr__uninit, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
+ bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
return 0;
}
-__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+ struct xdp_buff *xdp = (struct xdp_buff *)x;
+
if (flags) {
- bpf_dynptr_set_null(ptr__uninit);
+ bpf_dynptr_set_null(ptr);
return -EINVAL;
}
- bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
+ bpf_dynptr_init(ptr, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
return 0;
}
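
The dynptr kfuncs now take the BPF-visible __sk_buff/xdp_md and bpf_dynptr types and cast internally, which is what lets the skb flavour gain KF_TRUSTED_ARGS below. A caller-side sketch, with the kfunc declared as a __ksym extern in the usual style (the declaration is an assumption, not quoted from this patch):

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
                                   struct bpf_dynptr *ptr) __ksym;

    SEC("tc")
    int peek_l2(struct __sk_buff *skb)
    {
        struct bpf_dynptr ptr;
        __u8 eth[14];

        if (bpf_dynptr_from_skb(skb, 0, &ptr))
            return TC_ACT_OK;

        /* Read the Ethernet header through the dynptr, linear data or not. */
        if (bpf_dynptr_read(eth, sizeof(eth), &ptr, 0, 0))
            return TC_ACT_OK;

        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
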
@@ -11869,33 +11964,136 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
+
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ struct sk_buff *skb = (struct sk_buff *)s;
+ const struct request_sock_ops *ops;
+ struct inet_request_sock *ireq;
+ struct tcp_request_sock *treq;
+ struct request_sock *req;
+ struct net *net;
+ __u16 min_mss;
+ u32 tsoff = 0;
+
+ if (attrs__sz != sizeof(*attrs) ||
+ attrs->reserved[0] || attrs->reserved[1] || attrs->reserved[2])
+ return -EINVAL;
+
+ if (!skb_at_tc_ingress(skb))
+ return -EINVAL;
+
+ net = dev_net(skb->dev);
+ if (net != sock_net(sk))
+ return -ENETUNREACH;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ops = &tcp_request_sock_ops;
+ min_mss = 536;
+ break;
+#if IS_BUILTIN(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ops = &tcp6_request_sock_ops;
+ min_mss = IPV6_MIN_MTU - 60;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_LISTEN ||
+ sk_is_mptcp(sk))
+ return -EINVAL;
+
+ if (attrs->mss < min_mss)
+ return -EINVAL;
+
+ if (attrs->wscale_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
+ return -EINVAL;
+
+ if (attrs->snd_wscale > TCP_MAX_WSCALE ||
+ attrs->rcv_wscale > TCP_MAX_WSCALE)
+ return -EINVAL;
+ }
+
+ if (attrs->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
+ return -EINVAL;
+
+ if (attrs->tstamp_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
+ return -EINVAL;
+
+ tsoff = attrs->rcv_tsecr - tcp_ns_to_ts(attrs->usec_ts_ok, tcp_clock_ns());
+ }
+
+ req = inet_reqsk_alloc(ops, sk, false);
+ if (!req)
+ return -ENOMEM;
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
+
+ req->rsk_listener = sk;
+ req->syncookie = 1;
+ req->mss = attrs->mss;
+ req->ts_recent = attrs->rcv_tsval;
+
+ ireq->snd_wscale = attrs->snd_wscale;
+ ireq->rcv_wscale = attrs->rcv_wscale;
+ ireq->tstamp_ok = !!attrs->tstamp_ok;
+ ireq->sack_ok = !!attrs->sack_ok;
+ ireq->wscale_ok = !!attrs->wscale_ok;
+ ireq->ecn_ok = !!attrs->ecn_ok;
+
+ treq->req_usec_ts = !!attrs->usec_ts_ok;
+ treq->ts_off = tsoff;
+
+ skb_orphan(skb);
+ skb->sk = req_to_sk(req);
+ skb->destructor = sock_pfree;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
__bpf_kfunc_end_defs();
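
bpf_sk_assign_tcp_reqsk() is the kernel half of arbitrary SYN-cookie support: a tc ingress program validates its own cookie on the returning ACK, fills struct bpf_tcp_req_attrs from the echoed TCP options, and hands the skb plus listener to this kfunc, which allocates the request sock and attaches it via sock_pfree(). A heavily abridged caller sketch; cookie validation and option parsing are elided, and the extern declaration is an assumption in the usual __ksym style:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    #define TC_ACT_OK   0
    #define TC_ACT_SHOT 2

    extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
                                       struct bpf_tcp_req_attrs *attrs,
                                       int attrs__sz) __ksym;

    SEC("tc")
    int accept_custom_syncookie(struct __sk_buff *skb)
    {
        struct bpf_tcp_req_attrs attrs = {};
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *skc;
        struct sock *sk;

        /* ... validate our own cookie in the incoming ACK and fill
         * tuple + attrs (mss, wscale, tstamp/sack bits) from the
         * options it echoed back ...
         */

        skc = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                 BPF_F_CURRENT_NETNS, 0);
        if (!skc)
            return TC_ACT_SHOT;

        sk = (struct sock *)bpf_skc_to_tcp_sock(skc);
        if (sk)
            bpf_sk_assign_tcp_reqsk(skb, sk, &attrs, sizeof(attrs));

        bpf_sk_release(skc);
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
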
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
int err;
err = bpf_dynptr_from_skb(skb, flags, ptr__uninit);
if (err)
return err;
- bpf_dynptr_set_rdonly(ptr__uninit);
+ bpf_dynptr_set_rdonly(ptr);
return 0;
}
-BTF_SET8_START(bpf_kfunc_check_set_skb)
-BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
-BTF_SET8_END(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
-BTF_SET8_START(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
-BTF_SET8_END(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
-BTF_SET8_START(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path)
-BTF_SET8_END(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr)
+
+BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk)
+BTF_ID_FLAGS(func, bpf_sk_assign_tcp_reqsk, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_tcp_reqsk)
static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
.owner = THIS_MODULE,
@@ -11912,6 +12110,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = {
.set = &bpf_kfunc_check_set_sock_addr,
};
+static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_tcp_reqsk,
+};
+
static int __init bpf_kfunc_init(void)
{
int ret;
@@ -11926,9 +12129,11 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- &bpf_kfunc_set_sock_addr);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ &bpf_kfunc_set_sock_addr);
+ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk);
}
late_initcall(bpf_kfunc_init);
@@ -11968,9 +12173,9 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids)
static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 272f09251343..9cd8de6bebb5 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -299,9 +299,10 @@ void skb_flow_dissect_meta(const struct sk_buff *skb,
EXPORT_SYMBOL(skb_flow_dissect_meta);
static void
-skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
- struct flow_dissector *flow_dissector,
- void *target_container)
+skb_flow_dissect_set_enc_control(enum flow_dissector_key_id type,
+ u32 ctrl_flags,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
{
struct flow_dissector_key_control *ctrl;
@@ -312,6 +313,7 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
target_container);
ctrl->addr_type = type;
+ ctrl->flags = ctrl_flags;
}
void
@@ -367,6 +369,7 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
{
struct ip_tunnel_info *info;
struct ip_tunnel_key *key;
+ u32 ctrl_flags = 0;
/* A quick check to see if there might be something to do. */
if (!dissector_uses_key(flow_dissector,
@@ -391,11 +394,20 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
key = &info->key;
+ if (test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_CSUM;
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_DONT_FRAGMENT;
+ if (test_bit(IP_TUNNEL_OAM_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_OAM;
+ if (test_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_CRIT_OPT;
+
switch (ip_tunnel_info_af(info)) {
case AF_INET:
- skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- flow_dissector,
- target_container);
+ skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ ctrl_flags, flow_dissector,
+ target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *ipv4;
@@ -408,9 +420,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
}
break;
case AF_INET6:
- skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- flow_dissector,
- target_container);
+ skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ ctrl_flags, flow_dissector,
+ target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *ipv6;
@@ -422,6 +434,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
ipv6->dst = key->u.ipv6.dst;
}
break;
+ default:
+ skb_flow_dissect_set_enc_control(0, ctrl_flags, flow_dissector,
+ target_container);
+ break;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -455,17 +471,25 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
struct flow_dissector_key_enc_opts *enc_opt;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+ u32 val;
enc_opt = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_OPTS,
target_container);
- if (info->options_len) {
- enc_opt->len = info->options_len;
- ip_tunnel_info_opts_get(enc_opt->data, info);
- enc_opt->dst_opt_type = info->key.tun_flags &
- TUNNEL_OPTIONS_PRESENT;
- }
+ if (!info->options_len)
+ return;
+
+ enc_opt->len = info->options_len;
+ ip_tunnel_info_opts_get(enc_opt->data, info);
+
+ ip_tunnel_set_options_present(flags);
+ ip_tunnel_flags_and(flags, info->key.tun_flags, flags);
+
+ val = find_next_bit(flags, __IP_TUNNEL_FLAG_NUM,
+ IP_TUNNEL_GENEVE_OPT_BIT);
+ enc_opt->dst_opt_type = val < __IP_TUNNEL_FLAG_NUM ? val : 0;
}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
@@ -829,23 +853,30 @@ __skb_flow_dissect_ports(const struct sk_buff *skb,
void *target_container, const void *data,
int nhoff, u8 ip_proto, int hlen)
{
- enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
- struct flow_dissector_key_ports *key_ports;
+ struct flow_dissector_key_ports_range *key_ports_range = NULL;
+ struct flow_dissector_key_ports *key_ports = NULL;
+ __be32 ports;
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
- dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
- else if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS_RANGE))
- dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
+ key_ports = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target_container);
- if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS_RANGE))
+ key_ports_range = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS_RANGE,
+ target_container);
+
+ if (!key_ports && !key_ports_range)
return;
- key_ports = skb_flow_dissector_target(flow_dissector,
- dissector_ports,
- target_container);
- key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
- data, hlen);
+ ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
+
+ if (key_ports)
+ key_ports->ports = ports;
+
+ if (key_ports_range)
+ key_ports_range->tp.ports = ports;
}
static void
@@ -900,6 +931,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
struct flow_dissector *flow_dissector,
void *target_container)
{
+ struct flow_dissector_key_ports_range *key_ports_range = NULL;
struct flow_dissector_key_ports *key_ports = NULL;
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
@@ -944,20 +976,21 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
- if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
- else if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS_RANGE))
- key_ports = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS_RANGE,
- target_container);
-
- if (key_ports) {
key_ports->src = flow_keys->sport;
key_ports->dst = flow_keys->dport;
}
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ key_ports_range = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS_RANGE,
+ target_container);
+ key_ports_range->tp.src = flow_keys->sport;
+ key_ports_range->tp.dst = flow_keys->dport;
+ }
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
@@ -1084,21 +1117,22 @@ bool __skb_flow_dissect(const struct net *net,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
+ rcu_read_lock();
+
if (skb) {
if (!net) {
if (skb->dev)
- net = dev_net(skb->dev);
+ net = dev_net_rcu(skb->dev);
else if (skb->sk)
net = sock_net(skb->sk);
}
}
- WARN_ON_ONCE(!net);
+ DEBUG_NET_WARN_ON_ONCE(!net);
if (net) {
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
struct bpf_prog_array *run_array;
- rcu_read_lock();
run_array = rcu_dereference(init_net.bpf.run_array[type]);
if (!run_array)
run_array = rcu_dereference(net->bpf.run_array[type]);
@@ -1126,17 +1160,17 @@ bool __skb_flow_dissect(const struct net *net,
prog = READ_ONCE(run_array->items[0].prog);
result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
hlen, flags);
- if (result == BPF_FLOW_DISSECTOR_CONTINUE)
- goto dissect_continue;
- __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
- target_container);
- rcu_read_unlock();
- return result == BPF_OK;
+ if (result != BPF_FLOW_DISSECTOR_CONTINUE) {
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+ target_container);
+ rcu_read_unlock();
+ return result == BPF_OK;
+ }
}
-dissect_continue:
- rcu_read_unlock();
}
+ rcu_read_unlock();
+
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
@@ -1784,6 +1818,13 @@ u32 flow_hash_from_keys(struct flow_keys *keys)
}
EXPORT_SYMBOL(flow_hash_from_keys);
+u32 flow_hash_from_keys_seed(struct flow_keys *keys,
+ const siphash_key_t *keyval)
+{
+ return __flow_hash_from_keys(keys, keyval);
+}
+EXPORT_SYMBOL(flow_hash_from_keys_seed);
+
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
struct flow_keys *keys,
const siphash_key_t *keyval)
@@ -1823,22 +1864,23 @@ EXPORT_SYMBOL(make_flow_keys_digest);
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb)
{
struct flow_keys keys;
__flow_hash_secret_init();
memset(&keys, 0, sizeof(keys));
- __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+ __skb_flow_dissect(net, skb, &flow_keys_dissector_symmetric,
&keys, NULL, 0, 0, 0, 0);
return __flow_hash_from_keys(&keys, &hashrnd);
}
-EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric_net);
/**
- * __skb_get_hash: calculate a flow hash
+ * __skb_get_hash_net: calculate a flow hash
+ * @net: associated network namespace, derived from @skb if NULL
* @skb: sk_buff to calculate flow hash from
*
* This function calculates a flow hash based on src/dst addresses
@@ -1846,18 +1888,24 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
* on success, zero indicates no valid hash. Also, sets l4_hash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
-void __skb_get_hash(struct sk_buff *skb)
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
+ memset(&keys, 0, sizeof(keys));
+
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ &keys, NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
__flow_hash_secret_init();
- hash = ___skb_get_hash(skb, &keys, &hashrnd);
+ hash = __flow_hash_from_keys(&keys, &hashrnd);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
-EXPORT_SYMBOL(__skb_get_hash);
+EXPORT_SYMBOL(__skb_get_hash_net);
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
const siphash_key_t *perturb)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index fae9c4694186..412816076b8b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -206,7 +206,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
struct net_rate_estimator *est;
- est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
+ est = unrcu_pointer(xchg(rate_est, NULL));
if (est) {
timer_shutdown_sync(&est->timer);
kfree_rcu(est, rcu);
diff --git a/net/core/gro.c b/net/core/gro.c
index 0759277dc14e..0ad549b07e03 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -3,16 +3,11 @@
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
+#include <linux/skbuff_ref.h>
#define MAX_GRO_SKBS 8
-/* This should be increased if a protocol with a bigger head is added. */
-#define GRO_MAX_HEAD (MAX_HEADER + 128)
-
static DEFINE_SPINLOCK(offload_lock);
-struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/**
* dev_add_offload - register offload handlers
@@ -31,7 +26,7 @@ void dev_add_offload(struct packet_offload *po)
struct packet_offload *elem;
spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
+ list_for_each_entry(elem, &net_hotdata.offload_base, list) {
if (po->priority < elem->priority)
break;
}
@@ -55,7 +50,7 @@ EXPORT_SYMBOL(dev_add_offload);
*/
static void __dev_remove_offload(struct packet_offload *po)
{
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *po1;
spin_lock(&offload_lock);
@@ -100,7 +95,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
unsigned int headlen = skb_headlen(skb);
unsigned int len = skb_gro_len(skb);
unsigned int delta_truesize;
- unsigned int gro_max_size;
unsigned int new_truesize;
struct sk_buff *lp;
int segs;
@@ -114,12 +108,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
if (p->pp_recycle != skb->pp_recycle)
return -ETOOMANYREFS;
- /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
- gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
- READ_ONCE(p->dev->gro_max_size) :
- READ_ONCE(p->dev->gro_ipv4_max_size);
-
- if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
+ if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
+ NAPI_GRO_CB(skb)->flush))
return -E2BIG;
if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
@@ -195,8 +185,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
}
merge:
- /* sk owenrship - if any - completely transferred to the aggregated packet */
+ /* sk ownership - if any - completely transferred to the aggregated packet */
skb->destructor = NULL;
+ skb->sk = NULL;
delta_truesize = skb->truesize;
if (offset > headlen) {
unsigned int eat = offset - headlen;
@@ -232,12 +223,39 @@ done:
return 0;
}
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
- struct list_head *head = &offload_base;
int err = -ENOENT;
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -332,8 +350,6 @@ static void gro_list_prepare(const struct list_head *head,
list_for_each_entry(p, head, list) {
unsigned long diffs;
- NAPI_GRO_CB(p)->flush = 0;
-
if (hash != skb_get_hash_raw(p)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
@@ -350,7 +366,7 @@ static void gro_list_prepare(const struct list_head *head,
skb_mac_header(skb),
maclen);
- /* in most common scenarions 'slow_gro' is 0
+ /* in most common scenarios 'slow_gro' is 0
* otherwise we are already on some slower paths
* either skip all the infrequent tests altogether or
* avoid trying too hard to skip each of them individually
@@ -369,14 +385,22 @@ static void gro_list_prepare(const struct list_head *head,
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
+ const struct skb_shared_info *pinfo;
+ const skb_frag_t *frag0;
+ unsigned int headlen;
+ NAPI_GRO_CB(skb)->network_offset = 0;
NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ headlen = skb_headlen(skb);
+ NAPI_GRO_CB(skb)->frag0 = skb->data;
+ NAPI_GRO_CB(skb)->frag0_len = headlen;
+ if (headlen)
+ return;
- if (!skb_headlen(skb) && pinfo->nr_frags &&
+ pinfo = skb_shinfo(skb);
+ frag0 = &pinfo->frags[0];
+
+ if (pinfo->nr_frags && skb_frag_page(frag0) &&
!PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
@@ -438,7 +462,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
{
u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct sk_buff *pp = NULL;
@@ -466,7 +490,6 @@ found_ptype:
sizeof(u32))); /* Avoid slow unaligned acc */
*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
- NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->count = 1;
if (unlikely(skb_is_gso(skb))) {
NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
@@ -544,7 +567,7 @@ normal:
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -558,7 +581,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
struct packet_offload *gro_find_complete_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -630,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb_shinfo(skb)->gso_type = 0;
skb_shinfo(skb)->gso_size = 0;
if (unlikely(skb->slow_gro)) {
@@ -700,7 +724,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb, hlen);
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ if (unlikely(!skb_gro_may_pull(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
net_warn_ratelimited("%s: dropping impossible skb from %s\n",
@@ -710,7 +734,10 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
}
} else {
eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
+
+ if (NAPI_GRO_CB(skb)->frag0 != skb->data)
+ gro_pull_from_frag0(skb, hlen);
+
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
}
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ed5ec5de47f6..ff8e5b64bf6b 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
+#include <net/hotdata.h>
struct gro_cell {
struct sk_buff_head napi_skbs;
@@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
- if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
diff --git a/net/core/gso.c b/net/core/gso.c
index 9e1803bfc9c6..bcd156372f4d 100644
--- a/net/core/gso.c
+++ b/net/core/gso.c
@@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
struct packet_offload *ptype;
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
@@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
__skb_pull(skb, vlan_depth);
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
new file mode 100644
index 000000000000..d0aaaaa556f2
--- /dev/null
+++ b/net/core/hotdata.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <net/hotdata.h>
+#include <net/proto_memory.h>
+
+struct net_hotdata net_hotdata __cacheline_aligned = {
+ .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
+ .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
+ .gro_normal_batch = 8,
+
+ .netdev_budget = 300,
+ /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+ .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
+
+ .tstamp_prequeue = 1,
+ .max_backlog = 1000,
+ .dev_tx_weight = 64,
+ .dev_rx_weight = 64,
+ .sysctl_max_skb_frags = MAX_SKB_FRAGS,
+ .sysctl_skb_defer_max = 64,
+ .sysctl_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE
+};
+EXPORT_SYMBOL(net_hotdata);
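
An illustrative userspace sketch of the consolidation idea behind net_hotdata: frequently read tunables live in one cache-line-aligned structure with designated initializers instead of scattered global variables. The struct, field names and values here are assumptions for the sketch, not the kernel's definitions:

#include <stdio.h>

/* Hot read-mostly tunables grouped into one aligned object so readers
 * touch a small number of shared cache lines. */
struct demo_hotdata {
	int gro_normal_batch;
	int netdev_budget;
	int max_backlog;
	int dev_rx_weight;
} __attribute__((aligned(64)));

static struct demo_hotdata demo_hotdata = {
	.gro_normal_batch = 8,
	.netdev_budget    = 300,
	.max_backlog      = 1000,
	.dev_rx_weight    = 64,
};

int main(void)
{
	/* Readers reference one object instead of separate globals. */
	printf("budget=%d backlog=%d\n",
	       demo_hotdata.netdev_budget, demo_hotdata.max_backlog);
	return 0;
}
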
diff --git a/net/core/ieee8021q_helpers.c b/net/core/ieee8021q_helpers.c
new file mode 100644
index 000000000000..759a9b9f3f89
--- /dev/null
+++ b/net/core/ieee8021q_helpers.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include <linux/array_size.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
+
+/* The following arrays map Traffic Types (TT) to traffic classes (TC) for
+ * different numbers of queues, as shown in the example provided by
+ * IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic class mapping" and
+ * Table I-1 "Traffic type to traffic class mapping".
+ */
+static const u8 ieee8021q_8queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0,
+ [IEEE8021Q_TT_BE] = 1,
+ [IEEE8021Q_TT_EE] = 2,
+ [IEEE8021Q_TT_CA] = 3,
+ [IEEE8021Q_TT_VI] = 4,
+ [IEEE8021Q_TT_VO] = 5,
+ [IEEE8021Q_TT_IC] = 6,
+ [IEEE8021Q_TT_NC] = 7,
+};
+
+static const u8 ieee8021q_7queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0,
+ [IEEE8021Q_TT_BE] = 1,
+ [IEEE8021Q_TT_EE] = 2,
+ [IEEE8021Q_TT_CA] = 3,
+ [IEEE8021Q_TT_VI] = 4, [IEEE8021Q_TT_VO] = 4,
+ [IEEE8021Q_TT_IC] = 5,
+ [IEEE8021Q_TT_NC] = 6,
+};
+
+static const u8 ieee8021q_6queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0,
+ [IEEE8021Q_TT_BE] = 1,
+ [IEEE8021Q_TT_EE] = 2, [IEEE8021Q_TT_CA] = 2,
+ [IEEE8021Q_TT_VI] = 3, [IEEE8021Q_TT_VO] = 3,
+ [IEEE8021Q_TT_IC] = 4,
+ [IEEE8021Q_TT_NC] = 5,
+};
+
+static const u8 ieee8021q_5queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0, [IEEE8021Q_TT_BE] = 0,
+ [IEEE8021Q_TT_EE] = 1, [IEEE8021Q_TT_CA] = 1,
+ [IEEE8021Q_TT_VI] = 2, [IEEE8021Q_TT_VO] = 2,
+ [IEEE8021Q_TT_IC] = 3,
+ [IEEE8021Q_TT_NC] = 4,
+};
+
+static const u8 ieee8021q_4queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0, [IEEE8021Q_TT_BE] = 0,
+ [IEEE8021Q_TT_EE] = 1, [IEEE8021Q_TT_CA] = 1,
+ [IEEE8021Q_TT_VI] = 2, [IEEE8021Q_TT_VO] = 2,
+ [IEEE8021Q_TT_IC] = 3, [IEEE8021Q_TT_NC] = 3,
+};
+
+static const u8 ieee8021q_3queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0, [IEEE8021Q_TT_BE] = 0,
+ [IEEE8021Q_TT_EE] = 0, [IEEE8021Q_TT_CA] = 0,
+ [IEEE8021Q_TT_VI] = 1, [IEEE8021Q_TT_VO] = 1,
+ [IEEE8021Q_TT_IC] = 2, [IEEE8021Q_TT_NC] = 2,
+};
+
+static const u8 ieee8021q_2queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0, [IEEE8021Q_TT_BE] = 0,
+ [IEEE8021Q_TT_EE] = 0, [IEEE8021Q_TT_CA] = 0,
+ [IEEE8021Q_TT_VI] = 1, [IEEE8021Q_TT_VO] = 1,
+ [IEEE8021Q_TT_IC] = 1, [IEEE8021Q_TT_NC] = 1,
+};
+
+static const u8 ieee8021q_1queue_tt_tc_map[] = {
+ [IEEE8021Q_TT_BK] = 0, [IEEE8021Q_TT_BE] = 0,
+ [IEEE8021Q_TT_EE] = 0, [IEEE8021Q_TT_CA] = 0,
+ [IEEE8021Q_TT_VI] = 0, [IEEE8021Q_TT_VO] = 0,
+ [IEEE8021Q_TT_IC] = 0, [IEEE8021Q_TT_NC] = 0,
+};
+
+/**
+ * ieee8021q_tt_to_tc - Map IEEE 802.1Q Traffic Type to Traffic Class
+ * @tt: IEEE 802.1Q Traffic Type
+ * @num_queues: Number of queues
+ *
+ * This function maps an IEEE 802.1Q Traffic Type to a Traffic Class (TC) based
+ * on the number of queues configured on the NIC. The mapping is based on the
+ * example provided by IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic
+ * class mapping" and Table I-1 "Traffic type to traffic class mapping".
+ *
+ * Return: Traffic Class corresponding to the given Traffic Type or negative
+ * value in case of error.
+ */
+int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, unsigned int num_queues)
+{
+ if (tt < 0 || tt >= IEEE8021Q_TT_MAX) {
+ pr_err("Requested Traffic Type (%d) is out of range (%d)\n", tt,
+ IEEE8021Q_TT_MAX);
+ return -EINVAL;
+ }
+
+ switch (num_queues) {
+ case 8:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_8queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_8queue_tt_tc_map != max - 1");
+ return ieee8021q_8queue_tt_tc_map[tt];
+ case 7:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_7queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_7queue_tt_tc_map != max - 1");
+
+ return ieee8021q_7queue_tt_tc_map[tt];
+ case 6:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_6queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_6queue_tt_tc_map != max - 1");
+
+ return ieee8021q_6queue_tt_tc_map[tt];
+ case 5:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_5queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_5queue_tt_tc_map != max - 1");
+
+ return ieee8021q_5queue_tt_tc_map[tt];
+ case 4:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_4queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_4queue_tt_tc_map != max - 1");
+
+ return ieee8021q_4queue_tt_tc_map[tt];
+ case 3:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_3queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_3queue_tt_tc_map != max - 1");
+
+ return ieee8021q_3queue_tt_tc_map[tt];
+ case 2:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_2queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_2queue_tt_tc_map != max - 1");
+
+ return ieee8021q_2queue_tt_tc_map[tt];
+ case 1:
+ compiletime_assert(ARRAY_SIZE(ieee8021q_1queue_tt_tc_map) !=
+ IEEE8021Q_TT_MAX - 1,
+ "ieee8021q_1queue_tt_tc_map != max - 1");
+
+ return ieee8021q_1queue_tt_tc_map[tt];
+ }
+
+ pr_err("Invalid number of queues %d\n", num_queues);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ieee8021q_tt_to_tc);
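
A small sketch of the table-driven mapping used above: a designated-initializer array indexed by traffic type, plus a compile-time size check so a missing entry fails the build rather than reading past the table. The enum, array and values below are simplified assumptions, not the kernel's:

#include <stdio.h>

enum demo_tt { TT_BK, TT_BE, TT_EE, TT_CA, TT_VI, TT_VO, TT_IC, TT_NC, TT_MAX };

/* 4-queue example mapping, analogous to ieee8021q_4queue_tt_tc_map[]. */
static const unsigned char demo_4queue_map[] = {
	[TT_BK] = 0, [TT_BE] = 0,
	[TT_EE] = 1, [TT_CA] = 1,
	[TT_VI] = 2, [TT_VO] = 2,
	[TT_IC] = 3, [TT_NC] = 3,
};

_Static_assert(sizeof(demo_4queue_map) == TT_MAX,
	       "every traffic type must have a traffic class");

static int demo_tt_to_tc(enum demo_tt tt, unsigned int num_queues)
{
	if (tt >= TT_MAX || num_queues != 4)
		return -1;
	return demo_4queue_map[tt];
}

int main(void)
{
	printf("VO on a 4-queue NIC -> TC %d\n", demo_tt_to_tc(TT_VO, 4));
	return 0;
}
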
+
+/**
+ * ietf_dscp_to_ieee8021q_tt - Map IETF DSCP to IEEE 802.1Q Traffic Type
+ * @dscp: IETF DSCP value
+ *
+ * This function maps an IETF DSCP value to an IEEE 802.1Q Traffic Type (TT).
+ * Since there is no corresponding mapping between DSCP and IEEE 802.1Q Traffic
+ * Type, this function is inspired by RFC 8325, which describes
+ * the mapping between DSCP and 802.11 User Priority (UP) values.
+ *
+ * Return: IEEE 802.1Q Traffic Type corresponding to the given DSCP value
+ */
+int ietf_dscp_to_ieee8021q_tt(u8 dscp)
+{
+ switch (dscp) {
+ case DSCP_CS0:
+ /* Comment from RFC8325:
+ * [RFC4594], Section 4.8, recommends High-Throughput Data be marked
+ * AF1x (that is, AF11, AF12, and AF13, according to the rules defined
+ * in [RFC2475]).
+ *
+ * By default (as described in Section 2.3), High-Throughput Data will
+ * map to UP 1 and, thus, to the Background Access Category (AC_BK),
+ * which is contrary to the intent expressed in [RFC4594].
+ *
+ * Unfortunately, there really is no corresponding fit for the High-
+ * Throughput Data service class within the constrained 4 Access
+ * Category [IEEE.802.11-2016] model. If the High-Throughput Data
+ * service class is assigned to the Best Effort Access Category (AC_BE),
+ * then it would contend with Low-Latency Data (while [RFC4594]
+ * recommends a distinction in servicing between these service classes)
+ * as well as with the default service class; alternatively, if it is
+ * assigned to the Background Access Category (AC_BK), then it would
+ * receive a less-than-best-effort service and contend with Low-Priority
+ * Data (as discussed in Section 4.2.10).
+ *
+ * As such, since there is no directly corresponding fit for the High-
+ * Throughput Data service class within the [IEEE.802.11-2016] model, it
+ * is generally RECOMMENDED to map High-Throughput Data to UP 0, thereby
+ * admitting it to the Best Effort Access Category (AC_BE).
+ *
+ * Note: The above text is from RFC8325 which is describing the mapping
+ * between DSCP and 802.11 User Priority (UP) values. The mapping
+ * between UP and IEEE 802.1Q Traffic Type is not defined in the RFC but
+ * the 802.11 AC_BK and AC_BE are closely related to the IEEE 802.1Q
+ * Traffic Types BE and BK.
+ */
+ case DSCP_AF11:
+ case DSCP_AF12:
+ case DSCP_AF13:
+ return IEEE8021Q_TT_BE;
+ /* Comment from RFC8325:
+ * RFC3662 and RFC4594 both recommend Low-Priority Data be marked
+ * with DSCP CS1. The Low-Priority Data service class loosely
+ * corresponds to the [IEEE.802.11-2016] Background Access Category
+ */
+ case DSCP_CS1:
+ return IEEE8021Q_TT_BK;
+ case DSCP_CS2:
+ case DSCP_AF21:
+ case DSCP_AF22:
+ case DSCP_AF23:
+ return IEEE8021Q_TT_EE;
+ case DSCP_CS3:
+ case DSCP_AF31:
+ case DSCP_AF32:
+ case DSCP_AF33:
+ return IEEE8021Q_TT_CA;
+ case DSCP_CS4:
+ case DSCP_AF41:
+ case DSCP_AF42:
+ case DSCP_AF43:
+ return IEEE8021Q_TT_VI;
+ case DSCP_CS5:
+ case DSCP_EF:
+ case DSCP_VOICE_ADMIT:
+ return IEEE8021Q_TT_VO;
+ case DSCP_CS6:
+ return IEEE8021Q_TT_IC;
+ case DSCP_CS7:
+ return IEEE8021Q_TT_NC;
+ }
+
+ return SIMPLE_IETF_DSCP_TO_IEEE8021Q_TT(dscp);
+}
+EXPORT_SYMBOL_GPL(ietf_dscp_to_ieee8021q_tt);
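
A hedged sketch of how a caller might chain the two helpers: DSCP from the IP header is mapped to an 802.1Q traffic type, which in turn selects a hardware queue via a table like the one sketched earlier. The helper below is a stand-in for ietf_dscp_to_ieee8021q_tt(), reduced to the CS0..CS7/EF code points:

#include <stdio.h>

enum demo_tt { TT_BK, TT_BE, TT_EE, TT_CA, TT_VI, TT_VO, TT_IC, TT_NC, TT_MAX };

static int demo_dscp_to_tt(unsigned char dscp)
{
	switch (dscp) {
	case 8:  return TT_BK;	/* CS1: low-priority data        */
	case 0:  return TT_BE;	/* CS0: best effort (per RFC8325)*/
	case 16: return TT_EE;	/* CS2: excellent effort         */
	case 24: return TT_CA;	/* CS3: critical applications    */
	case 32: return TT_VI;	/* CS4: video                    */
	case 40:
	case 46: return TT_VO;	/* CS5 / EF: voice               */
	case 48: return TT_IC;	/* CS6: internetwork control     */
	case 56: return TT_NC;	/* CS7: network control          */
	}
	return TT_BE;		/* unknown code points: best effort */
}

int main(void)
{
	/* EF-marked voice traffic lands in the voice traffic type; the
	 * result would then be fed to a TT->TC table for the NIC. */
	printf("DSCP 46 -> TT %d\n", demo_dscp_to_tt(46));
	return 0;
}
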
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 429571c258da..cb04ef2b9807 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -33,7 +33,7 @@ static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
-static unsigned char default_operstate(const struct net_device *dev)
+static unsigned int default_operstate(const struct net_device *dev)
{
if (netif_testing(dev))
return IF_OPER_TESTING;
@@ -42,12 +42,21 @@ static unsigned char default_operstate(const struct net_device *dev)
* first check whether lower is indeed the source of its down state.
*/
if (!netif_carrier_ok(dev)) {
- int iflink = dev_get_iflink(dev);
struct net_device *peer;
+ int iflink;
+
+ /* If called from netdev_run_todo()/linkwatch_sync_dev(),
+ * dev_net(dev) may already have been freed, and RTNL is not held.
+ */
+ if (dev->reg_state <= NETREG_REGISTERED)
+ iflink = dev_get_iflink(dev);
+ else
+ iflink = dev->ifindex;
if (iflink == dev->ifindex)
return IF_OPER_DOWN;
+ ASSERT_RTNL();
peer = __dev_get_by_index(dev_net(dev), iflink);
if (!peer)
return IF_OPER_DOWN;
@@ -62,16 +71,13 @@ static unsigned char default_operstate(const struct net_device *dev)
return IF_OPER_UP;
}
-
static void rfc2863_policy(struct net_device *dev)
{
- unsigned char operstate = default_operstate(dev);
+ unsigned int operstate = default_operstate(dev);
- if (operstate == dev->operstate)
+ if (operstate == READ_ONCE(dev->operstate))
return;
- write_lock(&dev_base_lock);
-
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
if (operstate == IF_OPER_UP)
@@ -87,9 +93,7 @@ static void rfc2863_policy(struct net_device *dev)
break;
}
- dev->operstate = operstate;
-
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->operstate, operstate);
}
@@ -153,9 +157,9 @@ static void linkwatch_schedule_work(int urgent)
* override the existing timer.
*/
if (test_bit(LW_URGENT, &linkwatch_flags))
- mod_delayed_work(system_wq, &linkwatch_work, 0);
+ mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
else
- schedule_delayed_work(&linkwatch_work, delay);
+ queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 4a0797f0a154..ae74634310a3 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -10,8 +10,10 @@
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
+#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
+#include <net/inet_dscp.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -38,13 +40,14 @@ static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
struct dst_entry *dst, bool can_redirect)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int ret;
- /* Migration disable and BH disable are needed to protect per-cpu
- * redirect_info between BPF prog and skb_do_redirect().
+ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
+ * BPF prog and skb_do_redirect().
*/
- migrate_disable();
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
@@ -77,24 +80,26 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
break;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
- migrate_enable();
return ret;
}
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
+ enum skb_drop_reason reason;
int err = -EINVAL;
if (skb->protocol == htons(ETH_P_IP)) {
struct net_device *dev = skb_dst(skb)->dev;
- struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
dev_hold(dev);
skb_dst_drop(skb);
- err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, dev);
+ reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), dev);
+ err = reason ? -EINVAL : 0;
dev_put(dev);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb_dst_drop(skb);
@@ -204,7 +209,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
fl4.flowi4_oif = oif;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_uid = sock_net_uid(net, sk);
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = iph->protocol;
fl4.daddr = iph->daddr;
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 711cd3b4347a..4417a18b3e95 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -23,6 +23,8 @@
#include <net/ip6_fib.h>
#include <net/rtnh.h>
+#include "dev.h"
+
DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled);
@@ -325,13 +327,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap);
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
const struct lwtunnel_encap_ops *ops;
struct lwtunnel_state *lwtstate;
- int ret = -EINVAL;
+ struct dst_entry *dst;
+ int ret;
+
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+ ret = -ENETDOWN;
+ goto drop;
+ }
- if (!dst)
+ dst = skb_dst(skb);
+ if (!dst) {
+ ret = -EINVAL;
goto drop;
+ }
lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
@@ -341,8 +353,11 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
- if (likely(ops && ops->output))
+ if (likely(ops && ops->output)) {
+ dev_xmit_recursion_inc();
ret = ops->output(net, sk, skb);
+ dev_xmit_recursion_dec();
+ }
rcu_read_unlock();
if (ret == -EOPNOTSUPP)
@@ -359,13 +374,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_output);
int lwtunnel_xmit(struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
const struct lwtunnel_encap_ops *ops;
struct lwtunnel_state *lwtstate;
- int ret = -EINVAL;
+ struct dst_entry *dst;
+ int ret;
+
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+ ret = -ENETDOWN;
+ goto drop;
+ }
- if (!dst)
+ dst = skb_dst(skb);
+ if (!dst) {
+ ret = -EINVAL;
goto drop;
+ }
lwtstate = dst->lwtstate;
@@ -376,8 +401,11 @@ int lwtunnel_xmit(struct sk_buff *skb)
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
- if (likely(ops && ops->xmit))
+ if (likely(ops && ops->xmit)) {
+ dev_xmit_recursion_inc();
ret = ops->xmit(skb);
+ dev_xmit_recursion_dec();
+ }
rcu_read_unlock();
if (ret == -EOPNOTSUPP)
@@ -394,13 +422,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_xmit);
int lwtunnel_input(struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
const struct lwtunnel_encap_ops *ops;
struct lwtunnel_state *lwtstate;
- int ret = -EINVAL;
+ struct dst_entry *dst;
+ int ret;
- if (!dst)
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ __func__);
+ ret = -ENETDOWN;
goto drop;
+ }
+
+ dst = skb_dst(skb);
+ if (!dst) {
+ ret = -EINVAL;
+ goto drop;
+ }
lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
@@ -410,8 +448,11 @@ int lwtunnel_input(struct sk_buff *skb)
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
- if (likely(ops && ops->input))
+ if (likely(ops && ops->input)) {
+ dev_xmit_recursion_inc();
ret = ops->input(skb);
+ dev_xmit_recursion_dec();
+ }
rcu_read_unlock();
if (ret == -EOPNOTSUPP)
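
An illustrative sketch of the recursion guard added to the lwtunnel output/xmit/input paths: a per-context depth counter is checked before re-entering the encapsulation handler, so a tunnel whose route loops back onto itself is dropped instead of overflowing the stack. The names and the limit of 8 are assumptions for the sketch, not the internals of dev_xmit_recursion():

#include <stdio.h>

#define DEMO_RECURSION_LIMIT 8

static _Thread_local int demo_xmit_depth;

static int demo_tunnel_output(int nesting)
{
	int ret;

	if (demo_xmit_depth >= DEMO_RECURSION_LIMIT)
		return -1;			/* drop instead of recursing forever */

	if (nesting <= 0)
		return 0;			/* innermost encapsulation done */

	demo_xmit_depth++;
	ret = demo_tunnel_output(nesting - 1);	/* ops->output() may re-enter us */
	demo_xmit_depth--;

	return ret;
}

int main(void)
{
	printf("3 levels: %d\n", demo_tunnel_output(3));	/* 0: ok       */
	printf("20 levels: %d\n", demo_tunnel_output(20));	/* -1: dropped */
	return 0;
}
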
diff --git a/net/core/mp_dmabuf_devmem.h b/net/core/mp_dmabuf_devmem.h
new file mode 100644
index 000000000000..67cd0dd7319c
--- /dev/null
+++ b/net/core/mp_dmabuf_devmem.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Dmabuf device memory provider.
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ *
+ */
+#ifndef _NET_MP_DMABUF_DEVMEM_H
+#define _NET_MP_DMABUF_DEVMEM_H
+
+#include <net/netmem.h>
+
+#if defined(CONFIG_NET_DEVMEM)
+int mp_dmabuf_devmem_init(struct page_pool *pool);
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp);
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool);
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem);
+#else
+static inline int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline netmem_ref
+mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+}
+
+static inline bool
+mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ return false;
+}
+#endif
+
+#endif /* _NET_MP_DMABUF_DEVMEM_H */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 552719c3bbc3..1a620f903c56 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
-#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -61,6 +60,25 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
static const struct seq_operations neigh_stat_seq_ops;
#endif
+static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family)
+{
+ int i;
+
+ switch (family) {
+ default:
+ DEBUG_NET_WARN_ON_ONCE(1);
+ fallthrough; /* to avoid a panic from a null-ptr-deref */
+ case AF_INET:
+ i = NEIGH_ARP_TABLE;
+ break;
+ case AF_INET6:
+ i = NEIGH_ND_TABLE;
+ break;
+ }
+
+ return &dev->neighbours[i];
+}
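
A simplified userspace sketch of what the per-device neighbour list (found via neigh_get_dev_table()) buys: every entry is linked both into its hash bucket and into a list anchored at its device, so flushing a device walks only that device's entries instead of scanning every hash bucket. Structures and names below are assumptions, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct demo_neigh {
	struct demo_neigh *dev_next;	/* per-device list linkage */
	int key;
};

struct demo_dev {
	struct demo_neigh *neigh_list;	/* head of this device's neighbours */
};

static void demo_neigh_add(struct demo_dev *dev, int key)
{
	struct demo_neigh *n = calloc(1, sizeof(*n));

	n->key = key;
	n->dev_next = dev->neigh_list;	/* also hashed elsewhere in reality */
	dev->neigh_list = n;
}

static void demo_neigh_flush_dev(struct demo_dev *dev)
{
	struct demo_neigh *n = dev->neigh_list, *next;

	while (n) {
		next = n->dev_next;
		printf("flushing neigh %d\n", n->key);	/* and unhash it */
		free(n);
		n = next;
	}
	dev->neigh_list = NULL;
}

int main(void)
{
	struct demo_dev dev = { 0 };

	demo_neigh_add(&dev, 1);
	demo_neigh_add(&dev, 2);
	demo_neigh_flush_dev(&dev);	/* cost scales with this device only */
	return 0;
}
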
+
/*
Neighbour hash table buckets are protected with rwlock tbl->lock.
@@ -205,18 +223,14 @@ static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
}
}
-static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
- struct neigh_table *tbl)
+bool neigh_remove_one(struct neighbour *n)
{
bool retval = false;
write_lock(&n->lock);
if (refcount_read(&n->refcnt) == 1) {
- struct neighbour *neigh;
-
- neigh = rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock));
- rcu_assign_pointer(*np, neigh);
+ hlist_del_rcu(&n->hash);
+ hlist_del_rcu(&n->dev_list);
neigh_mark_dead(n);
retval = true;
}
@@ -226,29 +240,6 @@ static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
return retval;
}
-bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
-{
- struct neigh_hash_table *nht;
- void *pkey = ndel->primary_key;
- u32 hash_val;
- struct neighbour *n;
- struct neighbour __rcu **np;
-
- nht = rcu_dereference_protected(tbl->nht,
- lockdep_is_held(&tbl->lock));
- hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
- hash_val = hash_val >> (32 - nht->hash_shift);
-
- np = &nht->hash_buckets[hash_val];
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock)))) {
- if (n == ndel)
- return neigh_del(n, np, tbl);
- np = &n->next;
- }
- return false;
-}
-
static int neigh_forced_gc(struct neigh_table *tbl)
{
int max_clean = atomic_read(&tbl->gc_entries) -
@@ -276,7 +267,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
remove = true;
write_unlock(&n->lock);
- if (remove && neigh_remove_one(n, tbl))
+ if (remove && neigh_remove_one(n))
shrunk++;
if (shrunk >= max_clean)
break;
@@ -380,54 +371,42 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
bool skip_perm)
{
- int i;
- struct neigh_hash_table *nht;
+ struct hlist_head *dev_head;
+ struct hlist_node *tmp;
+ struct neighbour *n;
- nht = rcu_dereference_protected(tbl->nht,
- lockdep_is_held(&tbl->lock));
+ dev_head = neigh_get_dev_table(dev, tbl->family);
- for (i = 0; i < (1 << nht->hash_shift); i++) {
- struct neighbour *n;
- struct neighbour __rcu **np = &nht->hash_buckets[i];
+ hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
+ if (skip_perm && n->nud_state & NUD_PERMANENT)
+ continue;
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock))) != NULL) {
- if (dev && n->dev != dev) {
- np = &n->next;
- continue;
- }
- if (skip_perm && n->nud_state & NUD_PERMANENT) {
- np = &n->next;
- continue;
- }
- rcu_assign_pointer(*np,
- rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock)));
- write_lock(&n->lock);
- neigh_del_timer(n);
- neigh_mark_dead(n);
- if (refcount_read(&n->refcnt) != 1) {
- /* The most unpleasant situation.
- We must destroy neighbour entry,
- but someone still uses it.
-
- The destroy will be delayed until
- the last user releases us, but
- we must kill timers etc. and move
- it to safe state.
- */
- __skb_queue_purge(&n->arp_queue);
- n->arp_queue_len_bytes = 0;
- WRITE_ONCE(n->output, neigh_blackhole);
- if (n->nud_state & NUD_VALID)
- n->nud_state = NUD_NOARP;
- else
- n->nud_state = NUD_NONE;
- neigh_dbg(2, "neigh %p is stray\n", n);
- }
- write_unlock(&n->lock);
- neigh_cleanup_and_release(n);
+ hlist_del_rcu(&n->hash);
+ hlist_del_rcu(&n->dev_list);
+ write_lock(&n->lock);
+ neigh_del_timer(n);
+ neigh_mark_dead(n);
+ if (refcount_read(&n->refcnt) != 1) {
+ /* The most unpleasant situation.
+ * We must destroy neighbour entry,
+ * but someone still uses it.
+ *
+ * The destroy will be delayed until
+ * the last user releases us, but
+ * we must kill timers etc. and move
+ * it to safe state.
+ */
+ __skb_queue_purge(&n->arp_queue);
+ n->arp_queue_len_bytes = 0;
+ WRITE_ONCE(n->output, neigh_blackhole);
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+ else
+ n->nud_state = NUD_NONE;
+ neigh_dbg(2, "neigh %p is stray\n", n);
}
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
}
}
@@ -530,27 +509,21 @@ static void neigh_get_hash_rnd(u32 *x)
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
- size_t size = (1 << shift) * sizeof(struct neighbour *);
+ size_t size = (1 << shift) * sizeof(struct hlist_head);
+ struct hlist_head *hash_heads;
struct neigh_hash_table *ret;
- struct neighbour __rcu **buckets;
int i;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE) {
- buckets = kzalloc(size, GFP_ATOMIC);
- } else {
- buckets = (struct neighbour __rcu **)
- __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
- get_order(size));
- kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
- }
- if (!buckets) {
+
+ hash_heads = kvzalloc(size, GFP_ATOMIC);
+ if (!hash_heads) {
kfree(ret);
return NULL;
}
- ret->hash_buckets = buckets;
+ ret->hash_heads = hash_heads;
ret->hash_shift = shift;
for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
neigh_get_hash_rnd(&ret->hash_rnd[i]);
@@ -562,15 +535,8 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct neigh_hash_table *nht = container_of(head,
struct neigh_hash_table,
rcu);
- size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
- struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE) {
- kfree(buckets);
- } else {
- kmemleak_free(buckets);
- free_pages((unsigned long)buckets, get_order(size));
- }
+ kvfree(nht->hash_heads);
kfree(nht);
}
@@ -589,24 +555,17 @@ static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
return old_nht;
for (i = 0; i < (1 << old_nht->hash_shift); i++) {
- struct neighbour *n, *next;
+ struct hlist_node *tmp;
+ struct neighbour *n;
- for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
- lockdep_is_held(&tbl->lock));
- n != NULL;
- n = next) {
+ neigh_for_each_in_bucket_safe(n, tmp, &old_nht->hash_heads[i]) {
hash = tbl->hash(n->primary_key, n->dev,
new_nht->hash_rnd);
hash >>= (32 - new_nht->hash_shift);
- next = rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock));
- rcu_assign_pointer(n->next,
- rcu_dereference_protected(
- new_nht->hash_buckets[hash],
- lockdep_is_held(&tbl->lock)));
- rcu_assign_pointer(new_nht->hash_buckets[hash], n);
+ hlist_del_rcu(&n->hash);
+ hlist_add_head_rcu(&n->hash, &new_nht->hash_heads[hash]);
}
}
@@ -693,11 +652,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
goto out_tbl_unlock;
}
- for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
- lockdep_is_held(&tbl->lock));
- n1 != NULL;
- n1 = rcu_dereference_protected(n1->next,
- lockdep_is_held(&tbl->lock))) {
+ neigh_for_each_in_bucket(n1, &nht->hash_heads[hash_val]) {
if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
if (want_ref)
neigh_hold(n1);
@@ -713,10 +668,11 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
list_add_tail(&n->managed_list, &n->tbl->managed_list);
if (want_ref)
neigh_hold(n);
- rcu_assign_pointer(n->next,
- rcu_dereference_protected(nht->hash_buckets[hash_val],
- lockdep_is_held(&tbl->lock)));
- rcu_assign_pointer(nht->hash_buckets[hash_val], n);
+ hlist_add_head_rcu(&n->hash, &nht->hash_heads[hash_val]);
+
+ hlist_add_head_rcu(&n->dev_list,
+ neigh_get_dev_table(dev, tbl->family));
+
write_unlock_bh(&tbl->lock);
neigh_dbg(2, "neigh %p is created\n", n);
rc = n;
@@ -734,7 +690,9 @@ out_neigh_release:
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref)
{
- return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
+ bool exempt_from_gc = !!(dev->flags & IFF_LOOPBACK);
+
+ return ___neigh_create(tbl, pkey, dev, 0, exempt_from_gc, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
@@ -946,10 +904,10 @@ static void neigh_connect(struct neighbour *neigh)
static void neigh_periodic_work(struct work_struct *work)
{
struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
+ struct neigh_hash_table *nht;
+ struct hlist_node *tmp;
struct neighbour *n;
- struct neighbour __rcu **np;
unsigned int i;
- struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
@@ -974,10 +932,7 @@ static void neigh_periodic_work(struct work_struct *work)
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
- np = &nht->hash_buckets[i];
-
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock))) != NULL) {
+ neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i]) {
unsigned int state;
write_lock(&n->lock);
@@ -986,7 +941,7 @@ static void neigh_periodic_work(struct work_struct *work)
if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
(n->flags & NTF_EXT_LEARNED)) {
write_unlock(&n->lock);
- goto next_elt;
+ continue;
}
if (time_before(n->used, n->confirmed) &&
@@ -997,18 +952,14 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
!time_in_range_open(jiffies, n->used,
n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
- rcu_assign_pointer(*np,
- rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock)));
+ hlist_del_rcu(&n->hash);
+ hlist_del_rcu(&n->dev_list);
neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
}
write_unlock(&n->lock);
-
-next_elt:
- np = &n->next;
}
/*
* It's fine to release lock here, even if hash table
@@ -1769,7 +1720,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms)
static struct lock_class_key neigh_table_proxy_queue_class;
-static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
+static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
void neigh_table_init(int index, struct neigh_table *tbl)
{
@@ -1826,13 +1777,19 @@ void neigh_table_init(int index, struct neigh_table *tbl)
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
- neigh_tables[index] = tbl;
+ rcu_assign_pointer(neigh_tables[index], tbl);
}
EXPORT_SYMBOL(neigh_table_init);
+/*
+ * Only called from ndisc_cleanup(), which means this is dead code
+ * because the IPv6 module can no longer be unloaded.
+ */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
- neigh_tables[index] = NULL;
+ RCU_INIT_POINTER(neigh_tables[index], NULL);
+ synchronize_rcu();
+
/* It is not clean... Fix it to unload IPv6 module safely */
cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
@@ -1864,10 +1821,10 @@ static struct neigh_table *neigh_find_table(int family)
switch (family) {
case AF_INET:
- tbl = neigh_tables[NEIGH_ARP_TABLE];
+ tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ARP_TABLE]);
break;
case AF_INET6:
- tbl = neigh_tables[NEIGH_ND_TABLE];
+ tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ND_TABLE]);
break;
}
@@ -1949,7 +1906,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
NETLINK_CB(skb).portid, extack);
write_lock_bh(&tbl->lock);
neigh_release(neigh);
- neigh_remove_one(neigh, tbl);
+ neigh_remove_one(neigh);
write_unlock_bh(&tbl->lock);
out:
@@ -2293,6 +2250,7 @@ static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
[NDTPA_IFINDEX] = { .type = NLA_U32 },
[NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
+ [NDTPA_QUEUE_LENBYTES] = { .type = NLA_U32 },
[NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
[NDTPA_APP_PROBES] = { .type = NLA_U32 },
[NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
@@ -2331,7 +2289,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
ndtmsg = nlmsg_data(nlh);
for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
- tbl = neigh_tables[tidx];
+ tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
if (!tbl)
continue;
if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
@@ -2519,7 +2477,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
struct neigh_parms *p;
- tbl = neigh_tables[tidx];
+ tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
if (!tbl)
continue;
@@ -2674,7 +2632,7 @@ static bool neigh_master_filtered(struct net_device *dev, int master_idx)
if (!master_idx)
return false;
- master = dev ? netdev_master_upper_dev_get(dev) : NULL;
+ master = dev ? netdev_master_upper_dev_get_rcu(dev) : NULL;
/* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
* invalid value for ifindex to denote "no master".
@@ -2707,7 +2665,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
{
struct net *net = sock_net(skb->sk);
struct neighbour *n;
- int rc, h, s_h = cb->args[1];
+ int err = 0, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
struct neigh_hash_table *nht;
unsigned int flags = NLM_F_MULTI;
@@ -2715,37 +2673,31 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (filter->dev_idx || filter->master_idx)
flags |= NLM_F_DUMP_FILTERED;
- rcu_read_lock();
nht = rcu_dereference(tbl->nht);
for (h = s_h; h < (1 << nht->hash_shift); h++) {
if (h > s_h)
s_idx = 0;
- for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0;
- n != NULL;
- n = rcu_dereference(n->next)) {
+ idx = 0;
+ neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[h]) {
if (idx < s_idx || !net_eq(dev_net(n->dev), net))
goto next;
if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
neigh_master_filtered(n->dev, filter->master_idx))
goto next;
- if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- flags) < 0) {
- rc = -1;
+ err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, flags);
+ if (err < 0)
goto out;
- }
next:
idx++;
}
}
- rc = skb->len;
out:
- rcu_read_unlock();
cb->args[1] = h;
cb->args[2] = idx;
- return rc;
+ return err;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
@@ -2754,7 +2706,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
{
struct pneigh_entry *n;
struct net *net = sock_net(skb->sk);
- int rc, h, s_h = cb->args[3];
+ int err = 0, h, s_h = cb->args[3];
int idx, s_idx = idx = cb->args[4];
unsigned int flags = NLM_F_MULTI;
@@ -2772,11 +2724,11 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
neigh_master_filtered(n->dev, filter->master_idx))
goto next;
- if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, flags, tbl) < 0) {
+ err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, flags, tbl);
+ if (err < 0) {
read_unlock_bh(&tbl->lock);
- rc = -1;
goto out;
}
next:
@@ -2785,12 +2737,10 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
}
read_unlock_bh(&tbl->lock);
- rc = skb->len;
out:
cb->args[3] = h;
cb->args[4] = idx;
- return rc;
-
+ return err;
}
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
@@ -2875,11 +2825,13 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
if (err < 0 && cb->strict_check)
return err;
+ err = 0;
s_t = cb->args[0];
+ rcu_read_lock();
for (t = 0; t < NEIGH_NR_TABLES; t++) {
- tbl = neigh_tables[t];
+ tbl = rcu_dereference(neigh_tables[t]);
if (!tbl)
continue;
@@ -2895,9 +2847,10 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (err < 0)
break;
}
+ rcu_read_unlock();
cb->args[0] = t;
- return skb->len;
+ return err;
}
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
@@ -3094,9 +3047,7 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
struct neighbour *n;
- for (n = rcu_dereference(nht->hash_buckets[chain]);
- n != NULL;
- n = rcu_dereference(n->next))
+ neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
cb(n, cookie);
}
read_unlock_bh(&tbl->lock);
@@ -3108,29 +3059,25 @@ EXPORT_SYMBOL(neigh_for_each);
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *))
{
- int chain;
struct neigh_hash_table *nht;
+ int chain;
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
+ struct hlist_node *tmp;
struct neighbour *n;
- struct neighbour __rcu **np;
- np = &nht->hash_buckets[chain];
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock))) != NULL) {
+ neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[chain]) {
int release;
write_lock(&n->lock);
release = cb(n);
if (release) {
- rcu_assign_pointer(*np,
- rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock)));
+ hlist_del_rcu(&n->hash);
+ hlist_del_rcu(&n->dev_list);
neigh_mark_dead(n);
- } else
- np = &n->next;
+ }
write_unlock(&n->lock);
if (release)
neigh_cleanup_and_release(n);
@@ -3143,14 +3090,15 @@ int neigh_xmit(int index, struct net_device *dev,
const void *addr, struct sk_buff *skb)
{
int err = -EAFNOSUPPORT;
+
if (likely(index < NEIGH_NR_TABLES)) {
struct neigh_table *tbl;
struct neighbour *neigh;
- tbl = neigh_tables[index];
- if (!tbl)
- goto out;
rcu_read_lock();
+ tbl = rcu_dereference(neigh_tables[index]);
+ if (!tbl)
+ goto out_unlock;
if (index == NEIGH_ARP_TABLE) {
u32 key = *((u32 *)addr);
@@ -3166,6 +3114,7 @@ int neigh_xmit(int index, struct net_device *dev,
goto out_kfree_skb;
}
err = READ_ONCE(neigh->output)(neigh, skb);
+out_unlock:
rcu_read_unlock();
}
else if (index == NEIGH_LINK_TABLE) {
@@ -3185,43 +3134,53 @@ EXPORT_SYMBOL(neigh_xmit);
#ifdef CONFIG_PROC_FS
-static struct neighbour *neigh_get_first(struct seq_file *seq)
+static struct neighbour *neigh_get_valid(struct seq_file *seq,
+ struct neighbour *n,
+ loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
+
+ if (!net_eq(dev_net(n->dev), net))
+ return NULL;
+
+ if (state->neigh_sub_iter) {
+ loff_t fakep = 0;
+ void *v;
+
+ v = state->neigh_sub_iter(state, n, pos ? pos : &fakep);
+ if (!v)
+ return NULL;
+ if (pos)
+ return v;
+ }
+
+ if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ return n;
+
+ if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
+ return n;
+
+ return NULL;
+}
+
+static struct neighbour *neigh_get_first(struct seq_file *seq)
+{
+ struct neigh_seq_state *state = seq->private;
struct neigh_hash_table *nht = state->nht;
- struct neighbour *n = NULL;
- int bucket;
+ struct neighbour *n, *tmp;
state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
- for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
- n = rcu_dereference(nht->hash_buckets[bucket]);
-
- while (n) {
- if (!net_eq(dev_net(n->dev), net))
- goto next;
- if (state->neigh_sub_iter) {
- loff_t fakep = 0;
- void *v;
- v = state->neigh_sub_iter(state, n, &fakep);
- if (!v)
- goto next;
- }
- if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
- break;
- if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
- break;
-next:
- n = rcu_dereference(n->next);
+ while (++state->bucket < (1 << nht->hash_shift)) {
+ neigh_for_each_in_bucket(n, &nht->hash_heads[state->bucket]) {
+ tmp = neigh_get_valid(seq, n, NULL);
+ if (tmp)
+ return tmp;
}
-
- if (n)
- break;
}
- state->bucket = bucket;
- return n;
+ return NULL;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
@@ -3229,46 +3188,28 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
- struct net *net = seq_file_net(seq);
- struct neigh_hash_table *nht = state->nht;
+ struct neighbour *tmp;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
+
if (v)
return n;
}
- n = rcu_dereference(n->next);
- while (1) {
- while (n) {
- if (!net_eq(dev_net(n->dev), net))
- goto next;
- if (state->neigh_sub_iter) {
- void *v = state->neigh_sub_iter(state, n, pos);
- if (v)
- return n;
- goto next;
- }
- if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
- break;
-
- if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
- break;
-next:
- n = rcu_dereference(n->next);
+ hlist_for_each_entry_continue(n, hash) {
+ tmp = neigh_get_valid(seq, n, pos);
+ if (tmp) {
+ n = tmp;
+ goto out;
}
-
- if (n)
- break;
-
- if (++state->bucket >= (1 << nht->hash_shift))
- break;
-
- n = rcu_dereference(nht->hash_buckets[state->bucket]);
}
+ n = neigh_get_first(seq);
+out:
if (n && pos)
--(*pos);
+
return n;
}
@@ -3371,7 +3312,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
struct neigh_seq_state *state = seq->private;
state->tbl = tbl;
- state->bucket = 0;
+ state->bucket = -1;
state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
rcu_read_lock();
@@ -3507,10 +3448,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid)
{
- struct net *net = dev_net(n->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
+ struct net *net;
+ rcu_read_lock();
+ net = dev_net_rcu(n->dev);
skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
@@ -3523,10 +3466,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
- return;
+ goto out;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+out:
+ rcu_read_unlock();
}
void neigh_app_ns(struct neighbour *n)
@@ -3538,7 +3482,7 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
-static int proc_unres_qlen(struct ctl_table *ctl, int write,
+static int proc_unres_qlen(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int size, ret;
@@ -3573,7 +3517,7 @@ static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
rcu_read_unlock();
}
-static void neigh_proc_update(struct ctl_table *ctl, int write)
+static void neigh_proc_update(const struct ctl_table *ctl, int write)
{
struct net_device *dev = ctl->extra1;
struct neigh_parms *p = ctl->extra2;
@@ -3590,7 +3534,7 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
neigh_copy_dflt_parms(net, p, index);
}
-static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3605,7 +3549,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
return ret;
}
-static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table tmp = *ctl;
@@ -3621,7 +3565,7 @@ static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int wr
return ret;
}
-int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
+int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
@@ -3631,7 +3575,7 @@ int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
}
EXPORT_SYMBOL(neigh_proc_dointvec);
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
+int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
@@ -3641,7 +3585,7 @@ int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
-static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3651,7 +3595,7 @@ static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
return ret;
}
-int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
@@ -3661,7 +3605,7 @@ int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
-static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
+static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3671,7 +3615,7 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
-static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -3728,7 +3672,7 @@ static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
+ struct ctl_table neigh_vars[NEIGH_VAR_MAX];
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
@@ -3779,7 +3723,6 @@ static struct neigh_sysctl_table {
.extra2 = SYSCTL_INT_MAX,
.proc_handler = proc_dointvec_minmax,
},
- {},
},
};
@@ -3807,8 +3750,6 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
if (dev) {
dev_name_source = dev->name;
/* Terminate the table early */
- memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
- sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
} else {
struct neigh_table *tbl = p->tbl;
@@ -3885,16 +3826,18 @@ EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif /* CONFIG_SYSCTL */
+static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
+ {.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
+ {.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ {.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
+ {.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
+};
+
static int __init neigh_init(void)
{
- rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
-
- rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
- 0);
- rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
-
+ rtnl_register_many(neigh_rtnl_msg_handlers);
return 0;
}
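
A hedged sketch of the table-driven registration pattern used by rtnl_register_many(): handlers are described by an array of { message type, callbacks } entries and registered in one loop, rather than one registration call per message type. The types and registration function here are stand-ins, not the rtnetlink API:

#include <stdio.h>

struct demo_msg_handler {
	int msgtype;
	int (*doit)(void);
};

static int demo_newneigh(void) { return 0; }
static int demo_delneigh(void) { return 0; }

static const struct demo_msg_handler demo_handlers[] = {
	{ .msgtype = 28, .doit = demo_newneigh },	/* RTM_NEWNEIGH */
	{ .msgtype = 29, .doit = demo_delneigh },	/* RTM_DELNEIGH */
};

static void demo_register_many(const struct demo_msg_handler *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("registering msgtype %d\n", tbl[i].msgtype);
}

int main(void)
{
	demo_register_many(demo_handlers,
			   sizeof(demo_handlers) / sizeof(demo_handlers[0]));
	return 0;
}
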
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 09f7ed1a04e8..fa6d3969734a 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -3,52 +3,22 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
+#include <net/hotdata.h>
#include "dev.h"
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
-
-#define get_bucket(x) ((x) >> BUCKET_SPACE)
-#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
-#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
-
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
+static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
- struct net *net = seq_file_net(seq);
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct hlist_head *h;
- unsigned int count = 0, offset = get_offset(*pos);
- h = &net->dev_index_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, h, index_hlist) {
- if (++count == offset)
- return dev;
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
+ *pos = dev->ifindex;
+ return dev;
}
-
- return NULL;
-}
-
-static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
-{
- struct net_device *dev;
- unsigned int bucket;
-
- do {
- dev = dev_from_same_bucket(seq, pos);
- if (dev)
- return dev;
-
- bucket = get_bucket(*pos) + 1;
- *pos = set_bucket_offset(bucket, 1);
- } while (bucket < NETDEV_HASHENTRIES);
-
return NULL;
}
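
An illustrative sketch of the cursor style used by dev_seq_from_index(): instead of remembering a hash bucket plus an offset within it, the seq_file position stores the last ifindex and the next pass resumes at the first device with a greater-or-equal index, which stays stable when devices come and go between reads. The array below is a stand-in for the per-netns device table:

#include <stdio.h>

struct demo_dev {
	unsigned long ifindex;
	const char *name;
};

static const struct demo_dev demo_devs[] = {
	{ 1, "lo" }, { 2, "eth0" }, { 5, "eth1" },
};

static const struct demo_dev *demo_from_index(unsigned long *pos)
{
	for (size_t i = 0; i < sizeof(demo_devs) / sizeof(demo_devs[0]); i++) {
		if (demo_devs[i].ifindex >= *pos) {
			*pos = demo_devs[i].ifindex;	/* stable resume point */
			return &demo_devs[i];
		}
	}
	return NULL;
}

int main(void)
{
	unsigned long pos = 0;
	const struct demo_dev *dev;

	while ((dev = demo_from_index(&pos)) != NULL) {
		printf("%lu %s\n", pos, dev->name);
		pos++;				/* next: strictly greater index */
	}
	return 0;
}
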
-/*
- * This is invoked by the /proc filesystem handler to display a device
- * in detail.
- */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
@@ -56,16 +26,13 @@ static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
if (!*pos)
return SEQ_START_TOKEN;
- if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
- return NULL;
-
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
@@ -177,7 +144,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x\n",
- sd->processed, sd->dropped, sd->time_squeeze, 0,
+ sd->processed, atomic_read(&sd->dropped),
+ sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
0, /* was cpu_collision */
sd->received_rps, flow_limit_count,
@@ -217,7 +185,7 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}
- list_for_each_entry_rcu(pt, &ptype_all, list) {
+ list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
if (i == pos)
return pt;
++i;
@@ -265,13 +233,13 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}
- nxt = ptype_all.next;
+ nxt = net_hotdata.ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
- if (nxt != &ptype_all)
+ if (nxt != &net_hotdata.ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a09d507c5b03..07cb99b114bd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
@@ -31,13 +32,14 @@
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
+static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
-/* Caller holds RTNL or dev_base_lock */
+/* Caller holds RTNL, netdev->lock or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
- return dev->reg_state <= NETREG_REGISTERED;
+ return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
@@ -48,10 +50,10 @@ static ssize_t netdev_show(const struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = (*format)(ndev, buf);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -60,7 +62,7 @@ static ssize_t netdev_show(const struct device *dev,
#define NETDEVICE_SHOW(field, format_string) \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{ \
- return sysfs_emit(buf, format_string, dev->field); \
+ return sysfs_emit(buf, format_string, READ_ONCE(dev->field)); \
} \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -106,6 +108,36 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
return ret;
}
+/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */
+static ssize_t
+netdev_lock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len,
+ int (*set)(struct net_device *, unsigned long))
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct net *net = dev_net(netdev);
+ unsigned long new;
+ int ret;
+
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret)
+ return ret;
+
+ netdev_lock(netdev);
+
+ if (dev_isalive(netdev)) {
+ ret = (*set)(netdev, new);
+ if (ret == 0)
+ ret = len;
+ }
+ netdev_unlock(netdev);
+
+ return ret;
+}
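
A sketch of the store-helper pattern above: parse the user string first, then take the per-object lock, apply the setter only while the object is still alive, and report the consumed length on success. The mutex and "alive" flag are stand-ins for netdev_lock() and dev_isalive(); nothing here is the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct demo_netdev {
	pthread_mutex_t lock;
	int alive;
	unsigned long gro_flush_timeout;
};

static int demo_set_timeout(struct demo_netdev *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static long demo_lock_store(struct demo_netdev *dev, const char *buf, size_t len,
			    int (*set)(struct demo_netdev *, unsigned long))
{
	unsigned long new;
	char *end;
	long ret;

	new = strtoul(buf, &end, 0);
	if (end == buf)
		return -1;			/* not a number */

	pthread_mutex_lock(&dev->lock);
	ret = -2;				/* object going away */
	if (dev->alive) {
		ret = set(dev, new);
		if (ret == 0)
			ret = (long)len;	/* consumed the whole write */
	}
	pthread_mutex_unlock(&dev->lock);

	return ret;
}

int main(void)
{
	struct demo_netdev dev = { .alive = 1 };
	const char *buf = "20000";

	pthread_mutex_init(&dev.lock, NULL);
	printf("ret=%ld timeout=%lu\n",
	       demo_lock_store(&dev, buf, strlen(buf), demo_set_timeout),
	       dev.gro_flush_timeout);
	return 0;
}
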
+
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
@@ -125,7 +157,7 @@ static DEVICE_ATTR_RO(iflink);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
- return sysfs_emit(buf, fmt_dec, dev->name_assign_type);
+ return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}
static ssize_t name_assign_type_show(struct device *dev,
@@ -135,24 +167,28 @@ static ssize_t name_assign_type_show(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- if (ndev->name_assign_type != NET_NAME_UNKNOWN)
+ if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
ret = netdev_show(dev, attr, buf, format_name_assign_type);
return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
-/* use same locking rules as GIFHWADDR ioctl's */
+/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ down_read(&dev_addr_sem);
+
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
+
+ up_read(&dev_addr_sem);
return ret;
}
static DEVICE_ATTR_RO(address);
@@ -161,10 +197,13 @@ static ssize_t broadcast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = to_net_dev(dev);
+ int ret = -EINVAL;
+ rcu_read_lock();
if (dev_isalive(ndev))
- return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
- return -EINVAL;
+ ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
+ rcu_read_unlock();
+ return ret;
}
static DEVICE_ATTR_RO(broadcast);
@@ -227,7 +266,7 @@ static ssize_t speed_show(struct device *dev,
if (!rtnl_trylock())
return restart_syscall();
- if (netif_running(netdev) && netif_device_present(netdev)) {
+ if (netif_running(netdev)) {
struct ethtool_link_ksettings cmd;
if (!__ethtool_get_link_ksettings(netdev, &cmd))
@@ -318,11 +357,9 @@ static ssize_t operstate_show(struct device *dev,
const struct net_device *netdev = to_net_dev(dev);
unsigned char operstate;
- read_lock(&dev_base_lock);
- operstate = netdev->operstate;
+ operstate = READ_ONCE(netdev->operstate);
if (!netif_running(netdev))
operstate = IF_OPER_DOWN;
- read_unlock(&dev_base_lock);
if (operstate >= ARRAY_SIZE(operstates))
return -EINVAL; /* should not happen */
@@ -402,7 +439,7 @@ NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
- WRITE_ONCE(dev->gro_flush_timeout, val);
+ netdev_set_gro_flush_timeout(dev, val);
return 0;
}
@@ -413,13 +450,16 @@ static ssize_t gro_flush_timeout_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
+ return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
- WRITE_ONCE(dev->napi_defer_hard_irqs, val);
+ if (val > S32_MAX)
+ return -ERANGE;
+
+ netdev_set_defer_hard_irqs(dev, (u32)val);
return 0;
}
@@ -430,9 +470,10 @@ static ssize_t napi_defer_hard_irqs_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
+ return netdev_lock_store(dev, attr, buf, len,
+ change_napi_defer_hard_irqs);
}
-NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);
+NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
@@ -599,13 +640,13 @@ static ssize_t threaded_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
if (dev_isalive(netdev))
- ret = sysfs_emit(buf, fmt_dec, netdev->threaded);
+ ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded));
+
+ rcu_read_unlock();
- rtnl_unlock();
return ret;
}
@@ -628,7 +669,7 @@ static ssize_t threaded_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- return netdev_store(dev, attr, buf, len, modify_napi_threaded);
+ return netdev_lock_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);
@@ -680,14 +721,14 @@ static ssize_t netstat_show(const struct device *d,
WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
offset % sizeof(u64) != 0);
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(dev)) {
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1050,7 +1091,7 @@ static const void *rx_queue_namespace(const struct kobject *kobj)
struct device *dev = &queue->dev->dev;
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -1409,6 +1450,65 @@ static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
= __ATTR(hold_time, 0644,
bql_show_hold_time, bql_set_hold_time);
+static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
+}
+
+static ssize_t bql_set_stall_thrs(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ struct dql *dql = &queue->dql;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ value = msecs_to_jiffies(value);
+ if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
+ return -ERANGE;
+
+ if (!dql->stall_thrs && value)
+ dql->last_reap = jiffies;
+ /* Force last_reap to be live */
+ smp_wmb();
+ dql->stall_thrs = value;
+
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
+ __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);
+
+static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
+}
+
+static ssize_t bql_set_stall_max(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ WRITE_ONCE(queue->dql.stall_max, 0);
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
+ __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);
+
+static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sysfs_emit(buf, "%lu\n", dql->stall_cnt);
+}
+
+static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
+ __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);
+
static ssize_t bql_show_inflight(struct netdev_queue *queue,
char *buf)
{
@@ -1447,6 +1547,9 @@ static struct attribute *dql_attrs[] __ro_after_init = {
&bql_limit_min_attribute.attr,
&bql_hold_time_attribute.attr,
&bql_inflight_attribute.attr,
+ &bql_stall_thrs_attribute.attr,
+ &bql_stall_cnt_attribute.attr,
+ &bql_stall_max_attribute.attr,
NULL
};
@@ -1454,6 +1557,9 @@ static const struct attribute_group dql_group = {
.name = "byte_queue_limits",
.attrs = dql_attrs,
};
+#else
+/* Fake declaration, all the code using it should be dead */
+static const struct attribute_group dql_group = {};
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
@@ -1669,7 +1775,7 @@ static const void *netdev_queue_namespace(const struct kobject *kobj)
struct device *dev = &queue->dev->dev;
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -1691,6 +1797,14 @@ static const struct kobj_type netdev_queue_ktype = {
.get_ownership = netdev_queue_get_ownership,
};
+static bool netdev_uses_bql(const struct net_device *dev)
+{
+ if (dev->lltx || (dev->priv_flags & IFF_NO_QUEUE))
+ return false;
+
+ return IS_ENABLED(CONFIG_BQL);
+}
+
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_queue *queue = dev->_tx + index;
@@ -1708,11 +1822,11 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
if (error)
goto err;
-#ifdef CONFIG_BQL
- error = sysfs_create_group(kobj, &dql_group);
- if (error)
- goto err;
-#endif
+ if (netdev_uses_bql(dev)) {
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error)
+ goto err;
+ }
kobject_uevent(kobj, KOBJ_ADD);
return 0;
@@ -1733,9 +1847,9 @@ static int tx_queue_change_owner(struct net_device *ndev, int index,
if (error)
return error;
-#ifdef CONFIG_BQL
- error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
-#endif
+ if (netdev_uses_bql(ndev))
+ error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
+
return error;
}
#endif /* CONFIG_SYSFS */
@@ -1767,9 +1881,10 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
if (!refcount_read(&dev_net(dev)->ns.count))
queue->kobj.uevent_suppress = 1;
-#ifdef CONFIG_BQL
- sysfs_remove_group(&queue->kobj, &dql_group);
-#endif
+
+ if (netdev_uses_bql(dev))
+ sysfs_remove_group(&queue->kobj, &dql_group);
+
kobject_put(&queue->kobj);
}
@@ -1947,7 +2062,7 @@ static void netdev_release(struct device *d)
* device is dead and about to be freed.
*/
kfree(rcu_access_pointer(dev->ifalias));
- netdev_freemem(dev);
+ kvfree(dev);
}
static const void *net_namespace(const struct device *d)
@@ -1965,7 +2080,7 @@ static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
net_ns_get_ownership(net, uid, gid);
}
-static struct class net_class __ro_after_init = {
+static const struct class net_class = {
.name = "net",
.dev_release = netdev_release,
.dev_groups = net_class_groups,
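
Note on the BQL hunks above: the new stall_thrs, stall_max and stall_cnt attributes sit next to the existing byte_queue_limits files, so userspace can poll stall detection per TX queue. A minimal userspace sketch of reading them, assuming an eth0 device and queue tx-0 (both names are illustrative, not from this patch):

#include <stdio.h>

/* Illustrative only: device and queue names are assumptions; the attribute
 * names come from the hunk above (stall_thrs/stall_max are in milliseconds,
 * stall_cnt is a plain counter).
 */
static unsigned long read_bql_stat(const char *name)
{
	char path[256];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/net/eth0/queues/tx-0/byte_queue_limits/%s", name);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	printf("stall_thrs=%lums stall_max=%lums stall_cnt=%lu\n",
	       read_bql_stat("stall_thrs"), read_bql_stat("stall_max"),
	       read_bql_stat("stall_cnt"));
	return 0;
}
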
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 6aef976bc1da..f2fa34b1d78d 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -23,7 +23,7 @@
#include <linux/net_dropmon.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/bitops.h>
#define CREATE_TRACE_POINTS
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 72799533426b..4303f2a49262 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -56,7 +56,6 @@ static bool init_net_initialized;
* outside.
*/
DECLARE_RWSEM(pernet_ops_rwsem);
-EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
#define MIN_PERNET_OPS_ID \
((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
@@ -69,12 +68,15 @@ DEFINE_COOKIE(net_cookie);
static struct net_generic *net_alloc_generic(void)
{
+ unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
+ unsigned int generic_size;
struct net_generic *ng;
- unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+ generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
ng = kzalloc(generic_size, GFP_KERNEL);
if (ng)
- ng->s.len = max_gen_ptrs;
+ ng->s.len = gen_ptrs;
return ng;
}
@@ -122,7 +124,7 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
int err = -ENOMEM;
void *data = NULL;
- if (ops->id && ops->size) {
+ if (ops->id) {
data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
goto out;
@@ -137,7 +139,7 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
if (!err)
return 0;
- if (ops->id && ops->size) {
+ if (ops->id) {
ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&pernet_ops_rwsem));
ng->ptr[*ops->id] = NULL;
@@ -179,7 +181,8 @@ static void ops_free_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
- if (ops->size && ops->id) {
+
+ if (ops->id) {
list_for_each_entry(net, net_exit_list, exit_list)
kfree(net_generic(net, *ops->id));
}
@@ -305,35 +308,55 @@ struct net *get_net_ns_by_id(const struct net *net, int id)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);
+static __net_init void preinit_net_sysctl(struct net *net)
+{
+ net->core.sysctl_somaxconn = SOMAXCONN;
+ /* Limits per socket sk_omem_alloc usage.
+ * TCP zerocopy regular usage needs 128 KB.
+ */
+ net->core.sysctl_optmem_max = 128 * 1024;
+ net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
+ net->core.sysctl_tstamp_allow_data = 1;
+}
+
/* init code that must occur even if setup_net() is not called. */
-static __net_init void preinit_net(struct net *net)
+static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
+ refcount_set(&net->passive, 1);
+ refcount_set(&net->ns.count, 1);
+ ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
+
+ get_random_bytes(&net->hash_mix, sizeof(u32));
+ net->dev_base_seq = 1;
+ net->user_ns = user_ns;
+
+ idr_init(&net->netns_ids);
+ spin_lock_init(&net->nsid_lock);
+ mutex_init(&net->ipv4.ra_mutex);
+
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+ mutex_init(&net->rtnl_mutex);
+ lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
+#endif
+
+ preinit_net_sysctl(net);
}
/*
* setup_net runs the initializers for the network namespace object.
*/
-static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+static __net_init int setup_net(struct net *net)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
- int error = 0;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
+ int error = 0;
- refcount_set(&net->ns.count, 1);
- ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
-
- refcount_set(&net->passive, 1);
- get_random_bytes(&net->hash_mix, sizeof(u32));
preempt_disable();
net->net_cookie = gen_cookie_next(&net_cookie);
preempt_enable();
- net->dev_base_seq = 1;
- net->user_ns = user_ns;
- idr_init(&net->netns_ids);
- spin_lock_init(&net->nsid_lock);
- mutex_init(&net->ipv4.ra_mutex);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
@@ -358,6 +381,15 @@ out_undo:
synchronize_rcu();
ops = saved_ops;
+ rtnl_lock();
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+
+ ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
@@ -369,32 +401,6 @@ out_undo:
goto out;
}
-static int __net_init net_defaults_init_net(struct net *net)
-{
- net->core.sysctl_somaxconn = SOMAXCONN;
- /* Limits per socket sk_omem_alloc usage.
- * TCP zerocopy regular usage needs 128 KB.
- */
- net->core.sysctl_optmem_max = 128 * 1024;
- net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
-
- return 0;
-}
-
-static struct pernet_operations net_defaults_ops = {
- .init = net_defaults_init_net,
-};
-
-static __init int net_defaults_init(void)
-{
- if (register_pernet_subsys(&net_defaults_ops))
- panic("Cannot initialize net default settings");
-
- return 0;
-}
-
-core_initcall(net_defaults_init);
-
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
@@ -443,7 +449,22 @@ out_free:
goto out;
}
-static void net_free(struct net *net)
+static LLIST_HEAD(defer_free_list);
+
+static void net_complete_free(void)
+{
+ struct llist_node *kill_list;
+ struct net *net, *next;
+
+ /* Get the list of namespaces to free from last round. */
+ kill_list = llist_del_all(&defer_free_list);
+
+ llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
+ kmem_cache_free(net_cachep, net);
+
+}
+
+void net_passive_dec(struct net *net)
{
if (refcount_dec_and_test(&net->passive)) {
kfree(rcu_access_pointer(net->gen));
@@ -451,7 +472,8 @@ static void net_free(struct net *net)
/* There should not be any trackers left there. */
ref_tracker_dir_exit(&net->notrefcnt_tracker);
- kmem_cache_free(net_cachep, net);
+ /* Wait for an extra rcu_barrier() before final free. */
+ llist_add(&net->defer_free_list, &defer_free_list);
}
}
@@ -460,7 +482,7 @@ void net_drop_ns(void *p)
struct net *net = (struct net *)p;
if (net)
- net_free(net);
+ net_passive_dec(net);
}
struct net *copy_net_ns(unsigned long flags,
@@ -483,8 +505,7 @@ struct net *copy_net_ns(unsigned long flags,
goto dec_ucounts;
}
- preinit_net(net);
- refcount_set(&net->passive, 1);
+ preinit_net(net, user_ns);
net->ucounts = ucounts;
get_user_ns(user_ns);
@@ -492,7 +513,7 @@ struct net *copy_net_ns(unsigned long flags,
if (rv < 0)
goto put_userns;
- rv = setup_net(net, user_ns);
+ rv = setup_net(net);
up_read(&pernet_ops_rwsem);
@@ -502,7 +523,7 @@ put_userns:
key_remove_domain(net->key_domain);
#endif
put_user_ns(user_ns);
- net_free(net);
+ net_passive_dec(net);
dec_ucounts:
dec_net_namespaces(ucounts);
return ERR_PTR(rv);
@@ -567,12 +588,17 @@ static void unhash_nsid(struct net *net, struct net *last)
static LLIST_HEAD(cleanup_list);
+struct task_struct *cleanup_net_task;
+
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
+
+ cleanup_net_task = current;
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
@@ -611,7 +637,15 @@ static void cleanup_net(struct work_struct *work)
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
+
+ rtnl_lock();
+ list_for_each_entry_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
@@ -628,6 +662,8 @@ static void cleanup_net(struct work_struct *work)
*/
rcu_barrier();
+ net_complete_free();
+
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
@@ -636,8 +672,9 @@ static void cleanup_net(struct work_struct *work)
key_remove_domain(net->key_domain);
#endif
put_user_ns(net->user_ns);
- net_free(net);
+ net_passive_dec(net);
}
+ cleanup_net_task = NULL;
}
/**
@@ -671,30 +708,33 @@ EXPORT_SYMBOL_GPL(__put_net);
* get_net_ns - increment the refcount of the network namespace
* @ns: common namespace (net)
*
- * Returns the net's common namespace.
+ * Returns the net's common namespace or ERR_PTR() if ref is zero.
*/
struct ns_common *get_net_ns(struct ns_common *ns)
{
- return &get_net(container_of(ns, struct net, ns))->ns;
+ struct net *net;
+
+ net = maybe_get_net(container_of(ns, struct net, ns));
+ if (net)
+ return &net->ns;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);
struct net *get_net_ns_by_fd(int fd)
{
- struct fd f = fdget(fd);
- struct net *net = ERR_PTR(-EINVAL);
+ CLASS(fd, f)(fd);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (proc_ns_file(f.file)) {
- struct ns_common *ns = get_proc_ns(file_inode(f.file));
+ if (proc_ns_file(fd_file(f))) {
+ struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops == &netns_operations)
- net = get_net(container_of(ns, struct net, ns));
+ return get_net(container_of(ns, struct net, ns));
}
- fdput(f);
- return net;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif
@@ -1071,7 +1111,7 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
end:
if (net_cb.fillargs.add_ref)
put_net(net_cb.tgt_net);
- return err < 0 ? err : skb->len;
+ return err;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
@@ -1140,13 +1180,23 @@ static void __init netns_ipv4_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_tcp_early_demux);
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
+ sysctl_tcp_l3mdev_accept);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_tcp_reordering);
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_tcp_rmem);
- CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
+ CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif
+static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
+ .dumpit = rtnl_net_dumpid,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
+};
+
void __init net_ns_init(void)
{
struct net_generic *ng;
@@ -1172,9 +1222,10 @@ void __init net_ns_init(void)
#ifdef CONFIG_KEYS
init_net.key_domain = &init_net_key_domain;
#endif
+ preinit_net(&init_net, &init_user_ns);
+
down_write(&pernet_ops_rwsem);
- preinit_net(&init_net);
- if (setup_net(&init_net, &init_user_ns))
+ if (setup_net(&init_net))
panic("Could not setup the initial network namespace");
init_net_initialized = true;
@@ -1183,17 +1234,24 @@ void __init net_ns_init(void)
if (register_pernet_subsys(&net_ns_ops))
panic("Could not register network namespace subsystems");
- rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
- RTNL_FLAG_DOIT_UNLOCKED);
+ rtnl_register_many(net_ns_rtnl_msg_handlers);
}
static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
ops_pre_exit_list(ops, net_exit_list);
synchronize_rcu();
+
+ if (ops->exit_batch_rtnl) {
+ LIST_HEAD(dev_kill_list);
+
+ rtnl_lock();
+ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+ }
ops_exit_list(ops, net_exit_list);
+
ops_free_list(ops, net_exit_list);
}
@@ -1206,7 +1264,7 @@ static int __register_pernet_operations(struct list_head *list,
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
- if (ops->init || (ops->id && ops->size)) {
+ if (ops->init || ops->id) {
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
@@ -1272,13 +1330,20 @@ static int register_pernet_operations(struct list_head *list,
{
int error;
+ if (WARN_ON(!!ops->id ^ !!ops->size))
+ return -EINVAL;
+
if (ops->id) {
error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
GFP_KERNEL);
if (error < 0)
return error;
*ops->id = error;
- max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
+ /* This does not require READ_ONCE as writers already hold
+ * pernet_ops_rwsem. But WRITE_ONCE is needed to protect
+ * net_alloc_generic.
+ */
+ WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
}
error = __register_pernet_operations(list, ops);
if (error) {
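
Note on the hunks above: the new ->exit_batch_rtnl hook lets a pernet user queue its netdevices on dev_kill_list so that setup_net()'s error path, cleanup_net() and free_exit_list() can unregister them in one batch under a single rtnl_lock(). A minimal sketch of a hypothetical user; the foo_* names (including foo_link_ops) are illustrative and not part of this patch:

static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
					   struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct net_device *dev, *aux;

		/* Queue our devices; the core calls
		 * unregister_netdevice_many(dev_kill_list) afterwards.
		 */
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &foo_link_ops)
				unregister_netdevice_queue(dev, dev_kill_list);
	}
}

static struct pernet_operations foo_net_ops = {
	.exit_batch_rtnl = foo_exit_batch_rtnl,
};
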
diff --git a/net/core/gso_test.c b/net/core/net_test.c
index 4c2e77bd12f4..9c3a590865d2 100644
--- a/net/core/gso_test.c
+++ b/net/core/net_test.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <kunit/test.h>
+
+/* GSO */
+
#include <linux/skbuff.h>
static const char hdr[] = "abcdefgh";
@@ -225,7 +228,7 @@ static void gso_test_func(struct kunit *test)
segs = skb_segment(skb, features);
if (IS_ERR(segs)) {
- KUNIT_FAIL(test, "segs error %lld", PTR_ERR(segs));
+ KUNIT_FAIL(test, "segs error %pe", segs);
goto free_gso_skb;
} else if (!segs) {
KUNIT_FAIL(test, "no segments");
@@ -258,17 +261,127 @@ free_gso_skb:
consume_skb(skb);
}
-static struct kunit_case gso_test_cases[] = {
- KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
- {}
+/* IP tunnel flags */
+
+#include <net/ip_tunnels.h>
+
+struct ip_tunnel_flags_test {
+ const char *name;
+
+ const u16 *src_bits;
+ const u16 *exp_bits;
+ u8 src_num;
+ u8 exp_num;
+
+ __be16 exp_val;
+ bool exp_comp;
+};
+
+#define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) { \
+ .name = (n), \
+ .src_bits = (src), \
+ .src_num = ARRAY_SIZE(src), \
+ .exp_comp = (comp), \
+ .exp_val = (eval), \
+ .exp_bits = (exp), \
+ .exp_num = ARRAY_SIZE(exp), \
+}
+
+/* These are __be16-compatible and can be compared as is */
+static const u16 ip_tunnel_flags_1[] = {
+ IP_TUNNEL_KEY_BIT,
+ IP_TUNNEL_STRICT_BIT,
+ IP_TUNNEL_ERSPAN_OPT_BIT,
+};
+
+/* Due to the previous flags design limitation, setting either
+ * ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT``
+ * (on Little) also sets VTI/ISATAP bit. In the bitmap implementation, they
+ * correspond to ``BIT(16)``, which is bigger than ``U16_MAX``, but still is
+ * backward-compatible.
+ */
+#ifdef __LITTLE_ENDIAN
+#define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_DONT_FRAGMENT_BIT
+#else
+#define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_CSUM_BIT
+#endif
+
+static const u16 ip_tunnel_flags_2_src[] = {
+ IP_TUNNEL_CONFLICT_BIT,
+};
+
+static const u16 ip_tunnel_flags_2_exp[] = {
+ IP_TUNNEL_CONFLICT_BIT,
+ IP_TUNNEL_SIT_ISATAP_BIT,
};
-static struct kunit_suite gso_test_suite = {
- .name = "net_core_gso",
- .test_cases = gso_test_cases,
+/* Bits 17 and higher are not compatible with __be16 flags */
+static const u16 ip_tunnel_flags_3_src[] = {
+ IP_TUNNEL_VXLAN_OPT_BIT,
+ 17,
+ 18,
+ 20,
};
-kunit_test_suite(gso_test_suite);
+static const u16 ip_tunnel_flags_3_exp[] = {
+ IP_TUNNEL_VXLAN_OPT_BIT,
+};
+
+static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = {
+ IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true,
+ cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) |
+ BIT(IP_TUNNEL_STRICT_BIT) |
+ BIT(IP_TUNNEL_ERSPAN_OPT_BIT)),
+ ip_tunnel_flags_1),
+ IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true,
+ VTI_ISVTI, ip_tunnel_flags_2_exp),
+ IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false,
+ cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)),
+ ip_tunnel_flags_3_exp),
+};
+
+static void
+ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test,
+ ip_tunnel_flags_test_case_to_desc);
+
+static void ip_tunnel_flags_test_run(struct kunit *test)
+{
+ const struct ip_tunnel_flags_test *t = test->param_value;
+ IP_TUNNEL_DECLARE_FLAGS(src) = { };
+ IP_TUNNEL_DECLARE_FLAGS(exp) = { };
+ IP_TUNNEL_DECLARE_FLAGS(out);
+
+ for (u32 j = 0; j < t->src_num; j++)
+ __set_bit(t->src_bits[j], src);
+ for (u32 j = 0; j < t->exp_num; j++)
+ __set_bit(t->exp_bits[j], exp);
+
+ KUNIT_ASSERT_EQ(test, t->exp_comp,
+ ip_tunnel_flags_is_be16_compat(src));
+ KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val,
+ (__force u16)ip_tunnel_flags_to_be16(src));
+
+ ip_tunnel_flags_from_be16(out, t->exp_val);
+ KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out));
+}
+
+static struct kunit_case net_test_cases[] = {
+ KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
+ KUNIT_CASE_PARAM(ip_tunnel_flags_test_run,
+ ip_tunnel_flags_test_gen_params),
+ { },
+};
+
+static struct kunit_suite net_test_suite = {
+ .name = "net_core",
+ .test_cases = net_test_cases,
+};
+kunit_test_suite(net_test_suite);
+MODULE_DESCRIPTION("KUnit tests for networking core");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("KUnit tests for segmentation offload");
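
For context on the new test cases above: they exercise the conversion helpers between the legacy __be16 tunnel flag word and the extended bitmap representation. A minimal sketch of the round trip, using only the helpers the test itself relies on:

/* Sketch only: mirrors the checks in ip_tunnel_flags_test_run(). */
static void example_tunnel_flags_roundtrip(void)
{
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	__be16 legacy;

	__set_bit(IP_TUNNEL_KEY_BIT, flags);
	__set_bit(IP_TUNNEL_STRICT_BIT, flags);

	/* Both bits fit in the low 16, so the legacy word is lossless. */
	if (ip_tunnel_flags_is_be16_compat(flags)) {
		legacy = ip_tunnel_flags_to_be16(flags);
		ip_tunnel_flags_from_be16(flags, legacy);
	}
}
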
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index be7f2ebd61b2..996ac6a449eb 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -9,16 +9,21 @@
#include "netdev-genl-gen.h"
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Integer value ranges */
static const struct netlink_range_validation netdev_a_page_pool_id_range = {
.min = 1ULL,
- .max = 4294967295ULL,
+ .max = U32_MAX,
};
static const struct netlink_range_validation netdev_a_page_pool_ifindex_range = {
.min = 1ULL,
- .max = 2147483647ULL,
+ .max = S32_MAX,
+};
+
+static const struct netlink_range_validation netdev_a_napi_defer_hard_irqs_range = {
+ .max = S32_MAX,
};
/* Common nested types */
@@ -27,6 +32,11 @@ const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFIND
[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
};
+const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1] = {
+ [NETDEV_A_QUEUE_ID] = { .type = NLA_U32, },
+ [NETDEV_A_QUEUE_TYPE] = NLA_POLICY_MAX(NLA_U32, 1),
+};
+
/* NETDEV_CMD_DEV_GET - do */
static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
[NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -68,6 +78,27 @@ static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFIN
[NETDEV_A_NAPI_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
};
+/* NETDEV_CMD_QSTATS_GET - dump */
+static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE + 1] = {
+ [NETDEV_A_QSTATS_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
+};
+
+/* NETDEV_CMD_BIND_RX - do */
+static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1] = {
+ [NETDEV_A_DMABUF_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_DMABUF_FD] = { .type = NLA_U32, },
+ [NETDEV_A_DMABUF_QUEUES] = NLA_POLICY_NESTED(netdev_queue_id_nl_policy),
+};
+
+/* NETDEV_CMD_NAPI_SET - do */
+static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT + 1] = {
+ [NETDEV_A_NAPI_ID] = { .type = NLA_U32, },
+ [NETDEV_A_NAPI_DEFER_HARD_IRQS] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_napi_defer_hard_irqs_range),
+ [NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT, },
+ [NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT] = { .type = NLA_UINT, },
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -138,6 +169,27 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_NAPI_IFINDEX,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_QSTATS_GET,
+ .dumpit = netdev_nl_qstats_get_dumpit,
+ .policy = netdev_qstats_get_nl_policy,
+ .maxattr = NETDEV_A_QSTATS_SCOPE,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = NETDEV_CMD_BIND_RX,
+ .doit = netdev_nl_bind_rx_doit,
+ .policy = netdev_bind_rx_nl_policy,
+ .maxattr = NETDEV_A_DMABUF_FD,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NETDEV_CMD_NAPI_SET,
+ .doit = netdev_nl_napi_set_doit,
+ .policy = netdev_napi_set_nl_policy,
+ .maxattr = NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
@@ -145,6 +197,16 @@ static const struct genl_multicast_group netdev_nl_mcgrps[] = {
[NETDEV_NLGRP_PAGE_POOL] = { "page-pool", },
};
+static void __netdev_nl_sock_priv_init(void *priv)
+{
+ netdev_nl_sock_priv_init(priv);
+}
+
+static void __netdev_nl_sock_priv_destroy(void *priv)
+{
+ netdev_nl_sock_priv_destroy(priv);
+}
+
struct genl_family netdev_nl_family __ro_after_init = {
.name = NETDEV_FAMILY_NAME,
.version = NETDEV_FAMILY_VERSION,
@@ -155,4 +217,7 @@ struct genl_family netdev_nl_family __ro_after_init = {
.n_split_ops = ARRAY_SIZE(netdev_nl_ops),
.mcgrps = netdev_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
+ .sock_priv_size = sizeof(struct list_head),
+ .sock_priv_init = __netdev_nl_sock_priv_init,
+ .sock_priv_destroy = __netdev_nl_sock_priv_destroy,
};
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index a47f2bcbe4fa..e09dd7539ff2 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -10,9 +10,11 @@
#include <net/genetlink.h>
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Common nested types */
extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1];
+extern const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1];
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
@@ -28,6 +30,10 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info);
enum {
NETDEV_NLGRP_MGMT,
@@ -36,4 +42,7 @@ enum {
extern struct genl_family netdev_nl_family;
+void netdev_nl_sock_priv_init(struct list_head *priv);
+void netdev_nl_sock_priv_destroy(struct list_head *priv);
+
#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd98936da3ae..715f85c6b62e 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -3,15 +3,17 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
+#include <net/busy_poll.h>
#include <net/net_namespace.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
-#include <net/netdev_rx_queue.h>
-#include <net/busy_poll.h>
-#include "netdev-genl-gen.h"
#include "dev.h"
+#include "devmem.h"
+#include "netdev-genl-gen.h"
struct netdev_nl_dump_ctx {
unsigned long ifindex;
@@ -22,7 +24,7 @@ struct netdev_nl_dump_ctx {
static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
- NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);
+ NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);
return (struct netdev_nl_dump_ctx *)cb->ctx;
}
@@ -58,22 +60,22 @@ XDP_METADATA_KFUNC_xxx
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
xdp_rx_meta, NETDEV_A_DEV_PAD) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
- xsk_features, NETDEV_A_DEV_PAD)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ xsk_features, NETDEV_A_DEV_PAD))
+ goto err_cancel_msg;
if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
- netdev->xdp_zc_max_segs)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ netdev->xdp_zc_max_segs))
+ goto err_cancel_msg;
}
genlmsg_end(rsp, hdr);
return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
}
static void
@@ -152,30 +154,27 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
const struct genl_info *info)
{
+ unsigned long irq_suspend_timeout;
+ unsigned long gro_flush_timeout;
+ u32 napi_defer_hard_irqs;
void *hdr;
pid_t pid;
- if (WARN_ON_ONCE(!napi->dev))
- return -EINVAL;
- if (!(napi->dev->flags & IFF_UP))
+ if (!napi->dev->up)
return 0;
hdr = genlmsg_iput(rsp, info);
if (!hdr)
return -EMSGSIZE;
- if (napi->napi_id >= MIN_NAPI_ID &&
- nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+ if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
goto nla_put_failure;
if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
@@ -190,6 +189,21 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
goto nla_put_failure;
}
+ napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
+ if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
+ napi_defer_hard_irqs))
+ goto nla_put_failure;
+
+ irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
+ if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
+ irq_suspend_timeout))
+ goto nla_put_failure;
+
+ gro_flush_timeout = napi_get_gro_flush_timeout(napi);
+ if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
+ gro_flush_timeout))
+ goto nla_put_failure;
+
genlmsg_end(rsp, hdr);
return 0;
@@ -215,18 +229,21 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
if (!rsp)
return -ENOMEM;
- rtnl_lock();
-
- napi = napi_by_id(napi_id);
- if (napi)
+ napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
+ if (napi) {
err = netdev_nl_napi_fill_one(rsp, napi, info);
- else
- err = -EINVAL;
-
- rtnl_unlock();
+ netdev_unlock(napi->dev);
+ } else {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
+ err = -ENOENT;
+ }
- if (err)
+ if (err) {
goto err_free_msg;
+ } else if (!rsp->len) {
+ err = -ENOENT;
+ goto err_free_msg;
+ }
return genlmsg_reply(rsp, info);
@@ -241,12 +258,21 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
struct netdev_nl_dump_ctx *ctx)
{
struct napi_struct *napi;
+ unsigned int prev_id;
int err = 0;
- if (!(netdev->flags & IFF_UP))
+ if (!netdev->up)
return err;
+ prev_id = UINT_MAX;
list_for_each_entry(napi, &netdev->napi_list, dev_list) {
+ if (napi->napi_id < MIN_NAPI_ID)
+ continue;
+
+ /* Dump continuation below depends on the list being sorted */
+ WARN_ON_ONCE(napi->napi_id >= prev_id);
+ prev_id = napi->napi_id;
+
if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
continue;
@@ -270,33 +296,79 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
if (info->attrs[NETDEV_A_NAPI_IFINDEX])
ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);
- rtnl_lock();
if (ifindex) {
- netdev = __dev_get_by_index(net, ifindex);
- if (netdev)
+ netdev = netdev_get_by_index_lock(net, ifindex);
+ if (netdev) {
err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
- else
+ netdev_unlock(netdev);
+ } else {
err = -ENODEV;
+ }
} else {
- for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
if (err < 0)
break;
ctx->napi_id = 0;
}
}
- rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
+ return err;
+}
+
+static int
+netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
+{
+ u64 irq_suspend_timeout = 0;
+ u64 gro_flush_timeout = 0;
+ u32 defer = 0;
+
+ if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
+ defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
+ napi_set_defer_hard_irqs(napi, defer);
+ }
+
+ if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
+ irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
+ napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
+ }
+
+ if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
+ gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
+ napi_set_gro_flush_timeout(napi, gro_flush_timeout);
+ }
+
+ return 0;
+}
+
+int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct napi_struct *napi;
+ unsigned int napi_id;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
+ return -EINVAL;
+
+ napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
+
+ napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
+ if (napi) {
+ err = netdev_nl_napi_set_config(napi, info);
+ netdev_unlock(napi->dev);
+ } else {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
+ err = -ENOENT;
+ }
- return skb->len;
+ return err;
}
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding;
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
void *hdr;
@@ -316,6 +388,12 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
rxq->napi->napi_id))
goto nla_put_failure;
+
+ binding = rxq->mp_params.mp_priv;
+ if (binding &&
+ nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
+ goto nla_put_failure;
+
break;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(netdev, q_idx);
@@ -352,10 +430,10 @@ static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
u32 q_type, const struct genl_info *info)
{
- int err = 0;
+ int err;
- if (!(netdev->flags & IFF_UP))
- return err;
+ if (!netdev->up)
+ return -ENOENT;
err = netdev_nl_queue_validate(netdev, q_idx, q_type);
if (err)
@@ -386,11 +464,13 @@ int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- netdev = __dev_get_by_index(genl_info_net(info), ifindex);
- if (netdev)
+ netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
+ if (netdev) {
err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
- else
+ netdev_unlock(netdev);
+ } else {
err = -ENODEV;
+ }
rtnl_unlock();
@@ -410,24 +490,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
struct netdev_nl_dump_ctx *ctx)
{
int err = 0;
- int i;
- if (!(netdev->flags & IFF_UP))
+ if (!netdev->up)
return err;
- for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
NETDEV_QUEUE_TYPE_RX, info);
if (err)
return err;
- ctx->rxq_idx = i++;
}
- for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
NETDEV_QUEUE_TYPE_TX, info);
if (err)
return err;
- ctx->txq_idx = i++;
}
return err;
@@ -447,13 +524,15 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
if (ifindex) {
- netdev = __dev_get_by_index(net, ifindex);
- if (netdev)
+ netdev = netdev_get_by_index_lock(net, ifindex);
+ if (netdev) {
err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
- else
+ netdev_unlock(netdev);
+ } else {
err = -ENODEV;
+ }
} else {
- for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
if (err < 0)
break;
@@ -463,10 +542,390 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
+ return err;
+}
- return skb->len;
+#define NETDEV_STAT_NOT_SET (~0ULL)
+
+static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
+{
+ const u64 *add = _add;
+ u64 *sum = _sum;
+
+ while (size) {
+ if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
+ *sum += *add;
+ sum++;
+ add++;
+ size -= 8;
+ }
+}
+
+static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
+{
+ if (value == NETDEV_STAT_NOT_SET)
+ return 0;
+ return nla_put_uint(rsp, attr_id, value);
+}
+
+static int
+netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
+ u32 q_type, int i, const struct genl_info *info)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ struct netdev_queue_stats_rx rx;
+ struct netdev_queue_stats_tx tx;
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
+ goto nla_put_failure;
+
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ memset(&rx, 0xff, sizeof(rx));
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ if (!memchr_inv(&rx, 0xff, sizeof(rx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_rx(rsp, &rx))
+ goto nla_put_failure;
+ break;
+ case NETDEV_QUEUE_TYPE_TX:
+ memset(&tx, 0xff, sizeof(tx));
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ if (!memchr_inv(&tx, 0xff, sizeof(tx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_tx(rsp, &tx))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_cancel:
+ genlmsg_cancel(rsp, hdr);
+ return 0;
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ int i, err;
+
+ if (!(netdev->flags & IFF_UP))
+ return 0;
+
+ i = ctx->rxq_idx;
+ while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
+ i, info);
+ if (err)
+ return err;
+ ctx->rxq_idx = ++i;
+ }
+ i = ctx->txq_idx;
+ while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
+ i, info);
+ if (err)
+ return err;
+ ctx->txq_idx = ++i;
+ }
+
+ ctx->rxq_idx = 0;
+ ctx->txq_idx = 0;
+ return 0;
+}
+
+static int
+netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info)
+{
+ struct netdev_queue_stats_rx rx_sum, rx;
+ struct netdev_queue_stats_tx tx_sum, tx;
+ const struct netdev_stat_ops *ops;
+ void *hdr;
+ int i;
+
+ ops = netdev->stat_ops;
+ /* Netdev can't guarantee any complete counters */
+ if (!ops->get_base_stats)
+ return 0;
+
+ memset(&rx_sum, 0xff, sizeof(rx_sum));
+ memset(&tx_sum, 0xff, sizeof(tx_sum));
+
+ ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+
+ /* The op was there, but nothing reported, don't bother */
+ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+ !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
+ return 0;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ memset(&rx, 0xff, sizeof(rx));
+ if (ops->get_queue_stats_rx)
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+ }
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ memset(&tx, 0xff, sizeof(tx));
+ if (ops->get_queue_stats_tx)
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+ }
+
+ if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ netdev_nl_stats_write_tx(rsp, &tx_sum))
+ goto nla_put_failure;
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
+ struct sk_buff *skb, const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ if (!netdev->stat_ops)
+ return 0;
+
+ switch (scope) {
+ case 0:
+ return netdev_nl_stats_by_netdev(netdev, skb, info);
+ case NETDEV_QSTATS_SCOPE_QUEUE:
+ return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
+ }
+
+ return -EINVAL; /* Should not happen, per netlink policy */
+}
+
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ unsigned int ifindex;
+ unsigned int scope;
+ int err = 0;
+
+ scope = 0;
+ if (info->attrs[NETDEV_A_QSTATS_SCOPE])
+ scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
+
+ ifindex = 0;
+ if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);
+
+ rtnl_lock();
+ if (ifindex) {
+ netdev = __dev_get_by_index(net, ifindex);
+ if (netdev && netdev->stat_ops) {
+ err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
+ info, ctx);
+ } else {
+ NL_SET_BAD_ATTR(info->extack,
+ info->attrs[NETDEV_A_QSTATS_IFINDEX]);
+ err = netdev ? -EOPNOTSUPP : -ENODEV;
+ }
+ } else {
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
+ info, ctx);
+ if (err < 0)
+ break;
+ }
+ }
+ rtnl_unlock();
+
+ return err;
+}
+
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct net_devmem_dmabuf_binding *binding;
+ struct list_head *sock_binding_list;
+ u32 ifindex, dmabuf_fd, rxq_idx;
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ struct nlattr *attr;
+ int rem, err = 0;
+ void *hdr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
+ dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);
+
+ sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
+ NETLINK_CB(skb).sk);
+ if (IS_ERR(sock_binding_list))
+ return PTR_ERR(sock_binding_list);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_genlmsg_free;
+ }
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (!netdev || !netif_device_present(netdev)) {
+ err = -ENODEV;
+ goto err_unlock;
+ }
+
+ if (dev_xdp_prog_count(netdev)) {
+ NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
+ err = -EEXIST;
+ goto err_unlock;
+ }
+
+ binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
+ if (IS_ERR(binding)) {
+ err = PTR_ERR(binding);
+ goto err_unlock;
+ }
+
+ nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ err = nla_parse_nested(
+ tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ goto err_unbind;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+
+ err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
+ info->extack);
+ if (err)
+ goto err_unbind;
+ }
+
+ list_add(&binding->list, sock_binding_list);
+
+ nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
+ genlmsg_end(rsp, hdr);
+
+ err = genlmsg_reply(rsp, info);
+ if (err)
+ goto err_unbind;
+
+ rtnl_unlock();
+
+ return 0;
+
+err_unbind:
+ net_devmem_unbind_dmabuf(binding);
+err_unlock:
+ rtnl_unlock();
+err_genlmsg_free:
+ nlmsg_free(rsp);
+ return err;
+}
+
+void netdev_nl_sock_priv_init(struct list_head *priv)
+{
+ INIT_LIST_HEAD(priv);
+}
+
+void netdev_nl_sock_priv_destroy(struct list_head *priv)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct net_devmem_dmabuf_binding *temp;
+
+ list_for_each_entry_safe(binding, temp, priv, list) {
+ rtnl_lock();
+ net_devmem_unbind_dmabuf(binding);
+ rtnl_unlock();
+ }
}
static int netdev_genl_netdevice_event(struct notifier_block *nb,
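
Note on the qstats hunks above: the dump code only consumes counters the driver provides via struct netdev_stat_ops; anything left at the all-ones NETDEV_STAT_NOT_SET pattern is skipped, and per-queue values are summed on top of ->get_base_stats(). A minimal sketch of the driver side (struct foo_priv and the foo_* names are illustrative, not part of this patch):

struct foo_priv {
	struct { u64 packets, bytes, alloc_fail; } rx_ring[8];
};

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Fill only what the driver tracks; untouched fields keep the
	 * "not set" pattern and are not reported.
	 */
	stats->packets = priv->rx_ring[idx].packets;
	stats->bytes = priv->rx_ring[idx].bytes;
	stats->alloc_fail = priv->rx_ring[idx].alloc_fail;
}

static void foo_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* Base totals (e.g. for queues that no longer exist) so the
	 * summed output stays monotonic across ring count changes.
	 */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_rx	= foo_get_queue_stats_rx,
	.get_base_stats		= foo_get_base_stats,
};
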
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
new file mode 100644
index 000000000000..db82786fa0c4
--- /dev/null
+++ b/net/core/netdev_rx_queue.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/netdevice.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+
+#include "page_pool_priv.h"
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
+ void *new_mem, *old_mem;
+ int err;
+
+ if (!dev->queue_mgmt_ops || !dev->queue_mgmt_ops->ndo_queue_stop ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_free ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_alloc ||
+ !dev->queue_mgmt_ops->ndo_queue_start)
+ return -EOPNOTSUPP;
+
+ ASSERT_RTNL();
+
+ new_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!new_mem)
+ return -ENOMEM;
+
+ old_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!old_mem) {
+ err = -ENOMEM;
+ goto err_free_new_mem;
+ }
+
+ err = dev->queue_mgmt_ops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_free_old_mem;
+
+ err = page_pool_check_memory_provider(dev, rxq);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_start(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_start_queue;
+
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+
+ kvfree(old_mem);
+ kvfree(new_mem);
+
+ return 0;
+
+err_start_queue:
+ /* Restarting the queue with old_mem should be successful as we haven't
+ * changed any of the queue configuration, and there is not much we can
+ * do to recover from a failure here.
+ *
+ * WARN if we fail to recover the old rx queue, and at least free
+ * old_mem so we don't also leak that.
+ */
+ if (dev->queue_mgmt_ops->ndo_queue_start(dev, old_mem, rxq_idx)) {
+ WARN(1,
+ "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
+ rxq_idx);
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+ }
+
+err_free_new_queue_mem:
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, new_mem);
+
+err_free_old_mem:
+ kvfree(old_mem);
+
+err_free_new_mem:
+ kvfree(new_mem);
+
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
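
netdev_rx_queue_restart() above drives the queue management callbacks in order: allocate replacement memory, stop the old queue (which hands back its memory), start the new one, then free the old memory. A minimal sketch of the shape a driver implementation takes (foo_* names and FOO_RING_BYTES are illustrative; real drivers do considerably more work and must handle errors):

#define FOO_RING_BYTES	4096	/* illustrative size */

struct foo_queue_mem {
	void *rx_ring;
};

static int foo_queue_mem_alloc(struct net_device *dev, void *per_queue_mem,
			       int idx)
{
	struct foo_queue_mem *mem = per_queue_mem;

	/* Build replacement rings for queue @idx off to the side; the
	 * core swaps them in via ndo_queue_start() later.
	 */
	mem->rx_ring = kzalloc(FOO_RING_BYTES, GFP_KERNEL);
	return mem->rx_ring ? 0 : -ENOMEM;
}

static void foo_queue_mem_free(struct net_device *dev, void *per_queue_mem)
{
	struct foo_queue_mem *mem = per_queue_mem;

	kfree(mem->rx_ring);
}

static int foo_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
{
	/* Quiesce queue @idx and stash its current memory in
	 * @per_queue_mem so the core can free it after the restart.
	 */
	return 0;
}

static int foo_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
{
	/* Install the memory prepared by ndo_queue_mem_alloc(). */
	return 0;
}

static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct foo_queue_mem),
	.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
	.ndo_queue_mem_free	= foo_queue_mem_free,
	.ndo_queue_start	= foo_queue_start,
	.ndo_queue_stop		= foo_queue_stop,
};
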
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
new file mode 100644
index 000000000000..7eadb8393e00
--- /dev/null
+++ b/net/core/netmem_priv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NETMEM_PRIV_H
+#define __NETMEM_PRIV_H
+
+static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp_magic;
+}
+
+static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
+{
+ __netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
+}
+
+static inline void netmem_clear_pp_magic(netmem_ref netmem)
+{
+ __netmem_clear_lsb(netmem)->pp_magic = 0;
+}
+
+static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
+{
+ __netmem_clear_lsb(netmem)->pp = pool;
+}
+
+static inline void netmem_set_dma_addr(netmem_ref netmem,
+ unsigned long dma_addr)
+{
+ __netmem_clear_lsb(netmem)->dma_addr = dma_addr;
+}
+#endif
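
These accessors are intended for the page pool core only; stamping a freshly allocated netmem with its owning pool roughly looks like the sketch below (an approximation of what the page pool core does when it initializes pp info, not code from this header):

/* Sketch, assuming the page pool core's usual signature stamping. */
static void example_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
	netmem_set_pp(netmem, pool);
	netmem_or_pp_magic(netmem, PP_SIGNATURE);
}
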
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 543007f159f9..0ab722d95a2d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -34,7 +34,7 @@
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>
@@ -45,11 +45,6 @@
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
-
-static struct sk_buff_head skb_pool;
-
-DEFINE_STATIC_SRCU(netpoll_srcu);
-
#define USEC_PER_POLL 50
#define MAX_SKB_SIZE \
@@ -162,7 +157,7 @@ static void poll_one_napi(struct napi_struct *napi)
if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
return;
- /* We explicilty pass the polling call a budget of 0 to
+ /* We explicitly pass the polling call a budget of 0 to
* indicate that we are clearing the Tx path only.
*/
work = napi->poll(napi, 0);
@@ -220,41 +215,39 @@ EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
struct netpoll_info *ni;
- int idx;
+
might_sleep();
- idx = srcu_read_lock(&netpoll_srcu);
- ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+ ni = rtnl_dereference(dev->npinfo);
if (ni)
down(&ni->dev_lock);
- srcu_read_unlock(&netpoll_srcu, idx);
}
-EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
struct netpoll_info *ni;
- rcu_read_lock();
- ni = rcu_dereference(dev->npinfo);
+
+ ni = rtnl_dereference(dev->npinfo);
if (ni)
up(&ni->dev_lock);
- rcu_read_unlock();
}
-EXPORT_SYMBOL(netpoll_poll_enable);
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
{
+ struct sk_buff_head *skb_pool;
struct sk_buff *skb;
unsigned long flags;
- spin_lock_irqsave(&skb_pool.lock, flags);
- while (skb_pool.qlen < MAX_SKBS) {
+ skb_pool = &np->skb_pool;
+
+ spin_lock_irqsave(&skb_pool->lock, flags);
+ while (skb_pool->qlen < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
- __skb_queue_tail(&skb_pool, skb);
+ __skb_queue_tail(skb_pool, skb);
}
- spin_unlock_irqrestore(&skb_pool.lock, flags);
+ spin_unlock_irqrestore(&skb_pool->lock, flags);
}
static void zap_completion_queue(void)
@@ -291,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
struct sk_buff *skb;
zap_completion_queue();
- refill_skbs();
+ refill_skbs(np);
repeat:
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
- skb = skb_dequeue(&skb_pool);
+ skb = skb_dequeue(&np->skb_pool);
if (!skb) {
if (++count < 10) {
@@ -316,7 +309,7 @@ static int netpoll_owner_active(struct net_device *dev)
struct napi_struct *napi;
list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
- if (napi->poll_owner == smp_processor_id())
+ if (READ_ONCE(napi->poll_owner) == smp_processor_id())
return 1;
}
return 0;
@@ -326,6 +319,7 @@ static int netpoll_owner_active(struct net_device *dev)
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
netdev_tx_t status = NETDEV_TX_BUSY;
+ netdev_tx_t ret = NET_XMIT_DROP;
struct net_device *dev;
unsigned long tries;
/* It is up to the caller to keep npinfo alive. */
@@ -334,11 +328,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
lockdep_assert_irqs_disabled();
dev = np->dev;
+ rcu_read_lock();
npinfo = rcu_dereference_bh(dev->npinfo);
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
dev_kfree_skb_irq(skb);
- return NET_XMIT_DROP;
+ goto out;
}
/* don't get messages out of order, and no recursion */
@@ -377,7 +372,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
- return NETDEV_TX_OK;
+ ret = NETDEV_TX_OK;
+out:
+ rcu_read_unlock();
+ return ret;
}
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -397,7 +395,7 @@ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
}
EXPORT_SYMBOL(netpoll_send_skb);
-void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
int total_len, ip_len, udp_len;
struct sk_buff *skb;
@@ -421,7 +419,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
skb = find_skb(np, total_len + np->dev->needed_tailroom,
total_len - len);
if (!skb)
- return;
+ return -ENOMEM;
skb_copy_to_linear_data(skb, msg, len);
skb_put(skb, len);
@@ -497,7 +495,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
skb->dev = np->dev;
- netpoll_send_skb(np, skb);
+ return (int)netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
@@ -538,6 +536,14 @@ static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
return -1;
}
+static void skb_pool_flush(struct netpoll *np)
+{
+ struct sk_buff_head *skb_pool;
+
+ skb_pool = &np->skb_pool;
+ skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
+}
+
int netpoll_parse_options(struct netpoll *np, char *opt)
{
char *cur=opt, *delim;
@@ -626,17 +632,17 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
const struct net_device_ops *ops;
int err;
- np->dev = ndev;
- strscpy(np->dev_name, ndev->name, IFNAMSIZ);
+ skb_queue_head_init(&np->skb_pool);
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
- np->dev_name);
+ ndev->name);
err = -ENOTSUPP;
goto out;
}
- if (!ndev->npinfo) {
+ npinfo = rtnl_dereference(ndev->npinfo);
+ if (!npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
@@ -649,19 +655,23 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
refcount_set(&npinfo->refcnt, 1);
- ops = np->dev->netdev_ops;
+ ops = ndev->netdev_ops;
if (ops->ndo_netpoll_setup) {
- err = ops->ndo_netpoll_setup(ndev, npinfo);
+ err = ops->ndo_netpoll_setup(ndev);
if (err)
goto free_npinfo;
}
} else {
- npinfo = rtnl_dereference(ndev->npinfo);
refcount_inc(&npinfo->refcnt);
}
+ np->dev = ndev;
+ strscpy(np->dev_name, ndev->name, IFNAMSIZ);
npinfo->netpoll = np;
+ /* fill up the skb queue */
+ refill_skbs(np);
+
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -677,6 +687,7 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
struct net_device *ndev = NULL;
+ bool ip_overwritten = false;
struct in_device *in_dev;
int err;
@@ -741,6 +752,7 @@ put_noaddr:
}
np->local_ip.ip = ifa->ifa_local;
+ ip_overwritten = true;
np_info(np, "local IP %pI4\n", &np->local_ip.ip);
} else {
#if IS_ENABLED(CONFIG_IPV6)
@@ -757,6 +769,7 @@ put_noaddr:
!!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
+ ip_overwritten = true;
err = 0;
break;
}
@@ -777,16 +790,18 @@ put_noaddr:
}
}
- /* fill up the skb queue */
- refill_skbs();
-
err = __netpoll_setup(np, ndev);
if (err)
- goto put;
+ goto flush;
rtnl_unlock();
return 0;
+flush:
+ skb_pool_flush(np);
put:
+ DEBUG_NET_WARN_ON_ONCE(np->dev);
+ if (ip_overwritten)
+ memset(&np->local_ip, 0, sizeof(np->local_ip));
netdev_put(ndev, &np->dev_tracker);
unlock:
rtnl_unlock();
@@ -794,13 +809,6 @@ unlock:
}
EXPORT_SYMBOL(netpoll_setup);
-static int __init netpoll_init(void)
-{
- skb_queue_head_init(&skb_pool);
- return 0;
-}
-core_initcall(netpoll_init);
-
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
struct netpoll_info *npinfo =
@@ -826,8 +834,6 @@ void __netpoll_cleanup(struct netpoll *np)
if (!npinfo)
return;
- synchronize_srcu(&netpoll_srcu);
-
if (refcount_dec_and_test(&npinfo->refcnt)) {
const struct net_device_ops *ops;
@@ -839,6 +845,8 @@ void __netpoll_cleanup(struct netpoll *np)
call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
} else
RCU_INIT_POINTER(np->dev->npinfo, NULL);
+
+ skb_pool_flush(np);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
@@ -853,14 +861,20 @@ void __netpoll_free(struct netpoll *np)
}
EXPORT_SYMBOL_GPL(__netpoll_free);
+void do_netpoll_cleanup(struct netpoll *np)
+{
+ __netpoll_cleanup(np);
+ netdev_put(np->dev, &np->dev_tracker);
+ np->dev = NULL;
+}
+EXPORT_SYMBOL(do_netpoll_cleanup);
+
void netpoll_cleanup(struct netpoll *np)
{
rtnl_lock();
if (!np->dev)
goto out;
- __netpoll_cleanup(np);
- netdev_put(np->dev, &np->dev_tracker);
- np->dev = NULL;
+ do_netpoll_cleanup(np);
out:
rtnl_unlock();
}
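
With the netpoll_send_udp() prototype above now returning an int, callers can observe drops instead of losing them silently. A minimal caller sketch, assuming only the interface shown in this hunk; the wrapper function and counter are hypothetical and not part of this patch:

#include <linux/netpoll.h>

static unsigned long my_np_tx_errors;	/* hypothetical drop counter */

static void my_console_write(struct netpoll *np, const char *msg, int len)
{
	int ret = netpoll_send_udp(np, msg, len);

	/* ret is -ENOMEM when the per-netpoll skb_pool is exhausted,
	 * otherwise the NET_XMIT_ / NETDEV_TX_ code propagated from
	 * netpoll_send_skb().
	 */
	if (ret)
		my_np_tx_errors++;
}
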
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4933762e5a6b..f5e908c9e7ad 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -5,11 +5,13 @@
* Copyright (C) 2016 Red Hat, Inc.
*/
+#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -23,14 +25,20 @@
#include <trace/events/page_pool.h>
+#include "mp_dmabuf_devmem.h"
+#include "netmem_priv.h"
#include "page_pool_priv.h"
+DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
#define BIAS_MAX (LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
@@ -121,9 +129,9 @@ int page_pool_ethtool_stats_get_count(void)
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
- struct page_pool_stats *pool_stats = stats;
+ const struct page_pool_stats *pool_stats = stats;
*data++ = pool_stats->alloc_stats.fast;
*data++ = pool_stats->alloc_stats.slow;
@@ -170,16 +178,33 @@ static void page_pool_producer_unlock(struct page_pool *pool,
spin_unlock_bh(&pool->ring.producer_lock);
}
+static void page_pool_struct_check(void)
+{
+ CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
+ CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
+ PAGE_POOL_FRAG_GROUP_ALIGN);
+}
+
static int page_pool_init(struct page_pool *pool,
- const struct page_pool_params *params)
+ const struct page_pool_params *params,
+ int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
+ struct netdev_rx_queue *rxq;
+ int err;
+
+ page_pool_struct_check();
memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
+ pool->cpuid = cpuid;
+ pool->dma_sync_for_cpu = true;
+
/* Validate only known flags were used */
- if (pool->p.flags & ~(PP_FLAG_ALL))
+ if (pool->slow.flags & ~PP_FLAG_ALL)
return -EINVAL;
if (pool->p.pool_size)
@@ -193,22 +218,26 @@ static int page_pool_init(struct page_pool *pool,
* DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
* which is the XDP_TX use-case.
*/
- if (pool->p.flags & PP_FLAG_DMA_MAP) {
+ if (pool->slow.flags & PP_FLAG_DMA_MAP) {
if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
return -EINVAL;
+
+ pool->dma_map = true;
}
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
+ if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
/* In order to request DMA-sync-for-device the page
* needs to be mapped
*/
- if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
return -EINVAL;
if (!pool->p.max_len)
return -EINVAL;
+ pool->dma_sync = true;
+
/* pool->p.offset has to be set according to the address
* offset used by the DMA engine to start copying rx data
*/
@@ -217,14 +246,24 @@ static int page_pool_init(struct page_pool *pool,
pool->has_init_callback = !!pool->slow.init_callback;
#ifdef CONFIG_PAGE_POOL_STATS
- pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
- if (!pool->recycle_stats)
- return -ENOMEM;
+ if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ if (!pool->recycle_stats)
+ return -ENOMEM;
+ } else {
+ /* For system page pool instance we use a singular stats object
+ * instead of allocating a separate percpu variable for each
+ * (also percpu) page pool instance.
+ */
+ pool->recycle_stats = &pp_system_recycle_stats;
+ pool->system = true;
+ }
#endif
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!pool->system)
+ free_percpu(pool->recycle_stats);
#endif
return -ENOMEM;
}
@@ -234,29 +273,65 @@ static int page_pool_init(struct page_pool *pool,
/* Driver calling page_pool_create() also call page_pool_destroy() */
refcount_set(&pool->user_cnt, 1);
- if (pool->p.flags & PP_FLAG_DMA_MAP)
+ if (pool->dma_map)
get_device(pool->p.dev);
+ if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
+ /* We rely on rtnl_lock()ing to make sure netdev_rx_queue
+ * configuration doesn't change while we're initializing
+ * the page_pool.
+ */
+ ASSERT_RTNL();
+ rxq = __netif_get_rx_queue(pool->slow.netdev,
+ pool->slow.queue_idx);
+ pool->mp_priv = rxq->mp_params.mp_priv;
+ }
+
+ if (pool->mp_priv) {
+ if (!pool->dma_map || !pool->dma_sync)
+ return -EOPNOTSUPP;
+
+ err = mp_dmabuf_devmem_init(pool);
+ if (err) {
+ pr_warn("%s() mem-provider init failed %d\n", __func__,
+ err);
+ goto free_ptr_ring;
+ }
+
+ static_branch_inc(&page_pool_mem_providers);
+ }
+
return 0;
+
+free_ptr_ring:
+ ptr_ring_cleanup(&pool->ring, NULL);
+#ifdef CONFIG_PAGE_POOL_STATS
+ if (!pool->system)
+ free_percpu(pool->recycle_stats);
+#endif
+ return err;
}
static void page_pool_uninit(struct page_pool *pool)
{
ptr_ring_cleanup(&pool->ring, NULL);
- if (pool->p.flags & PP_FLAG_DMA_MAP)
+ if (pool->dma_map)
put_device(pool->p.dev);
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!pool->system)
+ free_percpu(pool->recycle_stats);
#endif
}
/**
- * page_pool_create() - create a page pool.
+ * page_pool_create_percpu() - create a page pool for a given cpu.
* @params: parameters, see struct page_pool_params
+ * @cpuid: cpu identifier
*/
-struct page_pool *page_pool_create(const struct page_pool_params *params)
+struct page_pool *
+page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
struct page_pool *pool;
int err;
@@ -265,7 +340,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
if (!pool)
return ERR_PTR(-ENOMEM);
- err = page_pool_init(pool, params);
+ err = page_pool_init(pool, params, cpuid);
if (err < 0)
goto err_free;
@@ -282,21 +357,30 @@ err_free:
kfree(pool);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(page_pool_create_percpu);
+
+/**
+ * page_pool_create() - create a page pool
+ * @params: parameters, see struct page_pool_params
+ */
+struct page_pool *page_pool_create(const struct page_pool_params *params)
+{
+ return page_pool_create_percpu(params, -1);
+}
EXPORT_SYMBOL(page_pool_create);
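
A hedged usage sketch for page_pool_create_percpu(): the parameter fields follow struct page_pool_params as referenced in this file, while the concrete values and the wrapper function are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static struct page_pool *my_create_cpu_pool(struct device *dev, int cpu)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	/* cpuid == -1, as used by page_pool_create(), means no CPU binding */
	return page_pool_create_percpu(&pp, cpu);
}
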
-static void page_pool_return_page(struct page_pool *pool, struct page *page);
+static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
-noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
+static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
struct ptr_ring *r = &pool->ring;
- struct page *page;
+ netmem_ref netmem;
int pref_nid; /* preferred NUMA node */
/* Quicker fallback, avoid locks when ring is empty */
if (__ptr_ring_empty(r)) {
alloc_stat_inc(pool, empty);
- return NULL;
+ return 0;
}
/* Softirq guarantee CPU and thus NUMA node is stable. This,
@@ -311,64 +395,74 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
/* Refill alloc array, but only if NUMA match */
do {
- page = __ptr_ring_consume(r);
- if (unlikely(!page))
+ netmem = (__force netmem_ref)__ptr_ring_consume(r);
+ if (unlikely(!netmem))
break;
- if (likely(page_to_nid(page) == pref_nid)) {
- pool->alloc.cache[pool->alloc.count++] = page;
+ if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
+ pool->alloc.cache[pool->alloc.count++] = netmem;
} else {
/* NUMA mismatch;
* (1) release 1 page to page-allocator and
* (2) break out to fallthrough to alloc_pages_node.
* This limit stress on page buddy alloactor.
*/
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
alloc_stat_inc(pool, waive);
- page = NULL;
+ netmem = 0;
break;
}
} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
/* Return last page */
if (likely(pool->alloc.count > 0)) {
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, refill);
}
- return page;
+ return netmem;
}
/* fast path */
-static struct page *__page_pool_get_cached(struct page_pool *pool)
+static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
if (likely(pool->alloc.count)) {
/* Fast-path */
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, fast);
} else {
- page = page_pool_refill_alloc_cache(pool);
+ netmem = page_pool_refill_alloc_cache(pool);
}
- return page;
+ return netmem;
}
-static void page_pool_dma_sync_for_device(struct page_pool *pool,
- struct page *page,
- unsigned int dma_sync_size)
+static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
+ netmem_ref netmem,
+ u32 dma_sync_size)
{
- dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+ dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_size = min(dma_sync_size, pool->p.max_len);
- dma_sync_single_range_for_device(pool->p.dev, dma_addr,
- pool->p.offset, dma_sync_size,
- pool->p.dma_dir);
+ __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+ dma_sync_size, pool->p.dma_dir);
+#endif
+}
+
+static __always_inline void
+page_pool_dma_sync_for_device(const struct page_pool *pool,
+ netmem_ref netmem,
+ u32 dma_sync_size)
+{
+ if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+ __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
}
-static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
+static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{
dma_addr_t dma;
@@ -377,52 +471,28 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
* into page private data (i.e 32bit cpu with 64bit DMA caps)
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page_attrs(pool->p.dev, page, 0,
- (PAGE_SIZE << pool->p.order),
- pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_WEAK_ORDERING);
+ dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
+ (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(pool->p.dev, dma))
return false;
- if (page_pool_set_dma_addr(page, dma))
+ if (page_pool_set_dma_addr_netmem(netmem, dma))
goto unmap_failed;
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
- page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+ page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
return true;
unmap_failed:
- WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
+ WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return false;
}
-static void page_pool_set_pp_info(struct page_pool *pool,
- struct page *page)
-{
- page->pp = pool;
- page->pp_magic |= PP_SIGNATURE;
-
- /* Ensuring all pages have been split into one fragment initially:
- * page_pool_set_pp_info() is only called once for every page when it
- * is allocated from the page allocator and page_pool_fragment_page()
- * is dirtying the same cache line as the page->pp_magic above, so
- * the overhead is negligible.
- */
- page_pool_fragment_page(page, 1);
- if (pool->has_init_callback)
- pool->slow.init_callback(page, pool->slow.init_arg);
-}
-
-static void page_pool_clear_pp_info(struct page *page)
-{
- page->pp_magic = 0;
- page->pp = NULL;
-}
-
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@@ -433,94 +503,102 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
if (unlikely(!page))
return NULL;
- if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
- unlikely(!page_pool_dma_map(pool, page))) {
+ if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) {
put_page(page);
return NULL;
}
alloc_stat_inc(pool, slow_high_order);
- page_pool_set_pp_info(pool, page);
+ page_pool_set_pp_info(pool, page_to_netmem(page));
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
- trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+ trace_page_pool_state_hold(pool, page_to_netmem(page),
+ pool->pages_state_hold_cnt);
return page;
}
/* slow path */
-noinline
-static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
- gfp_t gfp)
+static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
+ gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
- unsigned int pp_flags = pool->p.flags;
unsigned int pp_order = pool->p.order;
- struct page *page;
+ bool dma_map = pool->dma_map;
+ netmem_ref netmem;
int i, nr_pages;
/* Don't support bulk alloc for high-order pages */
if (unlikely(pp_order))
- return __page_pool_alloc_page_order(pool, gfp);
+ return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
/* Unnecessary as alloc cache is empty, but guarantees zero count */
if (unlikely(pool->alloc.count > 0))
return pool->alloc.cache[--pool->alloc.count];
- /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+ /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
- pool->alloc.cache);
+ nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
+ (struct page **)pool->alloc.cache);
if (unlikely(!nr_pages))
- return NULL;
+ return 0;
/* Pages have been filled into alloc.cache array, but count is zero and
* page element have not been (possibly) DMA mapped.
*/
for (i = 0; i < nr_pages; i++) {
- page = pool->alloc.cache[i];
- if ((pp_flags & PP_FLAG_DMA_MAP) &&
- unlikely(!page_pool_dma_map(pool, page))) {
- put_page(page);
+ netmem = pool->alloc.cache[i];
+ if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
+ put_page(netmem_to_page(netmem));
continue;
}
- page_pool_set_pp_info(pool, page);
- pool->alloc.cache[pool->alloc.count++] = page;
+ page_pool_set_pp_info(pool, netmem);
+ pool->alloc.cache[pool->alloc.count++] = netmem;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
- trace_page_pool_state_hold(pool, page,
+ trace_page_pool_state_hold(pool, netmem,
pool->pages_state_hold_cnt);
}
/* Return last page */
if (likely(pool->alloc.count > 0)) {
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, slow);
} else {
- page = NULL;
+ netmem = 0;
}
/* When page just alloc'ed is should/must have refcnt 1. */
- return page;
+ return netmem;
}
/* For using page_pool replace: alloc_pages() API calls, but provide
* synchronization guarantee for allocation side.
*/
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
- struct page *page;
+ netmem_ref netmem;
/* Fast-path: Get a page from cache */
- page = __page_pool_get_cached(pool);
- if (page)
- return page;
+ netmem = __page_pool_get_cached(pool);
+ if (netmem)
+ return netmem;
/* Slow-path: cache empty, do real allocation */
- page = __page_pool_alloc_pages_slow(pool, gfp);
- return page;
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+ else
+ netmem = __page_pool_alloc_pages_slow(pool, gfp);
+ return netmem;
+}
+EXPORT_SYMBOL(page_pool_alloc_netmems);
+ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);
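
Sketch of an RX refill step built on page_pool_alloc_netmems(), assuming only the allocator and the netmem DMA-address helper used elsewhere in this file; the hardware descriptor layout is hypothetical:

#include <net/page_pool/helpers.h>

struct my_rx_desc {			/* hypothetical HW descriptor */
	__le64 addr;
};

static int my_refill_one(struct page_pool *pool, struct my_rx_desc *desc)
{
	netmem_ref netmem = page_pool_alloc_netmems(pool, GFP_ATOMIC);

	if (!netmem)
		return -ENOMEM;

	/* netmem may back a net_iov, so use the netmem DMA accessor */
	desc->addr = cpu_to_le64(page_pool_get_dma_addr_netmem(netmem));
	return 0;
}
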
@@ -548,24 +626,46 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
return inflight;
}
-static __always_inline
-void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
+{
+ netmem_set_pp(netmem, pool);
+ netmem_or_pp_magic(netmem, PP_SIGNATURE);
+
+ /* Ensuring all pages have been split into one fragment initially:
+ * page_pool_set_pp_info() is only called once for every page when it
+ * is allocated from the page allocator and page_pool_fragment_page()
+ * is dirtying the same cache line as the page->pp_magic above, so
+ * the overhead is negligible.
+ */
+ page_pool_fragment_netmem(netmem, 1);
+ if (pool->has_init_callback)
+ pool->slow.init_callback(netmem, pool->slow.init_arg);
+}
+
+void page_pool_clear_pp_info(netmem_ref netmem)
+{
+ netmem_clear_pp_magic(netmem);
+ netmem_set_pp(netmem, NULL);
+}
+
+static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ netmem_ref netmem)
{
dma_addr_t dma;
- if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ if (!pool->dma_map)
/* Always account for inflight pages, even if we didn't
* map them
*/
return;
- dma = page_pool_get_dma_addr(page);
+ dma = page_pool_get_dma_addr_netmem(netmem);
/* When page is unmapped, it cannot be returned to our pool */
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
- page_pool_set_dma_addr(page, 0);
+ page_pool_set_dma_addr_netmem(netmem, 0);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -573,35 +673,41 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
* a regular page (that will eventually be returned to the normal
* page-allocator via put_page).
*/
-void page_pool_return_page(struct page_pool *pool, struct page *page)
+void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{
int count;
+ bool put;
- __page_pool_release_page_dma(pool, page);
-
- page_pool_clear_pp_info(page);
+ put = true;
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ put = mp_dmabuf_devmem_release_page(pool, netmem);
+ else
+ __page_pool_release_page_dma(pool, netmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
*/
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
- trace_page_pool_state_release(pool, page, count);
+ trace_page_pool_state_release(pool, netmem, count);
- put_page(page);
+ if (put) {
+ page_pool_clear_pp_info(netmem);
+ put_page(netmem_to_page(netmem));
+ }
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
*/
}
-static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
int ret;
/* BH protection not needed if current is softirq */
if (in_softirq())
- ret = ptr_ring_produce(&pool->ring, page);
+ ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
else
- ret = ptr_ring_produce_bh(&pool->ring, page);
+ ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);
if (!ret) {
recycle_stat_inc(pool, ring);
@@ -616,7 +722,7 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
*
* Caller must provide appropriate safe context.
*/
-static bool page_pool_recycle_in_cache(struct page *page,
+static bool page_pool_recycle_in_cache(netmem_ref netmem,
struct page_pool *pool)
{
if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
@@ -625,19 +731,26 @@ static bool page_pool_recycle_in_cache(struct page *page,
}
/* Caller MUST have verified/know (page_ref_count(page) == 1) */
- pool->alloc.cache[pool->alloc.count++] = page;
+ pool->alloc.cache[pool->alloc.count++] = netmem;
recycle_stat_inc(pool, cached);
return true;
}
+static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
+{
+ return netmem_is_net_iov(netmem) ||
+ (page_ref_count(netmem_to_page(netmem)) == 1 &&
+ !page_is_pfmemalloc(netmem_to_page(netmem)));
+}
+
/* If the page refcnt == 1, this will try to recycle the page.
- * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
+ * If pool->dma_sync is set, we'll try to sync the DMA area for
* the configured size min(dma_sync_size, pool->max_len).
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
*/
-static __always_inline struct page *
-__page_pool_put_page(struct page_pool *pool, struct page *page,
+static __always_inline netmem_ref
+__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
unsigned int dma_sync_size, bool allow_direct)
{
lockdep_assert_no_hardirq();
@@ -651,20 +764,18 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* page is NOT reusable when allocated when system is under
* some pressure. (page_is_pfmemalloc)
*/
- if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
+ if (likely(__page_pool_page_can_be_recycled(netmem))) {
/* Read barrier done in page_ref_count / READ_ONCE */
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
- page_pool_dma_sync_for_device(pool, page,
- dma_sync_size);
+ page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
- if (allow_direct && in_softirq() &&
- page_pool_recycle_in_cache(page, pool))
- return NULL;
+ if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
+ return 0;
/* Page found as candidate for recycling */
- return page;
+ return netmem;
}
+
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
* Many drivers split up the page into fragments, and some
@@ -679,174 +790,253 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* will be invoking put_page.
*/
recycle_stat_inc(pool, released_refcnt);
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
- return NULL;
+ return 0;
}
-void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct)
+static bool page_pool_napi_local(const struct page_pool *pool)
{
- page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
- if (page && !page_pool_recycle_in_ring(pool, page)) {
+ const struct napi_struct *napi;
+ u32 cpuid;
+
+ if (unlikely(!in_softirq()))
+ return false;
+
+ /* Allow direct recycle if we have reasons to believe that we are
+ * in the same context as the consumer would run, so there's
+ * no possible race.
+ * __page_pool_put_page() makes sure we're not in hardirq context
+ * and interrupts are enabled prior to accessing the cache.
+ */
+ cpuid = smp_processor_id();
+ if (READ_ONCE(pool->cpuid) == cpuid)
+ return true;
+
+ napi = READ_ONCE(pool->p.napi);
+
+ return napi && READ_ONCE(napi->list_owner) == cpuid;
+}
+
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size, bool allow_direct)
+{
+ if (!allow_direct)
+ allow_direct = page_pool_napi_local(pool);
+
+ netmem =
+ __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
+ if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
/* Cache full, fallback to free pages */
recycle_stat_inc(pool, ring_full);
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
}
+EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
+
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
+{
+ page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
+ allow_direct);
+}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
-/**
- * page_pool_put_page_bulk() - release references on multiple pages
- * @pool: pool from which pages were allocated
- * @data: array holding page pointers
- * @count: number of pages in @data
- *
- * Tries to refill a number of pages into the ptr_ring cache holding ptr_ring
- * producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
- * will release leftover pages to the page allocator.
- * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
- * completion loop for the XDP_REDIRECT use case.
- *
- * Please note the caller must not use data area after running
- * page_pool_put_page_bulk(), as this function overwrites it.
- */
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count)
+static void page_pool_recycle_ring_bulk(struct page_pool *pool,
+ netmem_ref *bulk,
+ u32 bulk_len)
{
- int i, bulk_len = 0;
bool in_softirq;
+ u32 i;
- for (i = 0; i < count; i++) {
- struct page *page = virt_to_head_page(data[i]);
-
- /* It is not the last user for the page frag case */
- if (!page_pool_is_last_ref(page))
- continue;
-
- page = __page_pool_put_page(pool, page, -1, false);
- /* Approved for bulk recycling in ptr_ring cache */
- if (page)
- data[bulk_len++] = page;
- }
-
- if (unlikely(!bulk_len))
- return;
-
- /* Bulk producer into ptr_ring page_pool cache */
+ /* Bulk produce into ptr_ring page_pool cache */
in_softirq = page_pool_producer_lock(pool);
+
for (i = 0; i < bulk_len; i++) {
- if (__ptr_ring_produce(&pool->ring, data[i])) {
+ if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
/* ring full */
recycle_stat_inc(pool, ring_full);
break;
}
}
- recycle_stat_add(pool, ring, i);
+
page_pool_producer_unlock(pool, in_softirq);
+ recycle_stat_add(pool, ring, i);
- /* Hopefully all pages was return into ptr_ring */
+ /* Hopefully all pages were returned into ptr_ring */
if (likely(i == bulk_len))
return;
- /* ptr_ring cache full, free remaining pages outside producer lock
- * since put_page() with refcnt == 1 can be an expensive operation
+ /*
+ * ptr_ring cache is full, free remaining pages outside producer lock
+ * since put_page() with refcnt == 1 can be an expensive operation.
*/
for (; i < bulk_len; i++)
- page_pool_return_page(pool, data[i]);
+ page_pool_return_page(pool, bulk[i]);
}
-EXPORT_SYMBOL(page_pool_put_page_bulk);
-static struct page *page_pool_drain_frag(struct page_pool *pool,
- struct page *page)
+/**
+ * page_pool_put_netmem_bulk() - release references on multiple netmems
+ * @data: array holding netmem references
+ * @count: number of entries in @data
+ *
+ * Tries to refill a number of netmems into the ptr_ring cache holding ptr_ring
+ * producer lock. If the ptr_ring is full, page_pool_put_netmem_bulk()
+ * will release leftover netmems to the memory provider.
+ * page_pool_put_netmem_bulk() is suitable to be run inside the driver NAPI tx
+ * completion loop for the XDP_REDIRECT use case.
+ *
+ * Please note the caller must not use data area after running
+ * page_pool_put_netmem_bulk(), as this function overwrites it.
+ */
+void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
+{
+ u32 bulk_len = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ netmem_ref netmem = netmem_compound_head(data[i]);
+
+ if (page_pool_unref_and_test(netmem))
+ data[bulk_len++] = netmem;
+ }
+
+ count = bulk_len;
+ while (count) {
+ netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
+ struct page_pool *pool = NULL;
+ bool allow_direct;
+ u32 foreign = 0;
+
+ bulk_len = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ struct page_pool *netmem_pp;
+ netmem_ref netmem = data[i];
+
+ netmem_pp = netmem_get_pp(netmem);
+ if (unlikely(!pool)) {
+ pool = netmem_pp;
+ allow_direct = page_pool_napi_local(pool);
+ } else if (netmem_pp != pool) {
+ /*
+ * If the netmem belongs to a different
+ * page_pool, save it for another round.
+ */
+ data[foreign++] = netmem;
+ continue;
+ }
+
+ netmem = __page_pool_put_page(pool, netmem, -1,
+ allow_direct);
+ /* Approved for bulk recycling in ptr_ring cache */
+ if (netmem)
+ bulk[bulk_len++] = netmem;
+ }
+
+ if (bulk_len)
+ page_pool_recycle_ring_bulk(pool, bulk, bulk_len);
+
+ count = foreign;
+ }
+}
+EXPORT_SYMBOL(page_pool_put_netmem_bulk);
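
A Tx-completion sketch for page_pool_put_netmem_bulk(), following the kernel-doc above: gather netmem references from completed descriptors and recycle them in one call. The descriptor structure and batch size are illustrative assumptions.

#include <net/page_pool/helpers.h>

#define MY_BULK_SIZE	16

struct my_tx_desc {			/* hypothetical completion entry */
	netmem_ref netmem;
};

static void my_tx_clean(struct my_tx_desc *descs, unsigned int n)
{
	netmem_ref bulk[MY_BULK_SIZE];
	unsigned int i;
	u32 len = 0;

	for (i = 0; i < n; i++) {
		bulk[len++] = descs[i].netmem;
		if (len == MY_BULK_SIZE) {
			page_pool_put_netmem_bulk(bulk, len);
			len = 0;
		}
	}

	if (len)
		page_pool_put_netmem_bulk(bulk, len);
}
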
+
+static netmem_ref page_pool_drain_frag(struct page_pool *pool,
+ netmem_ref netmem)
{
long drain_count = BIAS_MAX - pool->frag_users;
/* Some user is still using the page frag */
- if (likely(page_pool_unref_page(page, drain_count)))
- return NULL;
+ if (likely(page_pool_unref_netmem(netmem, drain_count)))
+ return 0;
- if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
- if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
- page_pool_dma_sync_for_device(pool, page, -1);
-
- return page;
+ if (__page_pool_page_can_be_recycled(netmem)) {
+ page_pool_dma_sync_for_device(pool, netmem, -1);
+ return netmem;
}
- page_pool_return_page(pool, page);
- return NULL;
+ page_pool_return_page(pool, netmem);
+ return 0;
}
static void page_pool_free_frag(struct page_pool *pool)
{
long drain_count = BIAS_MAX - pool->frag_users;
- struct page *page = pool->frag_page;
+ netmem_ref netmem = pool->frag_page;
- pool->frag_page = NULL;
+ pool->frag_page = 0;
- if (!page || page_pool_unref_page(page, drain_count))
+ if (!netmem || page_pool_unref_netmem(netmem, drain_count))
return;
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
-struct page *page_pool_alloc_frag(struct page_pool *pool,
- unsigned int *offset,
- unsigned int size, gfp_t gfp)
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+ unsigned int *offset, unsigned int size,
+ gfp_t gfp)
{
unsigned int max_size = PAGE_SIZE << pool->p.order;
- struct page *page = pool->frag_page;
+ netmem_ref netmem = pool->frag_page;
if (WARN_ON(size > max_size))
- return NULL;
+ return 0;
size = ALIGN(size, dma_get_cache_alignment());
*offset = pool->frag_offset;
- if (page && *offset + size > max_size) {
- page = page_pool_drain_frag(pool, page);
- if (page) {
+ if (netmem && *offset + size > max_size) {
+ netmem = page_pool_drain_frag(pool, netmem);
+ if (netmem) {
+ recycle_stat_inc(pool, cached);
alloc_stat_inc(pool, fast);
goto frag_reset;
}
}
- if (!page) {
- page = page_pool_alloc_pages(pool, gfp);
- if (unlikely(!page)) {
- pool->frag_page = NULL;
- return NULL;
+ if (!netmem) {
+ netmem = page_pool_alloc_netmems(pool, gfp);
+ if (unlikely(!netmem)) {
+ pool->frag_page = 0;
+ return 0;
}
- pool->frag_page = page;
+ pool->frag_page = netmem;
frag_reset:
pool->frag_users = 1;
*offset = 0;
pool->frag_offset = size;
- page_pool_fragment_page(page, BIAS_MAX);
- return page;
+ page_pool_fragment_netmem(netmem, BIAS_MAX);
+ return netmem;
}
pool->frag_users++;
pool->frag_offset = *offset + size;
- alloc_stat_inc(pool, fast);
- return page;
+ return netmem;
+}
+EXPORT_SYMBOL(page_pool_alloc_frag_netmem);
+
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
+ gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);
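
A minimal sketch of the frag allocation API retained here, assuming GFP_ATOMIC context; the wrapper is illustrative and simply maps the returned sub-page fragment:

#include <linux/mm.h>
#include <net/page_pool/helpers.h>

static void *my_alloc_rx_frag(struct page_pool *pool, unsigned int size)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(pool, &offset, size, GFP_ATOMIC);
	if (!page)
		return NULL;

	/* several callers may share this page; each gets its own offset */
	return page_address(page) + offset;
}
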
static void page_pool_empty_ring(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
/* Empty recycle ring */
- while ((page = ptr_ring_consume_bh(&pool->ring))) {
+ while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
/* Verify the refcnt invariant of cached pages */
- if (!(page_ref_count(page) == 1))
+ if (!(netmem_ref_count(netmem) == 1))
pr_crit("%s() page_pool refcnt %d violation\n",
- __func__, page_ref_count(page));
+ __func__, netmem_ref_count(netmem));
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
}
@@ -857,12 +1047,18 @@ static void __page_pool_destroy(struct page_pool *pool)
page_pool_unlist(pool);
page_pool_uninit(pool);
+
+ if (pool->mp_priv) {
+ mp_dmabuf_devmem_destroy(pool);
+ static_branch_dec(&page_pool_mem_providers);
+ }
+
kfree(pool);
}
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
if (pool->destroy_cnt)
return;
@@ -872,8 +1068,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
* call concurrently.
*/
while (pool->alloc.count) {
- page = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, page);
+ netmem = pool->alloc.cache[--pool->alloc.count];
+ page_pool_return_page(pool, netmem);
}
}
@@ -927,27 +1123,34 @@ static void page_pool_release_retry(struct work_struct *wq)
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
- struct xdp_mem_info *mem)
+ const struct xdp_mem_info *mem)
{
refcount_inc(&pool->user_cnt);
pool->disconnect = disconnect;
pool->xdp_mem_id = mem->id;
}
-void page_pool_unlink_napi(struct page_pool *pool)
+void page_pool_disable_direct_recycling(struct page_pool *pool)
{
+ /* Disable direct recycling based on pool->cpuid.
+ * Paired with READ_ONCE() in page_pool_napi_local().
+ */
+ WRITE_ONCE(pool->cpuid, -1);
+
if (!pool->p.napi)
return;
/* To avoid races with recycling and additional barriers make sure
* pool and NAPI are unlinked when NAPI is disabled.
*/
- WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
- READ_ONCE(pool->p.napi->list_owner) != -1);
+ WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
+ WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
+ mutex_lock(&page_pools_lock);
WRITE_ONCE(pool->p.napi, NULL);
+ mutex_unlock(&page_pools_lock);
}
-EXPORT_SYMBOL(page_pool_unlink_napi);
+EXPORT_SYMBOL(page_pool_disable_direct_recycling);
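
Illustrative driver-side ordering for page_pool_disable_direct_recycling(): when a pool outlives a channel restart, lockless recycling must be switched off before the NAPI instance it was bound to is disabled. The channel structure below is hypothetical.

#include <linux/netdevice.h>
#include <net/page_pool/types.h>

struct my_channel {			/* hypothetical driver channel */
	struct napi_struct napi;
	struct page_pool *pool;
};

static void my_channel_stop(struct my_channel *ch)
{
	/* stop direct recycling first, then quiesce NAPI */
	page_pool_disable_direct_recycling(ch->pool);
	napi_disable(&ch->napi);
	/* ... stop the HW ring and drain buffers here ... */
}
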
void page_pool_destroy(struct page_pool *pool)
{
@@ -957,7 +1160,7 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
- page_pool_unlink_napi(pool);
+ page_pool_disable_direct_recycling(pool);
page_pool_free_frag(pool);
if (!page_pool_release(pool))
@@ -975,15 +1178,15 @@ EXPORT_SYMBOL(page_pool_destroy);
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
- struct page *page;
+ netmem_ref netmem;
trace_page_pool_update_nid(pool, new_nid);
pool->p.nid = new_nid;
/* Flush pool alloc cache, as refill will check NUMA node */
while (pool->alloc.count) {
- page = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, page);
+ netmem = pool->alloc.cache[--pool->alloc.count];
+ page_pool_return_page(pool, netmem);
}
}
EXPORT_SYMBOL(page_pool_update_nid);
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 90665d40f1eb..2fb06d5f6d55 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -3,10 +3,58 @@
#ifndef __PAGE_POOL_PRIV_H
#define __PAGE_POOL_PRIV_H
+#include <net/page_pool/helpers.h>
+
+#include "netmem_priv.h"
+
+extern struct mutex page_pools_lock;
+
s32 page_pool_inflight(const struct page_pool *pool, bool strict);
int page_pool_list(struct page_pool *pool);
void page_pool_detached(struct page_pool *pool);
void page_pool_unlist(struct page_pool *pool);
+static inline bool
+page_pool_set_dma_addr_netmem(netmem_ref netmem, dma_addr_t addr)
+{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ netmem_set_dma_addr(netmem, addr >> PAGE_SHIFT);
+
+ /* We assume page alignment to shave off bottom bits,
+ * if this "compression" doesn't work we need to drop.
+ */
+ return addr != (dma_addr_t)netmem_get_dma_addr(netmem)
+ << PAGE_SHIFT;
+ }
+
+ netmem_set_dma_addr(netmem, addr);
+ return false;
+}
+
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
+}
+
+#if defined(CONFIG_PAGE_POOL)
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem);
+void page_pool_clear_pp_info(netmem_ref netmem);
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq);
+#else
+static inline void page_pool_set_pp_info(struct page_pool *pool,
+ netmem_ref netmem)
+{
+}
+static inline void page_pool_clear_pp_info(netmem_ref netmem)
+{
+}
+static inline int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 278294aca66a..6677e0c2e256 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -3,19 +3,23 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
+#include <net/busy_poll.h>
#include <net/net_debug.h>
-#include <net/page_pool/types.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
+#include <net/page_pool/types.h>
#include <net/sock.h>
+#include "devmem.h"
#include "page_pool_priv.h"
#include "netdev-genl-gen.h"
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
-/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
+/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
+ * pool->user.
* Ordering: inside rtnl_lock
*/
-static DEFINE_MUTEX(page_pools_lock);
+DEFINE_MUTEX(page_pools_lock);
/* Page pools are only reachable from user space (via netlink) if they are
* linked to a netdev at creation time. Following page pool "visibility"
@@ -103,8 +107,6 @@ out:
mutex_unlock(&page_pools_lock);
rtnl_unlock();
- if (skb->len && err == -EMSGSIZE)
- return skb->len;
return err;
}
@@ -214,7 +216,9 @@ static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
size_t inflight, refsz;
+ unsigned int napi_id;
void *hdr;
hdr = genlmsg_iput(rsp, info);
@@ -228,8 +232,10 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
pool->slow.netdev->ifindex))
goto err_cancel;
- if (pool->user.napi_id &&
- nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
+
+ napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
+ if (napi_id >= MIN_NAPI_ID &&
+ nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
goto err_cancel;
inflight = page_pool_inflight(pool, false);
@@ -243,6 +249,9 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
pool->user.detach_time))
goto err_cancel;
+ if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
+ goto err_cancel;
+
genlmsg_end(rsp, hdr);
return 0;
@@ -315,8 +324,6 @@ int page_pool_list(struct page_pool *pool)
if (pool->slow.netdev) {
hlist_add_head(&pool->user.list,
&pool->slow.netdev->page_pools);
- pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
-
netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
}
@@ -346,6 +353,30 @@ void page_pool_unlist(struct page_pool *pool)
mutex_unlock(&page_pools_lock);
}
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+ struct page_pool *pool;
+ struct hlist_node *n;
+
+ if (!binding)
+ return 0;
+
+ mutex_lock(&page_pools_lock);
+ hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
+ if (pool->mp_priv != binding)
+ continue;
+
+ if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
+ mutex_unlock(&page_pools_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&page_pools_lock);
+ return -ENODATA;
+}
+
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
struct page_pool *pool;
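
Hedged caller sketch for page_pool_check_memory_provider() added above: RX-queue restart code is expected to verify that a dmabuf binding actually ended up backing a page pool on that queue. The wrapper below and its warning text are illustrative only.

#include <linux/netdevice.h>
#include <net/netdev_rx_queue.h>
#include "page_pool_priv.h"

static int my_verify_binding(struct net_device *dev,
			     struct netdev_rx_queue *rxq)
{
	int err = page_pool_check_memory_provider(dev, rxq);

	if (err)	/* -ENODATA: no page pool picked up the binding */
		netdev_warn(dev, "memory provider was not installed\n");

	return err;
}
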
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index ea55a758a475..82b6a2c3c141 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -69,7 +69,7 @@
*
* By design there should only be *one* "controlling" process. In practice
* multiple write accesses gives unpredictable result. Understood by "write"
- * to /proc gives result code thats should be read be the "writer".
+ * to /proc gives result code that should be read by the "writer".
* For practical use this should be no problem.
*
* Note when adding devices to a specific CPU there good idea to also assign
@@ -851,6 +851,9 @@ static ssize_t get_imix_entries(const char __user *buffer,
unsigned long weight;
unsigned long size;
+ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
+ return -E2BIG;
+
len = num_arg(&buffer[i], max_digits, &size);
if (len < 0)
return len;
@@ -880,9 +883,6 @@ static ssize_t get_imix_entries(const char __user *buffer,
i++;
pkt_dev->n_imix_entries++;
-
- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES)
- return -E2BIG;
} while (c == ' ');
return i;
@@ -2285,7 +2285,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
s64 remaining;
struct hrtimer_sleeper t;
- hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
@@ -2371,11 +2371,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
if (pkt_dev->spi) {
/* We need as quick as possible to find the right SA
- * Searching with minimum criteria to archieve this.
+ * Searching with minimum criteria to achieve this.
*/
x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
} else {
- /* slow path: we dont already have xfrm_state */
+ /* slow path: we don't already have xfrm_state */
x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
(xfrm_address_t *)&pkt_dev->cur_daddr,
(xfrm_address_t *)&pkt_dev->cur_saddr,
@@ -3654,7 +3654,7 @@ static int pktgen_thread_worker(void *arg)
struct pktgen_dev *pkt_dev = NULL;
int cpu = t->cpu;
- WARN_ON(smp_processor_id() != cpu);
+ WARN_ON_ONCE(smp_processor_id() != cpu);
init_waitqueue_head(&t->queue);
complete(&t->start_done);
@@ -3838,8 +3838,8 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
pkt_dev->ipsproto = IPPROTO_ESP;
- /* xfrm tunnel mode needs additional dst to extract outter
- * ip header protocol/ttl/id field, here creat a phony one.
+ /* xfrm tunnel mode needs additional dst to extract outer
+ * ip header protocol/ttl/id field, here create a phony one.
* instead of looking for a valid rt, which definitely hurting
* performance under such circumstance.
*/
@@ -3883,17 +3883,14 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
list_add_tail(&t->th_list, &pn->pktgen_threads);
init_completion(&t->start_done);
- p = kthread_create_on_node(pktgen_thread_worker,
- t,
- cpu_to_node(cpu),
- "kpktgend_%d", cpu);
+ p = kthread_create_on_cpu(pktgen_thread_worker, t, cpu, "kpktgend_%d");
if (IS_ERR(p)) {
pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
}
- kthread_bind(p, cpu);
+
t->tsk = p;
pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
@@ -3989,6 +3986,7 @@ static int __net_init pg_net_init(struct net *net)
goto remove;
}
+ cpus_read_lock();
for_each_online_cpu(cpu) {
int err;
@@ -3997,6 +3995,7 @@ static int __net_init pg_net_init(struct net *net)
pr_warn("Cannot create thread for cpu %d (%d)\n",
cpu, err);
}
+ cpus_read_unlock();
if (list_empty(&pn->pktgen_threads)) {
pr_err("Initialization failed for all threads\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bd50e9fe3234..d1e559fce918 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -84,7 +84,6 @@ int rtnl_lock_killable(void)
{
return mutex_lock_killable(&rtnl_mutex);
}
-EXPORT_SYMBOL(rtnl_lock_killable);
static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
@@ -179,6 +178,176 @@ bool lockdep_rtnl_is_held(void)
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+void __rtnl_net_lock(struct net *net)
+{
+ ASSERT_RTNL();
+
+ mutex_lock(&net->rtnl_mutex);
+}
+EXPORT_SYMBOL(__rtnl_net_lock);
+
+void __rtnl_net_unlock(struct net *net)
+{
+ ASSERT_RTNL();
+
+ mutex_unlock(&net->rtnl_mutex);
+}
+EXPORT_SYMBOL(__rtnl_net_unlock);
+
+void rtnl_net_lock(struct net *net)
+{
+ rtnl_lock();
+ __rtnl_net_lock(net);
+}
+EXPORT_SYMBOL(rtnl_net_lock);
+
+void rtnl_net_unlock(struct net *net)
+{
+ __rtnl_net_unlock(net);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL(rtnl_net_unlock);
+
+int rtnl_net_trylock(struct net *net)
+{
+ int ret = rtnl_trylock();
+
+ if (ret)
+ __rtnl_net_lock(net);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtnl_net_trylock);
+
+int rtnl_net_lock_killable(struct net *net)
+{
+ int ret = rtnl_lock_killable();
+
+ if (!ret)
+ __rtnl_net_lock(net);
+
+ return ret;
+}
+
+static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
+{
+ if (net_eq(net_a, net_b))
+ return 0;
+
+ /* always init_net first */
+ if (net_eq(net_a, &init_net))
+ return -1;
+
+ if (net_eq(net_b, &init_net))
+ return 1;
+
+ /* otherwise lock in ascending order */
+ return net_a < net_b ? -1 : 1;
+}
+
+int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b)
+{
+ const struct net *net_a, *net_b;
+
+ net_a = container_of(a, struct net, rtnl_mutex.dep_map);
+ net_b = container_of(b, struct net, rtnl_mutex.dep_map);
+
+ return rtnl_net_cmp_locks(net_a, net_b);
+}
+
+bool rtnl_net_is_locked(struct net *net)
+{
+ return rtnl_is_locked() && mutex_is_locked(&net->rtnl_mutex);
+}
+EXPORT_SYMBOL(rtnl_net_is_locked);
+
+bool lockdep_rtnl_net_is_held(struct net *net)
+{
+ return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex);
+}
+EXPORT_SYMBOL(lockdep_rtnl_net_is_held);
+#else
+static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
+{
+ /* No need to swap */
+ return -1;
+}
+#endif
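
A usage sketch for the per-netns helpers above, assuming the prototypes are exposed through linux/rtnetlink.h as this hunk suggests: take rtnl_lock() first, then the per-netns mutex, and release in the opposite order. The operation being protected is a placeholder.

#include <linux/rtnetlink.h>

static int my_do_change(struct net *net)
{
	return 0;			/* placeholder for the real work */
}

static int my_change_in_netns(struct net *net)
{
	int err;

	rtnl_net_lock(net);		/* rtnl_lock() + net->rtnl_mutex */
	err = my_do_change(net);
	rtnl_net_unlock(net);		/* unlock in reverse order */

	return err;
}
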
+
+struct rtnl_nets {
+ /* ->newlink() needs to freeze 3 netns at most;
+ * 2 for the new device, 1 for its peer.
+ */
+ struct net *net[3];
+ unsigned char len;
+};
+
+static void rtnl_nets_init(struct rtnl_nets *rtnl_nets)
+{
+ memset(rtnl_nets, 0, sizeof(*rtnl_nets));
+}
+
+static void rtnl_nets_destroy(struct rtnl_nets *rtnl_nets)
+{
+ int i;
+
+ for (i = 0; i < rtnl_nets->len; i++) {
+ put_net(rtnl_nets->net[i]);
+ rtnl_nets->net[i] = NULL;
+ }
+
+ rtnl_nets->len = 0;
+}
+
+/**
+ * rtnl_nets_add - Add netns to be locked before ->newlink().
+ *
+ * @rtnl_nets: rtnl_nets pointer passed to ->get_peer_net().
+ * @net: netns pointer with an extra refcnt held.
+ *
+ * The extra refcnt is released in rtnl_nets_destroy().
+ */
+static void rtnl_nets_add(struct rtnl_nets *rtnl_nets, struct net *net)
+{
+ int i;
+
+ DEBUG_NET_WARN_ON_ONCE(rtnl_nets->len == ARRAY_SIZE(rtnl_nets->net));
+
+ for (i = 0; i < rtnl_nets->len; i++) {
+ switch (rtnl_net_cmp_locks(rtnl_nets->net[i], net)) {
+ case 0:
+ put_net(net);
+ return;
+ case 1:
+ swap(rtnl_nets->net[i], net);
+ }
+ }
+
+ rtnl_nets->net[i] = net;
+ rtnl_nets->len++;
+}
+
+static void rtnl_nets_lock(struct rtnl_nets *rtnl_nets)
+{
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < rtnl_nets->len; i++)
+ __rtnl_net_lock(rtnl_nets->net[i]);
+}
+
+static void rtnl_nets_unlock(struct rtnl_nets *rtnl_nets)
+{
+ int i;
+
+ for (i = 0; i < rtnl_nets->len; i++)
+ __rtnl_net_unlock(rtnl_nets->net[i]);
+
+ rtnl_unlock();
+}
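
The rtnl_nets helpers above are static to this file, so the following is only an illustrative sequence of how a ->newlink() path is expected to drive them: gather the (up to three) namespaces, lock them in the canonical order enforced by rtnl_net_cmp_locks(), and drop the references afterwards.

static void my_newlink_locking_example(struct net *link_net, struct net *peer_net)
{
	struct rtnl_nets rtnl_nets;

	rtnl_nets_init(&rtnl_nets);
	rtnl_nets_add(&rtnl_nets, get_net(link_net));	/* refs handed over */
	rtnl_nets_add(&rtnl_nets, get_net(peer_net));	/* dedup/sort inside */

	rtnl_nets_lock(&rtnl_nets);
	/* ... create or modify the device here ... */
	rtnl_nets_unlock(&rtnl_nets);

	rtnl_nets_destroy(&rtnl_nets);	/* puts the netns refs taken above */
}
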
+
static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
@@ -269,64 +438,13 @@ unlock:
}
/**
- * rtnl_register_module - Register a rtnetlink message type
- *
- * @owner: module registering the hook (THIS_MODULE)
- * @protocol: Protocol family or PF_UNSPEC
- * @msgtype: rtnetlink message type
- * @doit: Function pointer called for each request message
- * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
- *
- * Like rtnl_register, but for use by removable modules.
- */
-int rtnl_register_module(struct module *owner,
- int protocol, int msgtype,
- rtnl_doit_func doit, rtnl_dumpit_func dumpit,
- unsigned int flags)
-{
- return rtnl_register_internal(owner, protocol, msgtype,
- doit, dumpit, flags);
-}
-EXPORT_SYMBOL_GPL(rtnl_register_module);
-
-/**
- * rtnl_register - Register a rtnetlink message type
- * @protocol: Protocol family or PF_UNSPEC
- * @msgtype: rtnetlink message type
- * @doit: Function pointer called for each request message
- * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
- *
- * Registers the specified function pointers (at least one of them has
- * to be non-NULL) to be called whenever a request message for the
- * specified protocol family and message type is received.
- *
- * The special protocol family PF_UNSPEC may be used to define fallback
- * function pointers for the case when no entry for the specific protocol
- * family exists.
- */
-void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func doit, rtnl_dumpit_func dumpit,
- unsigned int flags)
-{
- int err;
-
- err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
- flags);
- if (err)
- pr_err("Unable to register rtnetlink message handler, "
- "protocol = %d, message type = %d\n", protocol, msgtype);
-}
-
-/**
* rtnl_unregister - Unregister a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
*
* Returns 0 on success or a negative error code.
*/
-int rtnl_unregister(int protocol, int msgtype)
+static int rtnl_unregister(int protocol, int msgtype)
{
struct rtnl_link __rcu **tab;
struct rtnl_link *link;
@@ -349,7 +467,6 @@ int rtnl_unregister(int protocol, int msgtype)
return 0;
}
-EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
* rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
@@ -384,46 +501,86 @@ void rtnl_unregister_all(int protocol)
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
-static LIST_HEAD(link_ops);
-
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+/**
+ * __rtnl_register_many - Register rtnetlink message types
+ * @handlers: Array of struct rtnl_msg_handlers
+ * @n: The length of @handlers
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
+ *
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
+ *
+ * When one element of @handlers fails to register,
+ * 1) built-in: panics.
+ * 2) modules : the previous successful registrations are unwound
+ * and an error is returned.
+ *
+ * Use rtnl_register_many().
+ */
+int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
{
- const struct rtnl_link_ops *ops;
+ const struct rtnl_msg_handler *handler;
+ int i, err;
- list_for_each_entry(ops, &link_ops, list) {
- if (!strcmp(ops->kind, kind))
- return ops;
+ for (i = 0, handler = handlers; i < n; i++, handler++) {
+ err = rtnl_register_internal(handler->owner, handler->protocol,
+ handler->msgtype, handler->doit,
+ handler->dumpit, handler->flags);
+ if (err) {
+ if (!handler->owner)
+ panic("Unable to register rtnetlink message "
+ "handlers, %pS\n", handlers);
+
+ __rtnl_unregister_many(handlers, i);
+ break;
+ }
}
- return NULL;
+
+ return err;
}
+EXPORT_SYMBOL_GPL(__rtnl_register_many);
-/**
- * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
- * @ops: struct rtnl_link_ops * to register
- *
- * The caller must hold the rtnl_mutex. This function should be used
- * by drivers that create devices during module initialization. It
- * must be called before registering the devices.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_link_register(struct rtnl_link_ops *ops)
+void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
{
- if (rtnl_link_ops_get(ops->kind))
- return -EEXIST;
+ const struct rtnl_msg_handler *handler;
+ int i;
- /* The check for alloc/setup is here because if ops
- * does not have that filled up, it is not possible
- * to use the ops for creating device. So do not
- * fill up dellink as well. That disables rtnl_dellink.
- */
- if ((ops->alloc || ops->setup) && !ops->dellink)
- ops->dellink = unregister_netdevice_queue;
+ for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
+ rtnl_unregister(handler->protocol, handler->msgtype);
+}
+EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
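
For orientation, here is a minimal sketch of how a module could use the new bulk API; the callbacks (my_foo_doit, my_foo_dump) and the init/exit wrappers are illustrative placeholders, and rtnl_register_many()/rtnl_unregister_many() are assumed to be the ARRAY_SIZE()-based convenience wrappers around the two functions above:

static const struct rtnl_msg_handler my_rtnl_msg_handlers[] = {
	{.owner = THIS_MODULE, .protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH,
	 .doit = my_foo_doit, .dumpit = my_foo_dump},
	{.owner = THIS_MODULE, .protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH,
	 .doit = my_foo_doit},
};

static int __init my_foo_init(void)
{
	/* .owner is non-NULL (module case), so a partial failure is
	 * unwound and an error is returned instead of panicking.
	 */
	return rtnl_register_many(my_rtnl_msg_handlers);
}

static void __exit my_foo_exit(void)
{
	rtnl_unregister_many(my_rtnl_msg_handlers);
}
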
- list_add_tail(&ops->list, &link_ops);
- return 0;
+static DEFINE_MUTEX(link_ops_mutex);
+static LIST_HEAD(link_ops);
+
+static struct rtnl_link_ops *rtnl_link_ops_get(const char *kind, int *srcu_index)
+{
+ struct rtnl_link_ops *ops;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(ops, &link_ops, list) {
+ if (!strcmp(ops->kind, kind)) {
+ *srcu_index = srcu_read_lock(&ops->srcu);
+ goto unlock;
+ }
+ }
+
+ ops = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return ops;
+}
+
+static void rtnl_link_ops_put(struct rtnl_link_ops *ops, int srcu_index)
+{
+ srcu_read_unlock(&ops->srcu, srcu_index);
}
-EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
* rtnl_link_register - Register rtnl_link_ops with rtnetlink.
@@ -433,6 +590,7 @@ EXPORT_SYMBOL_GPL(__rtnl_link_register);
*/
int rtnl_link_register(struct rtnl_link_ops *ops)
{
+ struct rtnl_link_ops *tmp;
int err;
/* Sanity-check max sizes to avoid stack buffer overflow. */
@@ -440,9 +598,31 @@ int rtnl_link_register(struct rtnl_link_ops *ops)
ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
return -EINVAL;
- rtnl_lock();
- err = __rtnl_link_register(ops);
- rtnl_unlock();
+ /* The check for alloc/setup is here because if ops
+ * does not have either of them filled in, the ops cannot
+ * be used to create a device. In that case, do not fill in
+ * dellink either, which disables rtnl_dellink.
+ */
+ if ((ops->alloc || ops->setup) && !ops->dellink)
+ ops->dellink = unregister_netdevice_queue;
+
+ err = init_srcu_struct(&ops->srcu);
+ if (err)
+ return err;
+
+ mutex_lock(&link_ops_mutex);
+
+ list_for_each_entry(tmp, &link_ops, list) {
+ if (!strcmp(ops->kind, tmp->kind)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+ }
+
+ list_add_tail_rcu(&ops->list, &link_ops);
+unlock:
+ mutex_unlock(&link_ops_mutex);
+
return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
@@ -459,48 +639,20 @@ static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
unregister_netdevice_many(&list_kill);
}
-/**
- * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
- * @ops: struct rtnl_link_ops * to unregister
- *
- * The caller must hold the rtnl_mutex and guarantee net_namespace_list
- * integrity (hold pernet_ops_rwsem for writing to close the race
- * with setup_net() and cleanup_net()).
- */
-void __rtnl_link_unregister(struct rtnl_link_ops *ops)
-{
- struct net *net;
-
- for_each_net(net) {
- __rtnl_kill_links(net, ops);
- }
- list_del(&ops->list);
-}
-EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
-
/* Return with the rtnl_lock held when there are no network
* devices unregistering in any network namespace.
*/
static void rtnl_lock_unregistering_all(void)
{
- struct net *net;
- bool unregistering;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
- unregistering = false;
rtnl_lock();
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
- for_each_net(net) {
- if (atomic_read(&net->dev_unreg_count) > 0) {
- unregistering = true;
- break;
- }
- }
- if (!unregistering)
+ if (!atomic_read(&dev_unreg_count))
break;
__rtnl_unlock();
@@ -515,10 +667,22 @@ static void rtnl_lock_unregistering_all(void)
*/
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
+ struct net *net;
+
+ mutex_lock(&link_ops_mutex);
+ list_del_rcu(&ops->list);
+ mutex_unlock(&link_ops_mutex);
+
+ synchronize_srcu(&ops->srcu);
+ cleanup_srcu_struct(&ops->srcu);
+
/* Close the race with setup_net() and cleanup_net() */
down_write(&pernet_ops_rwsem);
rtnl_lock_unregistering_all();
- __rtnl_link_unregister(ops);
+
+ for_each_net(net)
+ __rtnl_kill_links(net, ops);
+
rtnl_unlock();
up_write(&pernet_ops_rwsem);
}
@@ -575,31 +739,51 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
static LIST_HEAD(rtnl_af_ops);
-static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
+static struct rtnl_af_ops *rtnl_af_lookup(const int family, int *srcu_index)
{
- const struct rtnl_af_ops *ops;
+ struct rtnl_af_ops *ops;
ASSERT_RTNL();
- list_for_each_entry(ops, &rtnl_af_ops, list) {
- if (ops->family == family)
- return ops;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
+ if (ops->family == family) {
+ *srcu_index = srcu_read_lock(&ops->srcu);
+ goto unlock;
+ }
}
- return NULL;
+ ops = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return ops;
+}
+
+static void rtnl_af_put(struct rtnl_af_ops *ops, int srcu_index)
+{
+ srcu_read_unlock(&ops->srcu, srcu_index);
}
/**
* rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
- * Returns 0 on success or a negative error code.
+ * Return: 0 on success or a negative error code.
*/
-void rtnl_af_register(struct rtnl_af_ops *ops)
+int rtnl_af_register(struct rtnl_af_ops *ops)
{
+ int err = init_srcu_struct(&ops->srcu);
+
+ if (err)
+ return err;
+
rtnl_lock();
list_add_tail_rcu(&ops->list, &rtnl_af_ops);
rtnl_unlock();
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
@@ -614,6 +798,8 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops)
rtnl_unlock();
synchronize_rcu();
+ synchronize_srcu(&ops->srcu);
+ cleanup_srcu_struct(&ops->srcu);
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
@@ -851,9 +1037,22 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+void netdev_set_operstate(struct net_device *dev, int newstate)
+{
+ unsigned int old = READ_ONCE(dev->operstate);
+
+ do {
+ if (old == newstate)
+ return;
+ } while (!try_cmpxchg(&dev->operstate, &old, newstate));
+
+ netdev_state_change(dev);
+}
+EXPORT_SYMBOL(netdev_set_operstate);
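
netdev_set_operstate() updates dev->operstate with a lock-free cmpxchg loop and only emits a netdev state-change notification when the value actually changes. A hedged usage sketch (the carrier check is just an illustrative trigger):

	if (netif_carrier_ok(dev))
		netdev_set_operstate(dev, IF_OPER_UP);
	else
		netdev_set_operstate(dev, IF_OPER_DOWN);
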
+
static void set_operstate(struct net_device *dev, unsigned char transition)
{
- unsigned char operstate = dev->operstate;
+ unsigned char operstate = READ_ONCE(dev->operstate);
switch (transition) {
case IF_OPER_UP:
@@ -875,12 +1074,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}
- if (dev->operstate != operstate) {
- write_lock(&dev_base_lock);
- dev->operstate = operstate;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- }
+ netdev_set_operstate(dev, operstate);
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -1037,8 +1231,8 @@ static size_t rtnl_proto_down_size(const struct net_device *dev)
{
size_t size = nla_total_size(1);
- if (dev->proto_down_reason)
- size += nla_total_size(0) + nla_total_size(4);
+ /* Assume dev->proto_down_reason is not zero. */
+ size += nla_total_size(0) + nla_total_size(4);
return size;
}
@@ -1119,6 +1313,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
+ rtnl_devlink_port_size(dev)
+ rtnl_dpll_pin_size(dev)
+ + nla_total_size(8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
+ 0;
}
@@ -1456,17 +1651,18 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
return 0;
}
-static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_fill_link_ifmap(struct sk_buff *skb,
+ const struct net_device *dev)
{
struct rtnl_link_ifmap map;
memset(&map, 0, sizeof(map));
- map.mem_start = dev->mem_start;
- map.mem_end = dev->mem_end;
- map.base_addr = dev->base_addr;
- map.irq = dev->irq;
- map.dma = dev->dma;
- map.port = dev->if_port;
+ map.mem_start = READ_ONCE(dev->mem_start);
+ map.mem_end = READ_ONCE(dev->mem_end);
+ map.base_addr = READ_ONCE(dev->base_addr);
+ map.irq = READ_ONCE(dev->irq);
+ map.dma = READ_ONCE(dev->dma);
+ map.port = READ_ONCE(dev->if_port);
if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
return -EMSGSIZE;
@@ -1477,13 +1673,15 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
const struct bpf_prog *generic_xdp_prog;
+ u32 res = 0;
- ASSERT_RTNL();
+ rcu_read_lock();
+ generic_xdp_prog = rcu_dereference(dev->xdp_prog);
+ if (generic_xdp_prog)
+ res = generic_xdp_prog->aux->id;
+ rcu_read_unlock();
- generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
- if (!generic_xdp_prog)
- return 0;
- return generic_xdp_prog->aux->id;
+ return res;
}
static u32 rtnl_xdp_prog_drv(struct net_device *dev)
@@ -1603,7 +1801,8 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
upper_dev = netdev_master_upper_dev_get_rcu(dev);
if (upper_dev)
- ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
+ ret = nla_put_u32(skb, IFLA_MASTER,
+ READ_ONCE(upper_dev->ifindex));
rcu_read_unlock();
return ret;
@@ -1612,10 +1811,10 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
bool force)
{
- int ifindex = dev_get_iflink(dev);
+ int iflink = dev_get_iflink(dev);
- if (force || dev->ifindex != ifindex)
- return nla_put_u32(skb, IFLA_LINK, ifindex);
+ if (force || READ_ONCE(dev->ifindex) != iflink)
+ return nla_put_u32(skb, IFLA_LINK, iflink);
return 0;
}
@@ -1699,7 +1898,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
struct netdev_name_node *name_node;
int count = 0;
- list_for_each_entry(name_node, &dev->name_node->list, list) {
+ list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
return -EMSGSIZE;
count++;
@@ -1707,6 +1906,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
return count;
}
+/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
const struct net_device *dev)
{
@@ -1735,10 +1935,10 @@ static int rtnl_fill_proto_down(struct sk_buff *skb,
struct nlattr *pr;
u32 preason;
- if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
+ if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
goto nla_put_failure;
- preason = dev->proto_down_reason;
+ preason = READ_ONCE(dev->proto_down_reason);
if (!preason)
return 0;
@@ -1811,6 +2011,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
u32 event, int *new_nsid, int new_ifindex,
int tgt_netnsid, gfp_t gfp)
{
+ char devname[IFNAMSIZ];
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
struct Qdisc *qdisc;
@@ -1823,41 +2024,53 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_UNSPEC;
ifm->__ifi_pad = 0;
- ifm->ifi_type = dev->type;
- ifm->ifi_index = dev->ifindex;
+ ifm->ifi_type = READ_ONCE(dev->type);
+ ifm->ifi_index = READ_ONCE(dev->ifindex);
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
- qdisc = rtnl_dereference(dev->qdisc);
- if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
- nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
+ netdev_copy_name(dev, devname);
+ if (nla_put_string(skb, IFLA_IFNAME, devname))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
nla_put_u8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
- nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
- nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
- nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
- nla_put_u32(skb, IFLA_GROUP, dev->group) ||
- nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
- nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
- nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
- nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
- nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
- nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
- nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
- nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
- nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
- nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
+ netif_running(dev) ? READ_ONCE(dev->operstate) :
+ IF_OPER_DOWN) ||
+ nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
+ nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
+ nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
+ nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
+ nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
+ nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
+ nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
+ nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
+ READ_ONCE(dev->num_tx_queues)) ||
+ nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
+ READ_ONCE(dev->gso_max_segs)) ||
+ nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
+ READ_ONCE(dev->gso_max_size)) ||
+ nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
+ READ_ONCE(dev->gro_max_size)) ||
+ nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
+ READ_ONCE(dev->gso_ipv4_max_size)) ||
+ nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
+ READ_ONCE(dev->gro_ipv4_max_size)) ||
+ nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
+ READ_ONCE(dev->tso_max_size)) ||
+ nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
+ READ_ONCE(dev->tso_max_segs)) ||
+ nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON,
+ READ_ONCE(dev->max_pacing_offload_horizon)) ||
#ifdef CONFIG_RPS
- nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
+ nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
+ READ_ONCE(dev->num_rx_queues)) ||
#endif
put_master_ifindex(skb, dev) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
- (qdisc &&
- nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
atomic_read(&dev->carrier_up_count) +
@@ -1876,9 +2089,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
- if (rtnl_fill_link_ifmap(skb, dev))
- goto nla_put_failure;
-
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
@@ -1911,9 +2121,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
- if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
- goto nla_put_failure;
-
if (new_nsid &&
nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
goto nla_put_failure;
@@ -1926,12 +2133,18 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
rcu_read_lock();
+ if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
+ goto nla_put_failure_rcu;
+ qdisc = rcu_dereference(dev->qdisc);
+ if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
+ goto nla_put_failure_rcu;
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
goto nla_put_failure_rcu;
- rcu_read_unlock();
-
+ if (rtnl_fill_link_ifmap(skb, dev))
+ goto nla_put_failure_rcu;
if (rtnl_fill_prop_list(skb, dev))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
+ rcu_read_unlock();
if (dev->dev.parent &&
nla_put_string(skb, IFLA_PARENT_DEV_NAME,
@@ -1960,6 +2173,7 @@ nla_put_failure:
}
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_UNSPEC] = { .strict_start_type = IFLA_DPLL_PIN },
[IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
[IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
@@ -1988,7 +2202,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
[IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
- [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
+ [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
[IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@ -2013,7 +2227,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
[IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
[IFLA_ALLMULTI] = { .type = NLA_REJECT },
- [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
+ [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
[IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
};
@@ -2068,10 +2282,11 @@ static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
[IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
};
-static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
+static struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla,
+ int *ops_srcu_index)
{
- const struct rtnl_link_ops *ops = NULL;
struct nlattr *linfo[IFLA_INFO_MAX + 1];
+ struct rtnl_link_ops *ops = NULL;
if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
return NULL;
@@ -2080,7 +2295,7 @@ static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla
char kind[MODULE_NAME_LEN];
nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
- ops = rtnl_link_ops_get(kind);
+ ops = rtnl_link_ops_get(kind, ops_srcu_index);
}
return ops;
@@ -2201,24 +2416,22 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
+ struct rtnl_link_ops *kind_ops = NULL;
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- struct net *tgt_net = net;
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
+ unsigned int flags = NLM_F_MULTI;
struct nlattr *tb[IFLA_MAX+1];
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
+ struct net *tgt_net = net;
u32 ext_filter_mask = 0;
- const struct rtnl_link_ops *kind_ops = NULL;
- unsigned int flags = NLM_F_MULTI;
+ struct net_device *dev;
+ int ops_srcu_index;
int master_idx = 0;
int netnsid = -1;
int err, i;
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
if (err < 0) {
if (cb->strict_check)
@@ -2238,7 +2451,9 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
- return PTR_ERR(tgt_net);
+ err = PTR_ERR(tgt_net);
+ netnsid = -1;
+ goto out;
}
break;
case IFLA_EXT_MASK:
@@ -2248,12 +2463,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
master_idx = nla_get_u32(tb[i]);
break;
case IFLA_LINKINFO:
- kind_ops = linkinfo_to_kind_ops(tb[i]);
+ kind_ops = linkinfo_to_kind_ops(tb[i], &ops_srcu_index);
break;
default:
if (cb->strict_check) {
NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
}
}
@@ -2262,38 +2478,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
flags |= NLM_F_DUMP_FILTERED;
walk_entries:
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
- if (link_dump_filtered(dev, master_idx, kind_ops))
- goto cont;
- if (idx < s_idx)
- goto cont;
- err = rtnl_fill_ifinfo(skb, dev, net,
- RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq, 0, flags,
- ext_filter_mask, 0, NULL, 0,
- netnsid, GFP_KERNEL);
-
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
-cont:
- idx++;
- }
+ err = 0;
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ if (link_dump_filtered(dev, master_idx, kind_ops))
+ continue;
+ err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq, 0, flags,
+ ext_filter_mask, 0, NULL, 0,
+ netnsid, GFP_KERNEL);
+ if (err < 0)
+ break;
}
-out:
- err = skb->len;
-out_err:
- cb->args[1] = idx;
- cb->args[0] = h;
+
+
cb->seq = tgt_net->dev_base_seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+
+out:
+
+ if (kind_ops)
+ rtnl_link_ops_put(kind_ops, ops_srcu_index);
if (netnsid >= 0)
put_net(tgt_net);
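
The hunk above replaces the old dev_index_head hash-bucket walk (resumed via cb->args[0]/args[1]) with for_each_netdev_dump(), which resumes from an ifindex kept in cb->ctx. A minimal sketch of that dump pattern, assuming a hypothetical per-device fill helper my_fill_one():

static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct {
		unsigned long ifindex;
	} *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int err = 0;

	for_each_netdev_dump(net, dev, ctx->ifindex) {
		err = my_fill_one(skb, dev, cb);
		if (err < 0)
			break;	/* the next call resumes from ctx->ifindex */
	}

	return err;
}
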
@@ -2322,9 +2527,10 @@ int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
}
EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
-struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+static struct net *rtnl_link_get_net_ifla(struct nlattr *tb[])
{
- struct net *net;
+ struct net *net = NULL;
+
/* Examine the link attributes and figure out which
* network namespace we are talking about.
*/
@@ -2332,8 +2538,17 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
else if (tb[IFLA_NET_NS_FD])
net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
- else
+
+ return net;
+}
+
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+{
+ struct net *net = rtnl_link_get_net_ifla(tb);
+
+ if (!net)
net = get_net(src_net);
+
return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);
@@ -2473,20 +2688,24 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
int rem, err;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
- const struct rtnl_af_ops *af_ops;
+ struct rtnl_af_ops *af_ops;
+ int af_ops_srcu_index;
- af_ops = rtnl_af_lookup(nla_type(af));
+ af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
if (!af_ops)
return -EAFNOSUPPORT;
if (!af_ops->set_link_af)
- return -EOPNOTSUPP;
-
- if (af_ops->validate_link_af) {
+ err = -EOPNOTSUPP;
+ else if (af_ops->validate_link_af)
err = af_ops->validate_link_af(dev, af, extack);
- if (err < 0)
- return err;
- }
+ else
+ err = 0;
+
+ rtnl_af_put(af_ops, af_ops_srcu_index);
+
+ if (err < 0)
+ return err;
}
}
@@ -2552,7 +2771,7 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
- nla_len(attr) < NLA_HDRLEN) {
+ nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
return -EINVAL;
}
if (len >= MAX_VLAN_LIST_LEN)
@@ -2730,7 +2949,7 @@ static int do_set_proto_down(struct net_device *dev,
bool proto_down;
int err;
- if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
+ if (!dev->change_proto_down) {
NL_SET_ERR_MSG(extack, "Protodown not supported by device");
return -EOPNOTSUPP;
}
@@ -2777,8 +2996,8 @@ static int do_set_proto_down(struct net_device *dev,
#define DO_SETLINK_MODIFIED 0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY 0x03
-static int do_setlink(const struct sk_buff *skb,
- struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
+ struct net *tgt_net, struct ifinfomsg *ifm,
struct netlink_ext_ack *extack,
struct nlattr **tb, int status)
{
@@ -2786,32 +3005,25 @@ static int do_setlink(const struct sk_buff *skb,
char ifname[IFNAMSIZ];
int err;
+ err = validate_linkmsg(dev, tb, extack);
+ if (err < 0)
+ goto errout;
+
if (tb[IFLA_IFNAME])
nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
- if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
+ if (!net_eq(tgt_net, dev_net(dev))) {
const char *pat = ifname[0] ? ifname : NULL;
- struct net *net;
int new_ifindex;
- net = rtnl_link_get_net_capable(skb, dev_net(dev),
- tb, CAP_NET_ADMIN);
- if (IS_ERR(net)) {
- err = PTR_ERR(net);
- goto errout;
- }
-
- if (tb[IFLA_NEW_IFINDEX])
- new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
- else
- new_ifindex = 0;
+ new_ifindex = nla_get_s32_default(tb[IFLA_NEW_IFINDEX], 0);
- err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
- put_net(net);
+ err = __dev_change_net_namespace(dev, tgt_net, pat, new_ifindex);
if (err)
goto errout;
+
status |= DO_SETLINK_MODIFIED;
}
@@ -2983,11 +3195,9 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_LINKMODE]) {
unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
- write_lock(&dev_base_lock);
if (dev->link_mode ^ value)
status |= DO_SETLINK_NOTIFY;
- dev->link_mode = value;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->link_mode, value);
}
if (tb[IFLA_VFINFO_LIST]) {
@@ -3072,11 +3282,18 @@ static int do_setlink(const struct sk_buff *skb,
int rem;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
- const struct rtnl_af_ops *af_ops;
+ struct rtnl_af_ops *af_ops;
+ int af_ops_srcu_index;
- BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
+ af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
+ if (!af_ops) {
+ err = -EAFNOSUPPORT;
+ goto errout;
+ }
err = af_ops->set_link_af(dev, af, extack);
+ rtnl_af_put(af_ops, af_ops_srcu_index);
+
if (err < 0)
goto errout;
@@ -3173,11 +3390,13 @@ static struct net_device *rtnl_dev_get(struct net *net,
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct ifinfomsg *ifm = nlmsg_data(nlh);
struct net *net = sock_net(skb->sk);
- struct ifinfomsg *ifm;
- struct net_device *dev;
- int err;
struct nlattr *tb[IFLA_MAX+1];
+ struct net_device *dev = NULL;
+ struct rtnl_nets rtnl_nets;
+ struct net *tgt_net;
+ int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
ifla_policy, extack);
@@ -3188,25 +3407,32 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout;
- err = -EINVAL;
- ifm = nlmsg_data(nlh);
+ tgt_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
+ if (IS_ERR(tgt_net)) {
+ err = PTR_ERR(tgt_net);
+ goto errout;
+ }
+
+ rtnl_nets_init(&rtnl_nets);
+ rtnl_nets_add(&rtnl_nets, get_net(net));
+ rtnl_nets_add(&rtnl_nets, tgt_net);
+
+ rtnl_nets_lock(&rtnl_nets);
+
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
dev = rtnl_dev_get(net, tb);
else
- goto errout;
+ err = -EINVAL;
- if (dev == NULL) {
+ if (dev)
+ err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
+ else if (!err)
err = -ENODEV;
- goto errout;
- }
- err = validate_linkmsg(dev, tb, extack);
- if (err < 0)
- goto errout;
-
- err = do_setlink(skb, dev, ifm, extack, tb, 0);
+ rtnl_nets_unlock(&rtnl_nets);
+ rtnl_nets_destroy(&rtnl_nets);
errout:
return err;
}
@@ -3266,14 +3492,14 @@ EXPORT_SYMBOL_GPL(rtnl_delete_link);
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct ifinfomsg *ifm = nlmsg_data(nlh);
struct net *net = sock_net(skb->sk);
u32 portid = NETLINK_CB(skb).portid;
- struct net *tgt_net = net;
- struct net_device *dev = NULL;
- struct ifinfomsg *ifm;
struct nlattr *tb[IFLA_MAX+1];
- int err;
+ struct net_device *dev = NULL;
+ struct net *tgt_net = net;
int netnsid = -1;
+ int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
ifla_policy, extack);
@@ -3291,27 +3517,24 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(tgt_net);
}
- err = -EINVAL;
- ifm = nlmsg_data(nlh);
+ rtnl_net_lock(tgt_net);
+
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb);
+ dev = rtnl_dev_get(tgt_net, tb);
+
+ if (dev)
+ err = rtnl_delete_link(dev, portid, nlh);
+ else if (ifm->ifi_index > 0 || tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ err = -ENODEV;
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
- goto out;
-
- if (!dev) {
- if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
- err = -ENODEV;
-
- goto out;
- }
+ err = -EINVAL;
- err = rtnl_delete_link(dev, portid, nlh);
+ rtnl_net_unlock(tgt_net);
-out:
if (netnsid >= 0)
put_net(tgt_net);
@@ -3438,21 +3661,90 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
}
EXPORT_SYMBOL(rtnl_create_link);
+struct rtnl_newlink_tbs {
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
+ struct nlattr *attr[RTNL_MAX_TYPE + 1];
+ struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
+};
+
+static int rtnl_changelink(const struct sk_buff *skb, struct nlmsghdr *nlh,
+ const struct rtnl_link_ops *ops,
+ struct net_device *dev, struct net *tgt_net,
+ struct rtnl_newlink_tbs *tbs,
+ struct nlattr **data,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr ** const linkinfo = tbs->linkinfo;
+ struct nlattr ** const tb = tbs->tb;
+ int status = 0;
+ int err;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ if (linkinfo[IFLA_INFO_DATA]) {
+ if (!ops || ops != dev->rtnl_link_ops || !ops->changelink)
+ return -EOPNOTSUPP;
+
+ err = ops->changelink(dev, tb, data, extack);
+ if (err < 0)
+ return err;
+
+ status |= DO_SETLINK_NOTIFY;
+ }
+
+ if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ const struct rtnl_link_ops *m_ops = NULL;
+ struct nlattr **slave_data = NULL;
+ struct net_device *master_dev;
+
+ master_dev = netdev_master_upper_dev_get(dev);
+ if (master_dev)
+ m_ops = master_dev->rtnl_link_ops;
+
+ if (!m_ops || !m_ops->slave_changelink)
+ return -EOPNOTSUPP;
+
+ if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
+ return -EINVAL;
+
+ if (m_ops->slave_maxtype) {
+ err = nla_parse_nested_deprecated(tbs->slave_attr,
+ m_ops->slave_maxtype,
+ linkinfo[IFLA_INFO_SLAVE_DATA],
+ m_ops->slave_policy, extack);
+ if (err < 0)
+ return err;
+
+ slave_data = tbs->slave_attr;
+ }
+
+ err = m_ops->slave_changelink(master_dev, dev, tb, slave_data, extack);
+ if (err < 0)
+ return err;
+
+ status |= DO_SETLINK_NOTIFY;
+ }
+
+ return do_setlink(skb, dev, tgt_net, nlmsg_data(nlh), extack, tb, status);
+}
+
static int rtnl_group_changelink(const struct sk_buff *skb,
- struct net *net, int group,
- struct ifinfomsg *ifm,
- struct netlink_ext_ack *extack,
- struct nlattr **tb)
+ struct net *net, struct net *tgt_net,
+ int group, struct ifinfomsg *ifm,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb)
{
struct net_device *dev, *aux;
int err;
for_each_netdev_safe(net, dev, aux) {
if (dev->group == group) {
- err = validate_linkmsg(dev, tb, extack);
- if (err < 0)
- return err;
- err = do_setlink(skb, dev, ifm, extack, tb, 0);
+ err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
if (err < 0)
return err;
}
@@ -3463,6 +3755,8 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
const struct rtnl_link_ops *ops,
+ struct net *tgt_net, struct net *link_net,
+ struct net *peer_net,
const struct nlmsghdr *nlh,
struct nlattr **tb, struct nlattr **data,
struct netlink_ext_ack *extack)
@@ -3470,7 +3764,6 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
unsigned char name_assign_type = NET_NAME_USER;
struct net *net = sock_net(skb->sk);
u32 portid = NETLINK_CB(skb).portid;
- struct net *dest_net, *link_net;
struct net_device *dev;
char ifname[IFNAMSIZ];
int err;
@@ -3485,27 +3778,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
name_assign_type = NET_NAME_ENUM;
}
- dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
-
- if (tb[IFLA_LINK_NETNSID]) {
- int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
-
- link_net = get_net_ns_by_id(dest_net, id);
- if (!link_net) {
- NL_SET_ERR_MSG(extack, "Unknown network namespace id");
- err = -EINVAL;
- goto out;
- }
- err = -EPERM;
- if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
- goto out;
- } else {
- link_net = NULL;
- }
-
- dev = rtnl_create_link(link_net ? : dest_net, ifname,
+ dev = rtnl_create_link(link_net ? : tgt_net, ifname,
name_assign_type, ops, tb, extack);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
@@ -3514,8 +3787,13 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
dev->ifindex = ifm->ifi_index;
+ if (link_net)
+ net = link_net;
+ if (peer_net)
+ net = peer_net;
+
if (ops->newlink)
- err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+ err = ops->newlink(net, dev, tb, data, extack);
else
err = register_netdevice(dev);
if (err < 0) {
@@ -3527,7 +3805,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
if (err < 0)
goto out_unregister;
if (link_net) {
- err = dev_change_net_namespace(dev, dest_net, ifname);
+ err = dev_change_net_namespace(dev, tgt_net, ifname);
if (err < 0)
goto out_unregister;
}
@@ -3537,9 +3815,6 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
goto out_unregister;
}
out:
- if (link_net)
- put_net(link_net);
- put_net(dest_net);
return err;
out_unregister:
if (ops->newlink) {
@@ -3553,41 +3828,43 @@ out_unregister:
goto out;
}
-struct rtnl_newlink_tbs {
+static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
+ struct nlattr *tbp[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
struct nlattr *tb[IFLA_MAX + 1];
- struct nlattr *attr[RTNL_MAX_TYPE + 1];
- struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
-};
+ int err;
+
+ if (!data || !data[ops->peer_type])
+ return rtnl_link_get_net_ifla(tbp);
+
+ err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ if (ops->validate) {
+ err = ops->validate(tb, NULL, extack);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ return rtnl_link_get_net_ifla(tb);
+}
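
rtnl_get_peer_net() lets a link type with a peer device (veth-style) carry the peer's IFLA_NET_NS_PID/IFLA_NET_NS_FD attributes nested under ops->peer_type, so the core can resolve and lock that namespace up front. A hedged sketch of an ops declaration that opts in; all my_* names and MY_INFO_PEER are placeholders (VETH_INFO_PEER is the in-tree analogue):

static struct rtnl_link_ops my_link_ops = {
	.kind		= "myveth",	/* placeholder kind string */
	.peer_type	= MY_INFO_PEER,	/* nested attr carrying the peer's IFLA_* */
	.newlink	= my_newlink,	/* placeholder callbacks */
	.validate	= my_validate,
};
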
static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ const struct rtnl_link_ops *ops,
+ struct net *tgt_net, struct net *link_net,
+ struct net *peer_net,
struct rtnl_newlink_tbs *tbs,
+ struct nlattr **data,
struct netlink_ext_ack *extack)
{
- struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
struct nlattr ** const tb = tbs->tb;
- const struct rtnl_link_ops *m_ops;
- struct net_device *master_dev;
struct net *net = sock_net(skb->sk);
- const struct rtnl_link_ops *ops;
- struct nlattr **slave_data;
- char kind[MODULE_NAME_LEN];
struct net_device *dev;
struct ifinfomsg *ifm;
- struct nlattr **data;
bool link_specified;
- int err;
-
-#ifdef CONFIG_MODULES
-replay:
-#endif
- err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
- ifla_policy, extack);
- if (err < 0)
- return err;
-
- err = rtnl_ensure_unique_netns(tb, extack, false);
- if (err < 0)
- return err;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0) {
@@ -3604,151 +3881,153 @@ replay:
dev = NULL;
}
- master_dev = NULL;
- m_ops = NULL;
- if (dev) {
- master_dev = netdev_master_upper_dev_get(dev);
- if (master_dev)
- m_ops = master_dev->rtnl_link_ops;
+ if (dev)
+ return rtnl_changelink(skb, nlh, ops, dev, tgt_net, tbs, data, extack);
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
+ * or it's for a group
+ */
+ if (link_specified || !tb[IFLA_GROUP])
+ return -ENODEV;
+
+ return rtnl_group_changelink(skb, net, tgt_net,
+ nla_get_u32(tb[IFLA_GROUP]),
+ ifm, extack, tb);
+ }
+
+ if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
+ return -EOPNOTSUPP;
+
+ if (!ops) {
+ NL_SET_ERR_MSG(extack, "Unknown device type");
+ return -EOPNOTSUPP;
}
+ return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh,
+ tb, data, extack);
+}
+
+static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *tgt_net, *link_net = NULL, *peer_net = NULL;
+ struct nlattr **tb, **linkinfo, **data = NULL;
+ struct rtnl_link_ops *ops = NULL;
+ struct rtnl_newlink_tbs *tbs;
+ struct rtnl_nets rtnl_nets;
+ int ops_srcu_index;
+ int ret;
+
+ tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
+ if (!tbs)
+ return -ENOMEM;
+
+ tb = tbs->tb;
+ ret = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
+ IFLA_MAX, ifla_policy, extack);
+ if (ret < 0)
+ goto free;
+
+ ret = rtnl_ensure_unique_netns(tb, extack, false);
+ if (ret < 0)
+ goto free;
+
+ linkinfo = tbs->linkinfo;
if (tb[IFLA_LINKINFO]) {
- err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
+ ret = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
tb[IFLA_LINKINFO],
ifla_info_policy, NULL);
- if (err < 0)
- return err;
- } else
- memset(linkinfo, 0, sizeof(linkinfo));
+ if (ret < 0)
+ goto free;
+ } else {
+ memset(linkinfo, 0, sizeof(tbs->linkinfo));
+ }
if (linkinfo[IFLA_INFO_KIND]) {
+ char kind[MODULE_NAME_LEN];
+
nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
- ops = rtnl_link_ops_get(kind);
- } else {
- kind[0] = '\0';
- ops = NULL;
+ ops = rtnl_link_ops_get(kind, &ops_srcu_index);
+#ifdef CONFIG_MODULES
+ if (!ops) {
+ request_module("rtnl-link-%s", kind);
+ ops = rtnl_link_ops_get(kind, &ops_srcu_index);
+ }
+#endif
}
- data = NULL;
+ rtnl_nets_init(&rtnl_nets);
+
if (ops) {
- if (ops->maxtype > RTNL_MAX_TYPE)
- return -EINVAL;
+ if (ops->maxtype > RTNL_MAX_TYPE) {
+ ret = -EINVAL;
+ goto put_ops;
+ }
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
- err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
+ ret = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
linkinfo[IFLA_INFO_DATA],
ops->policy, extack);
- if (err < 0)
- return err;
+ if (ret < 0)
+ goto put_ops;
+
data = tbs->attr;
}
+
if (ops->validate) {
- err = ops->validate(tb, data, extack);
- if (err < 0)
- return err;
+ ret = ops->validate(tb, data, extack);
+ if (ret < 0)
+ goto put_ops;
}
- }
- slave_data = NULL;
- if (m_ops) {
- if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
- return -EINVAL;
-
- if (m_ops->slave_maxtype &&
- linkinfo[IFLA_INFO_SLAVE_DATA]) {
- err = nla_parse_nested_deprecated(tbs->slave_attr,
- m_ops->slave_maxtype,
- linkinfo[IFLA_INFO_SLAVE_DATA],
- m_ops->slave_policy,
- extack);
- if (err < 0)
- return err;
- slave_data = tbs->slave_attr;
+ if (ops->peer_type) {
+ peer_net = rtnl_get_peer_net(ops, tb, data, extack);
+ if (IS_ERR(peer_net)) {
+ ret = PTR_ERR(peer_net);
+ goto put_ops;
+ }
+ if (peer_net)
+ rtnl_nets_add(&rtnl_nets, peer_net);
}
}
- if (dev) {
- int status = 0;
-
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
- if (nlh->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
-
- err = validate_linkmsg(dev, tb, extack);
- if (err < 0)
- return err;
-
- if (linkinfo[IFLA_INFO_DATA]) {
- if (!ops || ops != dev->rtnl_link_ops ||
- !ops->changelink)
- return -EOPNOTSUPP;
+ tgt_net = rtnl_link_get_net_capable(skb, sock_net(skb->sk), tb, CAP_NET_ADMIN);
+ if (IS_ERR(tgt_net)) {
+ ret = PTR_ERR(tgt_net);
+ goto put_net;
+ }
- err = ops->changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
- }
+ rtnl_nets_add(&rtnl_nets, tgt_net);
- if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
- if (!m_ops || !m_ops->slave_changelink)
- return -EOPNOTSUPP;
+ if (tb[IFLA_LINK_NETNSID]) {
+ int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
- err = m_ops->slave_changelink(master_dev, dev, tb,
- slave_data, extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
+ link_net = get_net_ns_by_id(tgt_net, id);
+ if (!link_net) {
+ NL_SET_ERR_MSG(extack, "Unknown network namespace id");
+ ret = -EINVAL;
+ goto put_net;
}
- return do_setlink(skb, dev, ifm, extack, tb, status);
- }
-
- if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
- /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
- * or it's for a group
- */
- if (link_specified)
- return -ENODEV;
- if (tb[IFLA_GROUP])
- return rtnl_group_changelink(skb, net,
- nla_get_u32(tb[IFLA_GROUP]),
- ifm, extack, tb);
- return -ENODEV;
- }
-
- if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
- return -EOPNOTSUPP;
+ rtnl_nets_add(&rtnl_nets, link_net);
- if (!ops) {
-#ifdef CONFIG_MODULES
- if (kind[0]) {
- __rtnl_unlock();
- request_module("rtnl-link-%s", kind);
- rtnl_lock();
- ops = rtnl_link_ops_get(kind);
- if (ops)
- goto replay;
+ if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto put_net;
}
-#endif
- NL_SET_ERR_MSG(extack, "Unknown device type");
- return -EOPNOTSUPP;
}
- return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
-}
+ rtnl_nets_lock(&rtnl_nets);
+ ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack);
+ rtnl_nets_unlock(&rtnl_nets);
-static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct rtnl_newlink_tbs *tbs;
- int ret;
-
- tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
- if (!tbs)
- return -ENOMEM;
-
- ret = __rtnl_newlink(skb, nlh, tbs, extack);
+put_net:
+ rtnl_nets_destroy(&rtnl_nets);
+put_ops:
+ if (ops)
+ rtnl_link_ops_put(ops, ops_srcu_index);
+free:
kfree(tbs);
return ret;
}
@@ -3977,22 +4256,28 @@ static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}
-static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
+ struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
size_t min_ifinfo_dump_size = 0;
- struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
struct net_device *dev;
- int hdrlen;
+ struct nlattr *nla;
+ int hdrlen, rem;
/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
- if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return NLMSG_GOODSIZE;
+
+ nla_for_each_attr_type(nla, IFLA_EXT_MASK,
+ nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), rem) {
+ if (nla_len(nla) == sizeof(u32))
+ ext_filter_mask = nla_get_u32(nla);
}
if (!ext_filter_mask)
@@ -4089,8 +4374,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
}
return skb;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return NULL;
}
@@ -4315,9 +4599,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
netif_is_bridge_port(dev)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
+ bool notified = false;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
- nlh->nlmsg_flags, extack);
+ nlh->nlmsg_flags, &notified, extack);
if (err)
goto out;
else
@@ -4326,16 +4611,18 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Embedded bridge, macvlan, and any other device support */
if ((ndm->ndm_flags & NTF_SELF)) {
+ bool notified = false;
+
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
vid,
nlh->nlmsg_flags,
- extack);
+ &notified, extack);
else
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
- if (!err) {
+ if (!err && !notified) {
rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
ndm->ndm_state);
ndm->ndm_flags &= ~NTF_SELF;
@@ -4435,11 +4722,13 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
netif_is_bridge_port(dev)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ bool notified = false;
ops = br_dev->netdev_ops;
if (!del_bulk) {
if (ops->ndo_fdb_del)
- err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
+ err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
+ &notified, extack);
} else {
if (ops->ndo_fdb_del_bulk)
err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
@@ -4453,10 +4742,13 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Embedded bridge, macvlan, and any other device support */
if (ndm->ndm_flags & NTF_SELF) {
+ bool notified = false;
+
ops = dev->netdev_ops;
if (!del_bulk) {
if (ops->ndo_fdb_del)
- err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
+ err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
+ &notified, extack);
else
err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
} else {
@@ -4467,7 +4759,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (!err) {
- if (!del_bulk)
+ if (!del_bulk && !notified)
rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
ndm->ndm_state);
ndm->ndm_flags &= ~NTF_SELF;
@@ -4483,15 +4775,16 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
int *idx,
struct netdev_hw_addr_list *list)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct netdev_hw_addr *ha;
- int err;
u32 portid, seq;
+ int err;
portid = NETLINK_CB(cb->skb).portid;
seq = cb->nlh->nlmsg_seq;
list_for_each_entry(ha, &list->list, list) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip;
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
@@ -4630,18 +4923,16 @@ static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net_device *dev;
- struct net_device *br_dev = NULL;
- const struct net_device_ops *ops = NULL;
- const struct net_device_ops *cops = NULL;
+ const struct net_device_ops *ops = NULL, *cops = NULL;
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
+ struct net_device *dev, *br_dev = NULL;
struct net *net = sock_net(skb->sk);
- struct hlist_head *head;
int brport_idx = 0;
int br_idx = 0;
- int h, s_h;
- int idx = 0, s_idx;
- int err = 0;
int fidx = 0;
+ int err;
+
+ NL_ASSERT_CTX_FITS(struct ndo_fdb_dump_context);
if (cb->strict_check)
err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
@@ -4660,70 +4951,51 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
ops = br_dev->netdev_ops;
}
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
-
- if (brport_idx && (dev->ifindex != brport_idx))
- continue;
-
- if (!br_idx) { /* user did not specify a specific bridge */
- if (netif_is_bridge_port(dev)) {
- br_dev = netdev_master_upper_dev_get(dev);
- cops = br_dev->netdev_ops;
- }
- } else {
- if (dev != br_dev &&
- !netif_is_bridge_port(dev))
- continue;
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ if (brport_idx && (dev->ifindex != brport_idx))
+ continue;
- if (br_dev != netdev_master_upper_dev_get(dev) &&
- !netif_is_bridge_master(dev))
- continue;
- cops = ops;
+ if (!br_idx) { /* user did not specify a specific bridge */
+ if (netif_is_bridge_port(dev)) {
+ br_dev = netdev_master_upper_dev_get(dev);
+ cops = br_dev->netdev_ops;
}
+ } else {
+ if (dev != br_dev &&
+ !netif_is_bridge_port(dev))
+ continue;
- if (idx < s_idx)
- goto cont;
+ if (br_dev != netdev_master_upper_dev_get(dev) &&
+ !netif_is_bridge_master(dev))
+ continue;
+ cops = ops;
+ }
- if (netif_is_bridge_port(dev)) {
- if (cops && cops->ndo_fdb_dump) {
- err = cops->ndo_fdb_dump(skb, cb,
- br_dev, dev,
- &fidx);
- if (err == -EMSGSIZE)
- goto out;
- }
+ if (netif_is_bridge_port(dev)) {
+ if (cops && cops->ndo_fdb_dump) {
+ err = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
+ &fidx);
+ if (err == -EMSGSIZE)
+ break;
}
+ }
- if (dev->netdev_ops->ndo_fdb_dump)
- err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
- dev, NULL,
- &fidx);
- else
- err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
- &fidx);
- if (err == -EMSGSIZE)
- goto out;
+ if (dev->netdev_ops->ndo_fdb_dump)
+ err = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
+ &fidx);
+ else
+ err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, &fidx);
+ if (err == -EMSGSIZE)
+ break;
- cops = NULL;
+ cops = NULL;
- /* reset fdb offset to 0 for rest of the interfaces */
- cb->args[2] = 0;
- fidx = 0;
-cont:
- idx++;
- }
+ /* reset fdb offset to 0 for the rest of the interfaces */
+ ctx->fdb_idx = 0;
+ fidx = 0;
}
-out:
- cb->args[0] = h;
- cb->args[1] = idx;
- cb->args[2] = fidx;
+ ctx->fdb_idx = fidx;
return skb->len;
}
@@ -5269,15 +5541,14 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
- if (nla_len(attr) < sizeof(flags))
- return -EINVAL;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
+ rem) {
+ if (nla_len(attr) < sizeof(flags))
+ return -EINVAL;
- have_flags = true;
- flags = nla_get_u16(attr);
- break;
- }
+ have_flags = true;
+ flags = nla_get_u16(attr);
+ break;
}
}
@@ -5986,19 +6257,17 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
- int h, s_h, err, s_idx, s_idxattr, s_prividx;
struct rtnl_stats_dump_filters filters;
struct net *net = sock_net(skb->sk);
unsigned int flags = NLM_F_MULTI;
struct if_stats_msg *ifsm;
- struct hlist_head *head;
+ struct {
+ unsigned long ifindex;
+ int idxattr;
+ int prividx;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
- int idx = 0;
-
- s_h = cb->args[0];
- s_idx = cb->args[1];
- s_idxattr = cb->args[2];
- s_prividx = cb->args[3];
+ int err;
cb->seq = net->dev_base_seq;
@@ -6017,39 +6286,26 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- flags, &filters,
- &s_idxattr, &s_prividx,
- extack);
- /* If we ran out of room on the first message,
- * we're in trouble
- */
- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ flags, &filters,
+ &ctx->idxattr, &ctx->prividx,
+ extack);
+ /* If we ran out of room on the first message,
+ * we're in trouble.
+ */
+ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
- if (err < 0)
- goto out;
- s_prividx = 0;
- s_idxattr = 0;
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
+ if (err < 0)
+ break;
+ ctx->prividx = 0;
+ ctx->idxattr = 0;
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
}
-out:
- cb->args[3] = s_prividx;
- cb->args[2] = s_idxattr;
- cb->args[1] = idx;
- cb->args[0] = h;
- return skb->len;
+ return err;
}
void rtnl_offload_xstats_notify(struct net_device *dev)
@@ -6188,7 +6444,7 @@ static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
int idx, s_idx;
int err;
- NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);
+ NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx);
if (cb->strict_check) {
err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
@@ -6508,6 +6764,52 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Process one rtnetlink message. */
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
+ rtnl_dumpit_func dumpit = cb->data;
+ int err;
+
+ /* Previous iteration has already finished, avoid calling ->dumpit()
+ * again, it may not expect to be called after it reached the end.
+ */
+ if (!dumpit)
+ return 0;
+
+ if (needs_lock)
+ rtnl_lock();
+ err = dumpit(skb, cb);
+ if (needs_lock)
+ rtnl_unlock();
+
+ /* Old dump handlers used to send NLM_DONE in a separate recvmsg().
+ * Some applications which parse netlink manually depend on this.
+ */
+ if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ if (err < 0 && err != -EMSGSIZE)
+ return err;
+ if (!err)
+ cb->data = NULL;
+
+ return skb->len;
+ }
+ return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
+ !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
+ WARN_ON(control->data);
+ control->data = control->dump;
+ control->dump = rtnl_dumpit;
+ }
+
+ return netlink_dump_start(ssk, skb, nlh, control);
+}
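
rtnetlink_dump_start() only interposes rtnl_dumpit() when the dump still needs RTNL or the legacy split NLM_DONE behaviour; a handler that does its own locking opts out through its registration flags. A hedged example entry (my_dump is a placeholder):

static const struct rtnl_msg_handler my_handlers[] = {
	/* dump runs without RTNL, so rtnetlink_dump_start() will not wrap it */
	{.msgtype = RTM_GETROUTE, .dumpit = my_dump,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
};
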
+
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -6552,6 +6854,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
owner = link->owner;
dumpit = link->dumpit;
+ flags = link->flags;
if (type == RTM_GETLINK - RTM_BASE)
min_dump_alloc = rtnl_calcit(skb, nlh);
@@ -6569,8 +6872,9 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
.module = owner,
+ .flags = flags,
};
- err = netlink_dump_start(rtnl, skb, nlh, &c);
+ err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
* module if dump is still in progress.
*/
@@ -6685,7 +6989,6 @@ static int __net_init rtnetlink_net_init(struct net *net)
struct netlink_kernel_cfg cfg = {
.groups = RTNLGRP_MAX,
.input = rtnetlink_rcv,
- .cb_mutex = &rtnl_mutex,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = rtnetlink_bind,
};
@@ -6708,6 +7011,41 @@ static struct pernet_operations rtnetlink_net_ops = {
.exit = rtnetlink_net_exit,
};
+static const struct rtnl_msg_handler rtnetlink_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWLINK, .doit = rtnl_newlink,
+ .flags = RTNL_FLAG_DOIT_PERNET},
+ {.msgtype = RTM_DELLINK, .doit = rtnl_dellink,
+ .flags = RTNL_FLAG_DOIT_PERNET_WIP},
+ {.msgtype = RTM_GETLINK, .doit = rtnl_getlink,
+ .dumpit = rtnl_dump_ifinfo, .flags = RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
+ {.msgtype = RTM_SETLINK, .doit = rtnl_setlink,
+ .flags = RTNL_FLAG_DOIT_PERNET_WIP},
+ {.msgtype = RTM_GETADDR, .dumpit = rtnl_dump_all},
+ {.msgtype = RTM_GETROUTE, .dumpit = rtnl_dump_all},
+ {.msgtype = RTM_GETNETCONF, .dumpit = rtnl_dump_all},
+ {.msgtype = RTM_GETSTATS, .doit = rtnl_stats_get,
+ .dumpit = rtnl_stats_dump},
+ {.msgtype = RTM_SETSTATS, .doit = rtnl_stats_set},
+ {.msgtype = RTM_NEWLINKPROP, .doit = rtnl_newlinkprop},
+ {.msgtype = RTM_DELLINKPROP, .doit = rtnl_dellinkprop},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_GETLINK,
+ .dumpit = rtnl_bridge_getlink},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_DELLINK,
+ .doit = rtnl_bridge_dellink},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_SETLINK,
+ .doit = rtnl_bridge_setlink},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH, .doit = rtnl_fdb_add},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_DELNEIGH, .doit = rtnl_fdb_del,
+ .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH, .doit = rtnl_fdb_get,
+ .dumpit = rtnl_fdb_dump},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_NEWMDB, .doit = rtnl_mdb_add},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_DELMDB, .doit = rtnl_mdb_del,
+ .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
+ {.protocol = PF_BRIDGE, .msgtype = RTM_GETMDB, .doit = rtnl_mdb_get,
+ .dumpit = rtnl_mdb_dump},
+};
+
void __init rtnetlink_init(void)
{
if (register_pernet_subsys(&rtnetlink_net_ops))
@@ -6715,34 +7053,5 @@ void __init rtnetlink_init(void)
register_netdevice_notifier(&rtnetlink_dev_notifier);
- rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
- rtnl_dump_ifinfo, 0);
- rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
-
- rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
- rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
- rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
-
- rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
-
- rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
- RTNL_FLAG_BULK_DEL_SUPPORTED);
- rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
-
- rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
- rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
-
- rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
- 0);
- rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
-
- rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
- rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
- RTNL_FLAG_BULK_DEL_SUPPORTED);
+ rtnl_register_many(rtnetlink_rtnl_msg_handlers);
}
diff --git a/net/core/rtnl_net_debug.c b/net/core/rtnl_net_debug.c
new file mode 100644
index 000000000000..7ecd28cc1c22
--- /dev/null
+++ b/net/core/rtnl_net_debug.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+static int rtnl_net_debug_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ enum netdev_cmd cmd = event;
+
+ /* Use the enum type and omit a default case so -Werror=switch catches new events */
+ switch (cmd) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_REBOOT:
+ case NETDEV_CHANGE:
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_FEAT_CHANGE:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_PRE_UP:
+ case NETDEV_PRE_TYPE_CHANGE:
+ case NETDEV_POST_TYPE_CHANGE:
+ case NETDEV_POST_INIT:
+ case NETDEV_PRE_UNINIT:
+ case NETDEV_RELEASE:
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_JOIN:
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_RESEND_IGMP:
+ case NETDEV_PRECHANGEMTU:
+ case NETDEV_CHANGEINFODATA:
+ case NETDEV_BONDING_INFO:
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGELOWERSTATE:
+ case NETDEV_UDP_TUNNEL_PUSH_INFO:
+ case NETDEV_UDP_TUNNEL_DROP_INFO:
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
+ case NETDEV_CVLAN_FILTER_PUSH_INFO:
+ case NETDEV_CVLAN_FILTER_DROP_INFO:
+ case NETDEV_SVLAN_FILTER_PUSH_INFO:
+ case NETDEV_SVLAN_FILTER_DROP_INFO:
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ case NETDEV_XDP_FEAT_CHANGE:
+ ASSERT_RTNL();
+ break;
+
+ case NETDEV_CHANGENAME:
+ ASSERT_RTNL_NET(net);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rtnl_net_debug_net_id;
+
+static int __net_init rtnl_net_debug_net_init(struct net *net)
+{
+ struct notifier_block *nb;
+
+ nb = net_generic(net, rtnl_net_debug_net_id);
+ nb->notifier_call = rtnl_net_debug_event;
+
+ return register_netdevice_notifier_net(net, nb);
+}
+
+static void __net_exit rtnl_net_debug_net_exit(struct net *net)
+{
+ struct notifier_block *nb;
+
+ nb = net_generic(net, rtnl_net_debug_net_id);
+ unregister_netdevice_notifier_net(net, nb);
+}
+
+static struct pernet_operations rtnl_net_debug_net_ops __net_initdata = {
+ .init = rtnl_net_debug_net_init,
+ .exit = rtnl_net_debug_net_exit,
+ .id = &rtnl_net_debug_net_id,
+ .size = sizeof(struct notifier_block),
+};
+
+static struct notifier_block rtnl_net_debug_block = {
+ .notifier_call = rtnl_net_debug_event,
+};
+
+static int __init rtnl_net_debug_init(void)
+{
+ int ret;
+
+ ret = register_pernet_device(&rtnl_net_debug_net_ops);
+ if (ret)
+ return ret;
+
+ ret = register_netdevice_notifier(&rtnl_net_debug_block);
+ if (ret)
+ unregister_pernet_subsys(&rtnl_net_debug_net_ops);
+
+ return ret;
+}
+
+subsys_initcall(rtnl_net_debug_init);
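
The per-netns storage for the notifier_block above comes from the pernet_operations .id/.size mechanism: each namespace gets a zeroed blob of .size bytes, retrieved with net_generic(). A minimal sketch of that mechanism in isolation, with all names hypothetical:

struct demo_pernet {
	struct notifier_block nb;
};

static unsigned int demo_net_id;

static int demo_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
	return NOTIFY_DONE;
}

static int __net_init demo_net_init(struct net *net)
{
	/* net_generic() returns this namespace's private demo_pernet blob. */
	struct demo_pernet *dp = net_generic(net, demo_net_id);

	dp->nb.notifier_call = demo_event;
	return register_netdevice_notifier_net(net, &dp->nb);
}

static void __net_exit demo_net_exit(struct net *net)
{
	struct demo_pernet *dp = net_generic(net, demo_net_id);

	unregister_netdevice_notifier_net(net, &dp->nb);
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_pernet),
};
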
diff --git a/net/core/scm.c b/net/core/scm.c
index d0e0852a24d5..733c0cbd393d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -36,6 +36,7 @@
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
+#include <net/af_unix.h>
/*
@@ -85,8 +86,15 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->count_unix = 0;
fpl->max = SCM_MAX_FD;
fpl->user = NULL;
+#if IS_ENABLED(CONFIG_UNIX)
+ fpl->inflight = false;
+ fpl->dead = false;
+ fpl->edges = NULL;
+ INIT_LIST_HEAD(&fpl->vertices);
+#endif
}
fpp = &fpl->fp[fpl->count];
@@ -109,6 +117,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
fput(file);
return -EINVAL;
}
+ if (unix_get_socket(file))
+ fpl->count_unix++;
+
*fpp++ = file;
fpl->count++;
}
@@ -271,6 +282,16 @@ efault:
}
EXPORT_SYMBOL(put_cmsg);
+int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data)
+{
+ /* Don't produce truncated CMSGs */
+ if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
+ return -ETOOSMALL;
+
+ return put_cmsg(msg, level, type, len, data);
+}
+
void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
struct scm_timestamping64 tss;
@@ -371,8 +392,14 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
if (new_fpl) {
for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
+
new_fpl->max = new_fpl->count;
new_fpl->user = get_uid(fpl->user);
+#if IS_ENABLED(CONFIG_UNIX)
+ new_fpl->inflight = false;
+ new_fpl->edges = NULL;
+ INIT_LIST_HEAD(&new_fpl->vertices);
+#endif
}
return new_fpl;
}
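
put_cmsg_notrunc() added above lets a receive path drop a control message outright rather than hand userspace a truncated one. A sketch of a caller under that assumption — the cmsg type and token payload are purely illustrative, not from this patch:

#define DEMO_CMSG_TYPE	0x77	/* hypothetical cmsg type, for illustration only */

static void demo_put_token(struct msghdr *msg, u32 token)
{
	/* -ETOOSMALL means the cmsg would not fit; nothing was emitted. */
	if (put_cmsg_notrunc(msg, SOL_SOCKET, DEMO_CMSG_TYPE,
			     sizeof(token), &token) == -ETOOSMALL)
		msg->msg_flags |= MSG_CTRUNC;
}
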
diff --git a/net/core/skb_fault_injection.c b/net/core/skb_fault_injection.c
new file mode 100644
index 000000000000..4235db6bdfad
--- /dev/null
+++ b/net/core/skb_fault_injection.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+static struct {
+ struct fault_attr attr;
+ char devname[IFNAMSIZ];
+ bool filtered;
+} skb_realloc = {
+ .attr = FAULT_ATTR_INITIALIZER,
+ .filtered = false,
+};
+
+static bool should_fail_net_realloc_skb(struct sk_buff *skb)
+{
+ struct net_device *net = skb->dev;
+
+ if (skb_realloc.filtered &&
+ strncmp(net->name, skb_realloc.devname, IFNAMSIZ))
+ /* device name filter set, but names do not match */
+ return false;
+
+ if (!should_fail(&skb_realloc.attr, 1))
+ return false;
+
+ return true;
+}
+ALLOW_ERROR_INJECTION(should_fail_net_realloc_skb, TRUE);
+
+void skb_might_realloc(struct sk_buff *skb)
+{
+ if (!should_fail_net_realloc_skb(skb))
+ return;
+
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(skb_might_realloc);
+
+static int __init fail_skb_realloc_setup(char *str)
+{
+ return setup_fault_attr(&skb_realloc.attr, str);
+}
+__setup("fail_skb_realloc=", fail_skb_realloc_setup);
+
+static void reset_settings(void)
+{
+ skb_realloc.filtered = false;
+ memset(&skb_realloc.devname, 0, IFNAMSIZ);
+}
+
+static ssize_t devname_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+
+ reset_settings();
+ ret = simple_write_to_buffer(&skb_realloc.devname, IFNAMSIZ,
+ ppos, buffer, count);
+ if (ret < 0)
+ return ret;
+
+ skb_realloc.devname[IFNAMSIZ - 1] = '\0';
+ /* Remove a possible \n at the end of devname */
+ strim(skb_realloc.devname);
+
+ if (strnlen(skb_realloc.devname, IFNAMSIZ))
+ skb_realloc.filtered = true;
+
+ return count;
+}
+
+static ssize_t devname_read(struct file *file,
+ char __user *buffer,
+ size_t size, loff_t *ppos)
+{
+ if (!skb_realloc.filtered)
+ return 0;
+
+ return simple_read_from_buffer(buffer, size, ppos, &skb_realloc.devname,
+ strlen(skb_realloc.devname));
+}
+
+static const struct file_operations devname_ops = {
+ .write = devname_write,
+ .read = devname_read,
+};
+
+static int __init fail_skb_realloc_debugfs(void)
+{
+ umode_t mode = S_IFREG | 0600;
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_skb_realloc", NULL,
+ &skb_realloc.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ debugfs_create_file("devname", mode, dir, NULL, &devname_ops);
+
+ return 0;
+}
+
+late_initcall(fail_skb_realloc_debugfs);
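
skb_might_realloc() above is a no-op unless the fail_skb_realloc fault-injection attribute fires, in which case it forces pskb_expand_head() so code that caches skb->data across such a point gets caught. A purely hypothetical call site, for illustration only:

static int demo_parse(struct sk_buff *skb)
{
	/* May reallocate skb->head when fault injection triggers. */
	skb_might_realloc(skb);

	if (!pskb_may_pull(skb, sizeof(u32)))
		return -EINVAL;

	return 0;
}
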
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edbbef563d4d..b1c81687e9d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -51,6 +51,7 @@
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
@@ -68,7 +69,9 @@
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
+#include <net/gro.h>
#include <net/gso.h>
+#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
@@ -86,18 +89,16 @@
#include <linux/textsearch.h>
#include "dev.h"
+#include "netmem_priv.h"
#include "sock_destructor.h"
-struct kmem_cache *skbuff_cache __ro_after_init;
-static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
-
-static struct kmem_cache *skb_small_head_cache __ro_after_init;
-
-#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
+#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
+#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
+ GRO_MAX_HEAD_PAD))
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
* This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
@@ -112,8 +113,23 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init;
#define SKB_SMALL_HEAD_HEADROOM \
SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
-int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
-EXPORT_SYMBOL(sysctl_max_skb_frags);
+/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
+ * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the
+ * netmem is a page.
+ */
+static_assert(offsetof(struct bio_vec, bv_page) ==
+ offsetof(skb_frag_t, netmem));
+static_assert(sizeof_field(struct bio_vec, bv_page) ==
+ sizeof_field(skb_frag_t, netmem));
+
+static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
+static_assert(sizeof_field(struct bio_vec, bv_len) ==
+ sizeof_field(skb_frag_t, len));
+
+static_assert(offsetof(struct bio_vec, bv_offset) ==
+ offsetof(skb_frag_t, offset));
+static_assert(sizeof_field(struct bio_vec, bv_offset) ==
+ sizeof_field(skb_frag_t, offset));
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
@@ -207,97 +223,31 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
#define NAPI_SKB_CACHE_BULK 16
#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
-#if PAGE_SIZE == SZ_4K
-
-#define NAPI_HAS_SMALL_PAGE_FRAG 1
-#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc)
-
-/* specialized page frag allocator using a single order 0 page
- * and slicing it into 1K sized fragment. Constrained to systems
- * with a very limited amount of 1K fragments fitting a single
- * page - to avoid excessive truesize underestimation
- */
-
-struct page_frag_1k {
- void *va;
- u16 offset;
- bool pfmemalloc;
-};
-
-static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
-{
- struct page *page;
- int offset;
-
- offset = nc->offset - SZ_1K;
- if (likely(offset >= 0))
- goto use_frag;
-
- page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
- if (!page)
- return NULL;
-
- nc->va = page_address(page);
- nc->pfmemalloc = page_is_pfmemalloc(page);
- offset = PAGE_SIZE - SZ_1K;
- page_ref_add(page, offset / SZ_1K);
-
-use_frag:
- nc->offset = offset;
- return nc->va + offset;
-}
-#else
-
-/* the small page is actually unused in this build; add dummy helpers
- * to please the compiler and avoid later preprocessor's conditionals
- */
-#define NAPI_HAS_SMALL_PAGE_FRAG 0
-#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false
-
-struct page_frag_1k {
-};
-
-static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
-{
- return NULL;
-}
-
-#endif
-
struct napi_alloc_cache {
+ local_lock_t bh_lock;
struct page_frag_cache page;
- struct page_frag_1k page_small;
unsigned int skb_count;
void *skb_cache[NAPI_SKB_CACHE_SIZE];
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-
-/* Double check that napi_get_frags() allocates skbs with
- * skb->head being backed by slab, not a page fragment.
- * This is to make sure bug fixed in 3226b158e67c
- * ("net: avoid 32 x truesize under-estimation for tiny skbs")
- * does not accidentally come back.
- */
-void napi_get_frags_check(struct napi_struct *napi)
-{
- struct sk_buff *skb;
-
- local_bh_disable();
- skb = napi_get_frags(napi);
- WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
- napi_free_frags(napi);
- local_bh_enable();
-}
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
- return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ data = __page_frag_alloc_align(&nc->page, fragsz,
+ GFP_ATOMIC | __GFP_NOWARN, align_mask);
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
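
The napi_alloc_cache changes in this hunk wrap every per-CPU cache access in a local_lock, so PREEMPT_RT gets a real lock while non-RT builds rely on the existing BH protection. The bare shape of that pattern, as a sketch with illustrative names:

struct demo_cache {
	local_lock_t	bh_lock;
	unsigned int	count;
};

static DEFINE_PER_CPU(struct demo_cache, demo_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void demo_bump(void)
{
	struct demo_cache *c;

	/* Assumes BH is already disabled, as in the NAPI paths above. */
	local_lock_nested_bh(&demo_cache.bh_lock);
	c = this_cpu_ptr(&demo_cache);
	c->count++;
	local_unlock_nested_bh(&demo_cache.bh_lock);
}
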
@@ -305,17 +255,16 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
void *data;
- fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+ fragsz = SKB_DATA_ALIGN(fragsz);
+ data = __page_frag_alloc_align(nc, fragsz,
+ GFP_ATOMIC | __GFP_NOWARN,
+ align_mask);
} else {
- struct napi_alloc_cache *nc;
-
local_bh_disable();
- nc = this_cpu_ptr(&napi_alloc_cache);
- data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ data = __napi_alloc_frag_align(fragsz, align_mask);
local_bh_enable();
}
return data;
@@ -327,17 +276,21 @@ static struct sk_buff *napi_skb_cache_get(void)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
- GFP_ATOMIC,
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
- if (unlikely(!nc->skb_count))
+ if (unlikely(!nc->skb_count)) {
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return NULL;
+ }
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
@@ -395,7 +348,8 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -446,7 +400,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -557,7 +512,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
- obj = kmem_cache_alloc_node(skb_small_head_cache,
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
@@ -565,7 +520,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
@@ -628,7 +583,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
- ? skbuff_fclone_cache : skbuff_cache;
+ ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@@ -709,7 +664,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
- if (len <= SKB_WITH_OVERHEAD(1024) ||
+ if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
@@ -726,12 +681,16 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (in_hardirq() || irqs_disabled()) {
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, len, gfp_mask);
- pfmemalloc = nc->pfmemalloc;
+ pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
} else {
local_bh_disable();
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+
nc = this_cpu_ptr(&napi_alloc_cache.page);
data = page_frag_alloc(nc, len, gfp_mask);
- pfmemalloc = nc->pfmemalloc;
+ pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
+
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
local_bh_enable();
}
@@ -758,10 +717,9 @@ skb_fail:
EXPORT_SYMBOL(__netdev_alloc_skb);
/**
- * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
+ * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
* @len: length to allocate
- * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
*
* Allocate a new sk_buff for use in NAPI receive. This buffer will
* attempt to allocate the head from a special reserved region used
@@ -770,9 +728,9 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
*
* %NULL is returned if there is no free memory.
*/
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
- gfp_t gfp_mask)
+struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
struct napi_alloc_cache *nc;
struct sk_buff *skb;
bool pfmemalloc;
@@ -783,10 +741,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
- * When the small frag allocator is available, prefer it over kmalloc
- * for small fragments
*/
- if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
+ if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
@@ -796,32 +752,17 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
goto skb_success;
}
- nc = this_cpu_ptr(&napi_alloc_cache);
+ len = SKB_HEAD_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
- if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
- /* we are artificially inflating the allocation size, but
- * that is not as bad as it may look like, as:
- * - 'len' less than GRO_MAX_HEAD makes little sense
- * - On most systems, larger 'len' values lead to fragment
- * size above 512 bytes
- * - kmalloc would use the kmalloc-1k slab for such values
- * - Builds with smaller GRO_MAX_HEAD will very likely do
- * little networking, as that implies no WiFi and no
- * tunnels support, and 32 bits arches.
- */
- len = SZ_1K;
-
- data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
- pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
- } else {
- len = SKB_HEAD_ALIGN(len);
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ nc = this_cpu_ptr(&napi_alloc_cache);
- data = page_frag_alloc(&nc->page, len, gfp_mask);
- pfmemalloc = nc->page.pfmemalloc;
- }
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
+ pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!data))
return NULL;
@@ -843,19 +784,19 @@ skb_success:
skb_fail:
return skb;
}
-EXPORT_SYMBOL(__napi_alloc_skb);
+EXPORT_SYMBOL(napi_alloc_skb);
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize)
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize)
{
DEBUG_NET_WARN_ON_ONCE(size > truesize);
- skb_fill_page_desc(skb, i, page, off, size);
+ skb_fill_netmem_desc(skb, i, netmem, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
-EXPORT_SYMBOL(skb_add_rx_frag);
+EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
@@ -890,18 +831,107 @@ static void skb_clone_fraglist(struct sk_buff *skb)
skb_get(list);
}
-static bool is_pp_page(struct page *page)
+static bool is_pp_netmem(netmem_ref netmem)
{
- return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
+ return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE;
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom)
+{
#if IS_ENABLED(CONFIG_PAGE_POOL)
-bool napi_pp_put_page(struct page *page, bool napi_safe)
+ u32 size, truesize, len, max_head_size, off;
+ struct sk_buff *skb = *pskb, *nskb;
+ int err, i, head_off;
+ void *data;
+
+ /* XDP does not support fraglist so we need to linearize
+ * the skb.
+ */
+ if (skb_has_frag_list(skb))
+ return -EOPNOTSUPP;
+
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + headroom;
+ data = page_pool_dev_alloc_va(pool, &truesize);
+ if (!data)
+ return -ENOMEM;
+
+ nskb = napi_build_skb(data, truesize);
+ if (!nskb) {
+ page_pool_free_va(pool, data, true);
+ return -ENOMEM;
+ }
+
+ skb_reserve(nskb, headroom);
+ skb_copy_header(nskb, skb);
+ skb_mark_for_recycle(nskb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ u32 page_off;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(pool, &page_off, &truesize);
+ if (!page) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
+ err = skb_copy_bits(skb, off, page_address(page) + page_off,
+ size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+EXPORT_SYMBOL(skb_pp_cow_data);
+
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ const struct bpf_prog *prog)
{
- bool allow_direct = false;
- struct page_pool *pp;
+ if (!prog->aux->xdp_has_frags)
+ return -EINVAL;
- page = compound_head(page);
+ return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
+}
+EXPORT_SYMBOL(skb_cow_data_for_xdp);
+
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+bool napi_pp_put_page(netmem_ref netmem)
+{
+ netmem = netmem_compound_head(netmem);
/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
* in order to preserve any existing bits, such as bit 0 for the
@@ -910,41 +940,21 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
* and page_is_pfmemalloc() is checked in __page_pool_put_page()
* to avoid recycling the pfmemalloc page.
*/
- if (unlikely(!is_pp_page(page)))
+ if (unlikely(!is_pp_netmem(netmem)))
return false;
- pp = page->pp;
-
- /* Allow direct recycle if we have reasons to believe that we are
- * in the same context as the consumer would run, so there's
- * no possible race.
- * __page_pool_put_page() makes sure we're not in hardirq context
- * and interrupts are enabled prior to accessing the cache.
- */
- if (napi_safe || in_softirq()) {
- const struct napi_struct *napi = READ_ONCE(pp->p.napi);
-
- allow_direct = napi &&
- READ_ONCE(napi->list_owner) == smp_processor_id();
- }
-
- /* Driver set this to memory recycling info. Reset it on recycle.
- * This will *not* work for NIC using a split-page memory model.
- * The page will be returned to the pool here regardless of the
- * 'flipped' fragment being in use or not.
- */
- page_pool_put_full_page(pp, page, allow_direct);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif
-static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
+static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
return false;
- return napi_pp_put_page(virt_to_page(data), napi_safe);
+ return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}
/**
@@ -960,7 +970,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
static int skb_pp_frag_ref(struct sk_buff *skb)
{
struct skb_shared_info *shinfo;
- struct page *head_page;
+ netmem_ref head_netmem;
int i;
if (!skb->pp_recycle)
@@ -969,11 +979,11 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
shinfo = skb_shinfo(skb);
for (i = 0; i < shinfo->nr_frags; i++) {
- head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
- if (likely(is_pp_page(head_page)))
- page_pool_ref_page(head_page);
+ head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
+ if (likely(is_pp_netmem(head_netmem)))
+ page_pool_ref_netmem(head_netmem);
else
- page_ref_inc(head_page);
+ page_ref_inc(netmem_to_page(head_netmem));
}
return 0;
}
@@ -981,17 +991,17 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
- kmem_cache_free(skb_small_head_cache, head);
+ kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
-static void skb_free_head(struct sk_buff *skb, bool napi_safe)
+static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
if (skb->head_frag) {
- if (skb_pp_recycle(skb, head, napi_safe))
+ if (skb_pp_recycle(skb, head))
return;
skb_free_frag(head);
} else {
@@ -999,15 +1009,12 @@ static void skb_free_head(struct sk_buff *skb, bool napi_safe)
}
}
-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
- bool napi_safe)
+static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
- if (skb->cloned &&
- atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
- &shinfo->dataref))
+ if (!skb_data_unref(skb, shinfo))
goto exit;
if (skb_zcopy(skb)) {
@@ -1019,13 +1026,13 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
}
for (i = 0; i < shinfo->nr_frags; i++)
- napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
+ __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
free_head:
if (shinfo->frag_list)
kfree_skb_list_reason(shinfo->frag_list, reason);
- skb_free_head(skb, napi_safe);
+ skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
* bit is only set on the head though, so in order to avoid races
@@ -1048,7 +1055,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@@ -1069,7 +1076,7 @@ static void kfree_skbmem(struct sk_buff *skb)
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
- kmem_cache_free(skbuff_fclone_cache, fclones);
+ kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
@@ -1086,12 +1093,11 @@ void skb_release_head_state(struct sk_buff *skb)
}
/* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
- bool napi_safe)
+static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
skb_release_head_state(skb);
if (likely(skb->head))
- skb_release_data(skb, reason, napi_safe);
+ skb_release_data(skb, reason);
}
/**
@@ -1105,13 +1111,14 @@ static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
void __kfree_skb(struct sk_buff *skb)
{
- skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
+ skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
static __always_inline
-bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason reason)
{
if (unlikely(!skb_unref(skb)))
return false;
@@ -1124,26 +1131,27 @@ bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
if (reason == SKB_CONSUMED)
trace_consume_skb(skb, __builtin_return_address(0));
else
- trace_kfree_skb(skb, __builtin_return_address(0), reason);
+ trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
return true;
}
/**
- * kfree_skb_reason - free an sk_buff with special reason
+ * sk_skb_reason_drop - free an sk_buff with special reason
+ * @sk: the socket to receive @skb, or NULL if not applicable
* @skb: buffer to free
* @reason: reason why this skb is dropped
*
- * Drop a reference to the buffer and free it if the usage count has
- * hit zero. Meanwhile, pass the drop reason to 'kfree_skb'
- * tracepoint.
+ * Drop a reference to the buffer and free it if the usage count has hit
+ * zero. Meanwhile, pass the receiving socket and drop reason to
+ * 'kfree_skb' tracepoint.
*/
void __fix_address
-kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
- if (__kfree_skb_reason(skb, reason))
+ if (__sk_skb_reason_drop(sk, skb, reason))
__kfree_skb(skb);
}
-EXPORT_SYMBOL(kfree_skb_reason);
+EXPORT_SYMBOL(sk_skb_reason_drop);
#define KFREE_SKB_BULK_SIZE 16
@@ -1162,11 +1170,11 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
return;
}
- skb_release_all(skb, reason, false);
+ skb_release_all(skb, reason);
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
- kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
@@ -1182,7 +1190,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
while (segs) {
struct sk_buff *next = segs->next;
- if (__kfree_skb_reason(segs, reason)) {
+ if (__sk_skb_reason_drop(NULL, segs, reason)) {
skb_poison_list(segs);
kfree_skb_add_bulk(segs, &sa, reason);
}
@@ -1191,7 +1199,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
}
if (sa.skb_count)
- kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1223,22 +1231,28 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
has_trans = skb_transport_header_was_set(skb);
printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
- "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+ "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
"shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
- "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
- "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+ "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+ "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
+ "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
+ "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
level, skb->len, headroom, skb_headlen(skb), tailroom,
has_mac ? skb->mac_header : -1,
has_mac ? skb_mac_header_len(skb) : -1,
+ skb->mac_len,
skb->network_header,
has_trans ? skb_network_header_len(skb) : -1,
has_trans ? skb->transport_header : -1,
sh->tx_flags, sh->nr_frags,
sh->gso_size, sh->gso_type, sh->gso_segs,
- skb->csum, skb->ip_summed, skb->csum_complete_sw,
- skb->csum_valid, skb->csum_level,
+ skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
+ skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
skb->hash, skb->sw_hash, skb->l4_hash,
- ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
+ ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
+ skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
+ skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
+ skb->inner_network_header, skb->inner_transport_header);
if (dev)
printk("%sdev name=%s feat=%pNF\n",
@@ -1267,6 +1281,14 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
struct page *p;
u8 *vaddr;
+ if (skb_frag_is_net_iov(frag)) {
+ printk("%sskb frag %d: not readable\n", level, i);
+ len -= skb_frag_size(frag);
+ if (!len)
+ break;
+ continue;
+ }
+
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
@@ -1336,7 +1358,7 @@ EXPORT_SYMBOL(consume_skb);
void __consume_stateless_skb(struct sk_buff *skb)
{
trace_consume_skb(skb, __builtin_return_address(0));
- skb_release_data(skb, SKB_CONSUMED, false);
+ skb_release_data(skb, SKB_CONSUMED);
kfree_skbmem(skb);
}
@@ -1348,22 +1370,24 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (!kasan_mempool_poison_object(skb))
return;
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
- kmem_cache_size(skbuff_cache));
+ kmem_cache_size(net_hotdata.skbuff_cache));
- kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
- skb_release_all(skb, reason, true);
+ skb_release_all(skb, reason);
napi_skb_cache_put(skb);
}
@@ -1401,7 +1425,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
}
- skb_release_all(skb, SKB_CONSUMED, !!budget);
+ skb_release_all(skb, SKB_CONSUMED);
napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
@@ -1532,7 +1556,7 @@ EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
*/
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
- skb_release_all(dst, SKB_CONSUMED, false);
+ skb_release_all(dst, SKB_CONSUMED);
return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
@@ -1600,7 +1624,7 @@ static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
return NULL;
}
- uarg->ubuf.callback = msg_zerocopy_callback;
+ uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
uarg->len = 1;
uarg->bytelen = size;
@@ -1626,7 +1650,7 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
u32 bytelen, next;
/* there might be non MSG_ZEROCOPY users */
- if (uarg->callback != msg_zerocopy_callback)
+ if (uarg->ops != &msg_zerocopy_ubuf_ops)
return NULL;
/* realloc only when socket is locked (TCP, UDP cork),
@@ -1737,8 +1761,8 @@ release:
sock_put(sk);
}
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success)
+static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success)
{
struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
@@ -1747,7 +1771,6 @@ void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
if (refcount_dec_and_test(&uarg->refcnt))
__msg_zerocopy_callback(uarg_zc);
}
-EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
@@ -1757,22 +1780,35 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
uarg_to_msgzc(uarg)->len--;
if (have_uref)
- msg_zerocopy_callback(NULL, uarg, true);
+ msg_zerocopy_complete(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
+const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
+ .complete = msg_zerocopy_complete,
+};
+EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);
+
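
With the callback pointer replaced by a const struct ubuf_info_ops, another zerocopy provider plugs in by supplying its own ops table; .link_skb is optional and, when absent, skb_zerocopy_iter_stream() below falls back to the single-uarg check. A hypothetical provider sketch, under the assumption that each skb carries its own uarg:

static void demo_zc_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			     bool success)
{
	/* In this sketch every skb has a private uarg, so it can be freed here. */
	kfree(uarg);
}

static const struct ubuf_info_ops demo_zc_ops = {
	.complete = demo_zc_complete,
};
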
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
{
- struct ubuf_info *orig_uarg = skb_zcopy(skb);
int err, orig_len = skb->len;
- /* An skb can only point to one uarg. This edge case happens when
- * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
- */
- if (orig_uarg && uarg != orig_uarg)
- return -EEXIST;
+ if (uarg->ops->link_skb) {
+ err = uarg->ops->link_skb(skb, uarg);
+ if (err)
+ return err;
+ } else {
+ struct ubuf_info *orig_uarg = skb_zcopy(skb);
+
+ /* An skb can only point to one uarg. This edge case happens
+ * when TCP appends to an skb, but zerocopy_realloc triggered
+ * a new alloc.
+ */
+ if (orig_uarg && uarg != orig_uarg)
+ return -EEXIST;
+ }
err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
@@ -1846,6 +1882,9 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!num_frags)
goto release;
@@ -1906,10 +1945,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
- __skb_fill_page_desc(skb, i, head, 0, psize);
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
head = (struct page *)page_private(head);
}
- __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
+ __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
+ d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
@@ -1951,7 +1991,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- n = kmem_cache_alloc(skbuff_cache, gfp_mask);
+ n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
@@ -2014,11 +2054,20 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb_headroom(skb);
- unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = __alloc_skb(size, gfp_mask,
- skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ struct sk_buff *n;
+ unsigned int size;
+ int headerlen;
+
+ if (!skb_frags_readable(skb))
+ return NULL;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+ headerlen = skb_headroom(skb);
+ size = skb_end_offset(skb) + skb->data_len;
+ n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2163,9 +2212,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb, SKB_CONSUMED, false);
+ skb_release_data(skb, SKB_CONSUMED);
} else {
- skb_free_head(skb, false);
+ skb_free_head(skb);
}
off = (data + nhead) - skb->head;
@@ -2346,12 +2395,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
- struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask, skb_alloc_rx_flag(skb),
- NUMA_NO_NODE);
- int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
+ struct sk_buff *n;
+ int oldheadroom;
+
+ if (!skb_frags_readable(skb))
+ return NULL;
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+
+ oldheadroom = skb_headroom(skb);
+ n = __alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2689,6 +2746,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
*/
int i, k, eat = (skb->tail + delta) - skb->end;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
@@ -2842,6 +2902,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -3030,9 +3093,15 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
+ if (!skb_frags_readable(skb))
+ return false;
+
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+ if (WARN_ON_ONCE(!skb_frag_page(f)))
+ return false;
+
if (__splice_segment(skb_frag_page(f),
skb_frag_off(f), skb_frag_size(f),
offset, len, spd, false, sk, pipe))
@@ -3250,6 +3319,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -3329,6 +3401,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
+ if (WARN_ON_ONCE(!skb_frags_readable(skb)))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3429,6 +3504,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
+ if (!skb_frags_readable(skb))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -3647,7 +3725,8 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
- __skb_fill_page_desc(to, 0, page, offset, plen);
+ __skb_fill_netmem_desc(to, 0, page_to_netmem(page),
+ offset, plen);
get_page(page);
j = 1;
len -= plen;
@@ -3919,6 +3998,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+ skb1->unreadable = skb->unreadable;
skb_shinfo(skb)->nr_frags = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
@@ -3966,6 +4046,8 @@ static inline void skb_split_no_header(struct sk_buff *skb,
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
+
+ skb1->unreadable = skb->unreadable;
}
/**
@@ -4029,6 +4111,9 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
if (skb_zcopy(tgt) || skb_zcopy(skb))
return 0;
+ DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
+ DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
+
todo = shiftlen;
from = 0;
to = skb_shinfo(tgt)->nr_frags;
@@ -4037,8 +4122,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
/* Actual merge is delayed until the point when we know we can
* commit all, so that we don't have to undo partial changes
*/
- if (!to ||
- !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+ if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
skb_frag_off(fragfrom))) {
merge = -1;
} else {
@@ -4201,6 +4285,9 @@ next_skb:
return block_limit - abs_offset;
}
+ if (!skb_frags_readable(st->cur_skb))
+ return 0;
+
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
@@ -4277,6 +4364,41 @@ void skb_abort_seq_read(struct skb_seq_state *st)
}
EXPORT_SYMBOL(skb_abort_seq_read);
+/**
+ * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
+ * @st: source skb_seq_state
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @offset bytes into the source @st to the destination
+ * buffer @to. @offset should increase (or stay the same) with each subsequent
+ * call to this function. If @offset needs to decrease from the previous call,
+ * @st should be reset first.
+ *
+ * Return: 0 on success or -EINVAL if the copy ended early
+ */
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
+{
+ const u8 *data;
+ u32 sqlen;
+
+ for (;;) {
+ sqlen = skb_seq_read(offset, &data, st);
+ if (sqlen == 0)
+ return -EINVAL;
+ if (sqlen >= len) {
+ memcpy(to, data, len);
+ return 0;
+ }
+ memcpy(to, data, sqlen);
+ to += sqlen;
+ offset += sqlen;
+ len -= sqlen;
+ }
+}
+EXPORT_SYMBOL(skb_copy_seq_read);
+
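
skb_copy_seq_read() builds on the existing skb_prepare_seq_read()/skb_seq_read() machinery; a short sketch of copying a fixed-size header out of a possibly non-linear skb with it (helper name is illustrative):

static int demo_read_header(struct sk_buff *skb, void *hdr, unsigned int hdrlen)
{
	struct skb_seq_state st;
	int err;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	err = skb_copy_seq_read(&st, 0, hdr, hdrlen);
	skb_abort_seq_read(&st);

	return err;
}
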
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
@@ -4889,7 +5011,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
- skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
@@ -4897,7 +5019,7 @@ void __init skb_init(void)
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+ net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -4906,7 +5028,7 @@ void __init skb_init(void)
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
- skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
+ net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
@@ -5029,7 +5151,7 @@ EXPORT_SYMBOL_GPL(skb_to_sgvec);
* 3. sg_unmark_end
* 4. skb_to_sgvec(payload2)
*
- * When mapping mutilple payload conditionally, skb_to_sgvec_nomark
+ * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
* is more preferable.
*/
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -5294,7 +5416,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
bool ret;
- if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
+ if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data)))
return true;
read_lock_bh(&sk->sk_callback_lock);
@@ -5779,7 +5901,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
@@ -5813,7 +5935,10 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (to->pp_recycle != from->pp_recycle)
return false;
- if (len <= skb_tailroom(to)) {
+ if (skb_frags_readable(from) != skb_frags_readable(to))
+ return false;
+
+ if (len <= skb_tailroom(to) && skb_frags_readable(from)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
@@ -5887,7 +6012,7 @@ EXPORT_SYMBOL(skb_try_coalesce);
* @skb: buffer to clean
* @xnet: packet is crossing netns
*
- * skb_scrub_packet can be used after encapsulating or decapsulting a packet
+ * skb_scrub_packet can be used after encapsulating or decapsulating a packet
* into/from a tunnel. Some information have to be cleared during these
* operations.
* skb_scrub_packet can also be used to clean a skb before injecting it in
@@ -5908,11 +6033,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->offload_fwd_mark = 0;
skb->offload_l3_fwd_mark = 0;
#endif
+ ipvs_reset(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb->mark = 0;
skb_clear_tstamp(skb);
}
@@ -5990,6 +6115,9 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
@@ -6109,7 +6237,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
return err;
skb->protocol = skb->vlan_proto;
- skb->mac_len += VLAN_HLEN;
+ skb->network_header -= VLAN_HLEN;
skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
}
@@ -6465,12 +6593,12 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb, SKB_CONSUMED, false);
+ skb_release_data(skb, SKB_CONSUMED);
} else {
		/* we can reuse existing refcount - all we did was
* relocate values
*/
- skb_free_head(skb, false);
+ skb_free_head(skb);
}
skb->head = data;
@@ -6605,7 +6733,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
skb_kfree_head(data, size);
return -ENOMEM;
}
- skb_release_data(skb, SKB_CONSUMED, false);
+ skb_release_data(skb, SKB_CONSUMED);
skb->head = data;
skb->head_frag = 0;
@@ -6669,7 +6797,7 @@ void skb_condense(struct sk_buff *skb)
{
if (skb->data_len) {
if (skb->data_len > skb->end - skb->tail ||
- skb_cloned(skb))
+ skb_cloned(skb) || !skb_frags_readable(skb))
return;
/* Nice, we can free page frag(s) right now */
@@ -6737,6 +6865,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+#ifdef CONFIG_MCTP_FLOWS
+ if (old_active & (1 << SKB_EXT_MCTP)) {
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
+
+ if (flow->key)
+ refcount_inc(&flow->key->refs);
+ }
+#endif
__skb_ext_put(old);
return new;
}
@@ -6877,6 +7013,19 @@ free_now:
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
+static void kfree_skb_napi_cache(struct sk_buff *skb)
+{
+	/* fclone'd skbs come from a different cache and must not enter the NAPI skb cache */
+ if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ local_bh_disable();
+ __napi_kfree_skb(skb, SKB_CONSUMED);
+ local_bh_enable();
+}
+
/**
* skb_attempt_defer_free - queue skb for remote freeing
* @skb: buffer
@@ -6892,10 +7041,10 @@ void skb_attempt_defer_free(struct sk_buff *skb)
unsigned int defer_max;
bool kick;
- if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
- !cpu_online(cpu) ||
- cpu == raw_smp_processor_id()) {
-nodefer: __kfree_skb(skb);
+ if (cpu == raw_smp_processor_id() ||
+ WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
+ !cpu_online(cpu)) {
+nodefer: kfree_skb_napi_cache(skb);
return;
}
@@ -6903,7 +7052,7 @@ nodefer: __kfree_skb(skb);
DEBUG_NET_WARN_ON_ONCE(skb->destructor);
sd = &per_cpu(softnet_data, cpu);
- defer_max = READ_ONCE(sysctl_skb_defer_max);
+ defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
if (READ_ONCE(sd->defer_count) >= defer_max)
goto nodefer;
@@ -6921,8 +7070,8 @@ nodefer: __kfree_skb(skb);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
- smp_call_function_single_async(cpu, &sd->defer_csd);
+ if (unlikely(kick))
+ kick_defer_list_purge(sd, cpu);
}
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
@@ -6955,7 +7104,7 @@ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
ssize_t maxsize, gfp_t gfp)
{
- size_t frag_limit = READ_ONCE(sysctl_max_skb_frags);
+ size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
struct page *pages[8], **ppages = pages;
ssize_t spliced = 0, ret = 0;
unsigned int i;
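
Among the skbuff.c changes above, __napi_alloc_skb() loses its gfp_mask argument and becomes napi_alloc_skb(), with GFP_ATOMIC | __GFP_NOWARN applied internally. A sketch of a driver-style caller after the rename (names illustrative):

static struct sk_buff *demo_rx_build(struct napi_struct *napi,
				     const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	skb_put_data(skb, data, len);
	return skb;
}
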
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 4d75ef9d24bf..0ddc4c718833 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -293,7 +293,7 @@ out:
/* If we trim data a full sg elem before curr pointer update
* copybreak and current so that any future copy operations
* start at new copy location.
- * However trimed data that has not yet been used in a copy op
+ * However trimmed data that has not yet been used in a copy op
* does not require an update.
*/
if (!msg->sg.size) {
@@ -369,8 +369,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes)
{
int ret = -ENOSPC, i = msg->sg.curr;
+ u32 copy, buf_size, copied = 0;
struct scatterlist *sge;
- u32 copy, buf_size;
void *to;
do {
@@ -397,6 +397,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
goto out;
}
bytes -= copy;
+ copied += copy;
if (!bytes)
break;
msg->sg.copybreak = 0;
@@ -404,7 +405,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
} while (i != msg->sg.end);
out:
msg->sg.curr = i;
- return ret;
+ return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
@@ -434,7 +435,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
page = sg_page(sge);
if (copied + copy > len)
copy = len - copied;
- copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ if (copy)
+ copy = copy_page_to_iter(page, sge->offset, copy, iter);
if (!copy) {
copied = copied ? copied : -EFAULT;
goto out;
@@ -444,8 +446,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (likely(!peek)) {
sge->offset += copy;
sge->length -= copy;
- if (!msg_rx->skb)
+ if (!msg_rx->skb) {
sk_mem_uncharge(sk, copy);
+ atomic_sub(copy, &sk->sk_rmem_alloc);
+ }
msg_rx->sg.size -= copy;
if (!sge->length) {
@@ -545,6 +549,9 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
return num_sge;
}
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+ psock->ingress_bytes += len;
+#endif
copied = len;
msg->sg.start = 0;
msg->sg.size = copied;
@@ -771,6 +778,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
list_del(&msg->list);
+ if (!msg->skb)
+ atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
sk_msg_free(psock->sk, msg);
kfree(msg);
}
@@ -1116,9 +1125,9 @@ static void sk_psock_strp_data_ready(struct sock *sk)
if (tls_sw_has_ctx_rx(sk)) {
psock->saved_data_ready(sk);
} else {
- write_lock_bh(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
strp_data_ready(&psock->strp);
- write_unlock_bh(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
}
rcu_read_unlock();
@@ -1138,6 +1147,10 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
if (!ret)
sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
+ if (sk_is_tcp(sk)) {
+ psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
+ psock->copied_seq = tcp_sk(sk)->copied_seq;
+ }
return ret;
}
@@ -1226,11 +1239,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
rcu_read_lock();
psock = sk_psock(sk);
- if (psock) {
- read_lock_bh(&sk->sk_callback_lock);
+ if (psock)
sk_psock_data_ready(sk, psock);
- read_unlock_bh(&sk->sk_callback_lock);
- }
rcu_read_unlock();
}
}
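
sk_msg_memcopy_from_iter() now returns the number of bytes actually copied (negative errno on failure) instead of 0 on success, so callers can account progress. A caller sketch under that assumption, with a hypothetical wrapper name:

static int demo_copy(struct sock *sk, struct iov_iter *from,
		     struct sk_msg *msg, u32 bytes)
{
	int copied;

	copied = sk_msg_memcopy_from_iter(sk, from, msg, bytes);
	if (copied < 0)
		return copied;

	/* 'copied' is the amount actually placed into msg. */
	return copied;
}
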
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e78798456fd..6c0e87f97fa4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -85,7 +85,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
@@ -124,9 +124,11 @@
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
+#include <net/proto_memory.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
@@ -284,8 +286,6 @@ EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
-int sysctl_tstamp_allow_data __read_mostly = 1;
-
DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);
@@ -454,6 +454,13 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
return 0;
}
+static bool sk_set_prio_allowed(const struct sock *sk, int val)
+{
+ return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) ||
+ sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
+ sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN));
+}
+
static bool sock_needs_netstamp(const struct sock *sk)
{
switch (sk->sk_family) {
@@ -481,7 +488,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
@@ -551,7 +558,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
skb->dev = NULL;
- if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+ if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
@@ -820,14 +827,11 @@ EXPORT_SYMBOL(sock_set_sndtimeo);
static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
+ sock_valbool_flag(sk, SOCK_RCVTSTAMP, val);
+ sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns);
if (val) {
sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
- sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
- sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- } else {
- sock_reset_flag(sk, SOCK_RCVTSTAMP);
- sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
}
}
@@ -1048,6 +1052,75 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
return 0;
}
+#ifdef CONFIG_PAGE_POOL
+
+/* Upper bounds on the number of tokens and frags the user can pass to
+ * SO_DEVMEM_DONTNEED in one syscall. The limits cap the memory the kernel
+ * allocates to copy the tokens and keep the walk over the frags from
+ * running too long.
+ */
+#define MAX_DONTNEED_TOKENS 128
+#define MAX_DONTNEED_FRAGS 1024
+
+static noinline_for_stack int
+sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
+{
+ unsigned int num_tokens, i, j, k, netmem_num = 0;
+ struct dmabuf_token *tokens;
+ int ret = 0, num_frags = 0;
+ netmem_ref netmems[16];
+
+ if (!sk_is_tcp(sk))
+ return -EBADF;
+
+ if (optlen % sizeof(*tokens) ||
+ optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
+ return -EINVAL;
+
+ num_tokens = optlen / sizeof(*tokens);
+ tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
+ if (!tokens)
+ return -ENOMEM;
+
+ if (copy_from_sockptr(tokens, optval, optlen)) {
+ kvfree(tokens);
+ return -EFAULT;
+ }
+
+ xa_lock_bh(&sk->sk_user_frags);
+ for (i = 0; i < num_tokens; i++) {
+ for (j = 0; j < tokens[i].token_count; j++) {
+ if (++num_frags > MAX_DONTNEED_FRAGS)
+ goto frag_limit_reached;
+
+ netmem_ref netmem = (__force netmem_ref)__xa_erase(
+ &sk->sk_user_frags, tokens[i].token_start + j);
+
+ if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+ continue;
+
+ netmems[netmem_num++] = netmem;
+ if (netmem_num == ARRAY_SIZE(netmems)) {
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+ netmem_num = 0;
+ xa_lock_bh(&sk->sk_user_frags);
+ }
+ ret++;
+ }
+ }
+
+frag_limit_reached:
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+
+ kvfree(tokens);
+ return ret;
+}
+#endif
+
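
The new SO_DEVMEM_DONTNEED option lets a devmem receiver hand frag tokens back to the kernel in batches, bounded by the 128-token / 1024-frag limits defined above. A minimal userspace sketch (illustrative only, not part of this patch; it assumes struct dmabuf_token comes from the devmem uapi headers and that the option value is exported through the libc/asm socket headers):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/uio.h>		/* struct dmabuf_token (assumed uapi location) */

#ifndef SO_DEVMEM_DONTNEED
#define SO_DEVMEM_DONTNEED 80	/* fallback; take the real value from <asm/socket.h> */
#endif

/* Return up to 'n' previously received devmem tokens in one call. */
static int devmem_return_tokens(int fd, struct dmabuf_token *tok, unsigned int n)
{
	int freed = setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED,
			       tok, n * sizeof(*tok));

	if (freed < 0)
		perror("SO_DEVMEM_DONTNEED");
	return freed;	/* per sock_devmem_dontneed(), the count of frags released */
}

Per the code above, the (non-standard) positive return value of this setsockopt() call is the number of frags actually released, not zero.
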
void sockopt_lock_sock(struct sock *sk)
{
/* When current->bpf_ctx is set, the setsockopt is called from
@@ -1082,6 +1155,17 @@ bool sockopt_capable(int cap)
}
EXPORT_SYMBOL(sockopt_capable);
+static int sockopt_validate_clockid(__kernel_clockid_t value)
+{
+ switch (value) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_TAI:
+ return 0;
+ }
+ return -EINVAL;
+}
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
@@ -1116,9 +1200,7 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
/* handle options which do not require locking the socket. */
switch (optname) {
case SO_PRIORITY:
- if ((val >= 0 && val <= 6) ||
- sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
- sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ if (sk_set_prio_allowed(sk, val)) {
sock_set_priority(sk, val);
return 0;
}
@@ -1199,6 +1281,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
ret = -EOPNOTSUPP;
return ret;
}
+#ifdef CONFIG_PAGE_POOL
+ case SO_DEVMEM_DONTNEED:
+ return sock_devmem_dontneed(sk, optval, optlen);
+#endif
}
sockopt_lock_sock(sk);
@@ -1214,7 +1300,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
- sk->sk_reuseport = valbool;
+ if (valbool && !sk_is_inet(sk))
+ ret = -EOPNOTSUPP;
+ else
+ sk->sk_reuseport = valbool;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
@@ -1433,6 +1522,10 @@ set_sndbuf:
sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
break;
+ case SO_RCVPRIORITY:
+ sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool);
+ break;
+
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
@@ -1496,6 +1589,11 @@ set_sndbuf:
ret = -EPERM;
break;
}
+
+ ret = sockopt_validate_clockid(sk_txtime.clockid);
+ if (ret)
+ break;
+
sock_valbool_flag(sk, SOCK_TXTIME, true);
sk->sk_clockid = sk_txtime.clockid;
sk->sk_txtime_deadline_mode =
@@ -1856,6 +1954,10 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
v.val = sock_flag(sk, SOCK_RCVMARK);
break;
+ case SO_RCVPRIORITY:
+ v.val = sock_flag(sk, SOCK_RCVPRIORITY);
+ break;
+
case SO_RXQ_OVFL:
v.val = sock_flag(sk, SOCK_RXQ_OVFL);
break;
@@ -2031,7 +2133,7 @@ static inline void sock_lock_init(struct sock *sk)
/*
* Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
- * even temporarly, because of RCU lookups. sk_node should also be left as is.
+ * even temporarily, because of RCU lookups. sk_node should also be left as is.
* We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
*/
static void sock_copy(struct sock *nsk, const struct sock *osk)
@@ -2052,8 +2154,9 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
- prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+ prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see sk_prot_alloc() */);
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
@@ -2143,6 +2246,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
get_net_track(net, &sk->ns_tracker, priority);
sock_inuse_add(net, 1);
} else {
+ net_passive_inc(net);
__netns_tracker_alloc(net, &sk->ns_tracker,
false, priority);
}
@@ -2167,6 +2271,7 @@ EXPORT_SYMBOL(sk_alloc);
static void __sk_destruct(struct rcu_head *head)
{
struct sock *sk = container_of(head, struct sock, sk_rcu);
+ struct net *net = sock_net(sk);
struct sk_filter *filter;
if (sk->sk_destruct)
@@ -2198,14 +2303,28 @@ static void __sk_destruct(struct rcu_head *head)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
- if (likely(sk->sk_net_refcnt))
- put_net_track(sock_net(sk), &sk->ns_tracker);
- else
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
-
+ if (likely(sk->sk_net_refcnt)) {
+ put_net_track(net, &sk->ns_tracker);
+ } else {
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ }
sk_prot_free(sk->sk_prot_creator, sk);
}
+void sk_net_refcnt_upgrade(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+
+ WARN_ON_ONCE(sk->sk_net_refcnt);
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
+}
+EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
+
void sk_destruct(struct sock *sk)
{
bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
@@ -2260,7 +2379,12 @@ static void sk_init_common(struct sock *sk)
lockdep_set_class_and_name(&sk->sk_error_queue.lock,
af_elock_keys + sk->sk_family,
af_family_elock_key_strings[sk->sk_family]);
- lockdep_set_class_and_name(&sk->sk_callback_lock,
+ if (sk->sk_kern_sock)
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
+ af_kern_callback_keys + sk->sk_family,
+ af_family_kern_clock_key_strings[sk->sk_family]);
+ else
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
}
@@ -2297,6 +2421,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
* is not properly dismantling its kernel sockets at netns
* destroy time.
*/
+ net_passive_inc(sock_net(newsk));
__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
false, priority);
}
@@ -2503,19 +2628,16 @@ void __sock_wfree(struct sk_buff *skb)
void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
- skb->sk = sk;
#ifdef CONFIG_INET
- if (unlikely(!sk_fullsock(sk))) {
- skb->destructor = sock_edemux;
- sock_hold(sk);
- return;
- }
+ if (unlikely(!sk_fullsock(sk)))
+ return skb_set_owner_edemux(skb, sk);
#endif
+ skb->sk = sk;
skb->destructor = sock_wfree;
skb_set_hash_from_sk(skb, sk);
/*
* We used to take a refcount on sk, but following operation
- * is enough to guarantee sk_free() wont free this sock until
+ * is enough to guarantee sk_free() won't free this sock until
* all in-flight packets are completed
*/
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
@@ -2524,13 +2646,12 @@ EXPORT_SYMBOL(skb_set_owner_w);
static bool can_skb_orphan_partial(const struct sk_buff *skb)
{
-#ifdef CONFIG_TLS_DEVICE
/* Drivers depend on in-order delivery for crypto offload,
* partial orphan breaks out-of-order-OK logic.
*/
- if (skb->decrypted)
+ if (skb_is_decrypted(skb))
return false;
-#endif
+
return (skb->destructor == sock_wfree ||
(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
}
@@ -2582,8 +2703,18 @@ EXPORT_SYMBOL(sock_efree);
#ifdef CONFIG_INET
void sock_pfree(struct sk_buff *skb)
{
- if (sk_is_refcounted(skb->sk))
- sock_gen_put(skb->sk);
+ struct sock *sk = skb->sk;
+
+ if (!sk_is_refcounted(sk))
+ return;
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ inet_reqsk(sk)->rsk_listener = NULL;
+ reqsk_free(inet_reqsk(sk));
+ return;
+ }
+
+ sock_gen_put(sk);
}
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
@@ -2799,6 +2930,8 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
{
u32 tsflags;
+ BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31));
+
switch (cmsg->cmsg_type) {
case SO_MARK:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
@@ -2827,10 +2960,28 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
return -EINVAL;
sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
break;
+ case SCM_TS_OPT_ID:
+ if (sk_is_tcp(sk))
+ return -EINVAL;
+ tsflags = READ_ONCE(sk->sk_tsflags);
+ if (!(tsflags & SOF_TIMESTAMPING_OPT_ID))
+ return -EINVAL;
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+ sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg);
+ sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID;
+ break;
/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
case SCM_RIGHTS:
case SCM_CREDENTIALS:
break;
+ case SO_PRIORITY:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+ if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg)))
+ return -EPERM;
+ sockc->priority = *(u32 *)CMSG_DATA(cmsg);
+ break;
default:
return -EINVAL;
}
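
__sock_cmsg_send() now accepts two new per-message controls: SCM_TS_OPT_ID, which pins the timestamp key for a single send (only when SOF_TIMESTAMPING_OPT_ID is already enabled and the socket is not TCP), and SO_PRIORITY, gated by the same sk_set_prio_allowed() check as the socket option. A hedged userspace sketch of the former, with the constant guarded in case the libc headers predate it:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SCM_TS_OPT_ID
#define SCM_TS_OPT_ID 81	/* fallback; take the real value from <asm/socket.h> */
#endif

/* Send one datagram whose TX timestamps carry the caller-chosen key. */
static ssize_t send_with_ts_id(int fd, const void *buf, size_t len, uint32_t ts_id)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_TS_OPT_ID;
	cm->cmsg_len = CMSG_LEN(sizeof(uint32_t));
	memcpy(CMSG_DATA(cm), &ts_id, sizeof(ts_id));

	return sendmsg(fd, &msg, 0);
}
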
@@ -3230,8 +3381,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
}
EXPORT_SYMBOL(sock_no_socketpair);
-int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+int sock_no_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
return -EOPNOTSUPP;
}
@@ -3326,7 +3477,7 @@ static void sock_def_error_report(struct sock *sk)
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_poll(&wq->wait, EPOLLERR);
- sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
+ sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR);
rcu_read_unlock();
}
@@ -3341,7 +3492,7 @@ void sock_def_readable(struct sock *sk)
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
EPOLLRDNORM | EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
@@ -3361,7 +3512,7 @@ static void sock_def_write_space(struct sock *sk)
EPOLLWRNORM | EPOLLWRBAND);
/* Should agree with poll, otherwise some programs break */
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
@@ -3386,7 +3537,7 @@ static void sock_def_write_space_wfree(struct sock *sk)
EPOLLWRNORM | EPOLLWRBAND);
/* Should agree with poll, otherwise some programs break */
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
}
@@ -3397,7 +3548,7 @@ static void sock_def_destruct(struct sock *sk)
void sk_send_sigurg(struct sock *sk)
{
if (sk->sk_socket && sk->sk_socket->file)
- if (send_sigurg(&sk->sk_socket->file->f_owner))
+ if (send_sigurg(sk->sk_socket->file))
sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);
@@ -3449,18 +3600,6 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
}
sk->sk_uid = uid;
- rwlock_init(&sk->sk_callback_lock);
- if (sk->sk_kern_sock)
- lockdep_set_class_and_name(
- &sk->sk_callback_lock,
- af_kern_callback_keys + sk->sk_family,
- af_family_kern_clock_key_strings[sk->sk_family]);
- else
- lockdep_set_class_and_name(
- &sk->sk_callback_lock,
- af_callback_keys + sk->sk_family,
- af_family_clock_key_strings[sk->sk_family]);
-
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
sk->sk_write_space = sock_def_write_space;
@@ -3677,7 +3816,7 @@ EXPORT_SYMBOL(sock_recv_errqueue);
*
* FIX: POSIX 1003.1g is very ambiguous here. It states that
* asynchronous errors should be reported by getsockopt. We assume
- * this means if you specify SO_ERROR (otherwise whats the point of it).
+ * this means if you specify SO_ERROR (otherwise what is the point of it).
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
@@ -4223,3 +4362,65 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
+
+static int __init sock_struct_check(void)
+{
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
+ return 0;
+}
+
+core_initcall(sock_struct_check);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b1e29e18d1d6..a08eed9b9142 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -16,9 +16,10 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
-static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
-static DEFINE_MUTEX(sock_diag_table_mutex);
+static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];
+
+static const struct sock_diag_inet_compat __rcu *inet_rcv_compat;
+
static struct workqueue_struct *broadcast_wq;
DEFINE_COOKIE(sock_cookie);
@@ -122,6 +123,24 @@ static size_t sock_diag_nlmsg_size(void)
+ nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}
+static const struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+ const struct sock_diag_handler *handler;
+
+ rcu_read_lock();
+ handler = rcu_dereference(sock_diag_handlers[family]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
+
+ return handler;
+}
+
+static void sock_diag_unlock_handler(const struct sock_diag_handler *handler)
+{
+ module_put(handler->owner);
+}
+
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
struct broadcast_sk *bsk =
@@ -138,12 +157,12 @@ static void sock_diag_broadcast_destroy_work(struct work_struct *work)
if (!skb)
goto out;
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[sk->sk_family];
- if (hndl && hndl->get_info)
- err = hndl->get_info(skb, sk);
- mutex_unlock(&sock_diag_table_mutex);
-
+ hndl = sock_diag_lock_handler(sk->sk_family);
+ if (hndl) {
+ if (hndl->get_info)
+ err = hndl->get_info(skb, sk);
+ sock_diag_unlock_handler(hndl);
+ }
if (!err)
nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
GFP_KERNEL);
@@ -166,51 +185,43 @@ void sock_diag_broadcast_destroy(struct sock *sk)
queue_work(broadcast_wq, &bsk->work);
}
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = fn;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr));
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ const struct sock_diag_inet_compat *old;
+
+ old = unrcu_pointer(xchg(&inet_rcv_compat, NULL));
+ WARN_ON_ONCE(old != ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
int sock_diag_register(const struct sock_diag_handler *hndl)
{
- int err = 0;
+ int family = hndl->family;
- if (hndl->family >= AF_MAX)
+ if (family >= AF_MAX)
return -EINVAL;
- mutex_lock(&sock_diag_table_mutex);
- if (sock_diag_handlers[hndl->family])
- err = -EBUSY;
- else
- sock_diag_handlers[hndl->family] = hndl;
- mutex_unlock(&sock_diag_table_mutex);
-
- return err;
+ return !cmpxchg((const struct sock_diag_handler **)
+ &sock_diag_handlers[family],
+ NULL, hndl) ? 0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(sock_diag_register);
-void sock_diag_unregister(const struct sock_diag_handler *hnld)
+void sock_diag_unregister(const struct sock_diag_handler *hndl)
{
- int family = hnld->family;
+ int family = hndl->family;
if (family >= AF_MAX)
return;
- mutex_lock(&sock_diag_table_mutex);
- BUG_ON(sock_diag_handlers[family] != hnld);
- sock_diag_handlers[family] = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg((const struct sock_diag_handler **)&sock_diag_handlers[family],
+ NULL);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
@@ -227,20 +238,20 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
- if (sock_diag_handlers[req->sdiag_family] == NULL)
+ if (!rcu_access_pointer(sock_diag_handlers[req->sdiag_family]))
sock_load_diag_module(req->sdiag_family, 0);
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[req->sdiag_family];
+ hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
- err = -ENOENT;
- else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
+ return -ENOENT;
+
+ if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
err = hndl->dump(skb, nlh);
else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
err = hndl->destroy(skb, nlh);
else
err = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ sock_diag_unlock_handler(hndl);
return err;
}
@@ -248,20 +259,27 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ const struct sock_diag_inet_compat *ptr;
int ret;
switch (nlh->nlmsg_type) {
case TCPDIAG_GETSOCK:
case DCCPDIAG_GETSOCK:
- if (inet_rcv_compat == NULL)
+
+ if (!rcu_access_pointer(inet_rcv_compat))
sock_load_diag_module(AF_INET, 0);
- mutex_lock(&sock_diag_table_mutex);
- if (inet_rcv_compat != NULL)
- ret = inet_rcv_compat(skb, nlh);
- else
- ret = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ rcu_read_lock();
+ ptr = rcu_dereference(inet_rcv_compat);
+ if (ptr && !try_module_get(ptr->owner))
+ ptr = NULL;
+ rcu_read_unlock();
+
+ ret = -EOPNOTSUPP;
+ if (ptr) {
+ ret = ptr->fn(skb, nlh);
+ module_put(ptr->owner);
+ }
return ret;
case SOCK_DIAG_BY_FAMILY:
@@ -272,13 +290,9 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
-static DEFINE_MUTEX(sock_diag_mutex);
-
static void sock_diag_rcv(struct sk_buff *skb)
{
- mutex_lock(&sock_diag_mutex);
netlink_rcv_skb(skb, &sock_diag_rcv_msg);
- mutex_unlock(&sock_diag_mutex);
}
static int sock_diag_bind(struct net *net, int group)
@@ -286,12 +300,12 @@ static int sock_diag_bind(struct net *net, int group)
switch (group) {
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET]))
sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET6])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET6]))
sock_load_diag_module(AF_INET6, 0);
break;
}
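
With the handler table converted from mutex protection to RCU plus per-call module references, a diag module is expected to fill in .owner so try_module_get() can pin it while a request is in flight. A rough sketch of a registrant under this scheme (illustrative names and family, not from this patch):

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/sock_diag.h>

static int demo_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return -EOPNOTSUPP;	/* a real handler dumps socket state here */
}

static const struct sock_diag_handler demo_diag_handler = {
	.owner	= THIS_MODULE,	/* pinned via try_module_get() while a request runs */
	.family	= AF_SMC,	/* illustrative; any family the module owns */
	.dump	= demo_diag_dump,
};

static int __init demo_diag_init(void)
{
	return sock_diag_register(&demo_diag_handler);
}

static void __exit demo_diag_exit(void)
{
	sock_diag_unregister(&demo_diag_handler);
}

module_init(demo_diag_init);
module_exit(demo_diag_exit);
MODULE_DESCRIPTION("sock_diag handler registration sketch");
MODULE_LICENSE("GPL");
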
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 27d733c0f65e..82a14f131d00 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -24,8 +24,16 @@ struct bpf_stab {
#define SOCK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+/* This mutex is used to
+ * - protect race between prog/link attach/detach and link prog update, and
+ * - protect race between releasing and accessing map in bpf_link.
+ * A single global mutex lock is used since contention is expected to be low.
+ */
+static DEFINE_MUTEX(sockmap_mutex);
+
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
- struct bpf_prog *old, u32 which);
+ struct bpf_prog *old, struct bpf_link *link,
+ u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
@@ -59,55 +67,50 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
- u32 ufd = attr->target_fd;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
- fdput(f);
+ mutex_lock(&sockmap_mutex);
+ ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
+ mutex_unlock(&sockmap_mutex);
return ret;
}
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
- u32 ufd = attr->target_fd;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
prog = bpf_prog_get(attr->attach_bpf_fd);
- if (IS_ERR(prog)) {
- ret = PTR_ERR(prog);
- goto put_map;
- }
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
if (prog->type != ptype) {
ret = -EINVAL;
goto put_prog;
}
- ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+ mutex_lock(&sockmap_mutex);
+ ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
+ mutex_unlock(&sockmap_mutex);
put_prog:
bpf_prog_put(prog);
-put_map:
- fdput(f);
return ret;
}
@@ -156,6 +159,7 @@ static void sock_map_del_link(struct sock *sk,
verdict_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
+ break;
}
}
spin_unlock_bh(&psock->link_lock);
@@ -299,7 +303,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
write_lock_bh(&sk->sk_callback_lock);
if (stream_parser && stream_verdict && !psock->saved_data_ready) {
- ret = sk_psock_init_strp(sk, psock);
+ if (sk_is_tcp(sk))
+ ret = sk_psock_init_strp(sk, psock);
+ else
+ ret = -EOPNOTSUPP;
if (ret) {
write_unlock_bh(&sk->sk_callback_lock);
sk_psock_put(sk, psock);
@@ -408,12 +415,11 @@ static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock **psk)
{
- struct sock *sk;
+ struct sock *sk = NULL;
int err = 0;
spin_lock_bh(&stab->lock);
- sk = *psk;
- if (!sk_test || sk_test == sk)
+ if (!sk_test || sk_test == *psk)
sk = xchg(psk, NULL);
if (likely(sk))
@@ -538,6 +544,9 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
if (sk_is_stream_unix(sk))
return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ if (sk_is_vsock(sk) &&
+ (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
+ return (1 << sk->sk_state) & TCPF_ESTABLISHED;
return true;
}
@@ -644,6 +653,8 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
sk = __sock_map_lookup_elem(map, key);
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+ return SK_DROP;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
@@ -672,6 +683,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
return SK_DROP;
if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
return SK_DROP;
+ if (sk_is_vsock(sk))
+ return SK_DROP;
msg->flags = flags;
msg->sk_redir = sk;
@@ -1171,6 +1184,7 @@ static void sock_hash_free(struct bpf_map *map)
sock_put(elem->sk);
sock_hash_free_elem(htab, elem);
}
+ cond_resched();
}
/* wait for psock readers accessing its map link */
@@ -1245,6 +1259,8 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
sk = __sock_hash_lookup_elem(map, key);
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+ return SK_DROP;
skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
return SK_PASS;
@@ -1273,6 +1289,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
return SK_DROP;
if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
return SK_DROP;
+ if (sk_is_vsock(sk))
+ return SK_DROP;
msg->flags = flags;
msg->sk_redir = sk;
@@ -1454,80 +1472,108 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
return NULL;
}
-static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
- u32 which)
+static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
+ struct bpf_link ***plink, u32 which)
{
struct sk_psock_progs *progs = sock_map_progs(map);
+ struct bpf_prog **cur_pprog;
+ struct bpf_link **cur_plink;
if (!progs)
return -EOPNOTSUPP;
switch (which) {
case BPF_SK_MSG_VERDICT:
- *pprog = &progs->msg_parser;
+ cur_pprog = &progs->msg_parser;
+ cur_plink = &progs->msg_parser_link;
break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
case BPF_SK_SKB_STREAM_PARSER:
- *pprog = &progs->stream_parser;
+ cur_pprog = &progs->stream_parser;
+ cur_plink = &progs->stream_parser_link;
break;
#endif
case BPF_SK_SKB_STREAM_VERDICT:
if (progs->skb_verdict)
return -EBUSY;
- *pprog = &progs->stream_verdict;
+ cur_pprog = &progs->stream_verdict;
+ cur_plink = &progs->stream_verdict_link;
break;
case BPF_SK_SKB_VERDICT:
if (progs->stream_verdict)
return -EBUSY;
- *pprog = &progs->skb_verdict;
+ cur_pprog = &progs->skb_verdict;
+ cur_plink = &progs->skb_verdict_link;
break;
default:
return -EOPNOTSUPP;
}
+ *pprog = cur_pprog;
+ if (plink)
+ *plink = cur_plink;
return 0;
}
+/* Handle the following four cases:
+ * prog_attach: prog != NULL, old == NULL, link == NULL
+ * prog_detach: prog == NULL, old != NULL, link == NULL
+ * link_attach: prog != NULL, old == NULL, link != NULL
+ * link_detach: prog == NULL, old != NULL, link != NULL
+ */
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
- struct bpf_prog *old, u32 which)
+ struct bpf_prog *old, struct bpf_link *link,
+ u32 which)
{
struct bpf_prog **pprog;
+ struct bpf_link **plink;
int ret;
- ret = sock_map_prog_lookup(map, &pprog, which);
+ ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
if (ret)
return ret;
- if (old)
- return psock_replace_prog(pprog, prog, old);
+ /* for prog_attach/prog_detach/link_attach, return error if a bpf_link
+ * exists for that prog.
+ */
+ if ((!link || prog) && *plink)
+ return -EBUSY;
- psock_set_prog(pprog, prog);
- return 0;
+ if (old) {
+ ret = psock_replace_prog(pprog, prog, old);
+ if (!ret)
+ *plink = NULL;
+ } else {
+ psock_set_prog(pprog, prog);
+ if (link)
+ *plink = link;
+ }
+
+ return ret;
}
int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
+ u32 prog_cnt = 0, flags = 0;
struct bpf_prog **pprog;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
u32 id = 0;
int ret;
if (attr->query.query_flags)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
rcu_read_lock();
- ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
+ ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
if (ret)
goto end;
@@ -1552,7 +1598,6 @@ end:
copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
ret = -EFAULT;
- fdput(f);
return ret;
}
@@ -1633,19 +1678,23 @@ void sock_map_close(struct sock *sk, long timeout)
lock_sock(sk);
rcu_read_lock();
- psock = sk_psock_get(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- saved_close = READ_ONCE(sk->sk_prot)->close;
- } else {
+ psock = sk_psock(sk);
+ if (likely(psock)) {
saved_close = psock->saved_close;
sock_map_remove_links(sk, psock);
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ goto no_psock;
rcu_read_unlock();
sk_psock_stop(psock);
release_sock(sk);
cancel_delayed_work_sync(&psock->work);
sk_psock_put(sk, psock);
+ } else {
+ saved_close = READ_ONCE(sk->sk_prot)->close;
+no_psock:
+ rcu_read_unlock();
+ release_sock(sk);
}
/* Make sure we do not recurse. This is a bug.
@@ -1657,6 +1706,200 @@ void sock_map_close(struct sock *sk, long timeout)
}
EXPORT_SYMBOL_GPL(sock_map_close);
+struct sockmap_link {
+ struct bpf_link link;
+ struct bpf_map *map;
+ enum bpf_attach_type attach_type;
+};
+
+static void sock_map_link_release(struct bpf_link *link)
+{
+ struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
+
+ mutex_lock(&sockmap_mutex);
+ if (!sockmap_link->map)
+ goto out;
+
+ WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
+ sockmap_link->attach_type));
+
+ bpf_map_put_with_uref(sockmap_link->map);
+ sockmap_link->map = NULL;
+out:
+ mutex_unlock(&sockmap_mutex);
+}
+
+static int sock_map_link_detach(struct bpf_link *link)
+{
+ sock_map_link_release(link);
+ return 0;
+}
+
+static void sock_map_link_dealloc(struct bpf_link *link)
+{
+ kfree(link);
+}
+
+/* Handle the following two cases:
+ * case 1: link != NULL, prog != NULL, old != NULL
+ * case 2: link != NULL, prog != NULL, old == NULL
+ */
+static int sock_map_link_update_prog(struct bpf_link *link,
+ struct bpf_prog *prog,
+ struct bpf_prog *old)
+{
+ const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
+ struct bpf_prog **pprog, *old_link_prog;
+ struct bpf_link **plink;
+ int ret = 0;
+
+ mutex_lock(&sockmap_mutex);
+
+ /* If old prog is not NULL, ensure old prog is the same as link->prog. */
+ if (old && link->prog != old) {
+ ret = -EPERM;
+ goto out;
+ }
+ /* Ensure link->prog has the same type/attach_type as the new prog. */
+ if (link->prog->type != prog->type ||
+ link->prog->expected_attach_type != prog->expected_attach_type) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!sockmap_link->map) {
+ ret = -ENOLINK;
+ goto out;
+ }
+
+ ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
+ sockmap_link->attach_type);
+ if (ret)
+ goto out;
+
+ /* return error if the stored bpf_link does not match the incoming bpf_link. */
+ if (link != *plink) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (old) {
+ ret = psock_replace_prog(pprog, prog, old);
+ if (ret)
+ goto out;
+ } else {
+ psock_set_prog(pprog, prog);
+ }
+
+ bpf_prog_inc(prog);
+ old_link_prog = xchg(&link->prog, prog);
+ bpf_prog_put(old_link_prog);
+
+out:
+ mutex_unlock(&sockmap_mutex);
+ return ret;
+}
+
+static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
+{
+ u32 map_id = 0;
+
+ mutex_lock(&sockmap_mutex);
+ if (sockmap_link->map)
+ map_id = sockmap_link->map->id;
+ mutex_unlock(&sockmap_mutex);
+ return map_id;
+}
+
+static int sock_map_link_fill_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+{
+ const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
+ u32 map_id = sock_map_link_get_map_id(sockmap_link);
+
+ info->sockmap.map_id = map_id;
+ info->sockmap.attach_type = sockmap_link->attach_type;
+ return 0;
+}
+
+static void sock_map_link_show_fdinfo(const struct bpf_link *link,
+ struct seq_file *seq)
+{
+ const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
+ u32 map_id = sock_map_link_get_map_id(sockmap_link);
+
+ seq_printf(seq, "map_id:\t%u\n", map_id);
+ seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
+}
+
+static const struct bpf_link_ops sock_map_link_ops = {
+ .release = sock_map_link_release,
+ .dealloc = sock_map_link_dealloc,
+ .detach = sock_map_link_detach,
+ .update_prog = sock_map_link_update_prog,
+ .fill_link_info = sock_map_link_fill_info,
+ .show_fdinfo = sock_map_link_show_fdinfo,
+};
+
+int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ struct bpf_link_primer link_primer;
+ struct sockmap_link *sockmap_link;
+ enum bpf_attach_type attach_type;
+ struct bpf_map *map;
+ int ret;
+
+ if (attr->link_create.flags)
+ return -EINVAL;
+
+ map = bpf_map_get_with_uref(attr->link_create.target_fd);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
+ if (!sockmap_link) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ attach_type = attr->link_create.attach_type;
+ bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
+ sockmap_link->map = map;
+ sockmap_link->attach_type = attach_type;
+
+ ret = bpf_link_prime(&sockmap_link->link, &link_primer);
+ if (ret) {
+ kfree(sockmap_link);
+ goto out;
+ }
+
+ mutex_lock(&sockmap_mutex);
+ ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
+ mutex_unlock(&sockmap_mutex);
+ if (ret) {
+ bpf_link_cleanup(&link_primer);
+ goto out;
+ }
+
+ /* Increase refcnt for the prog since when old prog is replaced with
+ * psock_replace_prog() and psock_set_prog() its refcnt will be decreased.
+ *
+ * Actually, we do not need to increase refcnt for the prog since bpf_link
+ * will hold a reference. But in order to have less complexity w.r.t.
+ * replacing/setting prog, let us increase the refcnt to make things simpler.
+ */
+ bpf_prog_inc(prog);
+
+ return bpf_link_settle(&link_primer);
+
+out:
+ bpf_map_put_with_uref(map);
+ return ret;
+}
+
static int sock_map_iter_attach_target(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux)
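
The sockmap bpf_link plumbing above is driven from userspace through BPF_LINK_CREATE with the sockmap/sockhash fd as target_fd. A hedged sketch using libbpf's generic bpf_link_create() wrapper (object and program names are made up for the example):

#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Attach a verdict program to a sockmap via the new link type. */
static int attach_sockmap_link(struct bpf_object *obj, int map_fd)
{
	struct bpf_program *prog;
	int link_fd;

	prog = bpf_object__find_program_by_name(obj, "prog_skb_verdict");
	if (!prog)
		return -ENOENT;

	/* target_fd is the sockmap/sockhash fd; link_create.flags must be zero. */
	link_fd = bpf_link_create(bpf_program__fd(prog), map_fd,
				  BPF_SK_SKB_STREAM_VERDICT, NULL);
	if (link_fd < 0)
		fprintf(stderr, "link create failed: %d\n", link_fd);

	return link_fd;	/* closing the last reference detaches the program */
}

While such a link holds a slot, the plain prog attach/detach paths above return -EBUSY for it, and replacements go through bpf_link_update() into sock_map_link_update_prog() instead.
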
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5a165286e4d8..4211710393a8 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -173,10 +173,9 @@ static bool __reuseport_detach_closed_sock(struct sock *sk,
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
- unsigned int size = sizeof(struct sock_reuseport) +
- sizeof(struct sock *) * max_socks;
- struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);
+ struct sock_reuseport *reuse;
+ reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
if (!reuse)
return NULL;
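
struct_size() replaces the open-coded size arithmetic for the trailing socks[] flexible array: it computes sizeof(*reuse) + max_socks * sizeof(struct sock *) with overflow checking, saturating so kzalloc() fails instead of under-allocating. A tiny standalone sketch of the same pattern (illustrative struct, not kernel code):

#include <linux/overflow.h>
#include <linux/slab.h>

struct sock;

struct demo_table {
	unsigned int max;
	struct sock *socks[];	/* flexible array, as in struct sock_reuseport */
};

static struct demo_table *demo_table_alloc(unsigned int max_socks)
{
	/* struct_size() == sizeof(*t) + max_socks * sizeof(t->socks[0]), overflow-checked */
	struct demo_table *t = kzalloc(struct_size(t, socks, max_socks), GFP_KERNEL);

	if (t)
		t->max = max_socks;
	return t;
}
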
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0f0cb1465e08..c7769ee0d9c5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,6 +23,9 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
+#include <net/hotdata.h>
+#include <net/proto_memory.h>
+#include <net/rps.h>
#include "dev.h"
@@ -30,6 +33,8 @@ static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -47,29 +52,45 @@ int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
#if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
-static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
- struct cpumask *mask)
+static int dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
+ struct cpumask *mask)
{
- char kbuf[128];
+ char *kbuf;
int len;
if (*ppos || !*lenp) {
*lenp = 0;
- return;
+ return 0;
+ }
+
+ /* CPUs are displayed as a hex bitmap + a comma between each group of 8
+ * nibbles (except the last one which has a newline instead).
+ * Guesstimate the buffer size at the group granularity level.
+ */
+ len = min(DIV_ROUND_UP(nr_cpumask_bits, 32) * (8 + 1), *lenp);
+ kbuf = kmalloc(len, GFP_KERNEL);
+ if (!kbuf) {
+ *lenp = 0;
+ return -ENOMEM;
}
- len = min(sizeof(kbuf) - 1, *lenp);
len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
if (!len) {
*lenp = 0;
- return;
+ goto free_buf;
}
- if (len < *lenp)
- kbuf[len++] = '\n';
+ /* scnprintf writes a trailing null char not counted in the returned
+ * length, override it with a newline.
+ */
+ kbuf[len++] = '\n';
memcpy(buffer, kbuf, len);
*lenp = len;
*ppos += len;
+
+free_buf:
+ kfree(kbuf);
+ return 0;
}
#endif
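
To make the sizing comment concrete (a worked example, not in the patch): with nr_cpumask_bits = 128 the allocation is DIV_ROUND_UP(128, 32) * 9 = 36 bytes, exactly enough for four 8-nibble groups, the three commas between them, and the trailing newline that overwrites the scnprintf() terminator.
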
@@ -91,7 +112,7 @@ static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
return rps_default_mask;
}
-static int rps_default_mask_sysctl(struct ctl_table *table, int write,
+static int rps_default_mask_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)table->data;
@@ -113,8 +134,8 @@ static int rps_default_mask_sysctl(struct ctl_table *table, int write,
if (err)
goto done;
} else {
- dump_cpumask(buffer, lenp, ppos,
- net->core.rps_default_mask ? : cpu_none_mask);
+ err = dump_cpumask(buffer, lenp, ppos,
+ net->core.rps_default_mask ? : cpu_none_mask);
}
done:
@@ -122,7 +143,7 @@ done:
return err;
}
-static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int orig_size, size;
@@ -137,7 +158,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ orig_sock_table = rcu_dereference_protected(
+ net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
@@ -158,7 +180,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
- rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
+ net_hotdata.rps_cpu_mask =
+ roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@@ -169,7 +192,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
sock_table = NULL;
if (sock_table != orig_sock_table) {
- rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
+ sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
@@ -191,7 +215,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);
-static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
+static int flow_limit_cpu_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct sd_flow_limit *cur;
@@ -240,7 +264,7 @@ write_unlock:
}
rcu_read_unlock();
- dump_cpumask(buffer, lenp, ppos, mask);
+ ret = dump_cpumask(buffer, lenp, ppos, mask);
}
done:
@@ -248,7 +272,7 @@ done:
return ret;
}
-static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
+static int flow_limit_table_len_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int old, *ptr;
@@ -270,7 +294,7 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_SCHED
-static int set_default_qdisc(struct ctl_table *table, int write,
+static int set_default_qdisc(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char id[IFNAMSIZ];
@@ -289,25 +313,25 @@ static int set_default_qdisc(struct ctl_table *table, int write,
}
#endif
-static int proc_do_dev_weight(struct ctl_table *table, int write,
+static int proc_do_dev_weight(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
static DEFINE_MUTEX(dev_weight_mutex);
int ret, weight;
mutex_lock(&dev_weight_mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write) {
weight = READ_ONCE(weight_p);
- WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
- WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
}
mutex_unlock(&dev_weight_mutex);
return ret;
}
-static int proc_do_rss_key(struct ctl_table *table, int write,
+static int proc_do_rss_key(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
@@ -320,7 +344,7 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
}
#ifdef CONFIG_BPF_JIT
-static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -353,7 +377,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
# ifdef CONFIG_HAVE_EBPF_JIT
static int
-proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+proc_dointvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!capable(CAP_SYS_ADMIN))
@@ -364,7 +388,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
# endif /* CONFIG_HAVE_EBPF_JIT */
static int
-proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+proc_dolongvec_minmax_bpf_restricted(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!capable(CAP_SYS_ADMIN))
@@ -376,36 +400,12 @@ proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
static struct ctl_table net_core_table[] = {
{
- .procname = "wmem_max",
- .data = &sysctl_wmem_max,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
- },
- {
- .procname = "rmem_max",
- .data = &sysctl_rmem_max,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
- },
- {
- .procname = "wmem_default",
- .data = &sysctl_wmem_default,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
- },
- {
- .procname = "rmem_default",
- .data = &sysctl_rmem_default,
+ .procname = "mem_pcpu_rsv",
+ .data = &net_hotdata.sysctl_mem_pcpu_rsv,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &min_mem_pcpu_rsv,
},
{
.procname = "dev_weight",
@@ -413,6 +413,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_dev_weight,
+ .extra1 = SYSCTL_ONE,
},
{
.procname = "dev_weight_rx_bias",
@@ -420,6 +421,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_dev_weight,
+ .extra1 = SYSCTL_ONE,
},
{
.procname = "dev_weight_tx_bias",
@@ -427,10 +429,11 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_dev_weight,
+ .extra1 = SYSCTL_ONE,
},
{
.procname = "netdev_max_backlog",
- .data = &netdev_max_backlog,
+ .data = &net_hotdata.max_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -489,7 +492,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_tstamp_prequeue",
- .data = &netdev_tstamp_prequeue,
+ .data = &net_hotdata.tstamp_prequeue,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -508,15 +511,6 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "tstamp_allow_data",
- .data = &sysctl_tstamp_allow_data,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE
- },
#ifdef CONFIG_RPS
{
.procname = "rps_sock_flow_entries",
@@ -567,7 +561,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_budget",
- .data = &netdev_budget,
+ .data = &net_hotdata.netdev_budget,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -581,7 +575,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "max_skb_frags",
- .data = &sysctl_max_skb_frags,
+ .data = &net_hotdata.sysctl_max_skb_frags,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -590,11 +584,11 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_budget_usecs",
- .data = &netdev_budget_usecs,
+ .data = &net_hotdata.netdev_budget_usecs,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .extra1 = &netdev_budget_usecs_min,
},
{
.procname = "fb_tunnels_only_for_init_net",
@@ -623,7 +617,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "gro_normal_batch",
- .data = &gro_normal_batch,
+ .data = &net_hotdata.gro_normal_batch,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -640,13 +634,12 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "skb_defer_max",
- .data = &sysctl_skb_defer_max,
+ .data = &net_hotdata.sysctl_skb_defer_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
- { }
};
static struct ctl_table netns_core_table[] = {
@@ -683,7 +676,50 @@ static struct ctl_table netns_core_table[] = {
.extra2 = SYSCTL_ONE,
.proc_handler = proc_dou8vec_minmax,
},
- { }
+ {
+ .procname = "tstamp_allow_data",
+ .data = &init_net.core.sysctl_tstamp_allow_data,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
+ /* sysctl_core_net_init() will set the values after this
+ * to readonly in network namespaces
+ */
+ {
+ .procname = "wmem_max",
+ .data = &sysctl_wmem_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_sndbuf,
+ },
+ {
+ .procname = "rmem_max",
+ .data = &sysctl_rmem_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
+ },
+ {
+ .procname = "wmem_default",
+ .data = &sysctl_wmem_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_sndbuf,
+ },
+ {
+ .procname = "rmem_default",
+ .data = &sysctl_rmem_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
+ },
};
static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
@@ -701,20 +737,27 @@ __setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);
static __net_init int sysctl_core_net_init(struct net *net)
{
- struct ctl_table *tbl, *tmp;
+ size_t table_size = ARRAY_SIZE(netns_core_table);
+ struct ctl_table *tbl;
tbl = netns_core_table;
if (!net_eq(net, &init_net)) {
+ int i;
tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
if (tbl == NULL)
goto err_dup;
- for (tmp = tbl; tmp->procname; tmp++)
- tmp->data += (char *)net - (char *)&init_net;
+ for (i = 0; i < table_size; ++i) {
+ if (tbl[i].data == &sysctl_wmem_max)
+ break;
+
+ tbl[i].data += (char *)net - (char *)&init_net;
+ }
+ for (; i < table_size; ++i)
+ tbl[i].mode &= ~0222;
}
- net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl,
- ARRAY_SIZE(netns_core_table));
+ net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl, table_size);
if (net->core.sysctl_hdr == NULL)
goto err_reg;
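
The effect of the new loop is that, in a non-init namespace, entries from wmem_max onward keep pointing at the global variables but have their write bits cleared: 0644 & ~0222 = 0444, so they appear read-only there while remaining writable in init_net.
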
@@ -729,7 +772,7 @@ err_dup:
static __net_exit void sysctl_core_net_exit(struct net *net)
{
- struct ctl_table *tbl;
+ const struct ctl_table *tbl;
tbl = net->core.sysctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->core.sysctl_hdr);
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 04840697fe79..a50a7ef49ae8 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -9,6 +9,7 @@
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>
#include <linux/export.h>
+#include <linux/ptp_clock_kernel.h>
static unsigned int classify(const struct sk_buff *skb)
{
@@ -21,18 +22,39 @@ static unsigned int classify(const struct sk_buff *skb)
void skb_clone_tx_timestamp(struct sk_buff *skb)
{
+ struct hwtstamp_provider *hwprov;
struct mii_timestamper *mii_ts;
+ struct phy_device *phydev;
struct sk_buff *clone;
unsigned int type;
- if (!skb->sk)
+ if (!skb->sk || !skb->dev)
return;
+ rcu_read_lock();
+ hwprov = rcu_dereference(skb->dev->hwprov);
+ if (hwprov) {
+ if (hwprov->source != HWTSTAMP_SOURCE_PHYLIB ||
+ !hwprov->phydev) {
+ rcu_read_unlock();
+ return;
+ }
+
+ phydev = hwprov->phydev;
+ } else {
+ phydev = skb->dev->phydev;
+ if (!phy_is_default_hwtstamp(phydev)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
type = classify(skb);
if (type == PTP_CLASS_NONE)
return;
- mii_ts = skb->dev->phydev->mii_ts;
+ mii_ts = phydev->mii_ts;
if (likely(mii_ts->txtstamp)) {
clone = skb_clone_sk(skb);
if (!clone)
@@ -44,12 +66,33 @@ EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
+ struct hwtstamp_provider *hwprov;
struct mii_timestamper *mii_ts;
+ struct phy_device *phydev;
unsigned int type;
- if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->mii_ts)
+ if (!skb->dev)
return false;
+ rcu_read_lock();
+ hwprov = rcu_dereference(skb->dev->hwprov);
+ if (hwprov) {
+ if (hwprov->source != HWTSTAMP_SOURCE_PHYLIB ||
+ !hwprov->phydev) {
+ rcu_read_unlock();
+ return false;
+ }
+
+ phydev = hwprov->phydev;
+ } else {
+ phydev = skb->dev->phydev;
+ if (!phy_is_default_hwtstamp(phydev)) {
+ rcu_read_unlock();
+ return false;
+ }
+ }
+ rcu_read_unlock();
+
if (skb_headroom(skb) < ETH_HLEN)
return false;
@@ -62,7 +105,7 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
if (type == PTP_CLASS_NONE)
return false;
- mii_ts = skb->dev->phydev->mii_ts;
+ mii_ts = phydev->mii_ts;
if (likely(mii_ts->rxtstamp))
return mii_ts->rxtstamp(mii_ts, skb, type);
diff --git a/net/core/tso.c b/net/core/tso.c
index e00796e3b146..6df997b9076e 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -3,7 +3,7 @@
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tso.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last)
diff --git a/net/core/utils.c b/net/core/utils.c
index c994e95172ac..27f4cffaae05 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Generic address resultion entity
+ * Generic address resolution entity
*
* Authors:
* net_random Alan Cox
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 4869c1c2d8f3..2c6ab6fb452f 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -16,6 +16,7 @@
#include <linux/bug.h>
#include <net/page_pool/helpers.h>
+#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
@@ -75,7 +76,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
/* Allow this ID to be reused */
- ida_simple_remove(&mem_id_pool, xa->mem.id);
+ ida_free(&mem_id_pool, xa->mem.id);
kfree(xa);
}
@@ -126,10 +127,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem)
return;
if (type == MEM_TYPE_PAGE_POOL) {
- rcu_read_lock();
- xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
page_pool_destroy(xa->page_pool);
- rcu_read_unlock();
}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
@@ -187,7 +186,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
xdp_rxq_info_init(xdp_rxq);
xdp_rxq->dev = dev;
xdp_rxq->queue_index = queue_index;
- xdp_rxq->napi_id = napi_id;
xdp_rxq->frag_size = frag_size;
xdp_rxq->reg_state = REG_STATE_REGISTERED;
@@ -242,7 +240,7 @@ static int __mem_id_cyclic_get(gfp_t gfp)
int id;
again:
- id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
+ id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
if (id < 0) {
if (id == -ENOSPC) {
/* Cyclic allocator, reset next id */
@@ -294,10 +292,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
mutex_lock(&mem_id_lock);
ret = __mem_id_init_hash_table();
mutex_unlock(&mem_id_lock);
- if (ret < 0) {
- WARN_ON(1);
+ if (ret < 0)
return ERR_PTR(ret);
- }
}
xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
@@ -317,7 +313,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
- ida_simple_remove(&mem_id_pool, mem->id);
+ ida_free(&mem_id_pool, mem->id);
mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
@@ -361,6 +357,9 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
if (IS_ERR(xdp_alloc))
return PTR_ERR(xdp_alloc);
+ if (type == MEM_TYPE_XSK_BUFF_POOL && allocator)
+ xsk_pool_set_rxq_info(allocator, xdp_rxq);
+
if (trace_mem_connect_enabled() && xdp_alloc)
trace_mem_connect(xdp_alloc, xdp_rxq);
return 0;
@@ -368,33 +367,87 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
+/**
+ * xdp_reg_page_pool - register &page_pool as a memory provider for XDP
+ * @pool: &page_pool to register
+ *
+ * Can be used to register pools manually without connecting to any XDP RxQ
+ * info, so that the XDP layer will be aware of them. Then, they can be
+ * attached to an RxQ info manually via xdp_rxq_info_attach_page_pool().
+ *
+ * Return: %0 on success, -errno on error.
+ */
+int xdp_reg_page_pool(struct page_pool *pool)
+{
+ struct xdp_mem_info mem;
+
+ return xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
+}
+EXPORT_SYMBOL_GPL(xdp_reg_page_pool);
+
+/**
+ * xdp_unreg_page_pool - unregister &page_pool from the memory providers list
+ * @pool: &page_pool to unregister
+ *
+ * A shorthand for manually unregistering page pools. If the pool was previously
+ * attached to an RxQ info, it must be detached first.
+ */
+void xdp_unreg_page_pool(const struct page_pool *pool)
+{
+ struct xdp_mem_info mem = {
+ .type = MEM_TYPE_PAGE_POOL,
+ .id = pool->xdp_mem_id,
+ };
+
+ xdp_unreg_mem_model(&mem);
+}
+EXPORT_SYMBOL_GPL(xdp_unreg_page_pool);
+
+/**
+ * xdp_rxq_info_attach_page_pool - attach registered pool to RxQ info
+ * @xdp_rxq: XDP RxQ info to attach the pool to
+ * @pool: pool to attach
+ *
+ * If the pool was registered manually, this function must be called instead
+ * of xdp_rxq_info_reg_mem_model() to connect it to the RxQ info.
+ */
+void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
+ const struct page_pool *pool)
+{
+ struct xdp_mem_info mem = {
+ .type = MEM_TYPE_PAGE_POOL,
+ .id = pool->xdp_mem_id,
+ };
+
+ xdp_rxq_info_attach_mem_model(xdp_rxq, &mem);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
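For illustration (not part of this patch), a driver that creates its page_pools before registering any XDP RxQ info could use the new helpers roughly as below; the drv_* names are hypothetical and the detach helper is assumed to be the existing counterpart of xdp_rxq_info_attach_mem_model():

/* Hypothetical setup path: pool already created via page_pool_create(). */
static int drv_rxq_setup_xdp(struct drv_rx_queue *rxq)
{
	int err;

	err = xdp_reg_page_pool(rxq->pool);	/* make the XDP layer aware of it */
	if (err)
		return err;

	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, rxq->pool);
	return 0;
}

static void drv_rxq_teardown_xdp(struct drv_rx_queue *rxq)
{
	/* Detach from the RxQ info first, then drop the registration. */
	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_unreg_page_pool(rxq->pool);
}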
+
/* XDP RX runs under NAPI protection, and in different delivery error
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolean
* is used for those call sites. Thus, allowing for faster recycling
* of xdp_frames/pages in those cases.
*/
-void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
- struct xdp_buff *xdp)
+void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
+ bool napi_direct, struct xdp_buff *xdp)
{
- struct page *page;
-
- switch (mem->type) {
+ switch (mem_type) {
case MEM_TYPE_PAGE_POOL:
- page = virt_to_head_page(data);
+ netmem = netmem_compound_head(netmem);
if (napi_direct && xdp_return_frame_no_direct())
napi_direct = false;
/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
* as mem->type knows this is a page_pool page
*/
- page_pool_put_full_page(page->pp, page, napi_direct);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem,
+ napi_direct);
break;
case MEM_TYPE_PAGE_SHARED:
- page_frag_free(data);
+ page_frag_free(__netmem_address(netmem));
break;
case MEM_TYPE_PAGE_ORDER0:
- page = virt_to_page(data); /* Assumes order0 page*/
- put_page(page);
+ put_page(__netmem_to_page(netmem));
break;
case MEM_TYPE_XSK_BUFF_POOL:
/* NB! Only valid from an xdp_buff! */
@@ -402,7 +455,7 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
break;
default:
/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
- WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
+ WARN(1, "Incorrect XDP memory type (%d) usage", mem_type);
break;
}
}
@@ -410,38 +463,34 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
void xdp_return_frame(struct xdp_frame *xdpf)
{
struct skb_shared_info *sinfo;
- int i;
if (likely(!xdp_frame_has_frags(xdpf)))
goto out;
sinfo = xdp_get_shared_info_from_frame(xdpf);
- for (i = 0; i < sinfo->nr_frags; i++) {
- struct page *page = skb_frag_page(&sinfo->frags[i]);
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ __xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
+ false, NULL);
- __xdp_return(page_address(page), &xdpf->mem, false, NULL);
- }
out:
- __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+ __xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
struct skb_shared_info *sinfo;
- int i;
if (likely(!xdp_frame_has_frags(xdpf)))
goto out;
sinfo = xdp_get_shared_info_from_frame(xdpf);
- for (i = 0; i < sinfo->nr_frags; i++) {
- struct page *page = skb_frag_page(&sinfo->frags[i]);
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ __xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
+ true, NULL);
- __xdp_return(page_address(page), &xdpf->mem, true, NULL);
- }
out:
- __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
+ __xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
@@ -455,46 +504,19 @@ EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
* xdp_frame_bulk is usually stored/allocated on the function
* call-stack to avoid locking penalties.
*/
-void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
-{
- struct xdp_mem_allocator *xa = bq->xa;
-
- if (unlikely(!xa || !bq->count))
- return;
-
- page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
- /* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
- bq->count = 0;
-}
-EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq)
{
- struct xdp_mem_info *mem = &xdpf->mem;
- struct xdp_mem_allocator *xa;
-
- if (mem->type != MEM_TYPE_PAGE_POOL) {
+ if (xdpf->mem_type != MEM_TYPE_PAGE_POOL) {
xdp_return_frame(xdpf);
return;
}
- xa = bq->xa;
- if (unlikely(!xa)) {
- xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
- bq->count = 0;
- bq->xa = xa;
- }
-
if (bq->count == XDP_BULK_QUEUE_SIZE)
xdp_flush_frame_bulk(bq);
- if (unlikely(mem->id != xa->mem.id)) {
- xdp_flush_frame_bulk(bq);
- bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
- }
-
if (unlikely(xdp_frame_has_frags(xdpf))) {
struct skb_shared_info *sinfo;
int i;
@@ -503,31 +525,40 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf,
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
- bq->q[bq->count++] = skb_frag_address(frag);
+ bq->q[bq->count++] = skb_frag_netmem(frag);
if (bq->count == XDP_BULK_QUEUE_SIZE)
xdp_flush_frame_bulk(bq);
}
}
- bq->q[bq->count++] = xdpf->data;
+ bq->q[bq->count++] = virt_to_netmem(xdpf->data);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
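For context, the usual bulk-return pattern from a driver's Tx completion path looks roughly like the sketch below (hypothetical ring/frame names; as the comment above notes, the bulk structure lives on the caller's stack):

	struct xdp_frame_bulk bq;
	u32 i;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* required by xdp_return_frame_bulk() */

	for (i = 0; i < done; i++)
		xdp_return_frame_bulk(ring->xdpf[i], &bq);

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();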
+/**
+ * xdp_return_frag -- free one XDP frag or decrement its refcount
+ * @netmem: network memory reference to release
+ * @xdp: &xdp_buff to release the frag for
+ */
+void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp)
+{
+ __xdp_return(netmem, xdp->rxq->mem.type, true, NULL);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frag);
+
void xdp_return_buff(struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo;
- int i;
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
sinfo = xdp_get_shared_info_from_buff(xdp);
- for (i = 0; i < sinfo->nr_frags; i++) {
- struct page *page = skb_frag_page(&sinfo->frags[i]);
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ __xdp_return(skb_frag_netmem(&sinfo->frags[i]),
+ xdp->rxq->mem.type, true, xdp);
- __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
- }
out:
- __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
+ __xdp_return(virt_to_netmem(xdp->data), xdp->rxq->mem.type, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
@@ -573,7 +604,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
xdpf->headroom = 0;
xdpf->metasize = metasize;
xdpf->frame_sz = PAGE_SIZE;
- xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
xsk_buff_free(xdp);
return xdpf;
@@ -589,7 +620,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
- n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
+ n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@@ -597,6 +628,173 @@ int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
+/**
+ * xdp_build_skb_from_buff - create an skb from &xdp_buff
+ * @xdp: &xdp_buff to convert to an skb
+ *
+ * Perform common operations to create a new skb to pass up the stack from
+ * &xdp_buff: allocate an skb head from the NAPI percpu cache, initialize
+ * skb data pointers and offsets, set the recycle bit if the buff is
+ * PP-backed, Rx queue index, protocol and update frags info.
+ *
+ * Return: new &sk_buff on success, %NULL on error.
+ */
+struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
+{
+ const struct xdp_rxq_info *rxq = xdp->rxq;
+ const struct skb_shared_info *sinfo;
+ struct sk_buff *skb;
+ u32 nr_frags = 0;
+ int metalen;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+
+ metalen = xdp->data - xdp->data_meta;
+ if (metalen > 0)
+ skb_metadata_set(skb, metalen);
+
+ if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
+ skb_mark_for_recycle(skb);
+
+ skb_record_rx_queue(skb, rxq->queue_index);
+
+ if (unlikely(nr_frags)) {
+ u32 tsize;
+
+ tsize = sinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size, tsize,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ }
+
+ skb->protocol = eth_type_trans(skb, rxq->dev);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
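A minimal sketch of the intended XDP_PASS usage, assuming a driver that has already initialized the &xdp_buff for this Rx descriptor (napi/rxq names are hypothetical):

	skb = xdp_build_skb_from_buff(&xdp);
	if (unlikely(!skb)) {
		/* skb head allocation failed: recycle the buffer, count a drop */
		xdp_return_buff(&xdp);
		rxq->stats.alloc_fail++;
		break;
	}

	napi_gro_receive(napi, skb);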
+
+/**
+ * xdp_copy_frags_from_zc - copy frags from XSk buff to skb
+ * @skb: skb to copy frags to
+ * @xdp: XSk &xdp_buff from which the frags will be copied
+ * @pp: &page_pool backing page allocation, if available
+ *
+ * Copy all frags from XSk &xdp_buff to the skb to pass it up the stack.
+ * Allocate a new buffer for each frag, copy it and attach to the skb.
+ *
+ * Return: true on success, false on netmem allocation fail.
+ */
+static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
+ const struct xdp_buff *xdp,
+ struct page_pool *pp)
+{
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ const struct skb_shared_info *xinfo;
+ u32 nr_frags, tsize = 0;
+ bool pfmemalloc = false;
+
+ xinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = xinfo->nr_frags;
+
+ for (u32 i = 0; i < nr_frags; i++) {
+ u32 len = skb_frag_size(&xinfo->frags[i]);
+ u32 offset, truesize = len;
+ netmem_ref netmem;
+
+ netmem = page_pool_dev_alloc_netmem(pp, &offset, &truesize);
+ if (unlikely(!netmem)) {
+ sinfo->nr_frags = i;
+ return false;
+ }
+
+ memcpy(__netmem_address(netmem),
+ __netmem_address(xinfo->frags[i].netmem),
+ LARGEST_ALIGN(len));
+ __skb_fill_netmem_desc_noacc(sinfo, i, netmem, offset, len);
+
+ tsize += truesize;
+ pfmemalloc |= netmem_is_pfmemalloc(netmem);
+ }
+
+ xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
+ tsize, pfmemalloc);
+
+ return true;
+}
+
+/**
+ * xdp_build_skb_from_zc - create an skb from XSk &xdp_buff
+ * @xdp: source XSk buff
+ *
+ * Similar to xdp_build_skb_from_buff(), but for XSk frames. Allocate an skb
+ * head, new buffer for the head, copy the data and initialize the skb fields.
+ * If there are frags, allocate new buffers for them and copy.
+ * Buffers are allocated from the system percpu pools to try recycling them.
+ * If new skb was built successfully, @xdp is returned to XSk pool's freelist.
+ * On error, it remains untouched and the caller must take care of this.
+ *
+ * Return: new &sk_buff on success, %NULL on error.
+ */
+struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
+{
+ struct page_pool *pp = this_cpu_read(system_page_pool);
+ const struct xdp_rxq_info *rxq = xdp->rxq;
+ u32 len = xdp->data_end - xdp->data_meta;
+ u32 truesize = xdp->frame_sz;
+ struct sk_buff *skb;
+ int metalen;
+ void *data;
+
+ if (!IS_ENABLED(CONFIG_PAGE_POOL))
+ return NULL;
+
+ data = page_pool_dev_alloc_va(pp, &truesize);
+ if (unlikely(!data))
+ return NULL;
+
+ skb = napi_build_skb(data, truesize);
+ if (unlikely(!skb)) {
+ page_pool_free_va(pp, data, true);
+ return NULL;
+ }
+
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+ memcpy(__skb_put(skb, len), xdp->data_meta, LARGEST_ALIGN(len));
+
+ metalen = xdp->data - xdp->data_meta;
+ if (metalen > 0) {
+ skb_metadata_set(skb, metalen);
+ __skb_pull(skb, metalen);
+ }
+
+ skb_record_rx_queue(skb, rxq->queue_index);
+
+ if (unlikely(xdp_buff_has_frags(xdp)) &&
+ unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
+ napi_consume_skb(skb, true);
+ return NULL;
+ }
+
+ xsk_buff_free(xdp);
+
+ skb->protocol = eth_type_trans(skb, rxq->dev);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);
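The XSk (zero-copy) counterpart follows the same shape; per the kdoc above, the source buffer is only freed by the caller when the conversion fails (sketch, hypothetical names):

	skb = xdp_build_skb_from_zc(xdp);
	if (unlikely(!skb)) {
		/* @xdp is untouched on error, give it back to the XSk pool */
		xsk_buff_free(xdp);
		break;
	}

	napi_gro_receive(napi, skb);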
+
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev)
@@ -643,7 +841,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
* - RX ring dev queue index (skb_record_rx_queue)
*/
- if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
+ if (xdpf->mem_type == MEM_TYPE_PAGE_POOL)
skb_mark_for_recycle(skb);
/* Allow SKB to reuse area used by xdp_frame */
@@ -658,7 +856,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -690,8 +888,7 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
nxdpf = addr;
nxdpf->data = addr + headroom;
nxdpf->frame_sz = PAGE_SIZE;
- nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
- nxdpf->mem.id = 0;
+ nxdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
return nxdpf;
}
@@ -771,11 +968,11 @@ __bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
-BTF_SET8_END(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_END(xdp_metadata_kfunc_ids)
static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 2e6b8c8fd2de..03eb1d941fca 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -2408,6 +2408,11 @@ static struct notifier_block dcbnl_nb __read_mostly = {
.notifier_call = dcbnl_netdevice_event,
};
+static const struct rtnl_msg_handler dcbnl_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_GETDCB, .doit = dcb_doit},
+ {.msgtype = RTM_SETDCB, .doit = dcb_doit},
+};
+
static int __init dcbnl_init(void)
{
int err;
@@ -2416,8 +2421,7 @@ static int __init dcbnl_init(void)
if (err)
return err;
- rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
+ rtnl_register_many(dcbnl_rtnl_msg_handlers);
return 0;
}
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index c4bbac99740d..1cba001bb4c8 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -376,15 +376,11 @@ EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
int __init dccp_ackvec_init(void)
{
- dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
- sizeof(struct dccp_ackvec), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_slab = KMEM_CACHE(dccp_ackvec, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_slab == NULL)
goto out_err;
- dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
- sizeof(struct dccp_ackvec_record),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_record_slab = KMEM_CACHE(dccp_ackvec_record, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
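For reference, KMEM_CACHE() from <linux/slab.h> derives the cache name and alignment from the struct itself, so the converted call above is roughly equivalent to (sketch of the expansion, not the literal macro body):

	dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
					     sizeof(struct dccp_ackvec),
					     __alignof__(struct dccp_ackvec),
					     SLAB_HWCACHE_ALIGN, NULL);

The only behavioural difference from the old open-coded call is the alignment argument: 0 before, __alignof__(struct dccp_ackvec) with the macro.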
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index a3eeb84d16f9..e3d388c33d25 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -13,7 +13,7 @@ config IP_DCCP_CCID2_DEBUG
config IP_DCCP_CCID3
bool "CCID-3 (TCP-Friendly)"
- def_bool y if (IP_DCCP = y || IP_DCCP = m)
+ default IP_DCCP = y || IP_DCCP = m
help
CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
rate-controlled congestion control mechanism. TFRC is designed to
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 4d9823d6dced..d6b30700af67 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -353,6 +353,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
/**
* ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
* @sk: socket to perform estimator on
+ * @mrtt: measured RTT
*
* This code is almost identical with TCP's tcp_rtt_estimator(), since
* - it has a higher sampling frequency (recommended by RFC 1323),
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index ca8670f78ac6..f349d16dd8f6 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -22,7 +22,7 @@
#include "../dccp.h"
#include "ccid3.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static bool ccid3_debug;
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 8a82c5a2c5a8..f5019d95c3ae 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -58,6 +58,7 @@ static int dccp_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler dccp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = dccp_diag_dump,
.dump_one = dccp_diag_dump_one,
.idiag_get_info = dccp_diag_get_info,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 54086bb05c42..f7554dcdaaba 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
goto not_valid_or_not_known;
}
- return dccp_feat_push_confirm(fn, feat, local, &fval);
+ if (dccp_feat_push_confirm(fn, feat, local, &fval)) {
+ kfree(fval.sp.vec);
+ return DCCP_RESET_CODE_TOO_BUSY;
+ }
+ return 0;
} else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */
return 0;
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 44b033fe1ef6..be515ba821e2 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -15,6 +15,7 @@
#include <net/icmp.h>
#include <net/inet_common.h>
+#include <net/inet_dscp.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/protocol.h>
@@ -24,6 +25,7 @@
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/netns/generic.h>
+#include <net/rstreason.h>
#include "ackvec.h"
#include "ccid.h"
@@ -472,7 +474,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
.flowi4_oif = inet_iif(skb),
.daddr = iph->saddr,
.saddr = iph->daddr,
- .flowi4_tos = ip_sock_rt_tos(sk),
+ .flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk))),
.flowi4_scope = ip_sock_rt_scope(sk),
.flowi4_proto = sk->sk_protocol,
.fl4_sport = dccp_hdr(skb)->dccph_dport,
@@ -521,7 +523,8 @@ out:
return err;
}
-static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb,
+ enum sk_rst_reason reason)
{
int err;
const struct iphdr *rxiph;
@@ -655,8 +658,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v4_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
@@ -706,7 +712,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
- dccp_v4_ctl_send_reset(sk, skb);
+ dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
kfree_skb(skb);
return 0;
}
@@ -869,7 +875,7 @@ lookup:
if (nsk == sk) {
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
- dccp_v4_ctl_send_reset(sk, skb);
+ dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
goto discard_and_relse;
} else {
sock_put(sk);
@@ -909,7 +915,7 @@ no_dccp_socket:
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
- dccp_v4_ctl_send_reset(sk, skb);
+ dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
}
discard_it:
@@ -1039,7 +1045,7 @@ static void __net_exit dccp_v4_exit_net(struct net *net)
static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
{
- inet_twsk_purge(&dccp_hashinfo, AF_INET);
+ inet_twsk_purge(&dccp_hashinfo);
}
static struct pernet_operations dccp_v4_ops = {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ded07e09f813..d6649246188d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -29,6 +29,7 @@
#include <net/secure_seq.h>
#include <net/netns/generic.h>
#include <net/sock.h>
+#include <net/rstreason.h>
#include "dccp.h"
#include "ipv6.h"
@@ -256,7 +257,8 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
kfree_skb(inet_rsk(req)->pktopts);
}
-static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb,
+ enum sk_rst_reason reason)
{
const struct ipv6hdr *rxip6h;
struct sk_buff *skb;
@@ -398,8 +400,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v6_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
@@ -613,7 +618,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
by tcp. Feel free to propose better solution.
--ANK (980728)
*/
- if (np->rxopt.all)
+ if (np->rxopt.all && sk->sk_state != DCCP_LISTEN)
opt_skb = skb_clone_and_charge_r(skb, sk);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
@@ -656,7 +661,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
- dccp_v6_ctl_send_reset(sk, skb);
+ dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
discard:
if (opt_skb != NULL)
__kfree_skb(opt_skb);
@@ -762,7 +767,7 @@ lookup:
if (nsk == sk) {
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
- dccp_v6_ctl_send_reset(sk, skb);
+ dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
goto discard_and_relse;
} else {
sock_put(sk);
@@ -801,7 +806,7 @@ no_dccp_socket:
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
- dccp_v6_ctl_send_reset(sk, skb);
+ dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
}
discard_it:
@@ -1119,15 +1124,9 @@ static void __net_exit dccp_v6_exit_net(struct net *net)
inet_ctl_sock_destroy(pn->v6_ctl_sk);
}
-static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
-{
- inet_twsk_purge(&dccp_hashinfo, AF_INET6);
-}
-
static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
- .exit_batch = dccp_v6_exit_batch,
.id = &dccp_v6_pernet_id,
.size = sizeof(struct dccp_v6_pernet),
};
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 64d805b27add..fecc8190064f 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -15,6 +15,7 @@
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>
+#include <net/rstreason.h>
#include "ackvec.h"
#include "ccid.h"
@@ -53,17 +54,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
- inet_twsk_schedule(tw, timeo);
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
- inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
- local_bh_enable();
+ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
@@ -202,7 +196,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
- req->rsk_ops->send_reset(sk, skb);
+ req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
inet_csk_reqsk_queue_drop(sk, req);
out:
diff --git a/net/dccp/options.c b/net/dccp/options.c
index d24cad05001e..db62d4767024 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -10,7 +10,7 @@
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fd2eb148d24d..5c2e24f3c39b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -204,7 +204,7 @@ void dccp_write_space(struct sock *sk)
wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index ee8d4f5afa72..b15845fd6300 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -11,10 +11,6 @@
#include "dccp.h"
#include "feat.h"
-#ifndef CONFIG_SYSCTL
-#error This file should not be compiled without CONFIG_SYSCTL defined
-#endif
-
/* Boundary values */
static int u8_max = 0xFF;
static unsigned long seqw_min = DCCPF_SEQ_WMIN,
@@ -90,8 +86,6 @@ static struct ctl_table dccp_default_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
-
- { }
};
static struct ctl_table_header *dccp_table_header;
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 7f0b093208d7..7203c39532fc 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -117,7 +117,7 @@ static struct devlink_rel *devlink_rel_alloc(void)
err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
xa_limit_32b, &next, GFP_KERNEL);
- if (err) {
+ if (err < 0) {
kfree(rel);
return ERR_PTR(err);
}
@@ -314,7 +314,7 @@ static void devlink_release(struct work_struct *work)
mutex_destroy(&devlink->lock);
lockdep_unregister_key(&devlink->lock_key);
put_device(devlink->dev);
- kfree(devlink);
+ kvfree(devlink);
}
void devlink_put(struct devlink *devlink)
@@ -420,7 +420,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
if (!devlink_reload_actions_valid(ops))
return NULL;
- devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+ devlink = kvzalloc(struct_size(devlink, priv, priv_size), GFP_KERNEL);
if (!devlink)
return NULL;
@@ -455,7 +455,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
return devlink;
err_xa_alloc:
- kfree(devlink);
+ kvfree(devlink);
return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
index 19dbf540748a..d6e3db300acb 100644
--- a/net/devlink/dev.c
+++ b/net/devlink/dev.c
@@ -531,10 +531,8 @@ int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info)
return err;
}
- if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
- action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
- else
- action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
+ action = nla_get_u8_default(info->attrs[DEVLINK_ATTR_RELOAD_ACTION],
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
if (!devlink_reload_action_is_supported(devlink, action)) {
NL_SET_ERR_MSG(info->extack, "Requested reload action is not supported by the driver");
@@ -971,14 +969,14 @@ static int devlink_nl_flash_update_fill(struct sk_buff *msg,
nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
params->component))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
- params->done, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
+ params->done))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
- params->total, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
+ params->total))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
- params->timeout, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
+ params->timeout))
goto nla_put_failure;
out:
@@ -1202,23 +1200,19 @@ static void __devlink_compat_running_version(struct devlink *devlink,
if (err)
goto free_msg;
- nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
+ nla_for_each_attr_type(nlattr, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+ (void *)msg->data, msg->len, rem) {
const struct nlattr *kv;
int rem_kv;
- if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
- continue;
-
- nla_for_each_nested(kv, nlattr, rem_kv) {
- if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
- continue;
-
+ nla_for_each_nested_type(kv, DEVLINK_ATTR_INFO_VERSION_VALUE,
+ nlattr, rem_kv) {
strlcat(buf, nla_data(kv), len);
strlcat(buf, " ", len);
}
}
free_msg:
- nlmsg_free(msg);
+ nlmsg_consume(msg);
}
void devlink_compat_running_version(struct devlink *devlink,
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index c7a8e13f917c..14eaad9cfe35 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -166,7 +166,7 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
static inline struct devlink_nl_dump_state *
devlink_dump_state(struct netlink_callback *cb)
{
- NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state);
+ NL_ASSERT_CTX_FITS(struct devlink_nl_dump_state);
return (struct devlink_nl_dump_state *)cb->ctx;
}
@@ -181,6 +181,11 @@ devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
return 0;
}
+static inline int devlink_nl_put_u64(struct sk_buff *msg, int attrtype, u64 val)
+{
+ return nla_put_u64_64bit(msg, attrtype, val, DEVLINK_ATTR_PAD);
+}
+
int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
struct devlink *devlink, int attrtype);
int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info);
diff --git a/net/devlink/dpipe.c b/net/devlink/dpipe.c
index a72a9292efc5..e55701b007f0 100644
--- a/net/devlink/dpipe.c
+++ b/net/devlink/dpipe.c
@@ -165,18 +165,17 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
- DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size))
goto nla_put_failure;
if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
table->counters_enabled))
goto nla_put_failure;
if (table->resource_valid) {
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
- table->resource_id, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
- table->resource_units, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+ table->resource_id) ||
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+ table->resource_units))
goto nla_put_failure;
}
if (devlink_dpipe_matches_put(table, skb))
@@ -403,12 +402,11 @@ static int devlink_dpipe_entry_put(struct sk_buff *skb,
if (!entry_attr)
return -EMSGSIZE;
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index))
goto nla_put_failure;
if (entry->counter_valid)
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
- entry->counter, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
+ entry->counter))
goto nla_put_failure;
matches_attr = nla_nest_start_noflag(skb,
@@ -839,7 +837,7 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
*/
int devl_dpipe_table_register(struct devlink *devlink,
const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
+ const struct devlink_dpipe_table_ops *table_ops,
void *priv, bool counter_control_extern)
{
struct devlink_dpipe_table *table;
diff --git a/net/devlink/health.c b/net/devlink/health.c
index acb8c0e174bb..57db6799722a 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -287,29 +287,27 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
reporter->health_state))
goto reporter_nest_cancel;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
- reporter->error_count, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
+ reporter->error_count))
goto reporter_nest_cancel;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
- reporter->recovery_count, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
+ reporter->recovery_count))
goto reporter_nest_cancel;
if (reporter->ops->recover &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
- reporter->graceful_period,
- DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
+ reporter->graceful_period))
goto reporter_nest_cancel;
if (reporter->ops->recover &&
nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
reporter->auto_recover))
goto reporter_nest_cancel;
if (reporter->dump_fmsg &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
- jiffies_to_msecs(reporter->dump_ts),
- DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
+ jiffies_to_msecs(reporter->dump_ts)))
goto reporter_nest_cancel;
if (reporter->dump_fmsg &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
- reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+ reporter->dump_real_ts))
goto reporter_nest_cancel;
if (reporter->ops->dump &&
nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
@@ -963,8 +961,7 @@ devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
case NLA_U32:
return nla_put_u32(skb, attrtype, *(u32 *)msg->value);
case NLA_U64:
- return nla_put_u64_64bit(skb, attrtype, *(u64 *)msg->value,
- DEVLINK_ATTR_PAD);
+ return devlink_nl_put_u64(skb, attrtype, *(u64 *)msg->value);
case NLA_NUL_STRING:
return nla_put_string(skb, attrtype, (char *)&msg->value);
case NLA_BINARY:
@@ -1241,3 +1238,70 @@ int devlink_nl_health_reporter_test_doit(struct sk_buff *skb,
return reporter->ops->test(reporter, info->extack);
}
+
+/**
+ * devlink_fmsg_dump_skb - Dump sk_buff structure
+ * @fmsg: devlink formatted message pointer
+ * @skb: pointer to skb
+ *
+ * Dump diagnostic information about sk_buff structure, like headroom, length,
+ * tailroom, MAC, etc.
+ */
+void devlink_fmsg_dump_skb(struct devlink_fmsg *fmsg, const struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ struct sock *sk = skb->sk;
+ bool has_mac, has_trans;
+
+ has_mac = skb_mac_header_was_set(skb);
+ has_trans = skb_transport_header_was_set(skb);
+
+ devlink_fmsg_pair_nest_start(fmsg, "skb");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "actual len", skb->len);
+ devlink_fmsg_put(fmsg, "head len", skb_headlen(skb));
+ devlink_fmsg_put(fmsg, "data len", skb->data_len);
+ devlink_fmsg_put(fmsg, "tail len", skb_tailroom(skb));
+ devlink_fmsg_put(fmsg, "MAC", has_mac ? skb->mac_header : -1);
+ devlink_fmsg_put(fmsg, "MAC len",
+ has_mac ? skb_mac_header_len(skb) : -1);
+ devlink_fmsg_put(fmsg, "network hdr", skb->network_header);
+ devlink_fmsg_put(fmsg, "network hdr len",
+ has_trans ? skb_network_header_len(skb) : -1);
+ devlink_fmsg_put(fmsg, "transport hdr",
+ has_trans ? skb->transport_header : -1);
+ devlink_fmsg_put(fmsg, "csum", (__force u32)skb->csum);
+ devlink_fmsg_put(fmsg, "csum_ip_summed", (u8)skb->ip_summed);
+ devlink_fmsg_put(fmsg, "csum_complete_sw", !!skb->csum_complete_sw);
+ devlink_fmsg_put(fmsg, "csum_valid", !!skb->csum_valid);
+ devlink_fmsg_put(fmsg, "csum_level", (u8)skb->csum_level);
+ devlink_fmsg_put(fmsg, "sw_hash", !!skb->sw_hash);
+ devlink_fmsg_put(fmsg, "l4_hash", !!skb->l4_hash);
+ devlink_fmsg_put(fmsg, "proto", ntohs(skb->protocol));
+ devlink_fmsg_put(fmsg, "pkt_type", (u8)skb->pkt_type);
+ devlink_fmsg_put(fmsg, "iif", skb->skb_iif);
+
+ if (sk) {
+ devlink_fmsg_pair_nest_start(fmsg, "sk");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "family", sk->sk_type);
+ devlink_fmsg_put(fmsg, "type", sk->sk_type);
+ devlink_fmsg_put(fmsg, "proto", sk->sk_protocol);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+ }
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ devlink_fmsg_pair_nest_start(fmsg, "shinfo");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "tx_flags", sh->tx_flags);
+ devlink_fmsg_put(fmsg, "nr_frags", sh->nr_frags);
+ devlink_fmsg_put(fmsg, "gso_size", sh->gso_size);
+ devlink_fmsg_put(fmsg, "gso_type", sh->gso_type);
+ devlink_fmsg_put(fmsg, "gso_segs", sh->gso_segs);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_dump_skb);
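A sketch of how a driver health reporter might call the new helper from its dump callback; the foo_* reporter and error context are hypothetical:

static int foo_tx_reporter_dump(struct devlink_health_reporter *reporter,
				struct devlink_fmsg *fmsg, void *priv_ctx,
				struct netlink_ext_ack *extack)
{
	struct foo_tx_err_ctx *ctx = priv_ctx;	/* hypothetical error context */

	if (!ctx)
		return 0;

	devlink_fmsg_put(fmsg, "queue index", ctx->qid);
	devlink_fmsg_dump_skb(fmsg, ctx->skb);

	return 0;
}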
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index 499885c8b9ca..593605c1b1ef 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -193,12 +193,13 @@ devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs,
devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
devlinks_xa_for_each_registered_get(net, index, devlink) {
- devl_dev_lock(devlink, dev_lock);
- if (devl_is_registered(devlink) &&
- strcmp(devlink->dev->bus->name, busname) == 0 &&
- strcmp(dev_name(devlink->dev), devname) == 0)
- return devlink;
- devl_dev_unlock(devlink, dev_lock);
+ if (strcmp(devlink->dev->bus->name, busname) == 0 &&
+ strcmp(dev_name(devlink->dev), devname) == 0) {
+ devl_dev_lock(devlink, dev_lock);
+ if (devl_is_registered(devlink))
+ return devlink;
+ devl_dev_unlock(devlink, dev_lock);
+ }
devlink_put(devlink);
}
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index c81cf2dd154f..f9786d51f68f 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -198,7 +198,7 @@ static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITC
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3),
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
};
diff --git a/net/devlink/param.c b/net/devlink/param.c
index 22bc3b500518..dcf0d1ccebba 100644
--- a/net/devlink/param.c
+++ b/net/devlink/param.c
@@ -158,11 +158,12 @@ static int devlink_param_get(struct devlink *devlink,
static int devlink_param_set(struct devlink *devlink,
const struct devlink_param *param,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
if (!param->set)
return -EOPNOTSUPP;
- return param->set(devlink, param->id, ctx);
+ return param->set(devlink, param->id, ctx, extack);
}
static int
@@ -571,7 +572,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return -EOPNOTSUPP;
ctx.val = value;
ctx.cmode = cmode;
- err = devlink_param_set(devlink, param, &ctx);
+ err = devlink_param_set(devlink, param, &ctx, info->extack);
if (err)
return err;
}
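With the extra argument, a driver's .set callback can report validation failures via extack instead of a bare errno; a minimal hypothetical sketch:

static int foo_param_enable_set(struct devlink *devlink, u32 id,
				struct devlink_param_gset_ctx *ctx,
				struct netlink_ext_ack *extack)
{
	struct foo_dev *fdev = devlink_priv(devlink);	/* hypothetical driver priv */

	if (fdev->in_reset) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot change the parameter during reset");
		return -EBUSY;
	}

	fdev->enable = ctx->val.vbool;
	return 0;
}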
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 4b2d46ccfe48..939081a0e615 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -16,6 +16,7 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_
DEVLINK_PORT_FN_STATE_ACTIVE),
[DEVLINK_PORT_FN_ATTR_CAPS] =
NLA_POLICY_BITFIELD32(DEVLINK_PORT_FN_CAPS_VALID_MASK),
+ [DEVLINK_PORT_FN_ATTR_MAX_IO_EQS] = { .type = NLA_U32 },
};
#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
@@ -182,6 +183,30 @@ static int devlink_port_fn_caps_fill(struct devlink_port *devlink_port,
return 0;
}
+static int devlink_port_fn_max_io_eqs_fill(struct devlink_port *port,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ bool *msg_updated)
+{
+ u32 max_io_eqs;
+ int err;
+
+ if (!port->ops->port_fn_max_io_eqs_get)
+ return 0;
+
+ err = port->ops->port_fn_max_io_eqs_get(port, &max_io_eqs, extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+ err = nla_put_u32(msg, DEVLINK_PORT_FN_ATTR_MAX_IO_EQS, max_io_eqs);
+ if (err)
+ return err;
+ *msg_updated = true;
+ return 0;
+}
+
int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port)
{
if (devlink_nl_put_handle(msg, devlink_port->devlink))
@@ -410,6 +435,18 @@ static int devlink_port_fn_caps_set(struct devlink_port *devlink_port,
}
static int
+devlink_port_fn_max_io_eqs_set(struct devlink_port *devlink_port,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 max_io_eqs;
+
+ max_io_eqs = nla_get_u32(attr);
+ return devlink_port->ops->port_fn_max_io_eqs_set(devlink_port,
+ max_io_eqs, extack);
+}
+
+static int
devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
struct netlink_ext_ack *extack)
{
@@ -430,6 +467,9 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por
err = devlink_port_fn_state_fill(port, msg, extack, &msg_updated);
if (err)
goto out;
+ err = devlink_port_fn_max_io_eqs_fill(port, msg, extack, &msg_updated);
+ if (err)
+ goto out;
err = devlink_rel_devlink_handle_put(msg, port->devlink,
port->rel_index,
DEVLINK_PORT_FN_ATTR_DEVLINK,
@@ -726,6 +766,12 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port,
}
}
}
+ if (tb[DEVLINK_PORT_FN_ATTR_MAX_IO_EQS] &&
+ !ops->port_fn_max_io_eqs_set) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_MAX_IO_EQS],
+ "Function does not support max_io_eqs setting");
+ return -EOPNOTSUPP;
+ }
return 0;
}
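The driver side of the new attribute is a pair of devlink_port_ops callbacks; a hypothetical sketch matching the signatures used above:

static int foo_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
				      struct netlink_ext_ack *extack)
{
	*max_io_eqs = foo_port_priv(port)->max_io_eqs;	/* hypothetical accessor */
	return 0;
}

static int foo_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
				      struct netlink_ext_ack *extack)
{
	if (!max_io_eqs) {
		NL_SET_ERR_MSG_MOD(extack, "At least one IO EQ is required");
		return -EINVAL;
	}

	foo_port_priv(port)->max_io_eqs = max_io_eqs;	/* hypothetical accessor */
	return 0;
}

static const struct devlink_port_ops foo_devlink_port_ops = {
	.port_fn_max_io_eqs_get	= foo_port_fn_max_io_eqs_get,
	.port_fn_max_io_eqs_set	= foo_port_fn_max_io_eqs_set,
};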
@@ -761,6 +807,13 @@ static int devlink_port_function_set(struct devlink_port *port,
return err;
}
+ attr = tb[DEVLINK_PORT_FN_ATTR_MAX_IO_EQS];
+ if (attr) {
+ err = devlink_port_fn_max_io_eqs_set(port, attr, extack);
+ if (err)
+ return err;
+ }
+
/* Keep this as the last function attribute set, so that when
* multiple port function attributes are set along with state,
* Those can be applied first before activating the state.
@@ -889,7 +942,7 @@ int devlink_nl_port_new_doit(struct sk_buff *skb, struct genl_info *info)
err = -ENOMEM;
goto err_out_port_del;
}
- err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
+ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
info->snd_portid, info->snd_seq, 0, NULL);
if (WARN_ON_ONCE(err))
goto err_out_msg_free;
@@ -1323,7 +1376,7 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
*
* @devlink_port: devlink port
* @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
+ * @pf: associated PCI function number for the devlink port instance
* @external: indicates if the port is for an external controller
*/
void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
@@ -1349,8 +1402,9 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
*
* @devlink_port: devlink port
* @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
- * @vf: associated VF of a PF for the devlink port instance
+ * @pf: associated PCI function number for the devlink port instance
+ * @vf: associated PCI VF number of a PF for the devlink port instance;
+ * VF number starts from 0 for the first PCI virtual function
* @external: indicates if the port is for an external controller
*/
void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
@@ -1377,8 +1431,8 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
*
* @devlink_port: devlink port
* @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
- * @sf: associated SF of a PF for the devlink port instance
+ * @pf: associated PCI function number for the devlink port instance
+ * @sf: associated SF number of a PF for the devlink port instance
* @external: indicates if the port is for an external controller
*/
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index 7139e67e93ae..8828ffaf6cbc 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -108,12 +108,12 @@ static int devlink_nl_rate_fill(struct sk_buff *msg,
goto nla_put_failure;
}
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
- devlink_rate->tx_share, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_RATE_TX_SHARE,
+ devlink_rate->tx_share))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
- devlink_rate->tx_max, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_RATE_TX_MAX,
+ devlink_rate->tx_max))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_RATE_TX_PRIORITY,
diff --git a/net/devlink/region.c b/net/devlink/region.c
index 7319127c5913..63fb297f6d67 100644
--- a/net/devlink/region.c
+++ b/net/devlink/region.c
@@ -77,7 +77,7 @@ static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
if (!snap_attr)
- return -EINVAL;
+ return -EMSGSIZE;
err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
if (err)
@@ -102,7 +102,7 @@ static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
snapshots_attr = nla_nest_start_noflag(msg,
DEVLINK_ATTR_REGION_SNAPSHOTS);
if (!snapshots_attr)
- return -EINVAL;
+ return -EMSGSIZE;
list_for_each_entry(snapshot, &region->snapshot_list, list) {
err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
@@ -145,9 +145,7 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
if (err)
goto nla_put_failure;
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
- region->size,
- DEVLINK_ATTR_PAD);
+ err = devlink_nl_put_u64(msg, DEVLINK_ATTR_REGION_SIZE, region->size);
if (err)
goto nla_put_failure;
@@ -210,8 +208,8 @@ devlink_nl_region_notify_build(struct devlink_region *region,
if (err)
goto out_cancel_msg;
} else {
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
- region->size, DEVLINK_ATTR_PAD);
+ err = devlink_nl_put_u64(msg, DEVLINK_ATTR_REGION_SIZE,
+ region->size);
if (err)
goto out_cancel_msg;
}
@@ -773,8 +771,7 @@ static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
if (err)
goto nla_put_failure;
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
- DEVLINK_ATTR_PAD);
+ err = devlink_nl_put_u64(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr);
if (err)
goto nla_put_failure;
diff --git a/net/devlink/resource.c b/net/devlink/resource.c
index 594c8aeb3bfa..2d6324f3d91f 100644
--- a/net/devlink/resource.c
+++ b/net/devlink/resource.c
@@ -141,12 +141,12 @@ devlink_resource_size_params_put(struct devlink_resource *resource,
struct devlink_resource_size_params *size_params;
size_params = &resource->size_params;
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
- size_params->size_granularity, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
- size_params->size_max, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
- size_params->size_min, DEVLINK_ATTR_PAD) ||
+ if (devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+ size_params->size_granularity) ||
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+ size_params->size_max) ||
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+ size_params->size_min) ||
nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
return -EMSGSIZE;
return 0;
@@ -157,9 +157,8 @@ static int devlink_resource_occ_put(struct devlink_resource *resource,
{
if (!resource->occ_get)
return 0;
- return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
- resource->occ_get(resource->occ_get_priv),
- DEVLINK_ATTR_PAD);
+ return devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ resource->occ_get(resource->occ_get_priv));
}
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -174,14 +173,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
- DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
- DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size) ||
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id))
goto nla_put_failure;
if (resource->size != resource->size_new &&
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
- resource->size_new, DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+ resource->size_new))
goto nla_put_failure;
if (devlink_resource_occ_put(resource, skb))
goto nla_put_failure;
@@ -348,7 +345,7 @@ int devl_resource_register(struct devlink *devlink,
resource = devlink_resource_find(devlink, NULL, resource_id);
if (resource)
- return -EINVAL;
+ return -EEXIST;
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
@@ -384,39 +381,6 @@ int devl_resource_register(struct devlink *devlink,
}
EXPORT_SYMBOL_GPL(devl_resource_register);
-/**
- * devlink_resource_register - devlink resource register
- *
- * @devlink: devlink
- * @resource_name: resource's name
- * @resource_size: resource's size
- * @resource_id: resource's id
- * @parent_resource_id: resource's parent id
- * @size_params: size parameters
- *
- * Generic resources should reuse the same names across drivers.
- * Please see the generic resources list at:
- * Documentation/networking/devlink/devlink-resource.rst
- *
- * Context: Takes and release devlink->lock <mutex>.
- */
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_resource_register(devlink, resource_name, resource_size,
- resource_id, parent_resource_id, size_params);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_resource_register);
-
static void devlink_resource_unregister(struct devlink *devlink,
struct devlink_resource *resource)
{
@@ -517,28 +481,6 @@ void devl_resource_occ_get_register(struct devlink *devlink,
EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
/**
- * devlink_resource_occ_get_register - register occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- * @occ_get: occupancy getter callback
- * @occ_get_priv: occupancy getter callback priv
- *
- * Context: Takes and release devlink->lock <mutex>.
- */
-void devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv)
-{
- devl_lock(devlink);
- devl_resource_occ_get_register(devlink, resource_id,
- occ_get, occ_get_priv);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
-
-/**
* devl_resource_occ_get_unregister - unregister occupancy getter
*
* @devlink: devlink
@@ -560,20 +502,3 @@ void devl_resource_occ_get_unregister(struct devlink *devlink,
resource->occ_get_priv = NULL;
}
EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
-
-/**
- * devlink_resource_occ_get_unregister - unregister occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- *
- * Context: Takes and release devlink->lock <mutex>.
- */
-void devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id)
-{
- devl_lock(devlink);
- devl_resource_occ_get_unregister(devlink, resource_id);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
diff --git a/net/devlink/trap.c b/net/devlink/trap.c
index 5d18c7424df1..f36087f90db5 100644
--- a/net/devlink/trap.c
+++ b/net/devlink/trap.c
@@ -189,14 +189,12 @@ devlink_trap_group_stats_put(struct sk_buff *msg,
if (!attr)
return -EMSGSIZE;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- u64_stats_read(&stats.rx_packets),
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+ u64_stats_read(&stats.rx_packets)))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- u64_stats_read(&stats.rx_bytes),
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes)))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -231,18 +229,15 @@ static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
return -EMSGSIZE;
if (devlink->ops->trap_drop_counter_get &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
- DEVLINK_ATTR_PAD))
+ devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- u64_stats_read(&stats.rx_packets),
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+ u64_stats_read(&stats.rx_packets)))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- u64_stats_read(&stats.rx_bytes),
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes)))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -750,8 +745,7 @@ devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
if (!attr)
return -EMSGSIZE;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
- DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops))
goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -783,12 +777,12 @@ devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
policer_item->policer->id))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
- policer_item->rate, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
+ policer_item->rate))
goto nla_put_failure;
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
- policer_item->burst, DEVLINK_ATTR_PAD))
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
+ policer_item->burst))
goto nla_put_failure;
err = devlink_trap_policer_stats_put(msg, devlink,
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 8e698bea99a3..2dfe9063613f 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -129,7 +129,7 @@ config NET_DSA_TAG_RTL4_A
tristate "Tag driver for Realtek 4 byte protocol A tags"
help
Say Y or M if you want to enable support for tagging frames for the
- Realtek switches with 4 byte protocol A tags, sich as found in
+ Realtek switches with 4 byte protocol A tags, such as found in
the Realtek RTL8366RB.
config NET_DSA_TAG_RTL8_4
@@ -166,6 +166,12 @@ config NET_DSA_TAG_TRAILER
Say Y or M if you want to enable support for tagging frames
with a trailer, e.g. Marvell 88E6060.
+config NET_DSA_TAG_VSC73XX_8021Q
+ tristate "Tag driver for Microchip/Vitesse VSC73xx family of switches, using VLAN"
+ help
+ Say Y or M if you want to enable support for tagging frames with a
+ custom VLAN-based header.
+
config NET_DSA_TAG_XRS700X
tristate "Tag driver for XRS700x switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 8a1894a42552..555c07cfeb71 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
# for tracing framework to find trace.h
diff --git a/net/dsa/devlink.c b/net/dsa/devlink.c
index 431bf52290a1..f41f9fc2194e 100644
--- a/net/dsa/devlink.c
+++ b/net/dsa/devlink.c
@@ -194,7 +194,8 @@ int dsa_devlink_param_get(struct devlink *dl, u32 id,
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
@@ -228,10 +229,15 @@ int dsa_devlink_resource_register(struct dsa_switch *ds,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params)
{
- return devlink_resource_register(ds->devlink, resource_name,
- resource_size, resource_id,
- parent_resource_id,
- size_params);
+ int ret;
+
+ devl_lock(ds->devlink);
+ ret = devl_resource_register(ds->devlink, resource_name, resource_size,
+ resource_id, parent_resource_id,
+ size_params);
+ devl_unlock(ds->devlink);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
@@ -246,15 +252,19 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv)
{
- return devlink_resource_occ_get_register(ds->devlink, resource_id,
- occ_get, occ_get_priv);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_register(ds->devlink, resource_id, occ_get,
+ occ_get_priv);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id)
{
- devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
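The devlink.c conversions above all follow one pattern: the unlocked devlink_* resource helpers are gone, so the DSA wrappers take the instance lock themselves and call the devl_* variants. A minimal sketch of that pattern, with a hypothetical resource name and IDs:

static int example_register_resource(struct devlink *devlink)
{
	struct devlink_resource_size_params params;
	int err;

	devlink_resource_size_params_init(&params, 16, 256, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	devl_lock(devlink);	/* devl_* helpers expect the instance lock held */
	err = devl_resource_register(devlink, "example", 256, 1,
				     DEVLINK_RESOURCE_ID_PARENT_TOP, &params);
	devl_unlock(devlink);

	return err;
}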
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ac7be864e80d..e827775baf2e 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
@@ -626,7 +625,6 @@ static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
static int dsa_switch_setup(struct dsa_switch *ds)
{
- struct device_node *dn;
int err;
if (ds->setup)
@@ -666,10 +664,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dsa_user_mii_bus_init(ds);
- dn = of_get_child_by_name(ds->dev->of_node, "mdio");
-
- err = of_mdiobus_register(ds->user_mii_bus, dn);
- of_node_put(dn);
+ err = mdiobus_register(ds->user_mii_bus);
if (err < 0)
goto free_user_mii_bus;
}
@@ -1372,7 +1367,7 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
return dsa_switch_parse_ports_of(ds, dn);
}
-static int dev_is_class(struct device *dev, void *class)
+static int dev_is_class(struct device *dev, const void *class)
{
if (dev->class != NULL && !strcmp(dev->class->name, class))
return 1;
@@ -1574,6 +1569,7 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *conduit, *user_dev;
+ LIST_HEAD(close_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1583,10 +1579,16 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
rtnl_lock();
+ dsa_switch_for_each_cpu_port(dp, ds)
+ list_add(&dp->conduit->close_list, &close_list);
+
+ dev_close_many(&close_list, true);
+
dsa_switch_for_each_user_port(dp, ds) {
conduit = dsa_port_to_conduit(dp);
user_dev = dp->user;
+ netif_device_detach(user_dev);
netdev_upper_dev_unlink(conduit, user_dev);
}
diff --git a/net/dsa/port.c b/net/dsa/port.c
index c42dac87671b..5c9d1798e830 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1467,10 +1467,34 @@ int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
*/
dsa_user_unsync_ha(dev);
+ /* If live-changing, we also need to uninstall the user device address
+ * from the port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
+
err = dsa_port_assign_conduit(dp, conduit, extack, true);
if (err)
goto rewind_old_addrs;
+ /* If the port doesn't have its own MAC address and relies on the DSA
+ * conduit's one, inherit it again from the new DSA conduit.
+ */
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, conduit);
+
+ /* If live-changing, we need to install the user device address to the
+ * port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP) {
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to install host UC address");
+ goto rewind_addr_inherit;
+ }
+ }
+
dsa_user_sync_ha(dev);
if (vlan_filtering) {
@@ -1500,10 +1524,26 @@ rewind_new_vlan:
rewind_new_addrs:
dsa_user_unsync_ha(dev);
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
+
+rewind_addr_inherit:
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, old_conduit);
+
dsa_port_assign_conduit(dp, old_conduit, NULL, false);
/* Restore the objects on the old CPU port */
rewind_old_addrs:
+ if (dev->flags & IFF_UP) {
+ tmp = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (tmp) {
+ dev_err(ds->dev,
+ "port %d failed to restore host UC address: %pe\n",
+ dp->index, ERR_PTR(tmp));
+ }
+ }
+
dsa_user_sync_ha(dev);
if (vlan_filtering) {
@@ -1535,100 +1575,32 @@ void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
cpu_dp->tag_ops = tag_ops;
}
-static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
-{
- struct device_node *phy_dn;
- struct phy_device *phydev;
-
- phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
- if (!phy_dn)
- return NULL;
-
- phydev = of_phy_find_device(phy_dn);
- if (!phydev) {
- of_node_put(phy_dn);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(phy_dn);
- return phydev;
-}
-
-static struct phylink_pcs *
-dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
- phy_interface_t interface)
-{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
- struct dsa_switch *ds = dp->ds;
-
- if (ds->ops->phylink_mac_select_pcs)
- pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
-
- return pcs;
-}
-
-static int dsa_port_phylink_mac_prepare(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
+/* dsa_supports_eee - indicate that EEE is supported
+ * @ds: pointer to &struct dsa_switch
+ * @port: port index
+ *
+ * A default implementation for the .support_eee() DSA operations member,
+ * which drivers can use to indicate that they support EEE on all of their
+ * user ports.
+ *
+ * Returns: true
+ */
+bool dsa_supports_eee(struct dsa_switch *ds, int port)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
- int err = 0;
-
- if (ds->ops->phylink_mac_prepare)
- err = ds->ops->phylink_mac_prepare(ds, dp->index, mode,
- interface);
-
- return err;
+ return true;
}
+EXPORT_SYMBOL_GPL(dsa_supports_eee);
static void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_config)
- return;
-
- ds->ops->phylink_mac_config(ds, dp->index, mode, state);
-}
-
-static int dsa_port_phylink_mac_finish(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
- int err = 0;
-
- if (ds->ops->phylink_mac_finish)
- err = ds->ops->phylink_mac_finish(ds, dp->index, mode,
- interface);
-
- return err;
}
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct phy_device *phydev = NULL;
- struct dsa_switch *ds = dp->ds;
-
- if (dsa_port_is_user(dp))
- phydev = dp->user->phydev;
-
- if (!ds->ops->phylink_mac_link_down) {
- if (ds->ops->adjust_link && phydev)
- ds->ops->adjust_link(ds, dp->index, phydev);
- return;
- }
-
- ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
@@ -1638,30 +1610,17 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_link_up) {
- if (ds->ops->adjust_link && phydev)
- ds->ops->adjust_link(ds, dp->index, phydev);
- return;
- }
-
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
}
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
- .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
- .mac_prepare = dsa_port_phylink_mac_prepare,
.mac_config = dsa_port_phylink_mac_config,
- .mac_finish = dsa_port_phylink_mac_finish,
.mac_link_down = dsa_port_phylink_mac_link_down,
.mac_link_up = dsa_port_phylink_mac_link_up,
};
int dsa_port_phylink_create(struct dsa_port *dp)
{
+ const struct phylink_mac_ops *mac_ops;
struct dsa_switch *ds = dp->ds;
phy_interface_t mode;
struct phylink *pl;
@@ -1685,8 +1644,12 @@ int dsa_port_phylink_create(struct dsa_port *dp)
}
}
- pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
- mode, &dsa_port_phylink_mac_ops);
+ mac_ops = &dsa_port_phylink_mac_ops;
+ if (ds->phylink_mac_ops)
+ mac_ops = ds->phylink_mac_ops;
+
+ pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), mode,
+ mac_ops);
if (IS_ERR(pl)) {
pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
return PTR_ERR(pl);
@@ -1703,78 +1666,6 @@ void dsa_port_phylink_destroy(struct dsa_port *dp)
dp->pl = NULL;
}
-static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
-{
- struct dsa_switch *ds = dp->ds;
- struct phy_device *phydev;
- int port = dp->index;
- int err = 0;
-
- phydev = dsa_port_get_phy_device(dp);
- if (!phydev)
- return 0;
-
- if (IS_ERR(phydev))
- return PTR_ERR(phydev);
-
- if (enable) {
- err = genphy_resume(phydev);
- if (err < 0)
- goto err_put_dev;
-
- err = genphy_read_status(phydev);
- if (err < 0)
- goto err_put_dev;
- } else {
- err = genphy_suspend(phydev);
- if (err < 0)
- goto err_put_dev;
- }
-
- if (ds->ops->adjust_link)
- ds->ops->adjust_link(ds, port, phydev);
-
- dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
-
-err_put_dev:
- put_device(&phydev->mdio.dev);
- return err;
-}
-
-static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
-{
- struct device_node *dn = dp->dn;
- struct dsa_switch *ds = dp->ds;
- struct phy_device *phydev;
- int port = dp->index;
- phy_interface_t mode;
- int err;
-
- err = of_phy_register_fixed_link(dn);
- if (err) {
- dev_err(ds->dev,
- "failed to register the fixed PHY of port %d\n",
- port);
- return err;
- }
-
- phydev = of_phy_find_device(dn);
-
- err = of_get_phy_mode(dn, &mode);
- if (err)
- mode = PHY_INTERFACE_MODE_NA;
- phydev->interface = mode;
-
- genphy_read_status(phydev);
-
- if (ds->ops->adjust_link)
- ds->ops->adjust_link(ds, port, phydev);
-
- put_device(&phydev->mdio.dev);
-
- return 0;
-}
-
static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
@@ -1952,12 +1843,20 @@ static void dsa_shared_port_validate_of(struct dsa_port *dp,
dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}
+static void dsa_shared_port_link_down(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->phylink_mac_ops && ds->phylink_mac_ops->mac_link_down)
+ ds->phylink_mac_ops->mac_link_down(&dp->pl_config, MLO_AN_FIXED,
+ PHY_INTERFACE_MODE_NA);
+}
+
int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
bool missing_link_description;
bool missing_phy_mode;
- int port = dp->index;
dsa_shared_port_validate_of(dp, &missing_phy_mode,
&missing_link_description);
@@ -1967,46 +1866,28 @@ int dsa_shared_port_link_register_of(struct dsa_port *dp)
dsa_switches_apply_workarounds))
return -EINVAL;
- if (!ds->ops->adjust_link) {
- if (missing_link_description) {
- dev_warn(ds->dev,
- "Skipping phylink registration for %s port %d\n",
- dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
- } else {
- if (ds->ops->phylink_mac_link_down)
- ds->ops->phylink_mac_link_down(ds, port,
- MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+ if (missing_link_description) {
+ dev_warn(ds->dev,
+ "Skipping phylink registration for %s port %d\n",
+ dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
+ } else {
+ dsa_shared_port_link_down(dp);
- return dsa_shared_port_phylink_register(dp);
- }
- return 0;
+ return dsa_shared_port_phylink_register(dp);
}
- dev_warn(ds->dev,
- "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
-
- if (of_phy_is_fixed_link(dp->dn))
- return dsa_shared_port_fixed_link_register_of(dp);
- else
- return dsa_shared_port_setup_phy_of(dp, true);
+ return 0;
}
void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->adjust_link && dp->pl) {
+ if (dp->pl) {
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
return;
}
-
- if (of_phy_is_fixed_link(dp->dn))
- of_phy_deregister_fixed_link(dp->dn);
- else
- dsa_shared_port_setup_phy_of(dp, false);
}
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr,
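With the legacy adjust_link paths removed above, EEE handling is gated by the new .support_eee() hook (see the dsa_user_{get,set}_eee() changes later in this diff). A hedged sketch of how a driver might use the generic helper; the foo_* names are assumptions and the ops struct is abbreviated to the relevant members:

static int foo_set_mac_eee(struct dsa_switch *ds, int port,
			   struct ethtool_keee *e)
{
	/* program the MAC-side LPI/EEE settings for this port */
	return 0;
}

static const struct dsa_switch_ops foo_switch_ops = {
	.support_eee	= dsa_supports_eee,	/* EEE supported on all user ports */
	.set_mac_eee	= foo_set_mac_eee,
};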
diff --git a/net/dsa/tag.c b/net/dsa/tag.c
index 6e402d49afd3..79ad105902d9 100644
--- a/net/dsa/tag.c
+++ b/net/dsa/tag.c
@@ -105,8 +105,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
p = netdev_priv(skb->dev);
- if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
- nskb = dsa_untag_bridge_pvid(skb);
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid ||
+ cpu_dp->ds->untag_vlan_aware_bridge_pvid)) {
+ nskb = dsa_software_vlan_untag(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index f6b9c73718df..5d80ddad4ff6 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -44,46 +44,81 @@ static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
return NULL;
}
-/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
- * frames as untagged, since the bridge will not untag them.
+/**
+ * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge can process tagged packets. Software like STP/PTP may not. The
+ * bridge can also process untagged packets, to the same effect as if they were
+ * tagged with the PVID of the ingress port. So packets tagged with the PVID of
+ * the bridge port must be software-untagged, to support both use cases.
*/
-static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
{
- struct dsa_port *dp = dsa_user_to_port(skb->dev);
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *dev = skb->dev;
- struct net_device *upper_dev;
- u16 vid, pvid, proto;
+ u16 pvid, proto;
int err;
- if (!br || br_vlan_enabled(br))
- return skb;
-
err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- /* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
- skb = skb_vlan_untag(skb);
- if (!skb)
- return NULL;
- }
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
- if (!skb_vlan_tag_present(skb))
- return skb;
+ if (vid == pvid && skb->vlan_proto == htons(proto))
+ __vlan_hwaccel_clear_tag(skb);
+}
- vid = skb_vlan_tag_get_id(skb);
+/**
+ * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
+ * on the plain port, or on a VLAN upper interface). Maybe packets are coming
+ * to software as tagged with a driver-defined VID which is NOT equal to the
+ * PVID of the bridge port (since the bridge is VLAN-unaware, its configuration
+ * should NOT be committed to hardware). DSA needs a method for this private
+ * VID to be communicated by software to it, and if packets are tagged with it,
+ * software-untag them. Note: the private VID may be different per bridge, to
+ * support the FDB isolation use case.
+ *
+ * FIXME: this is currently implemented based on the broken assumption that
+ * the "private VID" used by the driver in VLAN-unaware mode is equal to the
+ * bridge PVID. It should not be, except for a coincidence; the bridge PVID is
+ * irrelevant to the data path in the VLAN-unaware mode. Thus, the VID that
+ * this function removes is wrong.
+ *
+ * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
+ * to make the two independent. Only then, if there still remains a need to
+ * strip the private VID from packets, then a new ds->ops->get_private_vid()
+ * API shall be introduced to communicate to DSA what this VID is, which needs
+ * to be stripped here.
+ */
+static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ u16 pvid, proto;
+ int err;
- /* We already run under an RCU read-side critical section since
- * we are called from netif_receive_skb_list_internal().
- */
- err = br_vlan_get_pvid_rcu(dev, &pvid);
+ err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- if (vid != pvid)
- return skb;
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
+
+ if (vid != pvid || skb->vlan_proto != htons(proto))
+ return;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
@@ -95,10 +130,56 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
- if (upper_dev)
+ if (!upper_dev)
+ __vlan_hwaccel_clear_tag(skb);
+}
+
+/**
+ * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Receive path method for switches which send some packets as VLAN-tagged
+ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
+ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
+ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
+ *
+ * As a side effect of this method, any VLAN tag from the skb head is moved
+ * to hwaccel.
+ */
+static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u16 vid, proto;
+ int err;
+
+ /* software untagging for standalone ports not yet necessary */
+ if (!br)
+ return skb;
+
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
return skb;
- __vlan_hwaccel_clear_tag(skb);
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ if (br_vlan_enabled(br)) {
+ if (dp->ds->untag_vlan_aware_bridge_pvid)
+ dsa_software_untag_vlan_aware_bridge(skb, br, vid);
+ } else {
+ if (dp->ds->untag_bridge_pvid)
+ dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
+ }
return skb;
}
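The software untagging above only runs if the switch driver opts in; a minimal sketch of that opt-in from a hypothetical driver's .setup() callback, using the flag referenced by tag.c:

static int foo_setup(struct dsa_switch *ds)
{
	/* Frames from VLAN-aware bridge ports reach the CPU tagged with the
	 * bridge PVID even when they were untagged on the wire; ask DSA to
	 * software-untag them in dsa_software_vlan_untag(). */
	ds->untag_vlan_aware_bridge_pvid = true;

	return 0;
}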
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 71b26ae6db39..3ee53e28ec2e 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -286,7 +286,8 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
* be used for VLAN-unaware bridging.
*/
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
@@ -304,6 +305,8 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
+ *tx_fwd_offload = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
@@ -468,8 +471,8 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid)
+static struct net_device *
+dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid)
{
struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
@@ -495,30 +498,91 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
return NULL;
}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid)
+{
+ /* Always prefer precise source port information, if available */
+ if (source_port != -1 && switch_id != -1)
+ return dsa_conduit_find_user(conduit, switch_id, source_port);
+ else if (vbid >= 1)
+ return dsa_tag_8021q_find_port_by_vbid(conduit, vbid);
+
+ return dsa_find_designated_bridge_port_by_vid(conduit, vid);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_user);
+
+/**
+ * dsa_8021q_rcv - Decode source information from tag_8021q header
+ * @skb: RX socket buffer
+ * @source_port: pointer to storage for precise source port information.
+ * If this is known already from outside tag_8021q, the pre-initialized
+ * value is preserved. If not known, pass -1.
+ * @switch_id: similar to source_port.
+ * @vbid: pointer to storage for imprecise bridge ID. Must be pre-initialized
+ * with -1. If a positive value is returned, the source_port and switch_id
+ * are invalid.
+ * @vid: pointer to storage for original VID, in case tag_8021q decoding failed.
+ *
+ * If the packet has a tag_8021q header, decode it and set @source_port,
+ * @switch_id and @vbid, and strip the header. Otherwise set @vid and keep the
+ * header in the hwaccel area of the packet.
+ */
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid)
+ int *vbid, int *vid)
{
- u16 vid, tci;
+ int tmp_source_port, tmp_switch_id, tmp_vbid;
+ __be16 vlan_proto;
+ u16 tmp_vid, tci;
if (skb_vlan_tag_present(skb)) {
+ vlan_proto = skb->vlan_proto;
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+
+ vlan_proto = hdr->h_vlan_proto;
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
skb_pull_rcsum(skb, ETH_HLEN);
}
- vid = tci & VLAN_VID_MASK;
+ tmp_vid = tci & VLAN_VID_MASK;
+ if (!vid_is_dsa_8021q(tmp_vid)) {
+ /* Not a tag_8021q frame, so return the VID to the
+ * caller for further processing, and put the tag back
+ */
+ if (vid)
+ *vid = tmp_vid;
+
+ __vlan_hwaccel_put_tag(skb, vlan_proto, tci);
+
+ return;
+ }
- *source_port = dsa_8021q_rx_source_port(vid);
- *switch_id = dsa_8021q_rx_switch_id(vid);
+ tmp_source_port = dsa_8021q_rx_source_port(tmp_vid);
+ tmp_switch_id = dsa_8021q_rx_switch_id(tmp_vid);
+ tmp_vbid = dsa_tag_8021q_rx_vbid(tmp_vid);
+
+ /* Precise source port information is unknown when receiving from a
+ * VLAN-unaware bridging domain, and tmp_source_port and tmp_switch_id
+ * are zeroes in this case.
+ *
+ * Preserve the source information from hardware-specific mechanisms,
+ * if available. This allows us to not overwrite a valid source port
+ * and switch ID with less precise values.
+ */
+ if (tmp_vbid == 0 && *source_port == -1)
+ *source_port = tmp_source_port;
+ if (tmp_vbid == 0 && *switch_id == -1)
+ *switch_id = tmp_switch_id;
if (vbid)
- *vbid = dsa_tag_8021q_rx_vbid(vid);
+ *vbid = tmp_vbid;
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ return;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
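The new dsa_8021q_rcv() contract (outputs pre-initialized to -1, header stripped only for tag_8021q VIDs) pairs with dsa_tag_8021q_find_user() in every caller. A minimal receive-path sketch for a hypothetical tagger, following the same pattern as the converted taggers below:

static struct sk_buff *foo_rcv(struct sk_buff *skb, struct net_device *netdev)
{
	int src_port = -1, switch_id = -1, vbid = -1, vid = -1;

	/* Decode and strip the tag_8021q header, if one is present */
	dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);

	/* Prefer precise source info, then vbid, then imprecise VID lookup */
	skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id,
					   vid, vbid);
	if (!skb->dev)
		return NULL;

	dsa_default_offload_fwd_mark(skb);

	return skb;
}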
diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h
index 41f7167ac520..27b8906f99ec 100644
--- a/net/dsa/tag_8021q.h
+++ b/net/dsa/tag_8021q.h
@@ -14,10 +14,11 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid);
+ int *vbid, int *vid);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info);
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index ee7b272ab715..c33d4bf17929 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -66,7 +66,7 @@ static int ksz_connect(struct dsa_switch *ds)
if (!priv)
return -ENOMEM;
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
ret = PTR_ERR(xmit_worker);
@@ -111,9 +111,10 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag0 : zero-based value represents port
- * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ * (eg, 0x0=port1, 0x2=port3, 0x3=port4)
*/
+#define KSZ8795_TAIL_TAG_EG_PORT_M GENMASK(1, 0)
#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
@@ -141,7 +142,8 @@ static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+ return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
+ KSZ_EGRESS_TAG_LEN);
}
static const struct dsa_device_ops ksz8795_netdev_ops = {
@@ -176,8 +178,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
@@ -310,7 +313,7 @@ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
+ unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
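The tail tag port field is now extracted with named masks instead of a bare "& 7". Since both masks start at bit 0, the masking in ksz8795_rcv()/ksz9477_rcv() is equivalent to a FIELD_GET(); a small illustrative helper, assuming the KSZ9477 mask introduced above is in scope:

#include <linux/bitfield.h>

static unsigned int ksz9477_tail_tag_port(u8 tag0)
{
	/* GENMASK(2, 0) starts at bit 0, so this equals tag0 & 7 */
	return FIELD_GET(KSZ9477_TAIL_TAG_EG_PORT_M, tag0);
}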
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index e0e4300bfbd3..bf6608fc6be7 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -8,40 +8,6 @@
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
- * payload and move it into the DSA tag, which will make the switch classify
- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
- * which is the pvid of standalone and VLAN-unaware bridge ports.
- */
-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
- u64 *vlan_tci, u64 *tag_type)
-{
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct vlan_ethhdr *hdr;
- u16 proto, tci;
-
- if (!br || !br_vlan_enabled(br)) {
- *vlan_tci = 0;
- *tag_type = IFH_TAG_TYPE_C;
- return;
- }
-
- hdr = skb_vlan_eth_hdr(skb);
- br_vlan_get_proto(br, &proto);
-
- if (ntohs(hdr->h_vlan_proto) == proto) {
- vlan_remove_tag(skb, &tci);
- *vlan_tci = tci;
- } else {
- rcu_read_lock();
- br_vlan_get_pvid_rcu(br, &tci);
- rcu_read_unlock();
- *vlan_tci = tci;
- }
-
- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
-}
-
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
u32 rew_op = 0;
u64 qos_class;
- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
+ &tag_type);
qos_class = netdev_get_num_tc(netdev) ?
netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index b059381310fe..3929584791e4 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -79,9 +79,9 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int src_port, switch_id;
+ int src_port = -1, switch_id = -1;
- dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
+ dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port);
if (!skb->dev)
@@ -110,7 +110,7 @@ static int ocelot_connect(struct dsa_switch *ds)
if (!priv)
return -ENOMEM;
- priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
if (IS_ERR(priv->xmit_worker)) {
err = PTR_ERR(priv->xmit_worker);
kfree(priv);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 2717e9d7b612..02adec693811 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -75,7 +75,7 @@ sja1105_tagger_private(struct dsa_switch *ds)
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
-static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+static bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
@@ -121,7 +121,7 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
-static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
@@ -472,37 +472,14 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
-/* If the VLAN in the packet is a tag_8021q one, set @source_port and
- * @switch_id and strip the header. Otherwise set @vid and keep it in the
- * packet.
- */
-static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
- int *switch_id, int *vbid, u16 *vid)
-{
- struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
- u16 vlan_tci;
-
- if (skb_vlan_tag_present(skb))
- vlan_tci = skb_vlan_tag_get(skb);
- else
- vlan_tci = ntohs(hdr->h_vlan_TCI);
-
- if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
- return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
-
- /* Try our best with imprecise RX */
- *vid = vlan_tci & VLAN_VID_MASK;
-}
-
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
- u16 vid;
hdr = eth_hdr(skb);
is_link_local = sja1105_is_link_local(skb);
@@ -524,37 +501,16 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
/* Normal data plane traffic and link-local frames are tagged with
* a tag_8021q VLAN which we have to strip
*/
- if (sja1105_skb_has_tag_8021q(skb)) {
- int tmp_source_port = -1, tmp_switch_id = -1;
-
- sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
- &vid);
- /* Preserve the source information from the INCL_SRCPT option,
- * if available. This allows us to not overwrite a valid source
- * port and switch ID with zeroes when receiving link-local
- * frames from a VLAN-unaware bridged port (non-zero vbid) or a
- * VLAN-aware bridged port (non-zero vid). Furthermore, the
- * tag_8021q source port information is only of trust when the
- * vbid is 0 (precise port). Otherwise, tmp_source_port and
- * tmp_switch_id will be zeroes.
- */
- if (vbid == 0 && source_port == -1)
- source_port = tmp_source_port;
- if (vbid == 0 && switch_id == -1)
- switch_id = tmp_switch_id;
- } else if (source_port == -1 && switch_id == -1) {
+ if (sja1105_skb_has_tag_8021q(skb))
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+ else if (source_port == -1 && switch_id == -1)
/* Packets with no source information have no chance of
* getting accepted, drop them straight away.
*/
return NULL;
- }
- if (source_port != -1 && switch_id != -1)
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
- else if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -677,9 +633,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
bool host_only = false;
- u16 vid = 0;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
@@ -691,14 +646,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
-
- if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
- else
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
+
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -755,7 +707,7 @@ static int sja1105_connect(struct dsa_switch *ds)
spin_lock_init(&priv->meta_lock);
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
err = PTR_ERR(xmit_worker);
diff --git a/net/dsa/tag_vsc73xx_8021q.c b/net/dsa/tag_vsc73xx_8021q.c
new file mode 100644
index 000000000000..af121a9aff7f
--- /dev/null
+++ b/net/dsa/tag_vsc73xx_8021q.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (C) 2024 Pawel Dembicki <paweldembicki@gmail.com>
+ */
+#include <linux/dsa/8021q.h>
+
+#include "tag.h"
+#include "tag_8021q.h"
+
+#define VSC73XX_8021Q_NAME "vsc73xx-8021q"
+
+static struct sk_buff *
+vsc73xx_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_user_to_port(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
+ u8 pcp;
+
+ if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br_vlan_enabled(br))
+ return skb;
+
+ tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
+ }
+
+ pcp = netdev_txq_to_tc(netdev, queue_mapping);
+
+ return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+}
+
+static struct sk_buff *
+vsc73xx_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ int src_port = -1, switch_id = -1, vbid = -1, vid = -1;
+
+ dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id,
+ vid, vbid);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port\n");
+ return NULL;
+ }
+
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops vsc73xx_8021q_netdev_ops = {
+ .name = VSC73XX_8021Q_NAME,
+ .proto = DSA_TAG_PROTO_VSC73XX_8021Q,
+ .xmit = vsc73xx_xmit,
+ .rcv = vsc73xx_rcv,
+ .needed_headroom = VLAN_HLEN,
+ .promisc_on_conduit = true,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DSA tag driver for VSC73XX family of switches, using VLAN");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_VSC73XX_8021Q, VSC73XX_8021Q_NAME);
+
+module_dsa_tag_driver(vsc73xx_8021q_netdev_ops);
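For the new tagger to be selected at all, the VSC73xx switch driver must report the matching protocol. A hedged sketch of the driver-side hook (the function name is an assumption; the enum value is the one referenced by the tag driver above):

static enum dsa_tag_protocol
vsc73xx_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_VSC73XX_8021Q;
}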
diff --git a/net/dsa/trace.h b/net/dsa/trace.h
index 567f29a39707..83f3e5f78491 100644
--- a/net/dsa/trace.h
+++ b/net/dsa/trace.h
@@ -39,8 +39,8 @@ DECLARE_EVENT_CLASS(dsa_port_addr_op_hw,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
@@ -98,8 +98,8 @@ DECLARE_EVENT_CLASS(dsa_port_addr_op_refcount,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
@@ -157,8 +157,8 @@ DECLARE_EVENT_CLASS(dsa_port_addr_del_not_found,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
@@ -199,7 +199,7 @@ TRACE_EVENT(dsa_lag_fdb_add_hw,
),
TP_fast_assign(
- __assign_str(dev, lag_dev->name);
+ __assign_str(dev);
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
dsa_db_print(db, __entry->db_buf);
@@ -227,7 +227,7 @@ TRACE_EVENT(dsa_lag_fdb_add_bump,
),
TP_fast_assign(
- __assign_str(dev, lag_dev->name);
+ __assign_str(dev);
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
dsa_db_print(db, __entry->db_buf);
@@ -255,7 +255,7 @@ TRACE_EVENT(dsa_lag_fdb_del_hw,
),
TP_fast_assign(
- __assign_str(dev, lag_dev->name);
+ __assign_str(dev);
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
dsa_db_print(db, __entry->db_buf);
@@ -283,7 +283,7 @@ TRACE_EVENT(dsa_lag_fdb_del_drop,
),
TP_fast_assign(
- __assign_str(dev, lag_dev->name);
+ __assign_str(dev);
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
dsa_db_print(db, __entry->db_buf);
@@ -310,7 +310,7 @@ TRACE_EVENT(dsa_lag_fdb_del_not_found,
),
TP_fast_assign(
- __assign_str(dev, lag_dev->name);
+ __assign_str(dev);
ether_addr_copy(__entry->addr, addr);
__entry->vid = vid;
dsa_db_print(db, __entry->db_buf);
@@ -338,8 +338,8 @@ DECLARE_EVENT_CLASS(dsa_vlan_op_hw,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
__entry->vid = vlan->vid;
__entry->flags = vlan->flags;
@@ -383,8 +383,8 @@ DECLARE_EVENT_CLASS(dsa_vlan_op_refcount,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
__entry->vid = vlan->vid;
__entry->flags = vlan->flags;
@@ -426,8 +426,8 @@ TRACE_EVENT(dsa_vlan_del_not_found,
),
TP_fast_assign(
- __assign_str(dev, dev_name(dp->ds->dev));
- __assign_str(kind, dsa_port_kind(dp));
+ __assign_str(dev);
+ __assign_str(kind);
__entry->port = dp->index;
__entry->vid = vlan->vid;
),
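The trace.h conversions drop the second argument from __assign_str(); in the current tracepoint infrastructure the source string is taken from the matching __string() declaration, so repeating it at assignment time only risks the two going out of sync. A sketch of the assumed pairing (fragment of an event definition, not standalone code):

TP_STRUCT__entry(
	__string(dev, dev_name(dp->ds->dev))	/* source string recorded here */
	__field(int, port)
),

TP_fast_assign(
	__assign_str(dev);	/* copies from the __string() source above */
	__entry->port = dp->index;
),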
diff --git a/net/dsa/user.c b/net/dsa/user.c
index b15e71cc342c..291ab1b4acc4 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -210,7 +210,7 @@ static int dsa_user_sync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_uc(struct net_device *dev,
@@ -230,7 +230,7 @@ static int dsa_user_unsync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_sync_mc(struct net_device *dev,
@@ -250,7 +250,7 @@ static int dsa_user_sync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_mc(struct net_device *dev,
@@ -270,7 +270,7 @@ static int dsa_user_unsync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
void dsa_user_sync_ha(struct net_device *dev)
@@ -352,63 +352,85 @@ void dsa_user_mii_bus_init(struct dsa_switch *ds)
/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_user_to_conduit(dev)->ifindex;
+ return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
-static int dsa_user_open(struct net_device *dev)
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err;
- err = dev_open(conduit, NULL);
- if (err < 0) {
- netdev_err(dev, "failed to open conduit %s\n", conduit->name);
- goto out;
- }
-
if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
+ err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
if (err)
goto out;
}
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) {
- err = dev_uc_add(conduit, dev->dev_addr);
+ if (!ether_addr_equal(addr, conduit->dev_addr)) {
+ err = dev_uc_add(conduit, addr);
if (err < 0)
goto del_host_addr;
}
- err = dsa_port_enable_rt(dp, dev->phydev);
- if (err)
- goto del_unicast;
-
return 0;
-del_unicast:
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
- dev_uc_del(conduit, dev->dev_addr);
del_host_addr:
if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_port_standalone_host_fdb_del(dp, addr, 0);
out:
return err;
}
-static int dsa_user_close(struct net_device *dev)
+void dsa_user_host_uc_uninstall(struct net_device *dev)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- dsa_port_disable_rt(dp);
-
if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
dev_uc_del(conduit, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+}
+
+static int dsa_user_open(struct net_device *dev)
+{
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ int err;
+
+ err = dev_open(conduit, NULL);
+ if (err < 0) {
+ netdev_err(dev, "failed to open conduit %s\n", conduit->name);
+ goto out;
+ }
+
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err)
+ goto out;
+
+ err = dsa_port_enable_rt(dp, dev->phydev);
+ if (err)
+ goto out_del_host_uc;
+
+ return 0;
+
+out_del_host_uc:
+ dsa_user_host_uc_uninstall(dev);
+out:
+ return err;
+}
+
+static int dsa_user_close(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+
+ dsa_port_disable_rt(dp);
+
+ dsa_user_host_uc_uninstall(dev);
return 0;
}
@@ -448,7 +470,6 @@ static void dsa_user_set_rx_mode(struct net_device *dev)
static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
- struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
@@ -470,34 +491,16 @@ static int dsa_user_set_mac_address(struct net_device *dev, void *a)
if (!(dev->flags & IFF_UP))
goto out_change_dev_addr;
- if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
- if (err)
- return err;
- }
-
- if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) {
- err = dev_uc_add(conduit, addr->sa_data);
- if (err < 0)
- goto del_unicast;
- }
-
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
- dev_uc_del(conduit, dev->dev_addr);
+ err = dsa_user_host_uc_install(dev, addr->sa_data);
+ if (err)
+ return err;
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_user_host_uc_uninstall(dev);
out_change_dev_addr:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
-
-del_unicast:
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
-
- return err;
}
struct dsa_user_dump_ctx {
@@ -512,12 +515,13 @@ dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct dsa_user_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -875,8 +879,8 @@ static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1039,15 +1043,12 @@ static void dsa_user_get_strings(struct net_device *dev,
struct dsa_switch *ds = dp->ds;
if (stringset == ETH_SS_STATS) {
- int len = ETH_GSTRING_LEN;
-
- strscpy_pad(data, "tx_packets", len);
- strscpy_pad(data + len, "tx_bytes", len);
- strscpy_pad(data + 2 * len, "rx_packets", len);
- strscpy_pad(data + 3 * len, "rx_bytes", len);
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "rx_bytes");
if (ds->ops->get_strings)
- ds->ops->get_strings(ds, dp->index, stringset,
- data + 4 * len);
+ ds->ops->get_strings(ds, dp->index, stringset, data);
} else if (stringset == ETH_SS_TEST) {
net_selftest_get_strings(data);
}
@@ -1149,6 +1150,16 @@ dsa_user_get_rmon_stats(struct net_device *dev,
ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}
+static void dsa_user_get_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_ts_stats)
+ ds->ops->get_ts_stats(ds, dp->index, ts_stats);
+}
+
static void dsa_user_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
@@ -1222,14 +1233,18 @@ static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
+ return -EOPNOTSUPP;
+
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
+ if (!dev->phydev)
return -ENODEV;
if (!ds->ops->set_mac_eee)
@@ -1242,22 +1257,18 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_set_eee(dp->pl, e);
}
-static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- int ret;
-
- /* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
- return -ENODEV;
- if (!ds->ops->get_mac_eee)
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
return -EOPNOTSUPP;
- ret = ds->ops->get_mac_eee(ds, dp->index, e);
- if (ret)
- return ret;
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev)
+ return -ENODEV;
return phylink_ethtool_get_eee(dp->pl, e);
}
@@ -1305,8 +1316,7 @@ static int dsa_user_set_pauseparam(struct net_device *dev,
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int dsa_user_netpoll_setup(struct net_device *dev,
- struct netpoll_info *ni)
+static int dsa_user_netpoll_setup(struct net_device *dev)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1362,7 +1372,7 @@ dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
static int
dsa_user_add_cls_matchall_mirred(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
- bool ingress)
+ bool ingress, bool ingress_target)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct dsa_port *dp = dsa_user_to_port(dev);
@@ -1374,11 +1384,19 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
struct dsa_port *to_dp;
int err;
- if (!ds->ops->port_mirror_add)
+ if (cls->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload \"protocol all\" matchall filter");
return -EOPNOTSUPP;
+ }
+
+ if (!ds->ops->port_mirror_add) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch does not support mirroring operation");
+ return -EOPNOTSUPP;
+ }
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
act = &cls->rule->action.entries[0];
@@ -1386,8 +1404,36 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
if (!act->dev)
return -EINVAL;
- if (!dsa_user_dev_check(act->dev))
+ if (dsa_user_dev_check(act->dev)) {
+ if (ingress_target) {
+ /* We can only fulfill this using software assist */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to ingress of DSA user port if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ } else {
+ to_dp = dsa_user_to_port(act->dev);
+ }
+ } else {
+ /* Handle mirroring to foreign target ports as a mirror towards
+ * the CPU. The software tc rule will take the packets from
+ * there.
+ */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to CPU if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ }
+
+ if (dp->ds != to_dp->ds) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cross-chip mirroring not implemented");
return -EOPNOTSUPP;
+ }
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
@@ -1396,9 +1442,6 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
mall_tc_entry->cookie = cls->cookie;
mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
-
- to_dp = dsa_user_to_port(act->dev);
-
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
@@ -1439,8 +1482,7 @@ dsa_user_add_cls_matchall_police(struct net_device *dev,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
@@ -1478,17 +1520,30 @@ static int dsa_user_add_cls_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
- int err = -EOPNOTSUPP;
+ const struct flow_action *action = &cls->rule->action;
+ struct netlink_ext_ack *extack = cls->common.extack;
- if (cls->common.protocol == htons(ETH_P_ALL) &&
- flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
- err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress);
- else if (flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
- err = dsa_user_add_cls_matchall_police(dev, cls, ingress);
+ if (!flow_offload_has_one_action(action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload matchall filter with more than one action");
+ return -EOPNOTSUPP;
+ }
- return err;
+ switch (action->entries[0].id) {
+ case FLOW_ACTION_MIRRED:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ false);
+ case FLOW_ACTION_MIRRED_INGRESS:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ true);
+ case FLOW_ACTION_POLICE:
+ return dsa_user_add_cls_matchall_police(dev, cls, ingress);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown action");
+ break;
+ }
+
+ return -EOPNOTSUPP;
}
static void dsa_user_del_cls_matchall(struct net_device *dev,
@@ -1726,7 +1781,7 @@ static int dsa_user_set_rxnfc(struct net_device *dev,
}
static int dsa_user_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts)
+ struct kernel_ethtool_ts_info *ts)
{
struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
@@ -2120,7 +2175,7 @@ int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
if (err)
goto out_port_failed;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
dsa_bridge_mtu_normalization(dp);
@@ -2137,6 +2192,32 @@ out_conduit_failed:
}
static int __maybe_unused
+dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->port_set_apptrust)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_set_apptrust(ds, port, sel, nsel);
+}
+
+static int __maybe_unused
+dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->port_get_apptrust)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_get_apptrust(ds, port, sel, nsel);
+}
+
+static int __maybe_unused
dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
struct dsa_port *dp = dsa_user_to_port(dev);
@@ -2163,6 +2244,58 @@ dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
return 0;
}
+/* Update the DSCP prio entries on all user ports of the switch in case
+ * the switch supports global DSCP prio instead of per port DSCP prios.
+ */
+static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app, bool del)
+{
+ int (*setdel)(struct net_device *dev, struct dcb_app *app);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_port *other_dp;
+ int err, restore_err;
+
+ if (del)
+ setdel = dcb_ieee_delapp;
+ else
+ setdel = dcb_ieee_setapp;
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *user = other_dp->user;
+
+ if (!user || user == dev)
+ continue;
+
+ err = setdel(user, app);
+ if (err)
+ goto err_try_to_restore;
+ }
+
+ return 0;
+
+err_try_to_restore:
+
+ /* Revert logic to restore previous state of app entries */
+ if (!del)
+ setdel = dcb_ieee_delapp;
+ else
+ setdel = dcb_ieee_setapp;
+
+ dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
+ struct net_device *user = other_dp->user;
+
+ if (!user || user == dev)
+ continue;
+
+ restore_err = setdel(user, app);
+ if (restore_err)
+ netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
+ }
+
+ return err;
+}
+
static int __maybe_unused
dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
@@ -2194,6 +2327,17 @@ dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
return err;
}
+ if (!ds->dscp_prio_mapping_is_global)
+ return 0;
+
+ err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
+ if (err) {
+ if (ds->ops->port_del_dscp_prio)
+ ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
+ dcb_ieee_delapp(dev, app);
+ return err;
+ }
+
return 0;
}
@@ -2264,6 +2408,18 @@ dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
return err;
}
+ if (!ds->dscp_prio_mapping_is_global)
+ return 0;
+
+ err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
+ if (err) {
+ if (ds->ops->port_add_dscp_prio)
+ ds->ops->port_add_dscp_prio(ds, port, dscp,
+ app->priority);
+ dcb_ieee_setapp(dev, app);
+ return err;
+ }
+
return 0;
}
@@ -2355,6 +2511,7 @@ static const struct ethtool_ops dsa_user_ethtool_ops = {
.get_eth_mac_stats = dsa_user_get_eth_mac_stats,
.get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
.get_rmon_stats = dsa_user_get_rmon_stats,
+ .get_ts_stats = dsa_user_get_ts_stats,
.set_wol = dsa_user_set_wol,
.get_wol = dsa_user_get_wol,
.set_eee = dsa_user_set_eee,
@@ -2376,6 +2533,8 @@ static const struct ethtool_ops dsa_user_ethtool_ops = {
static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
.ieee_setapp = dsa_user_dcbnl_ieee_setapp,
.ieee_delapp = dsa_user_dcbnl_ieee_delapp,
+ .dcbnl_setapptrust = dsa_user_dcbnl_set_apptrust,
+ .dcbnl_getapptrust = dsa_user_dcbnl_get_apptrust,
};
static void dsa_user_get_stats64(struct net_device *dev,
@@ -2429,7 +2588,7 @@ static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_fill_forward_path = dsa_user_fill_forward_path,
};
-static struct device_type dsa_type = {
+static const struct device_type dsa_type = {
.name = "dsa",
};
@@ -2445,7 +2604,7 @@ EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
static void dsa_user_phylink_fixed_state(struct phylink_config *config,
struct phylink_link_state *state)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
struct dsa_switch *ds = dp->ds;
/* No need to check that this operation is valid, the callback would
@@ -2536,11 +2695,12 @@ void dsa_user_setup_tagger(struct net_device *user)
user->features = conduit->vlan_features | NETIF_F_HW_TC;
user->hw_features |= NETIF_F_HW_TC;
- user->features |= NETIF_F_LLTX;
if (user->needed_tailroom)
user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ user->lltx = true;
}
int dsa_user_suspend(struct net_device *user_dev)
@@ -2625,11 +2785,7 @@ int dsa_user_create(struct dsa_port *port)
user_dev->vlan_features = conduit->vlan_features;
p = netdev_priv(user_dev);
- user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!user_dev->tstats) {
- free_netdev(user_dev);
- return -ENOMEM;
- }
+ user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
@@ -2695,7 +2851,6 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
port->user = NULL;
return ret;
@@ -2716,7 +2871,6 @@ void dsa_user_destroy(struct net_device *user_dev)
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
}
@@ -2782,12 +2936,6 @@ int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
ERR_PTR(err));
}
- /* If the port doesn't have its own MAC address and relies on the DSA
- * conduit's one, inherit it again from the new DSA conduit.
- */
- if (is_zero_ether_addr(dp->mac))
- eth_hw_addr_inherit(dev, conduit);
-
return 0;
out_revert_conduit_link:
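The global DSCP helper added above applies an app entry to every other user port and, on failure, walks back over the ports already configured in reverse order. A minimal standalone sketch of that apply-then-roll-back pattern follows; port_apply(), port_revert() and NPORTS are illustrative stand-ins, not DSA API:

#include <stdio.h>

#define NPORTS 4

/* Pretend to configure a port; fail on port 2 to exercise the rollback. */
static int port_apply(int port)
{
	printf("apply port %d\n", port);
	return port == 2 ? -1 : 0;
}

static void port_revert(int port)
{
	printf("revert port %d\n", port);
}

static int apply_all(void)
{
	int i, err = 0;

	for (i = 0; i < NPORTS; i++) {
		err = port_apply(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* Undo only the ports configured before the failing one. */
	while (--i >= 0)
		port_revert(i);
	return err;
}

int main(void)
{
	return apply_all() ? 1 : 0;
}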
diff --git a/net/dsa/user.h b/net/dsa/user.h
index 996069130bea..016884bead3c 100644
--- a/net/dsa/user.h
+++ b/net/dsa/user.h
@@ -42,6 +42,8 @@ int dsa_user_suspend(struct net_device *user_dev);
int dsa_user_resume(struct net_device *user_dev);
int dsa_user_register_notifier(void);
void dsa_user_unregister_notifier(void);
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr);
+void dsa_user_host_uc_uninstall(struct net_device *dev);
void dsa_user_sync_ha(struct net_device *dev);
void dsa_user_unsync_ha(struct net_device *dev);
void dsa_user_setup_tagger(struct net_device *user);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 2edc8b796a4e..4e3651101b86 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -161,20 +161,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- eth = (struct ethhdr *)skb->data;
- skb_pull_inline(skb, ETH_HLEN);
-
- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
- dev->dev_addr))) {
- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
- } else {
- skb->pkt_type = PACKET_OTHERHOST;
- }
- }
+ eth = eth_skb_pull_mac(skb);
+ eth_skb_pkt_type(skb, dev);
/*
* Some variants of DSA tagging don't have an ethertype field
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 504f954a1b28..a1490c4afe6b 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -8,4 +8,5 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \
- module.o pse-pd.o plca.o mm.o
+ module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o \
+ phy.o tsconfig.o
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index 0515d6604b3b..f0883357d12e 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits,
return 0;
}
+/**
+ * ethnl_bitmap32_equal() - Compare two bitmaps
+ * @map1: first bitmap
+ * @map2: second bitmap
+ * @nbits: bit size to compare
+ *
+ * Return: true if the first @nbits bits are equal, false otherwise
+ */
+static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2,
+ unsigned int nbits)
+{
+ if (memcmp(map1, map2, nbits / 32 * sizeof(u32)))
+ return false;
+ if (nbits % 32 == 0)
+ return true;
+ return !((map1[nbits / 32] ^ map2[nbits / 32]) &
+ ethnl_lower_bits(nbits % 32));
+}
+
static int
ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
const struct nlattr *attr, struct nlattr **tb,
ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod)
{
+ u32 *saved_bitmap = NULL;
struct nlattr *bit_attr;
bool no_mask;
int rem;
@@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
}
no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
- if (no_mask)
- ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
+ if (no_mask) {
+ unsigned int nwords = DIV_ROUND_UP(nbits, 32);
+ unsigned int nbytes = nwords * sizeof(u32);
+ bool dummy;
+
+ /* The bitmap size is only the size of the map part without
+ * its mask part.
+ */
+ saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
+ if (!saved_bitmap)
+ return -ENOMEM;
+ memcpy(saved_bitmap, bitmap, nbytes);
+ ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
+ }
nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
bool old_val, new_val;
@@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
NL_SET_ERR_MSG_ATTR(extack, bit_attr,
"only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
+ kfree(saved_bitmap);
return -EINVAL;
}
ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
names, extack);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(saved_bitmap);
return ret;
+ }
old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
if (new_val != old_val) {
if (new_val)
bitmap[idx / 32] |= ((u32)1 << (idx % 32));
else
bitmap[idx / 32] &= ~((u32)1 << (idx % 32));
- *mod = true;
+ if (!no_mask)
+ *mod = true;
}
}
+ if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits))
+ *mod = true;
+
+ kfree(saved_bitmap);
return 0;
}
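ethnl_bitmap32_equal() above compares only the low @nbits bits, masking off the unused high bits of the final 32-bit word. A rough standalone illustration of that partial-word masking; lower_bits() is an assumed stand-in for ethnl_lower_bits():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed semantics of ethnl_lower_bits(): mask of the n lowest bits. */
static uint32_t lower_bits(unsigned int n)
{
	return n ? (uint32_t)-1 >> (32 - n) : 0;
}

static bool bitmap32_equal(const uint32_t *a, const uint32_t *b,
			   unsigned int nbits)
{
	if (memcmp(a, b, nbits / 32 * sizeof(uint32_t)))
		return false;
	if (nbits % 32 == 0)
		return true;
	/* Only the low nbits%32 bits of the final word are significant. */
	return !((a[nbits / 32] ^ b[nbits / 32]) & lower_bits(nbits % 32));
}

int main(void)
{
	uint32_t a[2] = { 0xffffffff, 0x0000000f };
	uint32_t b[2] = { 0xffffffff, 0xdeadbe0f }; /* differs only above bit 35 */

	printf("%d\n", bitmap32_equal(a, b, 36)); /* prints 1 */
	return 0;
}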
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index 06a151165c31..84096f6b0236 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -13,7 +13,7 @@
const struct nla_policy ethnl_cable_test_act_policy[] = {
[ETHTOOL_A_CABLE_TEST_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static int ethnl_cable_test_started(struct phy_device *phydev, u8 cmd)
@@ -58,6 +58,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
struct net_device *dev;
int ret;
@@ -69,12 +70,16 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
return ret;
dev = req_info.dev;
- if (!dev->phydev) {
+
+ rtnl_lock();
+ phydev = ethnl_req_get_phydev(&req_info, tb,
+ ETHTOOL_A_CABLE_TEST_HEADER,
+ info->extack);
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
- goto out_dev_put;
+ goto out_rtnl;
}
- rtnl_lock();
ops = ethtool_phy_ops;
if (!ops || !ops->start_cable_test) {
ret = -EOPNOTSUPP;
@@ -85,17 +90,15 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_rtnl;
- ret = ops->start_cable_test(dev->phydev, info->extack);
+ ret = ops->start_cable_test(phydev, info->extack);
ethnl_ops_complete(dev);
if (!ret)
- ethnl_cable_test_started(dev->phydev,
- ETHTOOL_MSG_CABLE_TEST_NTF);
+ ethnl_cable_test_started(phydev, ETHTOOL_MSG_CABLE_TEST_NTF);
out_rtnl:
rtnl_unlock();
-out_dev_put:
ethnl_parse_header_dev_put(&req_info);
return ret;
}
@@ -160,7 +163,8 @@ void ethnl_cable_test_finished(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(ethnl_cable_test_finished);
-int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result)
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src)
{
struct nlattr *nest;
int ret = -EMSGSIZE;
@@ -173,6 +177,10 @@ int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result)
goto err;
if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_RESULT_CODE, result))
goto err;
+ if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) {
+ if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_RESULT_SRC, src))
+ goto err;
+ }
nla_nest_end(phydev->skb, nest);
return 0;
@@ -181,9 +189,10 @@ err:
nla_nest_cancel(phydev->skb, nest);
return ret;
}
-EXPORT_SYMBOL_GPL(ethnl_cable_test_result);
+EXPORT_SYMBOL_GPL(ethnl_cable_test_result_with_src);
-int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm)
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src)
{
struct nlattr *nest;
int ret = -EMSGSIZE;
@@ -197,6 +206,11 @@ int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm)
goto err;
if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_CM, cm))
goto err;
+ if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) {
+ if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_SRC,
+ src))
+ goto err;
+ }
nla_nest_end(phydev->skb, nest);
return 0;
@@ -205,11 +219,7 @@ err:
nla_nest_cancel(phydev->skb, nest);
return ret;
}
-EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length);
-
-struct cable_test_tdr_req_info {
- struct ethnl_req_info base;
-};
+EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length_with_src);
static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 },
@@ -220,7 +230,7 @@ static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
const struct nla_policy ethnl_cable_test_tdr_act_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .type = NLA_NESTED },
};
@@ -309,6 +319,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
struct phy_tdr_config cfg;
struct net_device *dev;
int ret;
@@ -321,10 +332,6 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
return ret;
dev = req_info.dev;
- if (!dev->phydev) {
- ret = -EOPNOTSUPP;
- goto out_dev_put;
- }
ret = ethnl_act_cable_test_tdr_cfg(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG],
info, &cfg);
@@ -332,6 +339,14 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
goto out_dev_put;
rtnl_lock();
+ phydev = ethnl_req_get_phydev(&req_info, tb,
+ ETHTOOL_A_CABLE_TEST_TDR_HEADER,
+ info->extack);
+ if (IS_ERR_OR_NULL(phydev)) {
+ ret = -EOPNOTSUPP;
+ goto out_rtnl;
+ }
+
ops = ethtool_phy_ops;
if (!ops || !ops->start_cable_test_tdr) {
ret = -EOPNOTSUPP;
@@ -342,12 +357,12 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_rtnl;
- ret = ops->start_cable_test_tdr(dev->phydev, info->extack, &cfg);
+ ret = ops->start_cable_test_tdr(phydev, info->extack, &cfg);
ethnl_ops_complete(dev);
if (!ret)
- ethnl_cable_test_started(dev->phydev,
+ ethnl_cable_test_started(phydev,
ETHTOOL_MSG_CABLE_TEST_TDR_NTF);
out_rtnl:
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 7b4bbd674bae..ca4f80282448 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -114,8 +114,7 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
struct net_device *dev = req_info->dev;
struct ethtool_channels channels = {};
struct nlattr **tb = info->attrs;
- u32 err_attr, max_rxfh_in_use;
- u64 max_rxnfc_in_use;
+ u32 err_attr;
int ret;
dev->ethtool_ops->get_channels(dev, &channels);
@@ -166,22 +165,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
return -EINVAL;
}
- /* ensure the new Rx count fits within the configured Rx flow
- * indirection table/rxnfc settings
- */
- if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
- max_rxnfc_in_use = 0;
- if (!netif_is_rxfh_configured(dev) ||
- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
- max_rxfh_in_use = 0;
- if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
- return -EINVAL;
- }
- if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
- return -EINVAL;
- }
+ ret = ethtool_check_max_channel(dev, channels, info);
+ if (ret)
+ return ret;
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h
new file mode 100644
index 000000000000..1e790413db0e
--- /dev/null
+++ b/net/ethtool/cmis.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH 120
+#define ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH 2048
+#define ETHTOOL_CMIS_CDB_CMD_PAGE 0x9F
+#define ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR 0x50
+
+/**
+ * struct ethtool_cmis_cdb - CDB commands parameters
+ * @cmis_rev: CMIS major revision.
+ * @read_write_len_ext: Allowable additional number of byte octets to the LPL
+ *			 in READ or WRITE CDB commands.
+ * @max_completion_time: Maximum CDB command completion time in msec.
+ */
+struct ethtool_cmis_cdb {
+ u8 cmis_rev;
+ u8 read_write_len_ext;
+ u16 max_completion_time;
+};
+
+enum ethtool_cmis_cdb_cmd_id {
+ ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0x0000,
+ ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 0x0040,
+ ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 0x0041,
+ ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 0x0101,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 0x0103,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 0x0104,
+ ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 0x0107,
+ ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 0x0109,
+ ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 0x010A,
+};
+
+/**
+ * struct ethtool_cmis_cdb_request - CDB commands request fields as described in
+ * the CMIS standard
+ * @id: Command ID.
+ * @epl_len: EPL memory length.
+ * @lpl_len: LPL memory length.
+ * @chk_code: Check code for the previous field and the payload.
+ * @resv1: Added to match the CMIS standard request continuity.
+ * @resv2: Added to match the CMIS standard request continuity.
+ * @payload: Payload for the CDB commands.
+ * @epl: Extended payload for the CDB commands.
+ */
+struct ethtool_cmis_cdb_request {
+ __be16 id;
+ struct_group(body,
+ __be16 epl_len;
+ u8 lpl_len;
+ u8 chk_code;
+ u8 resv1;
+ u8 resv2;
+ u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH];
+ );
+ u8 *epl; /* Everything above this field checksummed. */
+};
+
+#define CDB_F_COMPLETION_VALID BIT(0)
+#define CDB_F_STATUS_VALID BIT(1)
+#define CDB_F_MODULE_STATE_VALID BIT(2)
+
+/**
+ * struct ethtool_cmis_cdb_cmd_args - CDB commands execution arguments
+ * @req: CDB command fields as described in the CMIS standard.
+ * @max_duration: Maximum duration time for command completion in msec.
+ * @read_write_len_ext: Allowable additional number of byte octets to the LPL
+ *	in READ or WRITE commands.
+ * @msleep_pre_rpl: Waiting time before checking reply in msec.
+ * @rpl_exp_len: Expected reply length in bytes.
+ * @flags: Validation flags for CDB commands.
+ * @err_msg: Error message to be sent to user space.
+ */
+struct ethtool_cmis_cdb_cmd_args {
+ struct ethtool_cmis_cdb_request req;
+ u16 max_duration;
+ u8 read_write_len_ext;
+ u8 msleep_pre_rpl;
+ u8 rpl_exp_len;
+ u8 flags;
+ char *err_msg;
+};
+
+/**
+ * struct ethtool_cmis_cdb_rpl_hdr - CDB commands reply header arguments
+ * @rpl_len: Reply length.
+ * @rpl_chk_code: Reply check code.
+ */
+struct ethtool_cmis_cdb_rpl_hdr {
+ u8 rpl_len;
+ u8 rpl_chk_code;
+};
+
+/**
+ * struct ethtool_cmis_cdb_rpl - CDB commands reply arguments
+ * @hdr: CDB commands reply header arguments.
+ * @payload: Payload for the CDB commands reply.
+ */
+struct ethtool_cmis_cdb_rpl {
+ struct ethtool_cmis_cdb_rpl_hdr hdr;
+ u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH];
+};
+
+u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs);
+u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs);
+
+void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
+ enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
+ u8 lpl_len, u8 *epl, u16 epl_len,
+ u16 max_duration, u8 read_write_len_ext,
+ u16 msleep_pre_rpl, u8 rpl_exp_len,
+ u8 flags);
+
+void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags);
+
+void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length);
+
+struct ethtool_cmis_cdb *
+ethtool_cmis_cdb_init(struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params);
+void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb);
+
+int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag,
+ u16 max_duration, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8), u8 *state);
+
+int ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args);
diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
new file mode 100644
index 000000000000..d159dc121bde
--- /dev/null
+++ b/net/ethtool/cmis_cdb.c
@@ -0,0 +1,678 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool.h>
+#include <linux/jiffies.h>
+
+#include "common.h"
+#include "module_fw.h"
+#include "cmis.h"
+
+/* For accessing the LPL field on page 9Fh, the allowable length extension is
+ * min(i, 15) byte octets where i specifies the allowable additional number of
+ * byte octets in a READ or a WRITE.
+ */
+u32 ethtool_cmis_get_max_lpl_size(u8 num_of_byte_octs)
+{
+ return 8 * (1 + min_t(u8, num_of_byte_octs, 15));
+}
+
+/* For accessing the EPL field on page 9Fh, the allowable length extension is
+ * min(i, 255) byte octets where i specifies the allowable additional number of
+ * byte octets in a READ or a WRITE.
+ */
+u32 ethtool_cmis_get_max_epl_size(u8 num_of_byte_octs)
+{
+ return 8 * (1 + min_t(u8, num_of_byte_octs, 255));
+}
+
+void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
+ enum ethtool_cmis_cdb_cmd_id cmd, u8 *lpl,
+ u8 lpl_len, u8 *epl, u16 epl_len,
+ u16 max_duration, u8 read_write_len_ext,
+ u16 msleep_pre_rpl, u8 rpl_exp_len, u8 flags)
+{
+ args->req.id = cpu_to_be16(cmd);
+ args->req.lpl_len = lpl_len;
+ if (lpl) {
+ memcpy(args->req.payload, lpl, args->req.lpl_len);
+ args->read_write_len_ext =
+ ethtool_cmis_get_max_lpl_size(read_write_len_ext);
+ }
+ if (epl) {
+ args->req.epl_len = cpu_to_be16(epl_len);
+ args->req.epl = epl;
+ args->read_write_len_ext =
+ ethtool_cmis_get_max_epl_size(read_write_len_ext);
+ }
+
+ args->max_duration = max_duration;
+ args->msleep_pre_rpl = msleep_pre_rpl;
+ args->rpl_exp_len = rpl_exp_len;
+ args->flags = flags;
+ args->err_msg = NULL;
+}
+
+void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length)
+{
+ page_data->page = page;
+ page_data->offset = offset;
+ page_data->length = length;
+ page_data->i2c_address = ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR;
+}
+
+#define CMIS_REVISION_PAGE 0x00
+#define CMIS_REVISION_OFFSET 0x01
+
+struct cmis_rev_rpl {
+ u8 rev;
+};
+
+static u8 cmis_rev_rpl_major(struct cmis_rev_rpl *rpl)
+{
+ return rpl->rev >> 4;
+}
+
+static int cmis_rev_major_get(struct net_device *dev, u8 *rev_major)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {0};
+ struct netlink_ext_ack extack = {};
+ struct cmis_rev_rpl rpl = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_REVISION_PAGE,
+ CMIS_REVISION_OFFSET, sizeof(rpl));
+ page_data.data = (u8 *)&rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ *rev_major = cmis_rev_rpl_major(&rpl);
+
+ return 0;
+}
+
+#define CMIS_CDB_ADVERTISEMENT_PAGE 0x01
+#define CMIS_CDB_ADVERTISEMENT_OFFSET 0xA3
+
+/* Based on section 8.4.11 "CDB Messaging Support Advertisement" in CMIS
+ * standard revision 5.2.
+ */
+struct cmis_cdb_advert_rpl {
+ u8 inst_supported;
+ u8 read_write_len_ext;
+ u8 resv1;
+ u8 resv2;
+};
+
+static u8 cmis_cdb_advert_rpl_inst_supported(struct cmis_cdb_advert_rpl *rpl)
+{
+ return rpl->inst_supported >> 6;
+}
+
+static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {};
+ struct cmis_cdb_advert_rpl rpl = {};
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_CDB_ADVERTISEMENT_PAGE,
+ CMIS_CDB_ADVERTISEMENT_OFFSET, sizeof(rpl));
+ page_data.data = (u8 *)&rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ if (!cmis_cdb_advert_rpl_inst_supported(&rpl)) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "CDB functionality is not supported",
+ NULL);
+ return -EOPNOTSUPP;
+ }
+
+ cdb->read_write_len_ext = rpl.read_write_len_ext;
+
+ return 0;
+}
+
+#define CMIS_PASSWORD_ENTRY_PAGE 0x00
+#define CMIS_PASSWORD_ENTRY_OFFSET 0x7A
+
+struct cmis_password_entry_pl {
+ __be32 password;
+};
+
+/* See section 9.3.1 "CMD 0000h: Query Status" in CMIS standard revision 5.2.
+ * struct cmis_cdb_query_status_pl and struct cmis_cdb_query_status_rpl are
+ * structured layouts of the flat arrays,
+ * struct ethtool_cmis_cdb_request::payload and
+ * struct ethtool_cmis_cdb_rpl::payload respectively.
+ */
+struct cmis_cdb_query_status_pl {
+ u16 response_delay;
+};
+
+struct cmis_cdb_query_status_rpl {
+ u8 length;
+ u8 status;
+};
+
+static int
+cmis_cdb_validate_password(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct cmis_cdb_query_status_pl qs_pl = {0};
+ struct ethtool_module_eeprom page_data = {};
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_password_entry_pl pe_pl = {};
+ struct cmis_cdb_query_status_rpl *rpl;
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_PASSWORD_ENTRY_PAGE,
+ CMIS_PASSWORD_ENTRY_OFFSET, sizeof(pe_pl));
+ page_data.data = (u8 *)&pe_pl;
+
+ pe_pl = *((struct cmis_password_entry_pl *)page_data.data);
+ pe_pl.password = params->password;
+ err = ops->set_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS,
+ (u8 *)&qs_pl, sizeof(qs_pl), NULL, 0, 0,
+ cdb->read_write_len_ext, 1000,
+ sizeof(*rpl),
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Query Status command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_query_status_rpl *)args.req.payload;
+ if (!rpl->length || !rpl->status) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Password was not accepted",
+ NULL);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Some CDB commands assert the CDB completion flag only from CMIS
+ * revision 5 onwards. Therefore, check the relevant validity flag only when
+ * the revision supports it.
+ */
+void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags)
+{
+ *flags |= cmis_rev >= 5 ? CDB_F_COMPLETION_VALID : 0;
+}
+
+#define CMIS_CDB_MODULE_FEATURES_RESV_DATA 34
+
+/* See section 9.4.1 "CMD 0040h: Module Features" in CMIS standard revision 5.2.
+ * struct cmis_cdb_module_features_rpl is a structured layout of the flat
+ * array, ethtool_cmis_cdb_rpl::payload.
+ */
+struct cmis_cdb_module_features_rpl {
+ u8 resv1[CMIS_CDB_MODULE_FEATURES_RESV_DATA];
+ __be16 max_completion_time;
+};
+
+static u16
+cmis_cdb_module_features_completion_time(struct cmis_cdb_module_features_rpl *rpl)
+{
+ return be16_to_cpu(rpl->max_completion_time);
+}
+
+static int cmis_cdb_module_features_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_module_features_rpl *rpl;
+ u8 flags = CDB_F_STATUS_VALID;
+ int err;
+
+ ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags);
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES,
+ NULL, 0, NULL, 0, 0,
+ cdb->read_write_len_ext, 1000,
+ sizeof(*rpl), flags);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Module Features command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_module_features_rpl *)args.req.payload;
+ cdb->max_completion_time =
+ cmis_cdb_module_features_completion_time(rpl);
+
+ return 0;
+}
+
+struct ethtool_cmis_cdb *
+ethtool_cmis_cdb_init(struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb *cdb;
+ int err;
+
+ cdb = kzalloc(sizeof(*cdb), GFP_KERNEL);
+ if (!cdb)
+ return ERR_PTR(-ENOMEM);
+
+ err = cmis_rev_major_get(dev, &cdb->cmis_rev);
+ if (err < 0)
+ goto err;
+
+ if (cdb->cmis_rev < 4) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "CMIS revision doesn't support module firmware flashing",
+ NULL);
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ err = cmis_cdb_advertisement_get(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err;
+
+ if (params->password_valid) {
+ err = cmis_cdb_validate_password(cdb, dev, params, ntf_params);
+ if (err < 0)
+ goto err;
+ }
+
+ err = cmis_cdb_module_features_get(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err;
+
+ return cdb;
+
+err:
+ ethtool_cmis_cdb_fini(cdb);
+ return ERR_PTR(err);
+}
+
+void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb)
+{
+ kfree(cdb);
+}
+
+static bool is_completed(u8 data)
+{
+ return !!(data & 0x40);
+}
+
+#define CMIS_CDB_STATUS_SUCCESS 0x01
+
+static bool status_success(u8 data)
+{
+ return data == CMIS_CDB_STATUS_SUCCESS;
+}
+
+#define CMIS_CDB_STATUS_FAIL 0x40
+
+static bool status_fail(u8 data)
+{
+ return data & CMIS_CDB_STATUS_FAIL;
+}
+
+struct cmis_wait_for_cond_rpl {
+ u8 state;
+};
+
+static int
+ethtool_cmis_module_poll(struct net_device *dev,
+ struct cmis_wait_for_cond_rpl *rpl, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8))
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {0};
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl));
+ page_data.data = (u8 *)rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err_once(dev, "%s\n", extack._msg);
+ return -EBUSY;
+ }
+
+ if ((*cond_success)(rpl->state))
+ return 0;
+
+ if (*cond_fail && (*cond_fail)(rpl->state))
+ return -EIO;
+
+ return -EBUSY;
+}
+
+int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag,
+ u16 max_duration, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8),
+ u8 *state)
+{
+ struct cmis_wait_for_cond_rpl rpl = {};
+ unsigned long end;
+ int err;
+
+ if (!(flags & flag))
+ return 0;
+
+ if (max_duration == 0)
+ max_duration = U16_MAX;
+
+ end = jiffies + msecs_to_jiffies(max_duration);
+ do {
+ err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success,
+ cond_fail);
+ if (err != -EBUSY)
+ goto out;
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success,
+ cond_fail);
+ if (err == -EBUSY)
+ err = -ETIMEDOUT;
+
+out:
+ *state = rpl.state;
+ return err;
+}
+
+#define CMIS_CDB_COMPLETION_FLAG_OFFSET 0x08
+
+static int cmis_cdb_wait_for_completion(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 flag;
+ int err;
+
+	/* Some vendors demand a waiting time before checking the completion
+	 * flag in some CDB commands.
+ */
+ msleep(args->msleep_pre_rpl);
+
+ err = ethtool_cmis_wait_for_cond(dev, args->flags,
+ CDB_F_COMPLETION_VALID,
+ args->max_duration,
+ CMIS_CDB_COMPLETION_FLAG_OFFSET,
+ is_completed, NULL, &flag);
+ if (err < 0)
+		args->err_msg = "Completion flag was not set in time";
+
+ return err;
+}
+
+#define CMIS_CDB_STATUS_OFFSET 0x25
+
+static void cmis_cdb_status_fail_msg_get(u8 status, char **err_msg)
+{
+ switch (status) {
+ case 0b10000001:
+ *err_msg = "CDB Status is in progress: Busy capturing command";
+ break;
+ case 0b10000010:
+ *err_msg =
+ "CDB Status is in progress: Busy checking/validating command";
+ break;
+ case 0b10000011:
+ *err_msg = "CDB Status is in progress: Busy executing";
+ break;
+ case 0b01000000:
+ *err_msg = "CDB status failed: no specific failure";
+ break;
+ case 0b01000010:
+ *err_msg =
+ "CDB status failed: Parameter range error or parameter not supported";
+ break;
+ case 0b01000101:
+ *err_msg = "CDB status failed: CdbChkCode error";
+ break;
+ case 0b01000110:
+ *err_msg = "CDB status failed: Password error";
+ break;
+ default:
+ *err_msg = "Unknown failure reason";
+ }
+}
+
+static int cmis_cdb_wait_for_status(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 status;
+ int err;
+
+	/* Some vendors demand a waiting time before checking the status in
+	 * some CDB commands.
+ */
+ msleep(args->msleep_pre_rpl);
+
+ err = ethtool_cmis_wait_for_cond(dev, args->flags, CDB_F_STATUS_VALID,
+ args->max_duration,
+ CMIS_CDB_STATUS_OFFSET,
+ status_success, status_fail, &status);
+ if (err < 0 && !args->err_msg)
+ cmis_cdb_status_fail_msg_get(status, &args->err_msg);
+
+ return err;
+}
+
+#define CMIS_CDB_REPLY_OFFSET 0x86
+
+static int cmis_cdb_process_reply(struct net_device *dev,
+ struct ethtool_module_eeprom *page_data,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 rpl_hdr_len = sizeof(struct ethtool_cmis_cdb_rpl_hdr);
+ u8 rpl_exp_len = args->rpl_exp_len + rpl_hdr_len;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct netlink_ext_ack extack = {};
+ struct ethtool_cmis_cdb_rpl *rpl;
+ int err;
+
+ if (!args->rpl_exp_len)
+ return 0;
+
+ ethtool_cmis_page_init(page_data, ETHTOOL_CMIS_CDB_CMD_PAGE,
+ CMIS_CDB_REPLY_OFFSET, rpl_exp_len);
+ page_data->data = kmalloc(page_data->length, GFP_KERNEL);
+ if (!page_data->data)
+ return -ENOMEM;
+
+ err = ops->get_module_eeprom_by_page(dev, page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ goto out;
+ }
+
+ rpl = (struct ethtool_cmis_cdb_rpl *)page_data->data;
+ if ((args->rpl_exp_len > rpl->hdr.rpl_len + rpl_hdr_len) ||
+ !rpl->hdr.rpl_chk_code) {
+ err = -EIO;
+ goto out;
+ }
+
+ args->req.lpl_len = rpl->hdr.rpl_len;
+ memcpy(args->req.payload, rpl->payload, args->req.lpl_len);
+
+out:
+ kfree(page_data->data);
+ return err;
+}
+
+static int
+__ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length, void *data)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(page_data, page, offset, length);
+ page_data->data = kmemdup(data, page_data->length, GFP_KERNEL);
+ if (!page_data->data)
+ return -ENOMEM;
+
+ err = ops->set_module_eeprom_by_page(dev, page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ }
+
+ kfree(page_data->data);
+ return err;
+}
+
+#define CMIS_CDB_EPL_PAGE_START 0xA0
+#define CMIS_CDB_EPL_PAGE_END 0xAF
+#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_START 128
+#define CMIS_CDB_EPL_FW_BLOCK_OFFSET_END 255
+
+static int
+ethtool_cmis_cdb_execute_epl_cmd(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args,
+ struct ethtool_module_eeprom *page_data)
+{
+ u16 epl_len = be16_to_cpu(args->req.epl_len);
+ u32 bytes_written = 0;
+ u8 page;
+ int err;
+
+ for (page = CMIS_CDB_EPL_PAGE_START;
+ page <= CMIS_CDB_EPL_PAGE_END && bytes_written < epl_len; page++) {
+ u16 offset = CMIS_CDB_EPL_FW_BLOCK_OFFSET_START;
+
+ while (offset <= CMIS_CDB_EPL_FW_BLOCK_OFFSET_END &&
+ bytes_written < epl_len) {
+ u32 bytes_left = epl_len - bytes_written;
+ u16 space_left, bytes_to_write;
+
+ space_left = CMIS_CDB_EPL_FW_BLOCK_OFFSET_END - offset + 1;
+ bytes_to_write = min_t(u16, bytes_left,
+ min_t(u16, space_left,
+ args->read_write_len_ext));
+
+ err = __ethtool_cmis_cdb_execute_cmd(dev, page_data,
+ page, offset,
+ bytes_to_write,
+ args->req.epl + bytes_written);
+ if (err < 0)
+ return err;
+
+ offset += bytes_to_write;
+ bytes_written += bytes_to_write;
+ }
+ }
+ return 0;
+}
+
+static u8 cmis_cdb_calc_checksum(const void *data, size_t size)
+{
+ const u8 *bytes = (const u8 *)data;
+ u8 checksum = 0;
+
+ for (size_t i = 0; i < size; i++)
+ checksum += bytes[i];
+
+ return ~checksum;
+}
+
+#define CMIS_CDB_CMD_ID_OFFSET 0x80
+
+int ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ struct ethtool_module_eeprom page_data = {};
+ u32 offset;
+ int err;
+
+ args->req.chk_code =
+ cmis_cdb_calc_checksum(&args->req,
+ offsetof(struct ethtool_cmis_cdb_request,
+ epl));
+
+ if (args->req.lpl_len > args->read_write_len_ext) {
+ args->err_msg = "LPL length is longer than CDB read write length extension allows";
+ return -EINVAL;
+ }
+
+ /* According to the CMIS standard, there are two options to trigger the
+ * CDB commands. The default option is triggering the command by writing
+	 * the CMDID bytes. Therefore, the command is split into two calls:
+	 * first, everything except the CMDID field, and then the CMDID
+	 * field itself.
+ */
+ offset = CMIS_CDB_CMD_ID_OFFSET +
+ offsetof(struct ethtool_cmis_cdb_request, body);
+ err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data,
+ ETHTOOL_CMIS_CDB_CMD_PAGE, offset,
+ sizeof(args->req.body),
+ &args->req.body);
+ if (err < 0)
+ return err;
+
+ if (args->req.epl_len) {
+ err = ethtool_cmis_cdb_execute_epl_cmd(dev, args, &page_data);
+ if (err < 0)
+ return err;
+ }
+
+ offset = CMIS_CDB_CMD_ID_OFFSET +
+ offsetof(struct ethtool_cmis_cdb_request, id);
+ err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data,
+ ETHTOOL_CMIS_CDB_CMD_PAGE, offset,
+ sizeof(args->req.id),
+ &args->req.id);
+ if (err < 0)
+ return err;
+
+ err = cmis_cdb_wait_for_completion(dev, args);
+ if (err < 0)
+ return err;
+
+ err = cmis_cdb_wait_for_status(dev, args);
+ if (err < 0)
+ return err;
+
+ return cmis_cdb_process_reply(dev, &page_data, args);
+}
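The request check code computed by cmis_cdb_calc_checksum() above is the bitwise complement of the byte-wise sum over everything before the EPL pointer, taken while the chk_code field itself is still zero. A small standalone sketch with made-up request header bytes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One's complement of the byte-wise sum, as in cmis_cdb_calc_checksum(). */
static uint8_t cdb_checksum(const void *data, size_t size)
{
	const uint8_t *bytes = data;
	uint8_t sum = 0;

	for (size_t i = 0; i < size; i++)
		sum += bytes[i];

	return (uint8_t)~sum;
}

int main(void)
{
	/* Made-up header bytes: CMDID, EPL length, LPL length, zeroed
	 * chk_code and reserved fields (ordering as in the struct above).
	 */
	uint8_t req[8] = { 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 };

	printf("chk_code = 0x%02x\n", cdb_checksum(req, sizeof(req)));
	return 0;
}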
diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c
new file mode 100644
index 000000000000..48aef6220f00
--- /dev/null
+++ b/net/ethtool/cmis_fw_update.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+
+#include "common.h"
+#include "module_fw.h"
+#include "cmis.h"
+
+struct cmis_fw_update_fw_mng_features {
+ u8 start_cmd_payload_size;
+ u8 write_mechanism;
+ u16 max_duration_start;
+ u16 max_duration_write;
+ u16 max_duration_complete;
+};
+
+/* See section 9.4.2 "CMD 0041h: Firmware Management Features" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_fw_mng_features_rpl is a structured layout of the flat
+ * array, ethtool_cmis_cdb_rpl::payload.
+ */
+struct cmis_cdb_fw_mng_features_rpl {
+ u8 resv1;
+ u8 resv2;
+ u8 start_cmd_payload_size;
+ u8 resv3;
+ u8 read_write_len_ext;
+ u8 write_mechanism;
+ u8 resv4;
+ u8 resv5;
+ __be16 max_duration_start;
+ __be16 resv6;
+ __be16 max_duration_write;
+ __be16 max_duration_complete;
+ __be16 resv7;
+};
+
+enum cmis_cdb_fw_write_mechanism {
+ CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0x00,
+ CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01,
+ CMIS_CDB_FW_WRITE_MECHANISM_EPL = 0x10,
+ CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11,
+};
+
+static int
+cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct cmis_fw_update_fw_mng_features *fw_mng,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_fw_mng_features_rpl *rpl;
+ u8 flags = CDB_F_STATUS_VALID;
+ int err;
+
+ ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags);
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES,
+ NULL, 0, NULL, 0,
+ cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000,
+ sizeof(*rpl), flags);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "FW Management Features command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload;
+ if (rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_NONE) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "CDB write mechanism is not supported",
+ NULL);
+ return -EOPNOTSUPP;
+ }
+
+	/* Above, we used the read_write_len_ext obtained from the CDB
+	 * advertisement. Update it with the value from the module features
+	 * query, which is specific to Firmware Management Commands
+	 * (IDs 0100h-01FFh).
+ */
+ cdb->read_write_len_ext = rpl->read_write_len_ext;
+ fw_mng->start_cmd_payload_size = rpl->start_cmd_payload_size;
+ fw_mng->write_mechanism =
+ rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL ?
+ CMIS_CDB_FW_WRITE_MECHANISM_LPL :
+ CMIS_CDB_FW_WRITE_MECHANISM_EPL;
+ fw_mng->max_duration_start = be16_to_cpu(rpl->max_duration_start);
+ fw_mng->max_duration_write = be16_to_cpu(rpl->max_duration_write);
+ fw_mng->max_duration_complete = be16_to_cpu(rpl->max_duration_complete);
+
+ return 0;
+}
+
+/* See section 9.7.2 "CMD 0101h: Start Firmware Download" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_start_fw_download_pl is a structured layout of the
+ * flat array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_start_fw_download_pl {
+ __struct_group(cmis_cdb_start_fw_download_pl_h, head, /* no attrs */,
+ __be32 image_size;
+ __be32 resv1;
+ );
+ u8 vendor_data[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH -
+ sizeof(struct cmis_cdb_start_fw_download_pl_h)];
+};
+
+static int
+cmis_fw_update_start_download(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ u8 vendor_data_size = fw_mng->start_cmd_payload_size;
+ struct cmis_cdb_start_fw_download_pl pl = {};
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ u8 lpl_len;
+ int err;
+
+ pl.image_size = cpu_to_be32(fw_update->fw->size);
+ memcpy(pl.vendor_data, fw_update->fw->data, vendor_data_size);
+
+ lpl_len = offsetof(struct cmis_cdb_start_fw_download_pl,
+ vendor_data[vendor_data_size]);
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD,
+ (u8 *)&pl, lpl_len, NULL, 0,
+ fw_mng->max_duration_start,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(fw_update->dev,
+ &fw_update->ntf_params,
+ "Start FW download command failed",
+ args.err_msg);
+
+ return err;
+}
+
+/* See section 9.7.4 "CMD 0103h: Write Firmware Block LPL" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_write_fw_block_lpl_pl is a structured layout of the
+ * flat array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_write_fw_block_lpl_pl {
+ __be32 block_address;
+ u8 fw_block[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - sizeof(__be32)];
+};
+
+static int
+cmis_fw_update_write_image_lpl(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ u8 start = fw_mng->start_cmd_payload_size;
+ u32 offset, max_block_size, max_lpl_len;
+ u32 image_size = fw_update->fw->size;
+ int err;
+
+ max_lpl_len = min_t(u32,
+ ethtool_cmis_get_max_lpl_size(cdb->read_write_len_ext),
+ ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH);
+ max_block_size =
+ max_lpl_len - sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl,
+ block_address);
+
+ for (offset = start; offset < image_size; offset += max_block_size) {
+ struct cmis_cdb_write_fw_block_lpl_pl pl = {
+ .block_address = cpu_to_be32(offset - start),
+ };
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ u32 block_size, lpl_len;
+
+ ethnl_module_fw_flash_ntf_in_progress(fw_update->dev,
+ &fw_update->ntf_params,
+ offset - start,
+ image_size);
+ block_size = min_t(u32, max_block_size, image_size - offset);
+ memcpy(pl.fw_block, &fw_update->fw->data[offset], block_size);
+ lpl_len = block_size +
+ sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl,
+ block_address);
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL,
+ (u8 *)&pl, lpl_len, NULL, 0,
+ fw_mng->max_duration_write,
+ cdb->read_write_len_ext, 1, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(fw_update->dev,
+ &fw_update->ntf_params,
+ "Write FW block LPL command failed",
+ args.err_msg);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct cmis_cdb_write_fw_block_epl_pl {
+ u8 fw_block[ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH];
+};
+
+static int
+cmis_fw_update_write_image_epl(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ u8 start = fw_mng->start_cmd_payload_size;
+ u32 image_size = fw_update->fw->size;
+ u32 offset, lpl_len;
+ int err;
+
+ lpl_len = sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl,
+ block_address);
+
+ for (offset = start; offset < image_size;
+ offset += ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH) {
+ struct cmis_cdb_write_fw_block_lpl_pl lpl = {
+ .block_address = cpu_to_be32(offset - start),
+ };
+ struct cmis_cdb_write_fw_block_epl_pl *epl;
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ u32 epl_len;
+
+ ethnl_module_fw_flash_ntf_in_progress(fw_update->dev,
+ &fw_update->ntf_params,
+ offset - start,
+ image_size);
+
+ epl_len = min_t(u32, ETHTOOL_CMIS_CDB_EPL_MAX_PL_LENGTH,
+ image_size - offset);
+ epl = kmalloc_array(epl_len, sizeof(u8), GFP_KERNEL);
+ if (!epl)
+ return -ENOMEM;
+
+ memcpy(epl->fw_block, &fw_update->fw->data[offset], epl_len);
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL,
+ (u8 *)&lpl, lpl_len, (u8 *)epl,
+ epl_len,
+ fw_mng->max_duration_write,
+ cdb->read_write_len_ext, 1, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args);
+ kfree(epl);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(fw_update->dev,
+ &fw_update->ntf_params,
+ "Write FW block EPL command failed",
+ args.err_msg);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct cmis_fw_update_fw_mng_features *fw_mng,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD,
+ NULL, 0, NULL, 0,
+ fw_mng->max_duration_complete,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Complete FW download command failed",
+ args.err_msg);
+
+ return err;
+}
+
+static int
+cmis_fw_update_download_image(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ int err;
+
+ err = cmis_fw_update_start_download(cdb, fw_update, fw_mng);
+ if (err < 0)
+ return err;
+
+ if (fw_mng->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL) {
+ err = cmis_fw_update_write_image_lpl(cdb, fw_update, fw_mng);
+ if (err < 0)
+ return err;
+ } else {
+ err = cmis_fw_update_write_image_epl(cdb, fw_update, fw_mng);
+ if (err < 0)
+ return err;
+ }
+
+ err = cmis_fw_update_complete_download(cdb, fw_update->dev, fw_mng,
+ &fw_update->ntf_params);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+enum {
+ CMIS_MODULE_LOW_PWR = 1,
+ CMIS_MODULE_READY = 3,
+};
+
+static bool module_is_ready(u8 data)
+{
+ u8 state = (data >> 1) & 7;
+
+ return state == CMIS_MODULE_READY || state == CMIS_MODULE_LOW_PWR;
+}
+
+#define CMIS_MODULE_READY_MAX_DURATION_MSEC 1000
+#define CMIS_MODULE_STATE_OFFSET 3
+
+static int
+cmis_fw_update_wait_for_module_state(struct net_device *dev, u8 flags)
+{
+ u8 state;
+
+ return ethtool_cmis_wait_for_cond(dev, flags, CDB_F_MODULE_STATE_VALID,
+ CMIS_MODULE_READY_MAX_DURATION_MSEC,
+ CMIS_MODULE_STATE_OFFSET,
+ module_is_ready, NULL, &state);
+}
+
+/* See section 9.7.10 "CMD 0109h: Run Firmware Image" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_run_fw_image_pl is a structured layout of the flat
+ * array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_run_fw_image_pl {
+ u8 resv1;
+ u8 image_to_run;
+ u16 delay_to_reset;
+};
+
+static int
+cmis_fw_update_run_image(struct ethtool_cmis_cdb *cdb, struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_run_fw_image_pl pl = {0};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE,
+ (u8 *)&pl, sizeof(pl), NULL, 0,
+ cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_MODULE_STATE_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Run image command failed",
+ args.err_msg);
+ return err;
+ }
+
+ err = cmis_fw_update_wait_for_module_state(dev, args.flags);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Module is not ready on time after reset",
+ NULL);
+
+ return err;
+}
+
+static int
+cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE,
+ NULL, 0, NULL, 0,
+ cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Commit image command failed",
+ args.err_msg);
+
+ return err;
+}
+
+static int cmis_fw_update_reset(struct net_device *dev)
+{
+ __u32 reset_data = ETH_RESET_PHY;
+
+ return dev->ethtool_ops->reset(dev, &reset_data);
+}
+
+void
+ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *fw_update)
+{
+ struct ethnl_module_fw_flash_ntf_params *ntf_params =
+ &fw_update->ntf_params;
+ struct cmis_fw_update_fw_mng_features fw_mng = {0};
+ struct net_device *dev = fw_update->dev;
+ struct ethtool_cmis_cdb *cdb;
+ int err;
+
+ cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params);
+ if (IS_ERR(cdb))
+ goto err_send_ntf;
+
+ ethnl_module_fw_flash_ntf_start(dev, ntf_params);
+
+ err = cmis_fw_update_fw_mng_features_get(cdb, dev, &fw_mng, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_download_image(cdb, fw_update, &fw_mng);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_run_image(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ /* The CDB command "Run Firmware Image" resets the firmware, so the new
+ * one might have different settings.
+ * Free the old CDB instance, and init a new one.
+ */
+ ethtool_cmis_cdb_fini(cdb);
+
+ cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params);
+ if (IS_ERR(cdb))
+ goto err_send_ntf;
+
+ err = cmis_fw_update_commit_image(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_reset(dev);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ ethnl_module_fw_flash_ntf_complete(dev, ntf_params);
+ ethtool_cmis_cdb_fini(cdb);
+ return;
+
+err_cdb_fini:
+ ethtool_cmis_cdb_fini(cdb);
+err_send_ntf:
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params, NULL, NULL);
+}
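The LPL write path above sends the first start_cmd_payload_size bytes with the Start command and then streams the remainder in blocks no larger than the LPL length minus the 4-byte block address. A standalone sketch of the slicing arithmetic with arbitrary example sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t image_size = 1000;  /* example firmware image size in bytes */
	uint32_t start = 16;         /* start_cmd_payload_size (example) */
	uint32_t max_lpl_len = 120;  /* capped by ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH */
	uint32_t max_block = max_lpl_len - sizeof(uint32_t); /* room after block_address */

	for (uint32_t off = start; off < image_size; off += max_block) {
		uint32_t left = image_size - off;
		uint32_t block = left < max_block ? left : max_block;

		printf("block_address=%u len=%u\n", off - start, block);
	}
	return 0;
}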
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 83112c1a71ae..3e18ca1ccc5e 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/dim.h>
#include "netlink.h"
#include "common.h"
@@ -82,6 +83,14 @@ static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
static int coalesce_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
+ int modersz = nla_total_size(0) + /* _PROFILE_IRQ_MODERATION, nest */
+ nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_USEC */
+ nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_PKTS */
+ nla_total_size(sizeof(u32)); /* _IRQ_MODERATION_COMPS */
+
+ int total_modersz = nla_total_size(0) + /* _{R,T}X_PROFILE, nest */
+ modersz * NET_DIM_PARAMS_NUM_PROFILES;
+
return nla_total_size(sizeof(u32)) + /* _RX_USECS */
nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES */
nla_total_size(sizeof(u32)) + /* _RX_USECS_IRQ */
@@ -108,7 +117,8 @@ static int coalesce_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_RX */
nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_BYTES */
nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_FRAMES */
- nla_total_size(sizeof(u32)); /* _TX_AGGR_TIME_USECS */
+ nla_total_size(sizeof(u32)) + /* _TX_AGGR_TIME_USECS */
+ total_modersz * 2; /* _{R,T}X_PROFILE */
}
static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val,
@@ -127,6 +137,74 @@ static bool coalesce_put_bool(struct sk_buff *skb, u16 attr_type, u32 val,
return nla_put_u8(skb, attr_type, !!val);
}
+/**
+ * coalesce_put_profile - fill reply with a nla nest of per-profile child nests.
+ * @skb: socket buffer the message is stored in
+ * @attr_type: nest attr type ETHTOOL_A_COALESCE_*X_PROFILE
+ * @profile: data passed to userspace
+ * @coal_flags: modifiable parameters supported by the driver
+ *
+ * Put a dim profile nest attribute. Refer to ETHTOOL_A_PROFILE_IRQ_MODERATION.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int coalesce_put_profile(struct sk_buff *skb, u16 attr_type,
+ const struct dim_cq_moder *profile,
+ u8 coal_flags)
+{
+ struct nlattr *profile_attr, *moder_attr;
+ int i, ret;
+
+ if (!profile || !coal_flags)
+ return 0;
+
+ profile_attr = nla_nest_start(skb, attr_type);
+ if (!profile_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < NET_DIM_PARAMS_NUM_PROFILES; i++) {
+ moder_attr = nla_nest_start(skb,
+ ETHTOOL_A_PROFILE_IRQ_MODERATION);
+ if (!moder_attr) {
+ ret = -EMSGSIZE;
+ goto cancel_profile;
+ }
+
+ if (coal_flags & DIM_COALESCE_USEC) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_USEC,
+ profile[i].usec);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ if (coal_flags & DIM_COALESCE_PKTS) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_PKTS,
+ profile[i].pkts);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ if (coal_flags & DIM_COALESCE_COMPS) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_COMPS,
+ profile[i].comps);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ nla_nest_end(skb, moder_attr);
+ }
+
+ nla_nest_end(skb, profile_attr);
+
+ return 0;
+
+cancel_moder:
+ nla_nest_cancel(skb, moder_attr);
+cancel_profile:
+ nla_nest_cancel(skb, profile_attr);
+ return ret;
+}
+
static int coalesce_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
@@ -135,6 +213,8 @@ static int coalesce_fill_reply(struct sk_buff *skb,
const struct kernel_ethtool_coalesce *kcoal = &data->kernel_coalesce;
const struct ethtool_coalesce *coal = &data->coalesce;
u32 supported = data->supported_params;
+ struct dim_irq_moder *moder;
+ int ret = 0;
if (coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS,
coal->rx_coalesce_usecs, supported) ||
@@ -192,11 +272,42 @@ static int coalesce_fill_reply(struct sk_buff *skb,
kcoal->tx_aggr_time_usecs, supported))
return -EMSGSIZE;
- return 0;
+ if (!req_base->dev || !req_base->dev->irq_moder)
+ return 0;
+
+ moder = req_base->dev->irq_moder;
+ rcu_read_lock();
+ if (moder->profile_flags & DIM_PROFILE_RX) {
+ ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_RX_PROFILE,
+ rcu_dereference(moder->rx_profile),
+ moder->coal_flags);
+ if (ret)
+ goto out;
+ }
+
+ if (moder->profile_flags & DIM_PROFILE_TX)
+ ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_TX_PROFILE,
+ rcu_dereference(moder->tx_profile),
+ moder->coal_flags);
+
+out:
+ rcu_read_unlock();
+ return ret;
}
/* COALESCE_SET */
+static const struct nla_policy coalesce_irq_moderation_policy[] = {
+ [ETHTOOL_A_IRQ_MODERATION_USEC] = { .type = NLA_U32 },
+ [ETHTOOL_A_IRQ_MODERATION_PKTS] = { .type = NLA_U32 },
+ [ETHTOOL_A_IRQ_MODERATION_COMPS] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy coalesce_profile_policy[] = {
+ [ETHTOOL_A_PROFILE_IRQ_MODERATION] =
+ NLA_POLICY_NESTED(coalesce_irq_moderation_policy),
+};
+
const struct nla_policy ethnl_coalesce_set_policy[] = {
[ETHTOOL_A_COALESCE_HEADER] =
NLA_POLICY_NESTED(ethnl_header_policy),
@@ -227,6 +338,10 @@ const struct nla_policy ethnl_coalesce_set_policy[] = {
[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_PROFILE] =
+ NLA_POLICY_NESTED(coalesce_profile_policy),
+ [ETHTOOL_A_COALESCE_TX_PROFILE] =
+ NLA_POLICY_NESTED(coalesce_profile_policy),
};
static int
@@ -234,6 +349,7 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
struct genl_info *info)
{
const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+ struct dim_irq_moder *irq_moder = req_info->dev->irq_moder;
struct nlattr **tb = info->attrs;
u32 supported_params;
u16 a;
@@ -243,6 +359,12 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
/* make sure that only supported parameters are present */
supported_params = ops->supported_coalesce_params;
+ if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_RX)
+ supported_params |= ETHTOOL_COALESCE_RX_PROFILE;
+
+ if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_TX)
+ supported_params |= ETHTOOL_COALESCE_TX_PROFILE;
+
for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++)
if (tb[a] && !(supported_params & attr_to_mask(a))) {
NL_SET_ERR_MSG_ATTR(info->extack, tb[a],
@@ -253,6 +375,138 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
return 1;
}
+/**
+ * ethnl_update_irq_moder - update a specific field in the given profile
+ * @irq_moder: per-device structure holding DIM-related moderation state
+ * @irq_field: field in profile to modify
+ * @attr_type: attr type ETHTOOL_A_IRQ_MODERATION_*
+ * @tb: netlink attribute with new values or null
+ * @coal_bit: DIM_COALESCE_* bit from coal_flags
+ * @mod: pointer to bool for modification tracking
+ * @extack: netlink extended ack
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int ethnl_update_irq_moder(struct dim_irq_moder *irq_moder,
+ u16 *irq_field, u16 attr_type,
+ struct nlattr **tb,
+ u8 coal_bit, bool *mod,
+ struct netlink_ext_ack *extack)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!tb[attr_type])
+ return 0;
+
+ if (irq_moder->coal_flags & coal_bit) {
+ val = nla_get_u32(tb[attr_type]);
+ if (*irq_field == val)
+ return 0;
+
+ *irq_field = val;
+ *mod = true;
+ } else {
+ NL_SET_BAD_ATTR(extack, tb[attr_type]);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/**
+ * ethnl_update_profile - parse a profile nest with child nests from userspace.
+ * @dev: netdevice to update the profile
+ * @dst: profile obtained from the driver and modified by ethnl_update_profile.
+ * @nests: nest attr ETHTOOL_A_COALESCE_*X_PROFILE to set profile.
+ * @mod: pointer to bool for modification tracking
+ * @extack: Netlink extended ack
+ *
+ * Layout of nests:
+ * Nested ETHTOOL_A_COALESCE_*X_PROFILE attr
+ * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr
+ * ETHTOOL_A_IRQ_MODERATION_USEC attr
+ * ETHTOOL_A_IRQ_MODERATION_PKTS attr
+ * ETHTOOL_A_IRQ_MODERATION_COMPS attr
+ * ...
+ * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr
+ * ETHTOOL_A_IRQ_MODERATION_USEC attr
+ * ETHTOOL_A_IRQ_MODERATION_PKTS attr
+ * ETHTOOL_A_IRQ_MODERATION_COMPS attr
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int ethnl_update_profile(struct net_device *dev,
+ struct dim_cq_moder __rcu **dst,
+ const struct nlattr *nests,
+ bool *mod,
+ struct netlink_ext_ack *extack)
+{
+ int len_irq_moder = ARRAY_SIZE(coalesce_irq_moderation_policy);
+ struct nlattr *tb[ARRAY_SIZE(coalesce_irq_moderation_policy)];
+ struct dim_irq_moder *irq_moder = dev->irq_moder;
+ struct dim_cq_moder *new_profile, *old_profile;
+ int ret, rem, i = 0, len;
+ struct nlattr *nest;
+
+ if (!nests)
+ return 0;
+
+ if (!*dst)
+ return -EOPNOTSUPP;
+
+ old_profile = rtnl_dereference(*dst);
+ len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*old_profile);
+ new_profile = kmemdup(old_profile, len, GFP_KERNEL);
+ if (!new_profile)
+ return -ENOMEM;
+
+ nla_for_each_nested_type(nest, ETHTOOL_A_PROFILE_IRQ_MODERATION,
+ nests, rem) {
+ ret = nla_parse_nested(tb, len_irq_moder - 1, nest,
+ coalesce_irq_moderation_policy,
+ extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].usec,
+ ETHTOOL_A_IRQ_MODERATION_USEC,
+ tb, DIM_COALESCE_USEC,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].pkts,
+ ETHTOOL_A_IRQ_MODERATION_PKTS,
+ tb, DIM_COALESCE_PKTS,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].comps,
+ ETHTOOL_A_IRQ_MODERATION_COMPS,
+ tb, DIM_COALESCE_COMPS,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ i++;
+ }
+
+ /* After the profile is replaced, DIM, being a dynamic
+ * mechanism, quickly converges on the appropriate
+ * coalescing parameters for the new profile.
+ */
+ rcu_assign_pointer(*dst, new_profile);
+ kfree_rcu(old_profile, rcu);
+
+ return 0;
+
+err_out:
+ kfree(new_profile);
+ return ret;
+}
+
static int
__ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info,
bool *dual_change)
@@ -317,6 +571,22 @@ __ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info,
ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs,
tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod);
+ if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_RX) {
+ ret = ethnl_update_profile(dev, &dev->irq_moder->rx_profile,
+ tb[ETHTOOL_A_COALESCE_RX_PROFILE],
+ &mod, info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_TX) {
+ ret = ethnl_update_profile(dev, &dev->irq_moder->tx_profile,
+ tb[ETHTOOL_A_COALESCE_TX_PROFILE],
+ &mod, info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
/* Update operation modes */
ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce,
tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod_mode);
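
The profile update above follows a copy-modify-publish pattern: the current
profile array is duplicated, the requested fields are changed in the private
copy, and only then is the pointer swapped, so concurrent readers never see a
half-updated profile. Below is a minimal userspace sketch of that pattern with
simplified stand-in types and made-up default values; the kernel uses
kmemdup(), rcu_assign_pointer() and kfree_rcu() where the sketch uses plain
malloc(), assignment and free().

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NUM_PROFILES 5

    struct cq_moder { unsigned short usec, pkts, comps; };

    /* Published profile; in the kernel this is an RCU-protected pointer. */
    static struct cq_moder *active_profile;

    /* Copy the active profile, change one field, then publish the copy. */
    static int update_profile_usec(int idx, unsigned short usec)
    {
            struct cq_moder *old = active_profile, *new;

            new = malloc(NUM_PROFILES * sizeof(*new));
            if (!new)
                    return -1;
            memcpy(new, old, NUM_PROFILES * sizeof(*new));

            new[idx].usec = usec;   /* modify only the private copy */

            active_profile = new;   /* kernel: rcu_assign_pointer() */
            free(old);              /* kernel: kfree_rcu() after a grace period */
            return 0;
    }

    int main(void)
    {
            /* Example values only; the real defaults live in the DIM library. */
            static struct cq_moder initial[NUM_PROFILES] = {
                    {1, 256, 0}, {8, 128, 0}, {64, 64, 0}, {128, 32, 0}, {256, 16, 0},
            };

            active_profile = malloc(sizeof(initial));
            if (!active_profile)
                    return 1;
            memcpy(active_profile, initial, sizeof(initial));

            update_profile_usec(2, 96);
            printf("profile[2].usec = %hu\n", active_profile[2].usec);
            free(active_profile);
            return 0;
    }
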
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 6b2a360dcdf0..b97374b508f6 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -5,8 +5,13 @@
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/phy_link_topology.h>
+#include <net/netdev_queues.h>
+#include "netlink.h"
#include "common.h"
+#include "../core/dev.h"
+
const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_SG_BIT] = "tx-scatter-gather",
@@ -24,8 +29,6 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
[NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
[NETIF_F_GSO_BIT] = "tx-generic-segmentation",
- [NETIF_F_LLTX_BIT] = "tx-lockless",
- [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
[NETIF_F_GRO_BIT] = "rx-gro",
[NETIF_F_GRO_HW_BIT] = "rx-gro-hw",
[NETIF_F_LRO_BIT] = "rx-lro",
@@ -51,7 +54,6 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
- [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
[NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
[NETIF_F_RXHASH_BIT] = "rx-hashing",
[NETIF_F_RXCSUM_BIT] = "rx-checksum",
@@ -211,6 +213,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(10, T1S, Full),
__DEFINE_LINK_MODE_NAME(10, T1S, Half),
__DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half),
+ __DEFINE_LINK_MODE_NAME(10, T1BRR, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -251,6 +254,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
#define __LINK_MODE_LANES_T1S_P2MP 1
#define __LINK_MODE_LANES_VR8 8
#define __LINK_MODE_LANES_DR8_2 8
+#define __LINK_MODE_LANES_T1BRR 1
#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \
@@ -374,6 +378,7 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(10, T1S, Full),
__DEFINE_LINK_MODE_PARAMS(10, T1S, Half),
__DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half),
+ __DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -426,6 +431,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
[const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
[const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
@@ -457,6 +463,11 @@ const char ts_rx_filter_names[][ETH_GSTRING_LEN] = {
};
static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT);
+const char ts_flags_names[][ETH_GSTRING_LEN] = {
+ [const_ilog2(HWTSTAMP_FLAG_BONDED_PHC_INDEX)] = "bonded-phc-index",
+};
+static_assert(ARRAY_SIZE(ts_flags_names) == __HWTSTAMP_FLAG_CNT);
+
const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = {
[ETHTOOL_UDP_TUNNEL_TYPE_VXLAN] = "vxlan",
[ETHTOOL_UDP_TUNNEL_TYPE_GENEVE] = "geneve",
@@ -536,7 +547,21 @@ static int ethtool_get_rxnfc_rule_count(struct net_device *dev)
return info.rule_cnt;
}
-int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
+/* Max offset for one RSS context */
+static u32 ethtool_get_rss_ctx_max_channel(struct ethtool_rxfh_context *ctx)
+{
+ u32 max_ring = 0;
+ u32 i, *tbl;
+
+ if (WARN_ON_ONCE(!ctx))
+ return 0;
+ tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < ctx->indir_size; i++)
+ max_ring = max(max_ring, tbl[i]);
+ return max_ring;
+}
+
+static int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxnfc *info;
@@ -572,10 +597,18 @@ int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
if (rule_info.fs.ring_cookie != RX_CLS_FLOW_DISC &&
rule_info.fs.ring_cookie != RX_CLS_FLOW_WAKE &&
- !(rule_info.flow_type & FLOW_RSS) &&
- !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie))
- max_ring =
- max_t(u64, max_ring, rule_info.fs.ring_cookie);
+ !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie)) {
+ u64 ring = rule_info.fs.ring_cookie;
+
+ if (rule_info.flow_type & FLOW_RSS) {
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&dev->ethtool->rss_ctx,
+ rule_info.rss_context);
+ ring += ethtool_get_rss_ctx_max_channel(ctx);
+ }
+ max_ring = max_t(u64, max_ring, ring);
+ }
}
kvfree(info);
@@ -587,41 +620,151 @@ err_free_info:
return err;
}
-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
+/* Max offset across all of a device's RSS contexts */
+static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ u32 max_ring = 0;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ xa_for_each(&dev->ethtool->rss_ctx, context, ctx)
+ max_ring = max(max_ring, ethtool_get_rss_ctx_max_channel(ctx));
+ mutex_unlock(&dev->ethtool->rss_lock);
+
+ return max_ring;
+}
+
+static u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
{
struct ethtool_rxfh_param rxfh = {};
u32 dev_size, current_max = 0;
int ret;
+ /* While we do track whether an RSS context has an indirection
+ * table explicitly set by the user, no driver looks at that bit.
+ * Assume drivers won't auto-regenerate the additional tables,
+ * to be safe.
+ */
+ current_max = ethtool_get_max_rss_ctx_channel(dev);
+
+ if (!netif_is_rxfh_configured(dev))
+ return current_max;
+
if (!dev->ethtool_ops->get_rxfh_indir_size ||
!dev->ethtool_ops->get_rxfh)
- return -EOPNOTSUPP;
+ return current_max;
dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
- return -EOPNOTSUPP;
+ return current_max;
rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER);
if (!rxfh.indir)
- return -ENOMEM;
+ return U32_MAX;
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
- if (ret)
- goto out;
+ if (ret) {
+ current_max = U32_MAX;
+ goto out_free;
+ }
while (dev_size--)
current_max = max(current_max, rxfh.indir[dev_size]);
- *max = current_max;
-
-out:
+out_free:
kfree(rxfh.indir);
- return ret;
+ return current_max;
+}
+
+int ethtool_check_max_channel(struct net_device *dev,
+ struct ethtool_channels channels,
+ struct genl_info *info)
+{
+ u64 max_rxnfc_in_use;
+ u32 max_rxfh_in_use;
+ int max_mp_in_use;
+
+ /* ensure the new Rx count fits within the configured Rx flow
+ * indirection table/rxnfc settings
+ */
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
+ if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use);
+ return -EINVAL;
+ }
+ if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
+ return -EINVAL;
+ }
+
+ max_mp_in_use = dev_get_min_mp_channel_count(dev);
+ if (channels.combined_count + channels.rx_count <= max_mp_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing memory provider setting (%d)", max_mp_in_use);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxnfc *info;
+ int rc, i, rule_cnt;
+
+ if (!ops->get_rxnfc)
+ return 0;
+
+ rule_cnt = ethtool_get_rxnfc_rule_count(dev);
+ if (!rule_cnt)
+ return 0;
+
+ if (rule_cnt < 0)
+ return -EINVAL;
+
+ info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->cmd = ETHTOOL_GRXCLSRLALL;
+ info->rule_cnt = rule_cnt;
+ rc = ops->get_rxnfc(dev, info, info->rule_locs);
+ if (rc)
+ goto out_free;
+
+ for (i = 0; i < rule_cnt; i++) {
+ struct ethtool_rxnfc rule_info = {
+ .cmd = ETHTOOL_GRXCLSRULE,
+ .fs.location = info->rule_locs[i],
+ };
+
+ rc = ops->get_rxnfc(dev, &rule_info, NULL);
+ if (rc)
+ goto out_free;
+
+ if (rule_info.fs.flow_type & FLOW_RSS &&
+ rule_info.rss_context == rss_context) {
+ rc = -EBUSY;
+ goto out_free;
+ }
+ }
+
+out_free:
+ kvfree(info);
+ return rc;
}
int ethtool_check_ops(const struct ethtool_ops *ops)
{
if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params))
return -EINVAL;
+ if (WARN_ON(ops->rxfh_max_num_contexts == 1))
+ return -EINVAL;
/* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime,
* the fact that ops are checked at registration time does not
* mean the ops attached to a netdev later on are sane.
@@ -629,29 +772,176 @@ int ethtool_check_ops(const struct ethtool_ops *ops)
return 0;
}
-int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack)
{
- const struct ethtool_ops *ops = dev->ethtool_ops;
- struct phy_device *phydev = dev->phydev;
+ memset(param, 0, sizeof(*param));
+ memset(kparam, 0, sizeof(*kparam));
+
+ param->cmd = ETHTOOL_GRINGPARAM;
+ dev->ethtool_ops->get_ringparam(dev, param, kparam, extack);
+
+ /* The driver gives us the current state; we want to return the current config */
+ kparam->tcp_data_split = dev->cfg->hds_config;
+}
+static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info)
+{
memset(info, 0, sizeof(*info));
info->cmd = ETHTOOL_GET_TS_INFO;
+ info->phc_index = -1;
+}
+
+int ethtool_net_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ int err;
- if (phy_has_tsinfo(phydev))
- return phy_ts_info(phydev, info);
- if (ops->get_ts_info)
- return ops->get_ts_info(dev, info);
+ if (!ops->get_ts_info)
+ return -ENODEV;
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ /* Does the PTP clock come from the netdev? */
+ ethtool_init_tsinfo(info);
+ info->phc_qualifier = hwprov_desc->qualifier;
+ err = ops->get_ts_info(dev, info);
+ if (err)
+ return err;
- return 0;
+ if (info->phc_index == hwprov_desc->index &&
+ net_support_hwtstamp_qualifier(dev, hwprov_desc->qualifier))
+ return 0;
+
+ return -ENODEV;
+}
+
+struct phy_device *
+ethtool_phy_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc)
+{
+ int err;
+
+ /* Only the precise qualifier is supported by phydevs */
+ if (hwprov_desc->qualifier != HWTSTAMP_PROVIDER_QUALIFIER_PRECISE)
+ return ERR_PTR(-ENODEV);
+
+ /* Look in the phy topology */
+ if (dev->link_topo) {
+ struct phy_device_node *pdn;
+ unsigned long phy_index;
+
+ xa_for_each(&dev->link_topo->phys, phy_index, pdn) {
+ if (!phy_has_tsinfo(pdn->phy))
+ continue;
+
+ ethtool_init_tsinfo(info);
+ err = phy_ts_info(pdn->phy, info);
+ if (err)
+ return ERR_PTR(err);
+
+ if (info->phc_index == hwprov_desc->index)
+ return pdn->phy;
+ }
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* Look on the dev->phydev */
+ if (phy_has_tsinfo(dev->phydev)) {
+ ethtool_init_tsinfo(info);
+ err = phy_ts_info(dev->phydev, info);
+ if (err)
+ return ERR_PTR(err);
+
+ if (info->phc_index == hwprov_desc->index)
+ return dev->phydev;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int ethtool_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc)
+{
+ int err;
+
+ err = ethtool_net_get_ts_info_by_phc(dev, info, hwprov_desc);
+ if (err == -ENODEV) {
+ struct phy_device *phy;
+
+ phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc);
+ if (IS_ERR(phy))
+ err = PTR_ERR(phy);
+ else
+ err = 0;
+ }
+
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ return err;
+}
+
+int __ethtool_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct hwtstamp_provider *hwprov;
+ int err = 0;
+
+ rcu_read_lock();
+ hwprov = rcu_dereference(dev->hwprov);
+ /* No provider specified, use default behavior */
+ if (!hwprov) {
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
+ ethtool_init_tsinfo(info);
+ if (phy_is_default_hwtstamp(phydev) &&
+ phy_has_tsinfo(phydev))
+ err = phy_ts_info(phydev, info);
+ else if (ops->get_ts_info)
+ err = ops->get_ts_info(dev, info);
+
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ rcu_read_unlock();
+ return err;
+ }
+
+ err = ethtool_get_ts_info_by_phc(dev, info, &hwprov->desc);
+ rcu_read_unlock();
+ return err;
+}
+
+bool net_support_hwtstamp_qualifier(struct net_device *dev,
+ enum hwtstamp_provider_qualifier qualifier)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops)
+ return false;
+
+ /* To preserve the old behavior, report the precise qualifier as
+ * supported when the NIC does not describe its supported qualifiers.
+ */
+ if (!ops->supported_hwtstamp_qualifiers &&
+ qualifier == HWTSTAMP_PROVIDER_QUALIFIER_PRECISE)
+ return true;
+
+ if (ops->supported_hwtstamp_qualifiers & BIT(qualifier))
+ return true;
+
+ return false;
}
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
{
- struct ethtool_ts_info info = { };
+ struct kernel_ethtool_ts_info info = { };
int num = 0;
if (!__ethtool_get_ts_info(dev, &info))
@@ -661,7 +951,7 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
}
EXPORT_SYMBOL(ethtool_get_phc_vclocks);
-int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info)
+int ethtool_get_ts_info_by_layer(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
return __ethtool_get_ts_info(dev, info);
}
@@ -712,3 +1002,17 @@ ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size)
}
}
EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init);
+
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ WARN_ONCE(!rtnl_is_locked() &&
+ !lockdep_is_held_type(&dev->ethtool->rss_lock, -1),
+ "RSS context lock assertion failed\n");
+
+ netdev_err(dev, "device error, RSS context %d lost\n", context_id);
+ ctx = xa_erase(&dev->ethtool->rss_ctx, context_id);
+ kfree(ctx);
+}
+EXPORT_SYMBOL(ethtool_rxfh_context_lost);
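
The reworked __ethtool_get_ts_info() above still backs the legacy SIOCETHTOOL
path, so existing userspace keeps receiving a struct ethtool_ts_info. A minimal
query sketch follows; the interface name is only an example and error handling
is abbreviated.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
            struct ethtool_ts_info tsinfo = { .cmd = ETHTOOL_GET_TS_INFO };
            struct ifreq ifr = {};
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            /* "eth0" is just an example interface name */
            snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argc > 1 ? argv[1] : "eth0");
            ifr.ifr_data = (void *)&tsinfo;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("phc_index=%d so_timestamping=0x%x\n",
                           tsinfo.phc_index, tsinfo.so_timestamping);

            close(fd);
            return 0;
    }
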
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 28b8aaaf9bcb..a1088c2441d0 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -13,6 +13,7 @@
ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT
#define __SOF_TIMESTAMPING_CNT (const_ilog2(SOF_TIMESTAMPING_LAST) + 1)
+#define __HWTSTAMP_FLAG_CNT (const_ilog2(HWTSTAMP_FLAG_LAST) + 1)
struct link_mode_info {
int speed;
@@ -20,6 +21,9 @@ struct link_mode_info {
u8 duplex;
};
+struct genl_info;
+struct hwtstamp_provider_desc;
+
extern const char
netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN];
extern const char
@@ -35,6 +39,7 @@ extern const char wol_mode_names[][ETH_GSTRING_LEN];
extern const char sof_timestamping_names[][ETH_GSTRING_LEN];
extern const char ts_tx_type_names[][ETH_GSTRING_LEN];
extern const char ts_rx_filter_names[][ETH_GSTRING_LEN];
+extern const char ts_flags_names[][ETH_GSTRING_LEN];
extern const char udp_tunnel_type_names[][ETH_GSTRING_LEN];
int __ethtool_get_link(struct net_device *dev);
@@ -42,9 +47,29 @@ int __ethtool_get_link(struct net_device *dev);
bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
-int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max);
-int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+int ethtool_check_max_channel(struct net_device *dev,
+ struct ethtool_channels channels,
+ struct genl_info *info);
+int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context);
+
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack);
+
+int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
+int ethtool_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc);
+int ethtool_net_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc);
+struct phy_device *
+ethtool_phy_get_ts_info_by_phc(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc);
+bool net_support_hwtstamp_qualifier(struct net_device *dev,
+ enum hwtstamp_provider_qualifier qualifier);
extern const struct ethtool_phy_ops *ethtool_phy_ops;
extern const struct ethtool_pse_ops *ethtool_pse_ops;
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 2853394d06a8..bf398973eb8a 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -4,16 +4,13 @@
#include "common.h"
#include "bitset.h"
-#define EEE_MODES_COUNT \
- (sizeof_field(struct ethtool_eee, supported) * BITS_PER_BYTE)
-
struct eee_req_info {
struct ethnl_req_info base;
};
struct eee_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define EEE_REPDATA(__reply_base) \
@@ -30,6 +27,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
{
struct eee_reply_data *data = EEE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct ethtool_keee *eee = &data->eee;
int ret;
if (!dev->ethtool_ops->get_eee)
@@ -37,7 +35,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = dev->ethtool_ops->get_eee(dev, &data->eee);
+ ret = dev->ethtool_ops->get_eee(dev, eee);
ethnl_ops_complete(dev);
return ret;
@@ -48,24 +46,21 @@ static int eee_reply_size(const struct ethnl_req_info *req_base,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int len = 0;
int ret;
- BUILD_BUG_ON(sizeof(eee->advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
- BUILD_BUG_ON(sizeof(eee->lp_advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
-
/* MODES_OURS */
- ret = ethnl_bitset32_size(&eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
/* MODES_PEERS */
- ret = ethnl_bitset32_size(&eee->lp_advertised, NULL,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
@@ -84,24 +79,26 @@ static int eee_fill_reply(struct sk_buff *skb,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_OURS,
- &eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS,
+ eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_PEER,
- &eee->lp_advertised, NULL, EEE_MODES_COUNT,
- link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER,
+ eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, !!eee->eee_active) ||
- nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, !!eee->eee_enabled) ||
+ if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) ||
nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED,
- !!eee->tx_lpi_enabled) ||
+ eee->tx_lpi_enabled) ||
nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
return -EMSGSIZE;
@@ -132,7 +129,7 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct ethtool_eee eee = {};
+ struct ethtool_keee eee = {};
bool mod = false;
int ret;
@@ -140,14 +137,15 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret < 0)
return ret;
- ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
- tb[ETHTOOL_A_EEE_MODES_OURS],
- link_mode_names, info->extack, &mod);
+ ret = ethnl_update_bitset(eee.advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ tb[ETHTOOL_A_EEE_MODES_OURS],
+ link_mode_names, info->extack, &mod);
if (ret < 0)
return ret;
- ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
- ethnl_update_bool32(&eee.tx_lpi_enabled,
- tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+ ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED],
+ &mod);
ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
&mod);
if (!mod)
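
The netlink path above now works on struct ethtool_keee with full link-mode
bitmaps; the legacy ETHTOOL_GEEE ioctl still exposes the 32-bit struct
ethtool_eee (see the keee_to_eee() conversion in the ioctl.c hunk below), so
link modes above bit 31 cannot be represented there. A minimal sketch reading
EEE state through the old ioctl, with an example interface name and trimmed
error handling:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
            struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
            struct ifreq ifr = {};
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            /* "eth0" is just an example interface name */
            snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argc > 1 ? argv[1] : "eth0");
            ifr.ifr_data = (void *)&eee;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("EEE enabled=%u active=%u advertised=0x%x\n",
                           eee.eee_enabled, eee.eee_active, eee.advertised);

            close(fd);
            return 0;
    }
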
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 6209c3a9c8f7..3b8209e930fd 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -91,6 +91,12 @@ static int get_module_eeprom_by_page(struct net_device *dev,
{
const struct ethtool_ops *ops = dev->ethtool_ops;
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
if (dev->sfp_bus)
return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7519b0818b91..1c3ba2247776 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -26,12 +26,12 @@
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <linux/ethtool_netlink.h>
-#include <generated/utsrelease.h>
#include "common.h"
/* State held across locks and calls for commands which have devlink fallback */
@@ -65,7 +65,8 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -441,6 +442,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
memset(link_ksettings, 0, sizeof(*link_ksettings));
return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
@@ -658,6 +662,9 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
memset(&link_ksettings, 0, sizeof(link_ksettings));
err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
if (err < 0)
@@ -713,7 +720,8 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
struct device *parent = dev->dev.parent;
rsp->info.cmd = ETHTOOL_GDRVINFO;
- strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version));
+ strscpy(rsp->info.version, init_uts_ns.name.release,
+ sizeof(rsp->info.version));
if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &rsp->info);
if (!rsp->info.bus_info[0] && parent)
@@ -984,7 +992,13 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
if (rc)
return rc;
- if (ops->get_rxfh) {
+ /* A nonzero ring with RSS only makes sense if the NIC adds them together */
+ if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS &&
+ !ops->cap_rss_rxnfc_adds &&
+ ethtool_get_flow_spec_ring(info.fs.ring_cookie))
+ return -EINVAL;
+
+ if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
struct ethtool_rxfh_param rxfh = {};
rc = ops->get_rxfh(dev, &rxfh);
@@ -1198,6 +1212,7 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh_dev = {};
u32 user_indir_size, user_key_size;
+ struct ethtool_rxfh_context *ctx;
struct ethtool_rxfh rxfh;
u32 indir_bytes;
u8 *rss_config;
@@ -1221,7 +1236,8 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
+ if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
+ ops->create_rxfh_context))
return -EOPNOTSUPP;
rxfh.indir_size = rxfh_dev.indir_size;
@@ -1245,11 +1261,31 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (user_key_size)
rxfh_dev.key = rss_config + indir_bytes;
- rxfh_dev.rss_context = rxfh.rss_context;
-
- ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev);
- if (ret)
- goto out;
+ if (rxfh.rss_context) {
+ ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (rxfh_dev.indir)
+ memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx),
+ indir_bytes);
+ if (!ops->rxfh_per_ctx_key) {
+ rxfh_dev.key_size = 0;
+ } else {
+ if (rxfh_dev.key)
+ memcpy(rxfh_dev.key,
+ ethtool_rxfh_context_key(ctx),
+ user_key_size);
+ rxfh_dev.hfunc = ctx->hfunc;
+ }
+ rxfh_dev.input_xfrm = ctx->input_xfrm;
+ ret = 0;
+ } else {
+ ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev);
+ if (ret)
+ goto out;
+ }
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
&rxfh_dev.hfunc, sizeof(rxfh.hfunc))) {
@@ -1260,6 +1296,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
sizeof(rxfh.input_xfrm))) {
ret = -EFAULT;
} else if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh, key_size),
+ &rxfh_dev.key_size,
+ sizeof(rxfh.key_size))) {
+ ret = -EFAULT;
+ } else if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh, rss_config[0]),
rss_config, total_size)) {
ret = -EFAULT;
@@ -1270,17 +1311,54 @@ out:
return ret;
}
+static struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size)
+{
+ size_t indir_bytes, flex_len, key_off, size;
+ struct ethtool_rxfh_context *ctx;
+ u32 priv_bytes, indir_max;
+ u16 key_max;
+
+ key_max = max(key_size, ops->rxfh_key_space);
+ indir_max = max(indir_size, ops->rxfh_indir_space);
+
+ priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
+ indir_bytes = array_size(indir_max, sizeof(u32));
+
+ key_off = size_add(priv_bytes, indir_bytes);
+ flex_len = size_add(key_off, key_max);
+ size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
+
+ ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!ctx)
+ return NULL;
+
+ ctx->indir_size = indir_size;
+ ctx->key_size = key_size;
+ ctx->key_off = key_off;
+ ctx->priv_size = ops->rxfh_priv_size;
+
+ ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
+ ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
+
+ return ctx;
+}
+
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
+ u32 user_indir_len = 0, indir_bytes = 0;
struct ethtool_rxfh_param rxfh_dev = {};
+ struct ethtool_rxfh_context *ctx = NULL;
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
- u32 indir_bytes = 0;
+ bool locked = false; /* dev->ethtool->rss_lock taken */
+ bool create = false;
u8 *rss_config;
int ret;
@@ -1299,33 +1377,45 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
+ if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
+ ops->create_rxfh_context))
return -EOPNOTSUPP;
/* Check input data transformation capabilities */
if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR &&
rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)
return -EINVAL;
- if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
+ if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE &&
+ (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
!ops->cap_rss_sym_xor_supported)
return -EOPNOTSUPP;
+ create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC;
- /* If either indir, hash key or function is valid, proceed further.
- * Must request at least one change: indir size, hash key, function
- * or input transformation.
- */
if ((rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.indir_size != dev_indir_size) ||
- (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
+ (rxfh.key_size && rxfh.key_size != dev_key_size))
+ return -EINVAL;
+
+ /* Must request at least one change: indir size, hash key, function
+ * or input transformation.
+ * None of this is required when creating a new context.
+ */
+ if (!create &&
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE &&
rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
return -EINVAL;
- if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
- indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+ indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+
+ /* Check settings which may be global rather than per RSS-context */
+ if (rxfh.rss_context && !ops->rxfh_per_ctx_key)
+ if (rxfh.key_size ||
+ (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) ||
+ (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE))
+ return -EOPNOTSUPP;
- rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
+ rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
@@ -1340,6 +1430,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
+ user_indir_len = indir_bytes;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
@@ -1366,20 +1457,90 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes;
if (copy_from_user(rxfh_dev.key,
- useraddr + rss_cfg_offset + indir_bytes,
+ useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT;
goto out;
}
}
+ if (rxfh.rss_context) {
+ mutex_lock(&dev->ethtool->rss_lock);
+ locked = true;
+ }
+
+ if (rxfh.rss_context && rxfh_dev.rss_delete) {
+ ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context);
+ if (ret)
+ goto out;
+ }
+
+ if (create) {
+ if (rxfh_dev.rss_delete) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (ops->create_rxfh_context) {
+ u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX;
+ u32 ctx_id;
+
+ /* driver uses new API, core allocates ID */
+ ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
+ XA_LIMIT(1, limit - 1),
+ GFP_KERNEL_ACCOUNT);
+ if (ret < 0) {
+ kfree(ctx);
+ goto out;
+ }
+ WARN_ON(!ctx_id); /* can't happen */
+ rxfh.rss_context = ctx_id;
+ }
+ } else if (rxfh.rss_context) {
+ ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
rxfh_dev.hfunc = rxfh.hfunc;
rxfh_dev.rss_context = rxfh.rss_context;
rxfh_dev.input_xfrm = rxfh.input_xfrm;
- ret = ops->set_rxfh(dev, &rxfh_dev, extack);
- if (ret)
+ if (rxfh.rss_context && ops->create_rxfh_context) {
+ if (create) {
+ ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
+ extack);
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!ret && !rxfh_dev.key &&
+ ops->rxfh_per_ctx_key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx),
+ 0, ctx->key_size));
+ } else if (rxfh_dev.rss_delete) {
+ ret = ops->remove_rxfh_context(dev, ctx,
+ rxfh.rss_context,
+ extack);
+ } else {
+ ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
+ extack);
+ }
+ } else {
+ ret = ops->set_rxfh(dev, &rxfh_dev, extack);
+ }
+ if (ret) {
+ if (create) {
+ /* failed to create, free our new tracking entry */
+ if (ops->create_rxfh_context)
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ kfree(ctx);
+ }
goto out;
+ }
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
&rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
@@ -1392,8 +1553,62 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
dev->priv_flags |= IFF_RXFH_CONFIGURED;
}
+ /* Update rss_ctx tracking */
+ if (create && !ops->create_rxfh_context) {
+ /* driver uses old API, it chose context ID */
+ if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) {
+ /* context ID reused, our tracking is screwed */
+ kfree(ctx);
+ goto out;
+ }
+ /* Allocate the exact ID the driver gave us */
+ if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context,
+ ctx, GFP_KERNEL))) {
+ kfree(ctx);
+ goto out;
+ }
+
+ /* Fetch the defaults for the old API; with the new API, drivers
+ * are expected to write the defaults into ctx themselves.
+ */
+ rxfh_dev.indir = (u32 *)rss_config;
+ rxfh_dev.indir_size = dev_indir_size;
+
+ rxfh_dev.key = rss_config + indir_bytes;
+ rxfh_dev.key_size = dev_key_size;
+
+ ret = ops->get_rxfh(dev, &rxfh_dev);
+ if (WARN_ON(ret)) {
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ kfree(ctx);
+ goto out;
+ }
+ }
+ if (rxfh_dev.rss_delete) {
+ WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
+ kfree(ctx);
+ } else if (ctx) {
+ if (rxfh_dev.indir) {
+ for (i = 0; i < dev_indir_size; i++)
+ ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
+ ctx->indir_configured =
+ rxfh.indir_size &&
+ rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE;
+ }
+ if (rxfh_dev.key) {
+ memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
+ dev_key_size);
+ ctx->key_configured = !!rxfh.key_size;
+ }
+ if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = rxfh_dev.hfunc;
+ if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = rxfh_dev.input_xfrm;
+ }
out:
+ if (locked)
+ mutex_unlock(&dev->ethtool->rss_lock);
kfree(rss_config);
return ret;
}
@@ -1448,6 +1663,9 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
if (!dev->ethtool_ops->reset)
return -EOPNOTSUPP;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (copy_from_user(&reset, useraddr, sizeof(reset)))
return -EFAULT;
@@ -1502,28 +1720,63 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
if (ret)
return ret;
- dev->wol_enabled = !!wol.wolopts;
+ dev->ethtool->wol_enabled = !!wol.wolopts;
ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
return 0;
}
+static void eee_to_keee(struct ethtool_keee *keee,
+ const struct ethtool_eee *eee)
+{
+ memset(keee, 0, sizeof(*keee));
+
+ keee->eee_enabled = eee->eee_enabled;
+ keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+ keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+ ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+ eee->advertised);
+}
+
+static void keee_to_eee(struct ethtool_eee *eee,
+ const struct ethtool_keee *keee)
+{
+ bool overflow;
+
+ memset(eee, 0, sizeof(*eee));
+
+ eee->eee_active = keee->eee_active;
+ eee->eee_enabled = keee->eee_enabled;
+ eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+ eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+ overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+ keee->supported);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+ keee->advertised);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+ keee->lp_advertised);
+ if (overflow)
+ pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+}
+
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int rc;
if (!dev->ethtool_ops->get_eee)
return -EOPNOTSUPP;
- memset(&edata, 0, sizeof(struct ethtool_eee));
- edata.cmd = ETHTOOL_GEEE;
- rc = dev->ethtool_ops->get_eee(dev, &edata);
-
+ memset(&keee, 0, sizeof(keee));
+ rc = dev->ethtool_ops->get_eee(dev, &keee);
if (rc)
return rc;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ keee_to_eee(&eee, &keee);
+ if (copy_to_user(useraddr, &eee, sizeof(eee)))
return -EFAULT;
return 0;
@@ -1531,16 +1784,18 @@ static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int ret;
if (!dev->ethtool_ops->set_eee)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ if (copy_from_user(&eee, useraddr, sizeof(eee)))
return -EFAULT;
- ret = dev->ethtool_ops->set_eee(dev, &edata);
+ eee_to_keee(&keee, &eee);
+ ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
return ret;
@@ -1804,8 +2059,8 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
struct kernel_ethtool_ringparam kernel_ringparam;
+ struct ethtool_ringparam ringparam, max;
int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
@@ -1814,7 +2069,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
- dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL);
+ ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL);
/* ensure new ring parameters are within the maximums */
if (ringparam.rx_pending > max.rx_max_pending ||
@@ -1850,8 +2105,6 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
{
struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
u16 from_channel, to_channel;
- u64 max_rxnfc_in_use;
- u32 max_rxfh_in_use;
unsigned int i;
int ret;
@@ -1881,16 +2134,9 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
(!channels.rx_count || !channels.tx_count))
return -EINVAL;
- /* ensure the new Rx count fits within the configured Rx flow
- * indirection table/rxnfc settings */
- if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
- max_rxnfc_in_use = 0;
- if (!netif_is_rxfh_configured(dev) ||
- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
- max_rxfh_in_use = 0;
- if (channels.combined_count + channels.rx_count <=
- max_t(u64, max_rxnfc_in_use, max_rxfh_in_use))
- return -EINVAL;
+ ret = ethtool_check_max_channel(dev, channels, NULL);
+ if (ret)
+ return ret;
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
@@ -2182,7 +2428,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
int n_stats, ret;
- if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
@@ -2405,13 +2651,20 @@ out:
static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ts_info info;
+ struct kernel_ethtool_ts_info kernel_info;
+ struct ethtool_ts_info info = {};
int err;
- err = __ethtool_get_ts_info(dev, &info);
+ err = __ethtool_get_ts_info(dev, &kernel_info);
if (err)
return err;
+ info.cmd = kernel_info.cmd;
+ info.so_timestamping = kernel_info.so_timestamping;
+ info.phc_index = kernel_info.phc_index;
+ info.tx_types = kernel_info.tx_types;
+ info.rx_filters = kernel_info.rx_filters;
+
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
@@ -2424,6 +2677,9 @@ int ethtool_get_module_info_call(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (dev->sfp_bus)
return sfp_get_module_info(dev->sfp_bus, modinfo);
@@ -2461,6 +2717,9 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (dev->sfp_bus)
return sfp_get_module_eeprom(dev->sfp_bus, ee, data);
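
The reworked ethtool_set_rxfh() above also covers creation of additional RSS
contexts through the old ioctl. Below is a minimal sketch that asks the kernel
to allocate a new context with the driver's defaults; it assumes a driver that
supports extra RSS contexts, uses an example interface name, and relies on the
kernel writing the allocated ID back into rss_context.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
            struct ethtool_rxfh rxfh = {
                    .cmd         = ETHTOOL_SRSSH,
                    .rss_context = ETH_RXFH_CONTEXT_ALLOC,   /* request a new context */
                    .indir_size  = ETH_RXFH_INDIR_NO_CHANGE, /* keep driver defaults */
                    .hfunc       = ETH_RSS_HASH_NO_CHANGE,
            };
            struct ifreq ifr = {};
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            /* "eth0" is just an example interface name */
            snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argc > 1 ? argv[1] : "eth0");
            ifr.ifr_data = (void *)&rxfh;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("allocated RSS context %u\n", rxfh.rss_context);
            else
                    perror("ETHTOOL_SRSSH");

            close(fd);
            return 0;
    }
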
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 5c317d23787b..30b8ce275159 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -35,7 +35,7 @@ static int linkinfo_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
ret = __ethtool_get_link_ksettings(dev, &data->ksettings);
- if (ret < 0 && info)
+ if (ret < 0)
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
ethnl_ops_complete(dev);
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index b2591db49f7d..259cd9ef1f2a 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -40,7 +40,7 @@ static int linkmodes_prepare_data(const struct ethnl_req_info *req_base,
return ret;
ret = __ethtool_get_link_ksettings(dev, &data->ksettings);
- if (ret < 0 && info) {
+ if (ret < 0) {
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
goto out;
}
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index b2de2108b356..05a5f72c99fa 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -3,6 +3,7 @@
#include "netlink.h"
#include "common.h"
#include <linux/phy.h>
+#include <linux/phylib_stubs.h>
struct linkstate_req_info {
struct ethnl_req_info base;
@@ -26,9 +27,8 @@ const struct nla_policy ethnl_linkstate_get_policy[] = {
NLA_POLICY_NESTED(ethnl_header_policy_stats),
};
-static int linkstate_get_sqi(struct net_device *dev)
+static int linkstate_get_sqi(struct phy_device *phydev)
{
- struct phy_device *phydev = dev->phydev;
int ret;
if (!phydev)
@@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev)
mutex_lock(&phydev->lock);
if (!phydev->drv || !phydev->drv->get_sqi)
ret = -EOPNOTSUPP;
+ else if (!phydev->link)
+ ret = -ENETDOWN;
else
ret = phydev->drv->get_sqi(phydev);
mutex_unlock(&phydev->lock);
@@ -44,9 +46,8 @@ static int linkstate_get_sqi(struct net_device *dev)
return ret;
}
-static int linkstate_get_sqi_max(struct net_device *dev)
+static int linkstate_get_sqi_max(struct phy_device *phydev)
{
- struct phy_device *phydev = dev->phydev;
int ret;
if (!phydev)
@@ -55,6 +56,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
mutex_lock(&phydev->lock);
if (!phydev->drv || !phydev->drv->get_sqi_max)
ret = -EOPNOTSUPP;
+ else if (!phydev->link)
+ ret = -ENETDOWN;
else
ret = phydev->drv->get_sqi_max(phydev);
mutex_unlock(&phydev->lock);
@@ -62,6 +65,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
return ret;
};
+static bool linkstate_sqi_critical_error(int sqi)
+{
+ return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
+}
+
+static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
+{
+ return data->sqi >= 0 && data->sqi_max >= 0 &&
+ data->sqi <= data->sqi_max;
+}
+
static int linkstate_get_link_ext_state(struct net_device *dev,
struct linkstate_reply_data *data)
{
@@ -85,20 +99,29 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
{
struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER,
+ info->extack);
+ if (IS_ERR(phydev)) {
+ ret = PTR_ERR(phydev);
+ goto out;
+ }
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
data->link = __ethtool_get_link(dev);
- ret = linkstate_get_sqi(dev);
- if (ret < 0 && ret != -EOPNOTSUPP)
+ ret = linkstate_get_sqi(phydev);
+ if (linkstate_sqi_critical_error(ret))
goto out;
data->sqi = ret;
- ret = linkstate_get_sqi_max(dev);
- if (ret < 0 && ret != -EOPNOTSUPP)
+ ret = linkstate_get_sqi_max(phydev);
+ if (linkstate_sqi_critical_error(ret))
goto out;
data->sqi_max = ret;
@@ -112,9 +135,9 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
sizeof(data->link_stats) / 8);
if (req_base->flags & ETHTOOL_FLAG_STATS) {
- if (dev->phydev)
- data->link_stats.link_down_events =
- READ_ONCE(dev->phydev->link_down_events);
+ if (phydev)
+ phy_ethtool_get_link_ext_stats(phydev,
+ &data->link_stats);
if (dev->ethtool_ops->get_link_ext_stats)
dev->ethtool_ops->get_link_ext_stats(dev,
@@ -136,11 +159,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ 0;
- if (data->sqi != -EOPNOTSUPP)
- len += nla_total_size(sizeof(u32));
-
- if (data->sqi_max != -EOPNOTSUPP)
- len += nla_total_size(sizeof(u32));
+ if (linkstate_sqi_valid(data)) {
+ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
+ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
+ }
if (data->link_ext_state_provided)
len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
@@ -164,13 +186,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
return -EMSGSIZE;
- if (data->sqi != -EOPNOTSUPP &&
- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
- return -EMSGSIZE;
+ if (linkstate_sqi_valid(data)) {
+ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+ return -EMSGSIZE;
- if (data->sqi_max != -EOPNOTSUPP &&
- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
- return -EMSGSIZE;
+ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
+ data->sqi_max))
+ return -EMSGSIZE;
+ }
if (data->link_ext_state_provided) {
if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
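
With the change above, SQI values are reported only when both readings are
usable; -EOPNOTSUPP and -ENETDOWN simply suppress the attributes instead of
failing the request. The validity rule boils down to the small predicate below,
restated as standalone C purely for illustration:

    #include <assert.h>

    /* SQI is reported only when both values are non-negative and consistent. */
    static int sqi_valid(int sqi, int sqi_max)
    {
            return sqi >= 0 && sqi_max >= 0 && sqi <= sqi_max;
    }

    int main(void)
    {
            assert(sqi_valid(5, 7));    /* normal reading */
            assert(!sqi_valid(-95, 7)); /* error code stored as a negative value */
            assert(!sqi_valid(9, 7));   /* inconsistent: above the maximum */
            return 0;
    }
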
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
index ceb575efc290..6988e07bdcd6 100644
--- a/net/ethtool/module.c
+++ b/net/ethtool/module.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/sfp.h>
+#include <net/devlink.h>
#include "netlink.h"
#include "common.h"
#include "bitset.h"
+#include "module_fw.h"
struct module_req_info {
struct ethnl_req_info base;
@@ -33,6 +37,12 @@ static int module_get_power_mode(struct net_device *dev,
if (!ops->get_module_power_mode)
return 0;
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
return ops->get_module_power_mode(dev, &data->power, extack);
}
@@ -109,6 +119,12 @@ ethnl_set_module_validate(struct ethnl_req_info *req_info,
if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY])
return 0;
+ if (req_info->dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(info->extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
if (!ops->get_module_power_mode || !ops->set_module_power_mode) {
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY],
@@ -158,3 +174,381 @@ const struct ethnl_request_ops ethnl_module_request_ops = {
.set = ethnl_set_module,
.set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF,
};
+
+/* MODULE_FW_FLASH_ACT */
+
+const struct nla_policy
+ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1] = {
+ [ETHTOOL_A_MODULE_FW_FLASH_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME] = { .type = NLA_NUL_STRING },
+ [ETHTOOL_A_MODULE_FW_FLASH_PASSWORD] = { .type = NLA_U32 },
+};
+
+static LIST_HEAD(module_fw_flash_work_list);
+static DEFINE_SPINLOCK(module_fw_flash_work_list_lock);
+
+static int
+module_flash_fw_work_list_add(struct ethtool_module_fw_flash *module_fw,
+ struct genl_info *info)
+{
+ struct ethtool_module_fw_flash *work;
+
+ /* First, check if already registered. */
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_for_each_entry(work, &module_fw_flash_work_list, list) {
+ if (work->fw_update.ntf_params.portid == info->snd_portid &&
+ work->fw_update.dev == module_fw->fw_update.dev) {
+ spin_unlock(&module_fw_flash_work_list_lock);
+ return -EALREADY;
+ }
+ }
+
+ list_add_tail(&module_fw->list, &module_fw_flash_work_list);
+ spin_unlock(&module_fw_flash_work_list_lock);
+
+ return 0;
+}
+
+static void module_flash_fw_work_list_del(struct list_head *list)
+{
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_del(list);
+ spin_unlock(&module_fw_flash_work_list_lock);
+}
+
+static void module_flash_fw_work(struct work_struct *work)
+{
+ struct ethtool_module_fw_flash *module_fw;
+
+ module_fw = container_of(work, struct ethtool_module_fw_flash, work);
+
+ ethtool_cmis_fw_update(&module_fw->fw_update);
+
+ module_flash_fw_work_list_del(&module_fw->list);
+ module_fw->fw_update.dev->ethtool->module_fw_flash_in_progress = false;
+ netdev_put(module_fw->fw_update.dev, &module_fw->dev_tracker);
+ release_firmware(module_fw->fw_update.fw);
+ kfree(module_fw);
+}
+
+#define MODULE_EEPROM_PHYS_ID_PAGE 0
+#define MODULE_EEPROM_PHYS_ID_I2C_ADDR 0x50
+
+static int module_flash_fw_work_init(struct ethtool_module_fw_flash *module_fw,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {};
+ u8 phys_id;
+ int err;
+
+ /* Fetch the SFF-8024 Identifier Value. For all supported standards, it
+ * is located at I2C address 0x50, byte 0. See section 4.1 in SFF-8024,
+ * revision 4.9.
+ */
+ page_data.page = MODULE_EEPROM_PHYS_ID_PAGE;
+ page_data.offset = SFP_PHYS_ID;
+ page_data.length = sizeof(phys_id);
+ page_data.i2c_address = MODULE_EEPROM_PHYS_ID_I2C_ADDR;
+ page_data.data = &phys_id;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, extack);
+ if (err < 0)
+ return err;
+
+ switch (phys_id) {
+ case SFF8024_ID_QSFP_DD:
+ case SFF8024_ID_OSFP:
+ case SFF8024_ID_DSFP:
+ case SFF8024_ID_QSFP_PLUS_CMIS:
+ case SFF8024_ID_SFP_DD_CMIS:
+ case SFF8024_ID_SFP_PLUS_CMIS:
+ INIT_WORK(&module_fw->work, module_flash_fw_work);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack,
+ "Module type does not support firmware flashing");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv)
+{
+ struct ethtool_module_fw_flash *work;
+
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_for_each_entry(work, &module_fw_flash_work_list, list) {
+ if (work->fw_update.dev == sk_priv->dev &&
+ work->fw_update.ntf_params.portid == sk_priv->portid) {
+ work->fw_update.ntf_params.closed_sock = true;
+ break;
+ }
+ }
+ spin_unlock(&module_fw_flash_work_list_lock);
+}
+
+static int
+module_flash_fw_schedule(struct net_device *dev, const char *file_name,
+ struct ethtool_module_fw_flash_params *params,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_cmis_fw_update_params *fw_update;
+ struct ethtool_module_fw_flash *module_fw;
+ int err;
+
+ module_fw = kzalloc(sizeof(*module_fw), GFP_KERNEL);
+ if (!module_fw)
+ return -ENOMEM;
+
+ fw_update = &module_fw->fw_update;
+ fw_update->params = *params;
+ err = request_firmware_direct(&fw_update->fw,
+ file_name, &dev->dev);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack,
+ "Failed to request module firmware image");
+ goto err_free;
+ }
+
+ err = module_flash_fw_work_init(module_fw, dev, info->extack);
+ if (err < 0)
+ goto err_release_firmware;
+
+ dev->ethtool->module_fw_flash_in_progress = true;
+ netdev_hold(dev, &module_fw->dev_tracker, GFP_KERNEL);
+ fw_update->dev = dev;
+ fw_update->ntf_params.portid = info->snd_portid;
+ fw_update->ntf_params.seq = info->snd_seq;
+ fw_update->ntf_params.closed_sock = false;
+
+ err = ethnl_sock_priv_set(skb, dev, fw_update->ntf_params.portid,
+ ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH);
+ if (err < 0)
+ goto err_release_firmware;
+
+ err = module_flash_fw_work_list_add(module_fw, info);
+ if (err < 0)
+ goto err_release_firmware;
+
+ schedule_work(&module_fw->work);
+
+ return 0;
+
+err_release_firmware:
+ release_firmware(fw_update->fw);
+err_free:
+ kfree(module_fw);
+ return err;
+}
+
+static int module_flash_fw(struct net_device *dev, struct nlattr **tb,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_module_fw_flash_params params = {};
+ const char *file_name;
+ struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME))
+ return -EINVAL;
+
+ file_name = nla_data(tb[ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME]);
+
+ attr = tb[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD];
+ if (attr) {
+ params.password = cpu_to_be32(nla_get_u32(attr));
+ params.password_valid = true;
+ }
+
+ return module_flash_fw_schedule(dev, file_name, &params, skb, info);
+}
+
+static int ethnl_module_fw_flash_validate(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_port *devlink_port = dev->devlink_port;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->set_module_eeprom_by_page ||
+ !ops->get_module_eeprom_by_page) {
+ NL_SET_ERR_MSG(extack,
+ "Flashing module firmware is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ops->reset) {
+ NL_SET_ERR_MSG(extack,
+ "Reset module is not supported by this device, so flashing is not permitted");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack, "Module firmware flashing already in progress");
+ return -EBUSY;
+ }
+
+ if (dev->flags & IFF_UP) {
+ NL_SET_ERR_MSG(extack, "Netdevice is up, so flashing is not permitted");
+ return -EBUSY;
+ }
+
+ if (devlink_port && devlink_port->attrs.split) {
+ NL_SET_ERR_MSG(extack, "Can't perform firmware flashing on a split port");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct net_device *dev;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_MODULE_FW_FLASH_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = ethnl_module_fw_flash_validate(dev, info->extack);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = module_flash_fw(dev, tb, skb, info);
+
+ ethnl_ops_complete(dev);
+
+out_rtnl:
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info);
+ return ret;
+}
+
+/* MODULE_FW_FLASH_NTF */
+
+static int
+ethnl_module_fw_flash_ntf_put_err(struct sk_buff *skb, char *err_msg,
+ char *sub_err_msg)
+{
+ int err_msg_len, sub_err_msg_len, total_len;
+ struct nlattr *attr;
+
+ if (!err_msg)
+ return 0;
+
+ err_msg_len = strlen(err_msg);
+ total_len = err_msg_len + 2; /* For period and NUL. */
+
+ if (sub_err_msg) {
+ sub_err_msg_len = strlen(sub_err_msg);
+ total_len += sub_err_msg_len + 2; /* For ", ". */
+ }
+
+ attr = nla_reserve(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG,
+ total_len);
+ if (!attr)
+ return -ENOMEM;
+
+ if (sub_err_msg)
+ sprintf(nla_data(attr), "%s, %s.", err_msg, sub_err_msg);
+ else
+ sprintf(nla_data(attr), "%s.", err_msg);
+
+ return 0;
+}
+
+static void
+ethnl_module_fw_flash_ntf(struct net_device *dev,
+ enum ethtool_module_fw_flash_status status,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params,
+ char *err_msg, char *sub_err_msg,
+ u64 done, u64 total)
+{
+ struct sk_buff *skb;
+ void *hdr;
+ int ret;
+
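+ /* Notifications are unicast to the requesting socket; skip them once it is closed */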
+ if (ntf_params->closed_sock)
+ return;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq,
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF);
+ if (!hdr)
+ goto err_skb;
+
+ ret = ethnl_fill_reply_header(skb, dev,
+ ETHTOOL_A_MODULE_FW_FLASH_HEADER);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_u32(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS, status))
+ goto err_skb;
+
+ ret = ethnl_module_fw_flash_ntf_put_err(skb, err_msg, sub_err_msg);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_DONE, done))
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_TOTAL, total))
+ goto err_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_unicast(dev_net(dev), skb, ntf_params->portid);
+ return;
+
+err_skb:
+ nlmsg_free(skb);
+}
+
+void ethnl_module_fw_flash_ntf_err(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ char *err_msg, char *sub_err_msg)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR,
+ params, err_msg, sub_err_msg, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_start(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED,
+ params, NULL, NULL, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_complete(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED,
+ params, NULL, NULL, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ u64 done, u64 total)
+{
+ ethnl_module_fw_flash_ntf(dev,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS,
+ params, NULL, NULL, done, total);
+}
diff --git a/net/ethtool/module_fw.h b/net/ethtool/module_fw.h
new file mode 100644
index 000000000000..634543a12d0c
--- /dev/null
+++ b/net/ethtool/module_fw.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <uapi/linux/ethtool.h>
+#include "netlink.h"
+
+/**
+ * struct ethnl_module_fw_flash_ntf_params - module firmware flashing
+ * notification parameters
+ * @portid: Netlink portid of sender.
+ * @seq: Sequence number of sender.
+ * @closed_sock: Indicates whether the socket was closed from user space.
+ */
+struct ethnl_module_fw_flash_ntf_params {
+ u32 portid;
+ u32 seq;
+ bool closed_sock;
+};
+
+/**
+ * struct ethtool_module_fw_flash_params - module firmware flashing parameters
+ * @password: Module password. Only valid when @password_valid is set.
+ * @password_valid: Whether the module password is valid or not.
+ */
+struct ethtool_module_fw_flash_params {
+ __be32 password;
+ u8 password_valid:1;
+};
+
+/**
+ * struct ethtool_cmis_fw_update_params - CMIS firmware update specific
+ * parameters
+ * @dev: Pointer to the net_device to be flashed.
+ * @params: Module firmware flashing parameters.
+ * @ntf_params: Module firmware flashing notification parameters.
+ * @fw: Firmware to flash.
+ */
+struct ethtool_cmis_fw_update_params {
+ struct net_device *dev;
+ struct ethtool_module_fw_flash_params params;
+ struct ethnl_module_fw_flash_ntf_params ntf_params;
+ const struct firmware *fw;
+};
+
+/**
+ * struct ethtool_module_fw_flash - module firmware flashing
+ * @list: List node for &module_fw_flash_work_list.
+ * @dev_tracker: Refcount tracker for @dev.
+ * @work: The flashing firmware work.
+ * @fw_update: CMIS firmware update specific parameters.
+ */
+struct ethtool_module_fw_flash {
+ struct list_head list;
+ netdevice_tracker dev_tracker;
+ struct work_struct work;
+ struct ethtool_cmis_fw_update_params fw_update;
+};
+
+void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv);
+
+void
+ethnl_module_fw_flash_ntf_err(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ char *err_msg, char *sub_err_msg);
+void
+ethnl_module_fw_flash_ntf_start(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params);
+void
+ethnl_module_fw_flash_ntf_complete(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params);
+void
+ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ u64 done, u64 total);
+
+void ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *params);
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index fe3553f60bf3..734849a57369 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <net/netdev_queues.h>
#include <net/sock.h>
#include <linux/ethtool_netlink.h>
+#include <linux/phy_link_topology.h>
#include <linux/pm_runtime.h>
#include "netlink.h"
+#include "module_fw.h"
static struct genl_family ethtool_genl_family;
@@ -30,6 +33,53 @@ const struct nla_policy ethnl_header_policy_stats[] = {
ETHTOOL_FLAGS_STATS),
};
+const struct nla_policy ethnl_header_policy_phy[] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = ALTIFNAMSIZ - 1 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_BASIC),
+ [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+const struct nla_policy ethnl_header_policy_phy_stats[] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = ALTIFNAMSIZ - 1 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_STATS),
+ [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
+ enum ethnl_sock_type type)
+{
+ struct ethnl_sock_priv *sk_priv;
+
+ sk_priv = genl_sk_priv_get(&ethtool_genl_family, NETLINK_CB(skb).sk);
+ if (IS_ERR(sk_priv))
+ return PTR_ERR(sk_priv);
+
+ sk_priv->dev = dev;
+ sk_priv->portid = portid;
+ sk_priv->type = type;
+
+ return 0;
+}
+
+static void ethnl_sock_priv_destroy(void *priv)
+{
+ struct ethnl_sock_priv *sk_priv = priv;
+
+ switch (sk_priv->type) {
+ case ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH:
+ ethnl_module_fw_flash_sock_destroy(sk_priv);
+ break;
+ default:
+ break;
+ }
+}
+
int ethnl_ops_begin(struct net_device *dev)
{
int ret;
@@ -41,7 +91,7 @@ int ethnl_ops_begin(struct net_device *dev)
pm_runtime_get_sync(dev->dev.parent);
if (!netif_device_present(dev) ||
- dev->reg_state == NETREG_UNREGISTERING) {
+ dev->reg_state >= NETREG_UNREGISTERING) {
ret = -ENODEV;
goto err;
}
@@ -89,7 +139,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
const struct nlattr *header, struct net *net,
struct netlink_ext_ack *extack, bool require_dev)
{
- struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy)];
+ struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy_phy)];
const struct nlattr *devname_attr;
struct net_device *dev = NULL;
u32 flags = 0;
@@ -104,7 +154,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
/* No validation here, command policy should have a nested policy set
* for the header, therefore validation should have already been done.
*/
- ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy) - 1, header,
+ ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy_phy) - 1, header,
NULL, extack);
if (ret < 0)
return ret;
@@ -145,11 +195,45 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
return -EINVAL;
}
+ if (tb[ETHTOOL_A_HEADER_PHY_INDEX]) {
+ if (dev) {
+ req_info->phy_index = nla_get_u32(tb[ETHTOOL_A_HEADER_PHY_INDEX]);
+ } else {
+ NL_SET_ERR_MSG_ATTR(extack, header,
+ "phy_index set without a netdev");
+ return -EINVAL;
+ }
+ }
+
req_info->dev = dev;
req_info->flags = flags;
return 0;
}
+struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ struct nlattr **tb, unsigned int header,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_device *phydev;
+
+ ASSERT_RTNL();
+
+ if (!req_info->dev)
+ return NULL;
+
+ if (!req_info->phy_index)
+ return req_info->dev->phydev;
+
+ phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index);
+ if (!phydev && tb) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[header],
+ "no phy matching phyindex");
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
/**
* ethnl_fill_reply_header() - Put common header into a reply message
* @skb: skb with the message
@@ -239,6 +323,11 @@ void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd)
cmd);
}
+void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd)
+{
+ return genlmsg_put(skb, portid, seq, &ethtool_genl_family, 0, cmd);
+}
+
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev)
{
return genlmsg_multicast_netns(&ethtool_genl_family, dev_net(dev), skb,
@@ -306,6 +395,8 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_PLCA_GET_STATUS] = &ethnl_plca_status_request_ops,
[ETHTOOL_MSG_MM_GET] = &ethnl_mm_request_ops,
[ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_TSCONFIG_GET] = &ethnl_tsconfig_request_ops,
+ [ETHTOOL_MSG_TSCONFIG_SET] = &ethnl_tsconfig_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -477,11 +568,7 @@ out:
return ret;
}
-/* Default ->dumpit() handler for GET requests. Device iteration copied from
- * rtnl_dump_ifinfo(); we have to be more careful about device hashtable
- * persistence as we cannot guarantee to hold RTNL lock through the whole
- * function as rtnetnlink does.
- */
+/* Default ->dumpit() handler for GET requests. */
static int ethnl_default_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -490,14 +577,14 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
struct net_device *dev;
int ret = 0;
- rtnl_lock();
+ rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
dev_hold(dev);
- rtnl_unlock();
+ rcu_read_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
- rtnl_lock();
+ rcu_read_lock();
dev_put(dev);
if (ret < 0 && ret != -EOPNOTSUPP) {
@@ -507,7 +594,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
}
ret = 0;
}
- rtnl_unlock();
+ rcu_read_unlock();
return ret;
}
@@ -581,6 +668,7 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
const struct ethnl_request_ops *ops;
struct ethnl_req_info req_info = {};
const u8 cmd = info->genlhdr->cmd;
+ struct net_device *dev;
int ret;
ops = ethnl_default_requests[cmd];
@@ -602,20 +690,36 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
goto out_dev;
}
+ dev = req_info.dev;
+
rtnl_lock();
- ret = ethnl_ops_begin(req_info.dev);
+ dev->cfg_pending = kmemdup(dev->cfg, sizeof(*dev->cfg),
+ GFP_KERNEL_ACCOUNT);
+ if (!dev->cfg_pending) {
+ ret = -ENOMEM;
+ goto out_tie_cfg;
+ }
+
+ ret = ethnl_ops_begin(dev);
if (ret < 0)
- goto out_rtnl;
+ goto out_free_cfg;
ret = ops->set(&req_info, info);
- if (ret <= 0)
+ if (ret < 0)
goto out_ops;
- ethtool_notify(req_info.dev, ops->set_ntf_cmd, NULL);
+
+ swap(dev->cfg, dev->cfg_pending);
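+ /* ops->set() returns 0 when nothing changed, so skip the notification */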
+ if (!ret)
+ goto out_ops;
+ ethtool_notify(dev, ops->set_ntf_cmd, NULL);
ret = 0;
out_ops:
- ethnl_ops_complete(req_info.dev);
-out_rtnl:
+ ethnl_ops_complete(dev);
+out_free_cfg:
+ kfree(dev->cfg_pending);
+out_tie_cfg:
+ dev->cfg_pending = dev->cfg;
rtnl_unlock();
out_dev:
ethnl_parse_header_dev_put(&req_info);
@@ -764,10 +868,22 @@ static void ethnl_notify_features(struct netdev_notifier_info *info)
static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
+ struct netdev_notifier_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *dev;
+
+ dev = netdev_notifier_info_to_dev(info);
+ extack = netdev_notifier_info_to_extack(info);
+
switch (event) {
case NETDEV_FEAT_CHANGE:
ethnl_notify_features(ptr);
break;
+ case NETDEV_PRE_UP:
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware");
+ return NOTIFY_BAD;
+ }
}
return NOTIFY_DONE;
@@ -978,9 +1094,9 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_TSINFO_GET,
.doit = ethnl_default_doit,
- .start = ethnl_default_start,
- .dumpit = ethnl_default_dumpit,
- .done = ethnl_default_done,
+ .start = ethnl_tsinfo_start,
+ .dumpit = ethnl_tsinfo_dumpit,
+ .done = ethnl_tsinfo_done,
.policy = ethnl_tsinfo_get_policy,
.maxattr = ARRAY_SIZE(ethnl_tsinfo_get_policy) - 1,
},
@@ -1085,6 +1201,8 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_RSS_GET,
.doit = ethnl_default_doit,
+ .start = ethnl_rss_dump_start,
+ .dumpit = ethnl_rss_dumpit,
.policy = ethnl_rss_get_policy,
.maxattr = ARRAY_SIZE(ethnl_rss_get_policy) - 1,
},
@@ -1129,6 +1247,38 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_mm_set_policy,
.maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_MODULE_FW_FLASH_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_act_module_fw_flash,
+ .policy = ethnl_module_fw_flash_act_policy,
+ .maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PHY_GET,
+ .doit = ethnl_phy_doit,
+ .start = ethnl_phy_start,
+ .dumpit = ethnl_phy_dumpit,
+ .done = ethnl_phy_done,
+ .policy = ethnl_phy_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_TSCONFIG_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_tsconfig_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_tsconfig_get_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_TSCONFIG_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_default_set_doit,
+ .policy = ethnl_tsconfig_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_tsconfig_set_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
@@ -1145,6 +1295,8 @@ static struct genl_family ethtool_genl_family __ro_after_init = {
.resv_start_op = ETHTOOL_MSG_MODULE_GET + 1,
.mcgrps = ethtool_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps),
+ .sock_priv_size = sizeof(struct ethnl_sock_priv),
+ .sock_priv_destroy = ethnl_sock_priv_destroy,
};
/* module setup */
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 9a333a8d04c1..ec6ab5443a6f 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -21,6 +21,7 @@ struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd,
void **ehdrp);
void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd);
+void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd);
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev);
/**
@@ -250,6 +251,9 @@ static inline unsigned int ethnl_reply_header_size(void)
* @dev: network device the request is for (may be null)
* @dev_tracker: refcount tracker for @dev reference
* @flags: request flags common for all request types
+ * @phy_index: index of the phy_device connected to @dev that this request
+ * targets. Can be 0 if the request doesn't target a phy, or if the
+ * phy attached to @dev is targeted.
*
* This is a common base for request specific structures holding data from
* parsed userspace request. These always embed struct ethnl_req_info at
@@ -259,6 +263,7 @@ struct ethnl_req_info {
struct net_device *dev;
netdevice_tracker dev_tracker;
u32 flags;
+ u32 phy_index;
};
static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
@@ -267,6 +272,28 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
}
/**
+ * ethnl_req_get_phydev() - Get the phy_device targeted by this request,
+ * if any. Must be called under rtnl_lock().
+ * @req_info: The ethnl request to get the phy from.
+ * @tb: The netlink attributes array, for error reporting.
+ * @header: The netlink header index, used for error reporting.
+ * @extack: The netlink extended ACK, for error reporting.
+ *
+ * The caller must hold RTNL, until it's done interacting with the returned
+ * phy_device.
+ *
+ * Return: A pointer to the phy_device matching the passed phy_index, if one
+ * is provided. Otherwise, the phy_device attached to the net_device
+ * targeted by this request is returned. If there's no targeted
+ * net_device, or no phy_device is attached, NULL is returned. If the
+ * provided phy_index is invalid, an error pointer is returned.
+ */
+struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ struct nlattr **tb, unsigned int header,
+ struct netlink_ext_ack *extack);
+
+/**
* struct ethnl_reply_data - base type of reply data for GET requests
* @dev: device for current reply message; in single shot requests it is
* equal to &ethnl_req_info.dev; in dumps it's different for each
@@ -283,6 +310,19 @@ struct ethnl_reply_data {
int ethnl_ops_begin(struct net_device *dev);
void ethnl_ops_complete(struct net_device *dev);
+enum ethnl_sock_type {
+ ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH,
+};
+
+struct ethnl_sock_priv {
+ struct net_device *dev;
+ u32 portid;
+ enum ethnl_sock_type type;
+};
+
+int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
+ enum ethnl_sock_type type);
+
/**
* struct ethnl_request_ops - unified handling of GET and SET requests
* @request_cmd: command id for request (GET)
@@ -395,9 +435,13 @@ extern const struct ethnl_request_ops ethnl_rss_request_ops;
extern const struct ethnl_request_ops ethnl_plca_cfg_request_ops;
extern const struct ethnl_request_ops ethnl_plca_status_request_ops;
extern const struct ethnl_request_ops ethnl_mm_request_ops;
+extern const struct ethnl_request_ops ethnl_phy_request_ops;
+extern const struct ethnl_request_ops ethnl_tsconfig_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
+extern const struct nla_policy ethnl_header_policy_phy[ETHTOOL_A_HEADER_PHY_INDEX + 1];
+extern const struct nla_policy ethnl_header_policy_phy_stats[ETHTOOL_A_HEADER_PHY_INDEX + 1];
extern const struct nla_policy ethnl_strset_get_policy[ETHTOOL_A_STRSET_COUNTS_ONLY + 1];
extern const struct nla_policy ethnl_linkinfo_get_policy[ETHTOOL_A_LINKINFO_HEADER + 1];
extern const struct nla_policy ethnl_linkinfo_set_policy[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL + 1];
@@ -413,7 +457,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_HDS_THRESH_MAX + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
@@ -422,7 +466,7 @@ extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_STATS_SRC
extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_TX + 1];
extern const struct nla_policy ethnl_eee_get_policy[ETHTOOL_A_EEE_HEADER + 1];
extern const struct nla_policy ethnl_eee_set_policy[ETHTOOL_A_EEE_TX_LPI_TIMER + 1];
-extern const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_HEADER + 1];
+extern const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1];
extern const struct nla_policy ethnl_cable_test_act_policy[ETHTOOL_A_CABLE_TEST_HEADER + 1];
extern const struct nla_policy ethnl_cable_test_tdr_act_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG + 1];
extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INFO_HEADER + 1];
@@ -435,12 +479,16 @@ extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER +
extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1];
extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
-extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_CONTEXT + 1];
+extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_START_CONTEXT + 1];
extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1];
extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1];
extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1];
+extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1];
+extern const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1];
+extern const struct nla_policy ethnl_tsconfig_get_policy[ETHTOOL_A_TSCONFIG_HEADER + 1];
+extern const struct nla_policy ethnl_tsconfig_set_policy[ETHTOOL_A_TSCONFIG_MAX + 1];
int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info);
@@ -448,11 +496,22 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info);
+int ethnl_rss_dump_start(struct netlink_callback *cb);
+int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_phy_start(struct netlink_callback *cb);
+int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info);
+int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_phy_done(struct netlink_callback *cb);
+int ethnl_tsinfo_start(struct netlink_callback *cb);
+int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_tsinfo_done(struct netlink_callback *cb);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_mac_names[__ETHTOOL_A_STATS_ETH_MAC_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_ctrl_names[__ETHTOOL_A_STATS_ETH_CTRL_CNT][ETH_GSTRING_LEN];
extern const char stats_rmon_names[__ETHTOOL_A_STATS_RMON_CNT][ETH_GSTRING_LEN];
+extern const char stats_phy_names[__ETHTOOL_A_STATS_PHY_CNT][ETH_GSTRING_LEN];
#endif /* _NET_ETHTOOL_NETLINK_H */
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
new file mode 100644
index 000000000000..e067cc234419
--- /dev/null
+++ b/net/ethtool/phy.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Bootlin
+ *
+ */
+#include "common.h"
+#include "netlink.h"
+
+#include <linux/phy.h>
+#include <linux/phy_link_topology.h>
+#include <linux/sfp.h>
+
+struct phy_req_info {
+ struct ethnl_req_info base;
+ struct phy_device_node *pdn;
+};
+
+#define PHY_REQINFO(__req_base) \
+ container_of(__req_base, struct phy_req_info, base)
+
+const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
+ [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+/* Caller holds rtnl */
+static ssize_t
+ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device_node *pdn = req_info->pdn;
+ struct phy_device *phydev = pdn->phy;
+ size_t size = 0;
+
+ ASSERT_RTNL();
+
+ /* ETHTOOL_A_PHY_INDEX */
+ size += nla_total_size(sizeof(u32));
+
+ /* ETHTOOL_A_DRVNAME */
+ if (phydev->drv)
+ size += nla_total_size(strlen(phydev->drv->name) + 1);
+
+ /* ETHTOOL_A_NAME */
+ size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_TYPE */
+ size += nla_total_size(sizeof(u32));
+
+ if (phy_on_sfp(phydev)) {
+ const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
+ if (upstream_sfp_name)
+ size += nla_total_size(strlen(upstream_sfp_name) + 1);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_INDEX */
+ size += nla_total_size(sizeof(u32));
+ }
+
+ /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
+ if (phydev->sfp_bus) {
+ const char *sfp_name = sfp_get_name(phydev->sfp_bus);
+
+ if (sfp_name)
+ size += nla_total_size(strlen(sfp_name) + 1);
+ }
+
+ return size;
+}
+
+static int
+ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
+{
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device_node *pdn = req_info->pdn;
+ struct phy_device *phydev = pdn->phy;
+ enum phy_upstream ptype;
+
+ ptype = pdn->upstream_type;
+
+ if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
+ nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
+ nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
+ return -EMSGSIZE;
+
+ if (phydev->drv &&
+ nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
+ return -EMSGSIZE;
+
+ if (ptype == PHY_UPSTREAM_PHY) {
+ struct phy_device *upstream = pdn->upstream.phydev;
+ const char *sfp_upstream_name;
+
+ /* Parent index */
+ if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
+ return -EMSGSIZE;
+
+ if (pdn->parent_sfp_bus) {
+ sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
+ if (sfp_upstream_name &&
+ nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
+ sfp_upstream_name))
+ return -EMSGSIZE;
+ }
+ }
+
+ if (phydev->sfp_bus) {
+ const char *sfp_name = sfp_get_name(phydev->sfp_bus);
+
+ if (sfp_name &&
+ nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
+ sfp_name))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_link_topology *topo = req_base->dev->link_topo;
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device *phydev;
+
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER,
+ extack);
+ if (!phydev)
+ return 0;
+
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
+ if (!topo)
+ return 0;
+
+ req_info->pdn = xa_load(&topo->phys, phydev->phyindex);
+
+ return 0;
+}
+
+int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct phy_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct sk_buff *rskb;
+ void *reply_payload;
+ int reply_len;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info.base,
+ tb[ETHTOOL_A_PHY_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ rtnl_lock();
+
+ ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
+ if (ret < 0)
+ goto err_unlock_rtnl;
+
+ /* No PHY, return early */
+ if (!req_info.pdn)
+ goto err_unlock_rtnl;
+
+ ret = ethnl_phy_reply_size(&req_info.base, info->extack);
+ if (ret < 0)
+ goto err_unlock_rtnl;
+ reply_len = ret + ethnl_reply_header_size();
+
+ rskb = ethnl_reply_init(reply_len, req_info.base.dev,
+ ETHTOOL_MSG_PHY_GET_REPLY,
+ ETHTOOL_A_PHY_HEADER,
+ info, &reply_payload);
+ if (!rskb) {
+ ret = -ENOMEM;
+ goto err_unlock_rtnl;
+ }
+
+ ret = ethnl_phy_fill_reply(&req_info.base, rskb);
+ if (ret)
+ goto err_free_msg;
+
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info.base);
+ genlmsg_end(rskb, reply_payload);
+
+ return genlmsg_reply(rskb, info);
+
+err_free_msg:
+ nlmsg_free(rskb);
+err_unlock_rtnl:
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info.base);
+ return ret;
+}
+
+struct ethnl_phy_dump_ctx {
+ struct phy_req_info *phy_req_info;
+ unsigned long ifindex;
+ unsigned long phy_index;
+};
+
+int ethnl_phy_start(struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+
+ ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
+ if (!ctx->phy_req_info)
+ return -ENOMEM;
+
+ ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
+ info->attrs[ETHTOOL_A_PHY_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
+ ctx->ifindex = 0;
+ ctx->phy_index = 0;
+
+ if (ret)
+ kfree(ctx->phy_req_info);
+
+ return ret;
+}
+
+int ethnl_phy_done(struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+
+ if (ctx->phy_req_info->base.dev)
+ ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
+
+ kfree(ctx->phy_req_info);
+
+ return 0;
+}
+
+static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
+ struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ struct phy_req_info *pri = ctx->phy_req_info;
+ struct phy_device_node *pdn;
+ int ret = 0;
+ void *ehdr;
+
+ if (!dev->link_topo)
+ return 0;
+
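+ /* Resume the PHY iteration from the index saved in the dump context */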
+ xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
+ ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
+ if (!ehdr) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
+ ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
+ if (ret < 0) {
+ genlmsg_cancel(skb, ehdr);
+ break;
+ }
+
+ pri->pdn = pdn;
+ ret = ethnl_phy_fill_reply(&pri->base, skb);
+ if (ret < 0) {
+ genlmsg_cancel(skb, ehdr);
+ break;
+ }
+
+ genlmsg_end(skb, ehdr);
+ }
+
+ return ret;
+}
+
+int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+
+ if (ctx->phy_req_info->base.dev) {
+ ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
+ } else {
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ ret = ethnl_phy_dump_one_dev(skb, dev, cb);
+ if (ret)
+ break;
+
+ ctx->phy_index = 0;
+ }
+ }
+ rtnl_unlock();
+
+ return ret;
+}
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
index b1e2e3b5027f..e1f7820a6158 100644
--- a/net/ethtool/plca.c
+++ b/net/ethtool/plca.c
@@ -25,7 +25,7 @@ struct plca_reply_data {
const struct nla_policy ethnl_plca_get_cfg_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static void plca_update_sint(int *dst, struct nlattr **tb, u32 attrid,
@@ -58,10 +58,14 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev) {
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -80,7 +84,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
memset(&data->plca_cfg, 0xff,
sizeof_field(struct plca_reply_data, plca_cfg));
- ret = ops->get_plca_cfg(dev->phydev, &data->plca_cfg);
+ ret = ops->get_plca_cfg(phydev, &data->plca_cfg);
ethnl_ops_complete(dev);
out:
@@ -129,7 +133,7 @@ static int plca_get_cfg_fill_reply(struct sk_buff *skb,
const struct nla_policy ethnl_plca_set_cfg_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_PLCA_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_PLCA_NODE_ID] = NLA_POLICY_MAX(NLA_U32, 255),
[ETHTOOL_A_PLCA_NODE_CNT] = NLA_POLICY_RANGE(NLA_U32, 1, 255),
@@ -141,15 +145,17 @@ const struct nla_policy ethnl_plca_set_cfg_policy[] = {
static int
ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct net_device *dev = req_info->dev;
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
struct phy_plca_cfg plca_cfg;
+ struct phy_device *phydev;
bool mod = false;
int ret;
+ phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER,
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev)
+ if (IS_ERR_OR_NULL(phydev))
return -EOPNOTSUPP;
ops = ethtool_phy_ops;
@@ -168,7 +174,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
if (!mod)
return 0;
- ret = ops->set_plca_cfg(dev->phydev, &plca_cfg, info->extack);
+ ret = ops->set_plca_cfg(phydev, &plca_cfg, info->extack);
return ret < 0 ? ret : 1;
}
@@ -191,7 +197,7 @@ const struct ethnl_request_ops ethnl_plca_cfg_request_ops = {
const struct nla_policy ethnl_plca_get_status_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
@@ -201,10 +207,14 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev) {
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -223,7 +233,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
memset(&data->plca_st, 0xff,
sizeof_field(struct plca_reply_data, plca_st));
- ret = ops->get_plca_status(dev->phydev, &data->plca_st);
+ ret = ops->get_plca_status(phydev, &data->plca_st);
ethnl_ops_complete(dev);
out:
return ret;
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index cc478af77111..4f6b99eab2a6 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -19,7 +19,7 @@ struct pse_req_info {
struct pse_reply_data {
struct ethnl_reply_data base;
- struct pse_control_status status;
+ struct ethtool_pse_control_status status;
};
#define PSE_REPDATA(__reply_base) \
@@ -28,17 +28,15 @@ struct pse_reply_data {
/* PSE_GET */
const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1] = {
- [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
-static int pse_get_pse_attributes(struct net_device *dev,
+static int pse_get_pse_attributes(struct phy_device *phydev,
struct netlink_ext_ack *extack,
struct pse_reply_data *data)
{
- struct phy_device *phydev = dev->phydev;
-
if (!phydev) {
- NL_SET_ERR_MSG(extack, "No PHY is attached");
+ NL_SET_ERR_MSG(extack, "No PHY found");
return -EOPNOTSUPP;
}
@@ -58,13 +56,20 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
{
struct pse_reply_data *data = PSE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = pse_get_pse_attributes(dev, info->extack, data);
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER,
+ info->extack);
+ if (IS_ERR(phydev))
+ return -ENODEV;
+
+ ret = pse_get_pse_attributes(phydev, info->extack, data);
ethnl_ops_complete(dev);
@@ -75,23 +80,73 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct pse_reply_data *data = PSE_REPDATA(reply_base);
- const struct pse_control_status *st = &data->status;
+ const struct ethtool_pse_control_status *st = &data->status;
int len = 0;
if (st->podl_admin_state > 0)
len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */
if (st->podl_pw_status > 0)
len += nla_total_size(sizeof(u32)); /* _PODL_PSE_PW_D_STATUS */
+ if (st->c33_admin_state > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_ADMIN_STATE */
+ if (st->c33_pw_status > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_D_STATUS */
+ if (st->c33_pw_class > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_CLASS */
+ if (st->c33_actual_pw > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_ACTUAL_PW */
+ if (st->c33_ext_state_info.c33_pse_ext_state > 0) {
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_EXT_STATE */
+ if (st->c33_ext_state_info.__c33_pse_ext_substate > 0)
+ /* _C33_PSE_EXT_SUBSTATE */
+ len += nla_total_size(sizeof(u32));
+ }
+ if (st->c33_avail_pw_limit > 0)
+ /* _C33_AVAIL_PSE_PW_LIMIT */
+ len += nla_total_size(sizeof(u32));
+ if (st->c33_pw_limit_nb_ranges > 0)
+ /* _C33_PSE_PW_LIMIT_RANGES */
+ len += st->c33_pw_limit_nb_ranges *
+ (nla_total_size(0) +
+ nla_total_size(sizeof(u32)) * 2);
return len;
}
+static int pse_put_pw_limit_ranges(struct sk_buff *skb,
+ const struct ethtool_pse_control_status *st)
+{
+ const struct ethtool_c33_pse_pw_limit_range *pw_limit_ranges;
+ int i;
+
+ pw_limit_ranges = st->c33_pw_limit_ranges;
+ for (i = 0; i < st->c33_pw_limit_nb_ranges; i++) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN,
+ pw_limit_ranges->min) ||
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX,
+ pw_limit_ranges->max)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, nest);
+ pw_limit_ranges++;
+ }
+
+ return 0;
+}
+
static int pse_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct pse_reply_data *data = PSE_REPDATA(reply_base);
- const struct pse_control_status *st = &data->status;
+ const struct ethtool_pse_control_status *st = &data->status;
if (st->podl_admin_state > 0 &&
nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE,
@@ -103,48 +158,146 @@ static int pse_fill_reply(struct sk_buff *skb,
st->podl_pw_status))
return -EMSGSIZE;
+ if (st->c33_admin_state > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_ADMIN_STATE,
+ st->c33_admin_state))
+ return -EMSGSIZE;
+
+ if (st->c33_pw_status > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_D_STATUS,
+ st->c33_pw_status))
+ return -EMSGSIZE;
+
+ if (st->c33_pw_class > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_CLASS,
+ st->c33_pw_class))
+ return -EMSGSIZE;
+
+ if (st->c33_actual_pw > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_ACTUAL_PW,
+ st->c33_actual_pw))
+ return -EMSGSIZE;
+
+ if (st->c33_ext_state_info.c33_pse_ext_state > 0) {
+ if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_STATE,
+ st->c33_ext_state_info.c33_pse_ext_state))
+ return -EMSGSIZE;
+
+ if (st->c33_ext_state_info.__c33_pse_ext_substate > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_SUBSTATE,
+ st->c33_ext_state_info.__c33_pse_ext_substate))
+ return -EMSGSIZE;
+ }
+
+ if (st->c33_avail_pw_limit > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT,
+ st->c33_avail_pw_limit))
+ return -EMSGSIZE;
+
+ if (st->c33_pw_limit_nb_ranges > 0 &&
+ pse_put_pw_limit_ranges(skb, st))
+ return -EMSGSIZE;
+
return 0;
}
+static void pse_cleanup_data(struct ethnl_reply_data *reply_base)
+{
+ const struct pse_reply_data *data = PSE_REPDATA(reply_base);
+
+ kfree(data->status.c33_pw_limit_ranges);
+}
+
/* PSE_SET */
const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
- [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] =
NLA_POLICY_RANGE(NLA_U32, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED),
+ [ETHTOOL_A_C33_PSE_ADMIN_CONTROL] =
+ NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED),
+ [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 },
};
static int
-ethnl_set_pse_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+ethnl_set_pse_validate(struct phy_device *phydev, struct genl_info *info)
{
- return !!info->attrs[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL];
+ struct nlattr **tb = info->attrs;
+
+ if (IS_ERR_OR_NULL(phydev)) {
+ NL_SET_ERR_MSG(info->extack, "No PHY is attached");
+ return -EOPNOTSUPP;
+ }
+
+ if (!phydev->psec) {
+ NL_SET_ERR_MSG(info->extack, "No PSE is attached");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] &&
+ !pse_has_podl(phydev->psec)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL],
+ "setting PoDL PSE admin control not supported");
+ return -EOPNOTSUPP;
+ }
+ if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL] &&
+ !pse_has_c33(phydev->psec)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL],
+ "setting C33 PSE admin control not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static int
ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct net_device *dev = req_info->dev;
- struct pse_control_config config = {};
struct nlattr **tb = info->attrs;
struct phy_device *phydev;
+ int ret;
- /* this values are already validated by the ethnl_pse_set_policy */
- config.admin_cotrol = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
+ phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER,
+ info->extack);
+ ret = ethnl_set_pse_validate(phydev, info);
+ if (ret)
+ return ret;
- phydev = dev->phydev;
- if (!phydev) {
- NL_SET_ERR_MSG(info->extack, "No PHY is attached");
- return -EOPNOTSUPP;
+ if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) {
+ unsigned int pw_limit;
+
+ pw_limit = nla_get_u32(tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]);
+ ret = pse_ethtool_set_pw_limit(phydev->psec, info->extack,
+ pw_limit);
+ if (ret)
+ return ret;
}
- if (!phydev->psec) {
- NL_SET_ERR_MSG(info->extack, "No PSE is attached");
- return -EOPNOTSUPP;
+ /* These values are already validated by the ethnl_pse_set_policy */
+ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] ||
+ tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) {
+ struct pse_control_config config = {};
+
+ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL])
+ config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
+ if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL])
+ config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]);
+
+ /* pse_ethtool_set_config() will do nothing if the config
+ * is zero
+ */
+ ret = pse_ethtool_set_config(phydev->psec, info->extack,
+ &config);
+ if (ret)
+ return ret;
}
- /* Return errno directly - PSE has no notification */
- return pse_ethtool_set_config(phydev->psec, info->extack, &config);
+ /* Return errno or zero - PSE has no notification */
+ return ret;
}
const struct ethnl_request_ops ethnl_pse_request_ops = {
@@ -157,8 +310,8 @@ const struct ethnl_request_ops ethnl_pse_request_ops = {
.prepare_data = pse_prepare_data,
.reply_size = pse_reply_size,
.fill_reply = pse_fill_reply,
+ .cleanup_data = pse_cleanup_data,
- .set_validate = ethnl_set_pse_validate,
.set = ethnl_set_pse,
/* PSE has no notification */
};
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index b7865a14fdf8..aeedd5ec6b8c 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <net/netdev_queues.h>
+
#include "netlink.h"
#include "common.h"
@@ -37,6 +39,10 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
+
+ data->kernel_ringparam.tcp_data_split = dev->cfg->hds_config;
+ data->kernel_ringparam.hds_thresh = dev->cfg->hds_thresh;
+
dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
&data->kernel_ringparam, info->extack);
ethnl_ops_complete(dev);
@@ -61,7 +67,9 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */
nla_total_size(sizeof(u8))) + /* _RINGS_RX_PUSH */
nla_total_size(sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN */
- nla_total_size(sizeof(u32)); /* _RINGS_TX_PUSH_BUF_LEN_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_HDS_THRESH */
+ nla_total_size(sizeof(u32)); /* _RINGS_HDS_THRESH_MAX */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -108,7 +116,12 @@ static int rings_fill_reply(struct sk_buff *skb,
(nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
kr->tx_push_buf_max_len) ||
nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
- kr->tx_push_buf_len))))
+ kr->tx_push_buf_len))) ||
+ ((supported_ring_params & ETHTOOL_RING_USE_HDS_THRS) &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH,
+ kr->hds_thresh) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH_MAX,
+ kr->hds_thresh_max))))
return -EMSGSIZE;
return 0;
@@ -130,6 +143,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_HDS_THRESH] = { .type = NLA_U32 },
};
static int
@@ -155,6 +169,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
return -EOPNOTSUPP;
}
+ if (tb[ETHTOOL_A_RINGS_HDS_THRESH] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_HDS_THRS)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_HDS_THRESH],
+ "setting hds-thresh is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
!(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
NL_SET_ERR_MSG_ATTR(info->extack,
@@ -193,16 +215,16 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct kernel_ethtool_ringparam kernel_ringparam = {};
- struct ethtool_ringparam ringparam = {};
+ struct kernel_ethtool_ringparam kernel_ringparam;
struct net_device *dev = req_info->dev;
+ struct ethtool_ringparam ringparam;
struct nlattr **tb = info->attrs;
const struct nlattr *err_attr;
bool mod = false;
int ret;
- dev->ethtool_ops->get_ringparam(dev, &ringparam,
- &kernel_ringparam, info->extack);
+ ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam,
+ info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
@@ -222,9 +244,32 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
+ ethnl_update_u32(&kernel_ringparam.hds_thresh,
+ tb[ETHTOOL_A_RINGS_HDS_THRESH], &mod);
if (!mod)
return 0;
+ if (kernel_ringparam.tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ dev_xdp_sb_prog_count(dev)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
+ "tcp-data-split can not be enabled with single buffer XDP");
+ return -EINVAL;
+ }
+
+ if (dev_get_min_mp_channel_count(dev)) {
+ if (kernel_ringparam.tcp_data_split !=
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+ NL_SET_ERR_MSG(info->extack,
+ "can't disable tcp-data-split while device has memory provider enabled");
+ return -EINVAL;
+ } else if (kernel_ringparam.hds_thresh) {
+ NL_SET_ERR_MSG(info->extack,
+ "can't set non-zero hds_thresh while device is memory provider enabled");
+ return -EINVAL;
+ }
+ }
+
/* ensure new ring parameters are within limits */
if (ringparam.rx_pending > ringparam.rx_max_pending)
err_attr = tb[ETHTOOL_A_RINGS_RX];
@@ -234,6 +279,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
else if (ringparam.tx_pending > ringparam.tx_max_pending)
err_attr = tb[ETHTOOL_A_RINGS_TX];
+ else if (kernel_ringparam.hds_thresh > kernel_ringparam.hds_thresh_max)
+ err_attr = tb[ETHTOOL_A_RINGS_HDS_THRESH];
else
err_attr = NULL;
if (err_attr) {
@@ -250,6 +297,9 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
return -EINVAL;
}
+ dev->cfg_pending->hds_config = kernel_ringparam.tcp_data_split;
+ dev->cfg_pending->hds_thresh = kernel_ringparam.hds_thresh;
+
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, info->extack);
return ret < 0 ? ret : 1;
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 71679137eff2..58df9ad02ce8 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -10,6 +10,7 @@ struct rss_req_info {
struct rss_reply_data {
struct ethnl_reply_data base;
+ bool no_key_fields;
u32 indir_size;
u32 hkey_size;
u32 hfunc;
@@ -27,6 +28,7 @@ struct rss_reply_data {
const struct nla_policy ethnl_rss_get_policy[] = {
[ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32 },
+ [ETHTOOL_A_RSS_START_CONTEXT] = { .type = NLA_U32 },
};
static int
@@ -37,18 +39,18 @@ rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb,
if (tb[ETHTOOL_A_RSS_CONTEXT])
request->rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]);
+ if (tb[ETHTOOL_A_RSS_START_CONTEXT]) {
+ NL_SET_BAD_ATTR(extack, tb[ETHTOOL_A_RSS_START_CONTEXT]);
+ return -EINVAL;
+ }
return 0;
}
static int
-rss_prepare_data(const struct ethnl_req_info *req_base,
- struct ethnl_reply_data *reply_base,
- const struct genl_info *info)
+rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
{
- struct rss_reply_data *data = RSS_REPDATA(reply_base);
- struct rss_req_info *request = RSS_REQINFO(req_base);
- struct net_device *dev = reply_base->dev;
struct ethtool_rxfh_param rxfh = {};
const struct ethtool_ops *ops;
u32 total_size, indir_bytes;
@@ -56,12 +58,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
int ret;
ops = dev->ethtool_ops;
- if (!ops->get_rxfh)
- return -EOPNOTSUPP;
-
- /* Some drivers don't handle rss_context */
- if (request->rss_context && !ops->cap_rss_ctx_supported)
- return -EOPNOTSUPP;
ret = ethnl_ops_begin(dev);
if (ret < 0)
@@ -91,7 +87,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
rxfh.indir = data->indir_table;
rxfh.key_size = data->hkey_size;
rxfh.key = data->hkey;
- rxfh.rss_context = request->rss_context;
ret = ops->get_rxfh(dev, &rxfh);
if (ret)
@@ -105,13 +100,76 @@ out_ops:
}
static int
+rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ struct ethtool_rxfh_context *ctx;
+ u32 total_size, indir_bytes;
+ u8 *rss_config;
+
+ data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
+
+ ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
+ if (!ctx)
+ return -ENOENT;
+
+ data->indir_size = ctx->indir_size;
+ data->hkey_size = ctx->key_size;
+ data->hfunc = ctx->hfunc;
+ data->input_xfrm = ctx->input_xfrm;
+
+ indir_bytes = data->indir_size * sizeof(u32);
+ total_size = indir_bytes + data->hkey_size;
+ rss_config = kzalloc(total_size, GFP_KERNEL);
+ if (!rss_config)
+ return -ENOMEM;
+
+ data->indir_table = (u32 *)rss_config;
+ memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), indir_bytes);
+
+ if (data->hkey_size) {
+ data->hkey = rss_config + indir_bytes;
+ memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
+ data->hkey_size);
+ }
+
+ return 0;
+}
+
+static int
+rss_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ const struct genl_info *info)
+{
+ struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+ struct net_device *dev = reply_base->dev;
+ const struct ethtool_ops *ops;
+
+ ops = dev->ethtool_ops;
+ if (!ops->get_rxfh)
+ return -EOPNOTSUPP;
+
+ /* Some drivers don't handle rss_context */
+ if (request->rss_context) {
+ if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
+ return -EOPNOTSUPP;
+
+ return rss_prepare_ctx(request, dev, data, info);
+ }
+
+ return rss_prepare_get(request, dev, data, info);
+}
+
+static int
rss_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
int len;
- len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
+ len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */
+ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
nla_total_size(data->hkey_size); /* _RSS_HKEY */
@@ -124,14 +182,24 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+
+ if (request->rss_context &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
+ return -EMSGSIZE;
+
+ if ((data->indir_size &&
+ nla_put(skb, ETHTOOL_A_RSS_INDIR,
+ sizeof(u32) * data->indir_size, data->indir_table)))
+ return -EMSGSIZE;
+
+ if (data->no_key_fields)
+ return 0;
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
(data->input_xfrm &&
nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) ||
- (data->indir_size &&
- nla_put(skb, ETHTOOL_A_RSS_INDIR,
- sizeof(u32) * data->indir_size, data->indir_table)) ||
(data->hkey_size &&
nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))
return -EMSGSIZE;
@@ -146,6 +214,146 @@ static void rss_cleanup_data(struct ethnl_reply_data *reply_base)
kfree(data->indir_table);
}
+struct rss_nl_dump_ctx {
+ unsigned long ifindex;
+ unsigned long ctx_idx;
+
+ /* User wants to dump contexts only from a given ifindex */
+ unsigned int match_ifindex;
+ unsigned int start_ctx;
+};
+
+static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb)
+{
+ NL_ASSERT_CTX_FITS(struct rss_nl_dump_ctx);
+
+ return (struct rss_nl_dump_ctx *)cb->ctx;
+}
+
+int ethnl_rss_dump_start(struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ int ret;
+
+ /* Filtering by context not supported */
+ if (tb[ETHTOOL_A_RSS_CONTEXT]) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]);
+ return -EINVAL;
+ }
+ if (tb[ETHTOOL_A_RSS_START_CONTEXT]) {
+ ctx->start_ctx = nla_get_u32(tb[ETHTOOL_A_RSS_START_CONTEXT]);
+ ctx->ctx_idx = ctx->start_ctx;
+ }
+
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_RSS_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
+ if (req_info.dev) {
+ ctx->match_ifindex = req_info.dev->ifindex;
+ ctx->ifindex = ctx->match_ifindex;
+ ethnl_parse_header_dev_put(&req_info);
+ req_info.dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, u32 rss_context)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct rss_reply_data data = {};
+ struct rss_req_info req = {};
+ void *ehdr;
+ int ret;
+
+ req.rss_context = rss_context;
+
+ ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_RSS_GET_REPLY);
+ if (!ehdr)
+ return -EMSGSIZE;
+
+ ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_RSS_HEADER);
+ if (ret < 0)
+ goto err_cancel;
+
+ /* Context 0 is not currently stored or cached in the XArray */
+ if (!rss_context)
+ ret = rss_prepare_get(&req, dev, &data, info);
+ else
+ ret = rss_prepare_ctx(&req, dev, &data, info);
+ if (ret)
+ goto err_cancel;
+
+ ret = rss_fill_reply(skb, &req.base, &data.base);
+ if (ret)
+ goto err_cleanup;
+ genlmsg_end(skb, ehdr);
+
+ rss_cleanup_data(&data.base);
+ return 0;
+
+err_cleanup:
+ rss_cleanup_data(&data.base);
+err_cancel:
+ genlmsg_cancel(skb, ehdr);
+ return ret;
+}
+
+static int
+rss_dump_one_dev(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev)
+{
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh)
+ return 0;
+
+ if (!ctx->ctx_idx) {
+ ret = rss_dump_one_ctx(skb, cb, dev, 0);
+ if (ret)
+ return ret;
+ ctx->ctx_idx++;
+ }
+
+ for (; xa_find(&dev->ethtool->rss_ctx, &ctx->ctx_idx,
+ ULONG_MAX, XA_PRESENT); ctx->ctx_idx++) {
+ ret = rss_dump_one_ctx(skb, cb, dev, ctx->ctx_idx);
+ if (ret)
+ return ret;
+ }
+ ctx->ctx_idx = ctx->start_ctx;
+
+ return 0;
+}
+
+int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex)
+ break;
+
+ ret = rss_dump_one_dev(skb, cb, dev);
+ if (ret)
+ break;
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+
const struct ethnl_request_ops ethnl_rss_request_ops = {
.request_cmd = ETHTOOL_MSG_RSS_GET,
.reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY,
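
The RSS dump added above walks every device and, within each device, every additional RSS context, keeping its position in the netlink callback context so a second ->dumpit() pass resumes exactly where the previous reply skb filled up. Below is a minimal sketch of that resumable XArray walk; the xarray, entry type and emit callback are placeholders for illustration, not part of the patch.

#include <linux/xarray.h>

struct dump_pos {
	unsigned long ctx_idx;	/* survives between ->dumpit() passes */
};

static int walk_contexts(struct xarray *rss_ctx, struct dump_pos *pos,
			 int (*emit)(unsigned long idx, void *entry))
{
	void *entry;
	int ret;

	for (; (entry = xa_find(rss_ctx, &pos->ctx_idx, ULONG_MAX, XA_PRESENT));
	     pos->ctx_idx++) {
		ret = emit(pos->ctx_idx, entry);
		if (ret)	/* e.g. -EMSGSIZE: resume from ctx_idx next pass */
			return ret;
	}
	return 0;
}

Context 0 is handled separately (rss_prepare_get() instead of rss_prepare_ctx()) precisely because it is not stored in the XArray, which is why rss_dump_one_dev() emits it before entering the xa_find() loop.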
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index 912f0c4fff2f..3ca8eb2a3b31 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/phy.h>
+#include <linux/phylib_stubs.h>
+
#include "netlink.h"
#include "common.h"
#include "bitset.h"
@@ -20,6 +23,7 @@ struct stats_reply_data {
struct ethtool_eth_mac_stats mac_stats;
struct ethtool_eth_ctrl_stats ctrl_stats;
struct ethtool_rmon_stats rmon_stats;
+ struct ethtool_phy_stats phydev_stats;
);
const struct ethtool_rmon_hist_range *rmon_ranges;
};
@@ -32,6 +36,7 @@ const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN] = {
[ETHTOOL_STATS_ETH_MAC] = "eth-mac",
[ETHTOOL_STATS_ETH_CTRL] = "eth-ctrl",
[ETHTOOL_STATS_RMON] = "rmon",
+ [ETHTOOL_STATS_PHY] = "phydev",
};
const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN] = {
@@ -76,6 +81,15 @@ const char stats_rmon_names[__ETHTOOL_A_STATS_RMON_CNT][ETH_GSTRING_LEN] = {
[ETHTOOL_A_STATS_RMON_JABBER] = "etherStatsJabbers",
};
+const char stats_phy_names[__ETHTOOL_A_STATS_PHY_CNT][ETH_GSTRING_LEN] = {
+ [ETHTOOL_A_STATS_PHY_RX_PKTS] = "RxFrames",
+ [ETHTOOL_A_STATS_PHY_RX_BYTES] = "RxOctets",
+ [ETHTOOL_A_STATS_PHY_RX_ERRORS] = "RxErrors",
+ [ETHTOOL_A_STATS_PHY_TX_PKTS] = "TxFrames",
+ [ETHTOOL_A_STATS_PHY_TX_BYTES] = "TxOctets",
+ [ETHTOOL_A_STATS_PHY_TX_ERRORS] = "TxErrors",
+};
+
const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_SRC + 1] = {
[ETHTOOL_A_STATS_HEADER] =
NLA_POLICY_NESTED(ethnl_header_policy),
@@ -120,8 +134,15 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
struct stats_reply_data *data = STATS_REPDATA(reply_base);
enum ethtool_mac_stats_src src = req_info->src;
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER,
+ info->extack);
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
@@ -145,6 +166,14 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
data->ctrl_stats.src = src;
data->rmon_stats.src = src;
+ if ((test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask) ||
+ test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask)) &&
+ src == ETHTOOL_MAC_STATS_SRC_AGGREGATE) {
+ if (phydev)
+ phy_ethtool_get_phy_stats(phydev, &data->phy_stats,
+ &data->phydev_stats);
+ }
+
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
dev->ethtool_ops->get_eth_phy_stats)
dev->ethtool_ops->get_eth_phy_stats(dev, &data->phy_stats);
@@ -194,6 +223,10 @@ static int stats_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(4)) * /* _A_STATS_GRP_HIST_BKT_HI */
ETHTOOL_RMON_HIST_MAX * 2;
}
+ if (test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask)) {
+ n_stats += sizeof(struct ethtool_phy_stats) / sizeof(u64);
+ n_grps++;
+ }
len += n_grps * (nla_total_size(0) + /* _A_STATS_GRP */
nla_total_size(4) + /* _A_STATS_GRP_ID */
@@ -247,6 +280,25 @@ static int stats_put_phy_stats(struct sk_buff *skb,
return 0;
}
+static int stats_put_phydev_stats(struct sk_buff *skb,
+ const struct stats_reply_data *data)
+{
+ if (stat_put(skb, ETHTOOL_A_STATS_PHY_RX_PKTS,
+ data->phydev_stats.rx_packets) ||
+ stat_put(skb, ETHTOOL_A_STATS_PHY_RX_BYTES,
+ data->phydev_stats.rx_bytes) ||
+ stat_put(skb, ETHTOOL_A_STATS_PHY_RX_ERRORS,
+ data->phydev_stats.rx_errors) ||
+ stat_put(skb, ETHTOOL_A_STATS_PHY_TX_PKTS,
+ data->phydev_stats.tx_packets) ||
+ stat_put(skb, ETHTOOL_A_STATS_PHY_TX_BYTES,
+ data->phydev_stats.tx_bytes) ||
+ stat_put(skb, ETHTOOL_A_STATS_PHY_TX_ERRORS,
+ data->phydev_stats.tx_errors))
+ return -EMSGSIZE;
+ return 0;
+}
+
static int stats_put_mac_stats(struct sk_buff *skb,
const struct stats_reply_data *data)
{
@@ -423,6 +475,9 @@ static int stats_fill_reply(struct sk_buff *skb,
if (!ret && test_bit(ETHTOOL_STATS_RMON, req_info->stat_mask))
ret = stats_put_stats(skb, data, ETHTOOL_STATS_RMON,
ETH_SS_STATS_RMON, stats_put_rmon_stats);
+ if (!ret && test_bit(ETHTOOL_STATS_PHY, req_info->stat_mask))
+ ret = stats_put_stats(skb, data, ETHTOOL_STATS_PHY,
+ ETH_SS_STATS_PHY, stats_put_phydev_stats);
return ret;
}
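
The new "phydev" group reuses the existing stat_put() helper, which silently skips counters left at ETHTOOL_STAT_NOT_SET, so a PHY driver only needs to fill the counters its hardware actually maintains. A hypothetical driver-side sketch (names are illustrative, not from the patch):

#include <linux/ethtool.h>

static void example_fill_phy_stats(struct ethtool_phy_stats *stats)
{
	/* Mark everything "not set" first; unset counters are then
	 * omitted from the netlink reply by stat_put().
	 */
	ethtool_stats_init((u64 *)stats, sizeof(*stats) / sizeof(u64));

	stats->rx_packets = 1000;	/* fill only what the hardware counts */
	stats->tx_packets = 990;
	stats->rx_errors  = 3;
}

Assuming the installed ethtool supports group selection for -S, something like "ethtool -S eth0 --groups phydev" should report only this group; the exact option spelling depends on the ethtool version.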
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index c678b484a079..f6a67109beda 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -75,6 +75,11 @@ static const struct strset_info info_template[] = {
.count = __HWTSTAMP_FILTER_CNT,
.strings = ts_rx_filter_names,
},
+ [ETH_SS_TS_FLAGS] = {
+ .per_dev = false,
+ .count = __HWTSTAMP_FLAG_CNT,
+ .strings = ts_flags_names,
+ },
[ETH_SS_UDP_TUNNEL_TYPES] = {
.per_dev = false,
.count = __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
@@ -105,6 +110,11 @@ static const struct strset_info info_template[] = {
.count = __ETHTOOL_A_STATS_RMON_CNT,
.strings = stats_rmon_names,
},
+ [ETH_SS_STATS_PHY] = {
+ .per_dev = false,
+ .count = __ETHTOOL_A_STATS_PHY_CNT,
+ .strings = stats_phy_names,
+ },
};
struct strset_req_info {
@@ -126,7 +136,7 @@ struct strset_reply_data {
const struct nla_policy ethnl_strset_get_policy[] = {
[ETHTOOL_A_STRSET_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_STRSET_STRINGSETS] = { .type = NLA_NESTED },
[ETHTOOL_A_STRSET_COUNTS_ONLY] = { .type = NLA_FLAG },
};
@@ -233,17 +243,18 @@ static void strset_cleanup_data(struct ethnl_reply_data *reply_base)
}
static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
- unsigned int id, bool counts_only)
+ struct phy_device *phydev, unsigned int id,
+ bool counts_only)
{
const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
const struct ethtool_ops *ops = dev->ethtool_ops;
void *strings;
int count, ret;
- if (id == ETH_SS_PHY_STATS && dev->phydev &&
+ if (id == ETH_SS_PHY_STATS && phydev &&
!ops->get_ethtool_phy_stats && phy_ops &&
phy_ops->get_sset_count)
- ret = phy_ops->get_sset_count(dev->phydev);
+ ret = phy_ops->get_sset_count(phydev);
else if (ops->get_sset_count && ops->get_strings)
ret = ops->get_sset_count(dev, id);
else
@@ -258,10 +269,10 @@ static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
strings = kcalloc(count, ETH_GSTRING_LEN, GFP_KERNEL);
if (!strings)
return -ENOMEM;
- if (id == ETH_SS_PHY_STATS && dev->phydev &&
+ if (id == ETH_SS_PHY_STATS && phydev &&
!ops->get_ethtool_phy_stats && phy_ops &&
phy_ops->get_strings)
- phy_ops->get_strings(dev->phydev, strings);
+ phy_ops->get_strings(phydev, strings);
else
ops->get_strings(dev, id, strings);
info->strings = strings;
@@ -279,6 +290,8 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
const struct strset_req_info *req_info = STRSET_REQINFO(req_base);
struct strset_reply_data *data = STRSET_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
unsigned int i;
int ret;
@@ -289,14 +302,20 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
for (i = 0; i < ETH_SS_COUNT; i++) {
if ((req_info->req_ids & (1U << i)) &&
data->sets[i].per_dev) {
- if (info)
- GENL_SET_ERR_MSG(info, "requested per device strings without dev");
+ GENL_SET_ERR_MSG(info, "requested per device strings without dev");
return -EINVAL;
}
}
return 0;
}
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS,
+ info->extack);
+
+ /* phydev can be NULL, check for errors only */
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
goto err_strset;
@@ -305,7 +324,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
!data->sets[i].per_dev)
continue;
- ret = strset_prepare_set(&data->sets[i], dev, i,
+ ret = strset_prepare_set(&data->sets[i], dev, phydev, i,
req_info->counts_only);
if (ret < 0)
goto err_ops;
diff --git a/net/ethtool/ts.h b/net/ethtool/ts.h
new file mode 100644
index 000000000000..d901a879a671
--- /dev/null
+++ b/net/ethtool/ts.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _NET_ETHTOOL_TS_H
+#define _NET_ETHTOOL_TS_H
+
+#include "netlink.h"
+
+static const struct nla_policy
+ethnl_ts_hwtst_prov_policy[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_MAX + 1] = {
+ [ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER] =
+ NLA_POLICY_MAX(NLA_U32, HWTSTAMP_PROVIDER_QUALIFIER_CNT - 1)
+};
+
+int ts_parse_hwtst_provider(const struct nlattr *nest,
+ struct hwtstamp_provider_desc *hwprov_desc,
+ struct netlink_ext_ack *extack,
+ bool *mod);
+
+#endif /* _NET_ETHTOOL_TS_H */
diff --git a/net/ethtool/tsconfig.c b/net/ethtool/tsconfig.c
new file mode 100644
index 000000000000..2be356bdfe87
--- /dev/null
+++ b/net/ethtool/tsconfig.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+#include "../core/dev.h"
+#include "ts.h"
+
+struct tsconfig_req_info {
+ struct ethnl_req_info base;
+};
+
+struct tsconfig_reply_data {
+ struct ethnl_reply_data base;
+ struct hwtstamp_provider_desc hwprov_desc;
+ struct {
+ u32 tx_type;
+ u32 rx_filter;
+ u32 flags;
+ } hwtst_config;
+};
+
+#define TSCONFIG_REPDATA(__reply_base) \
+ container_of(__reply_base, struct tsconfig_reply_data, base)
+
+const struct nla_policy ethnl_tsconfig_get_policy[ETHTOOL_A_TSCONFIG_HEADER + 1] = {
+ [ETHTOOL_A_TSCONFIG_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int tsconfig_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ const struct genl_info *info)
+{
+ struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base);
+ struct hwtstamp_provider *hwprov = NULL;
+ struct net_device *dev = reply_base->dev;
+ struct kernel_hwtstamp_config cfg = {};
+ int ret;
+
+ if (!dev->netdev_ops->ndo_hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dev_get_hwtstamp_phylib(dev, &cfg);
+ if (ret)
+ goto out;
+
+ data->hwtst_config.tx_type = BIT(cfg.tx_type);
+ data->hwtst_config.rx_filter = BIT(cfg.rx_filter);
+ data->hwtst_config.flags = cfg.flags;
+
+ data->hwprov_desc.index = -1;
+ hwprov = rtnl_dereference(dev->hwprov);
+ if (hwprov) {
+ data->hwprov_desc.index = hwprov->desc.index;
+ data->hwprov_desc.qualifier = hwprov->desc.qualifier;
+ } else {
+ struct kernel_ethtool_ts_info ts_info = {};
+
+ ts_info.phc_index = -1;
+ ret = __ethtool_get_ts_info(dev, &ts_info);
+ if (ret)
+ goto out;
+
+ if (ts_info.phc_index == -1)
+ return -ENODEV;
+
+ data->hwprov_desc.index = ts_info.phc_index;
+ data->hwprov_desc.qualifier = ts_info.phc_qualifier;
+ }
+
+out:
+ ethnl_ops_complete(dev);
+ return ret;
+}
+
+static int tsconfig_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ int len = 0;
+ int ret;
+
+ BUILD_BUG_ON(__HWTSTAMP_TX_CNT > 32);
+ BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT > 32);
+ BUILD_BUG_ON(__HWTSTAMP_FLAG_CNT > 32);
+
+ if (data->hwtst_config.flags) {
+ ret = ethnl_bitset32_size(&data->hwtst_config.flags,
+ NULL, __HWTSTAMP_FLAG_CNT,
+ ts_flags_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSCONFIG_HWTSTAMP_FLAGS */
+ }
+
+ if (data->hwtst_config.tx_type) {
+ ret = ethnl_bitset32_size(&data->hwtst_config.tx_type,
+ NULL, __HWTSTAMP_TX_CNT,
+ ts_tx_type_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSCONFIG_TX_TYPES */
+ }
+ if (data->hwtst_config.rx_filter) {
+ ret = ethnl_bitset32_size(&data->hwtst_config.rx_filter,
+ NULL, __HWTSTAMP_FILTER_CNT,
+ ts_rx_filter_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSCONFIG_RX_FILTERS */
+ }
+
+ if (data->hwprov_desc.index >= 0)
+ /* _TSCONFIG_HWTSTAMP_PROVIDER */
+ len += nla_total_size(0) +
+ 2 * nla_total_size(sizeof(u32));
+
+ return len;
+}
+
+static int tsconfig_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct tsconfig_reply_data *data = TSCONFIG_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ int ret;
+
+ if (data->hwtst_config.flags) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS,
+ &data->hwtst_config.flags, NULL,
+ __HWTSTAMP_FLAG_CNT,
+ ts_flags_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (data->hwtst_config.tx_type) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_TX_TYPES,
+ &data->hwtst_config.tx_type, NULL,
+ __HWTSTAMP_TX_CNT,
+ ts_tx_type_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (data->hwtst_config.rx_filter) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSCONFIG_RX_FILTERS,
+ &data->hwtst_config.rx_filter,
+ NULL, __HWTSTAMP_FILTER_CNT,
+ ts_rx_filter_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (data->hwprov_desc.index >= 0) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX,
+ data->hwprov_desc.index) ||
+ nla_put_u32(skb,
+ ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER,
+ data->hwprov_desc.qualifier)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ }
+ return 0;
+}
+
+/* TSCONFIG_SET */
+const struct nla_policy ethnl_tsconfig_set_policy[ETHTOOL_A_TSCONFIG_MAX + 1] = {
+ [ETHTOOL_A_TSCONFIG_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER] =
+ NLA_POLICY_NESTED(ethnl_ts_hwtst_prov_policy),
+ [ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS] = { .type = NLA_NESTED },
+ [ETHTOOL_A_TSCONFIG_RX_FILTERS] = { .type = NLA_NESTED },
+ [ETHTOOL_A_TSCONFIG_TX_TYPES] = { .type = NLA_NESTED },
+};
+
+static int tsconfig_send_reply(struct net_device *dev, struct genl_info *info)
+{
+ struct tsconfig_reply_data *reply_data;
+ struct tsconfig_req_info *req_info;
+ struct sk_buff *rskb;
+ void *reply_payload;
+ int reply_len = 0;
+ int ret;
+
+ req_info = kzalloc(sizeof(*req_info), GFP_KERNEL);
+ if (!req_info)
+ return -ENOMEM;
+ reply_data = kmalloc(sizeof(*reply_data), GFP_KERNEL);
+ if (!reply_data) {
+ kfree(req_info);
+ return -ENOMEM;
+ }
+
+ ASSERT_RTNL();
+ reply_data->base.dev = dev;
+ ret = tsconfig_prepare_data(&req_info->base, &reply_data->base, info);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = tsconfig_reply_size(&req_info->base, &reply_data->base);
+ if (ret < 0)
+ goto err_cleanup;
+
+ reply_len = ret + ethnl_reply_header_size();
+ rskb = ethnl_reply_init(reply_len, dev, ETHTOOL_MSG_TSCONFIG_SET_REPLY,
+ ETHTOOL_A_TSCONFIG_HEADER, info, &reply_payload);
+ if (!rskb)
+ goto err_cleanup;
+
+ ret = tsconfig_fill_reply(rskb, &req_info->base, &reply_data->base);
+ if (ret < 0)
+ goto err_cleanup;
+
+ genlmsg_end(rskb, reply_payload);
+ ret = genlmsg_reply(rskb, info);
+
+err_cleanup:
+ kfree(reply_data);
+ kfree(req_info);
+ return ret;
+}
+
+static int ethnl_set_tsconfig_validate(struct ethnl_req_info *req_base,
+ struct genl_info *info)
+{
+ const struct net_device_ops *ops = req_base->dev->netdev_ops;
+
+ if (!ops->ndo_hwtstamp_set || !ops->ndo_hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ return 1;
+}
+
+static struct hwtstamp_provider *
+tsconfig_set_hwprov_from_desc(struct net_device *dev,
+ struct genl_info *info,
+ struct hwtstamp_provider_desc *hwprov_desc)
+{
+ struct kernel_ethtool_ts_info ts_info;
+ struct hwtstamp_provider *hwprov;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phy = NULL;
+ enum hwtstamp_source source;
+ int ret;
+
+ ret = ethtool_net_get_ts_info_by_phc(dev, &ts_info, hwprov_desc);
+ if (!ret) {
+ /* Found */
+ source = HWTSTAMP_SOURCE_NETDEV;
+ } else {
+ phy = ethtool_phy_get_ts_info_by_phc(dev, &ts_info, hwprov_desc);
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) == -ENODEV)
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER],
+ "phc not in this net device topology");
+ return ERR_CAST(phy);
+ }
+
+ source = HWTSTAMP_SOURCE_PHYLIB;
+ }
+
+ hwprov = kzalloc(sizeof(*hwprov), GFP_KERNEL);
+ if (!hwprov)
+ return ERR_PTR(-ENOMEM);
+
+ hwprov->desc.index = hwprov_desc->index;
+ hwprov->desc.qualifier = hwprov_desc->qualifier;
+ hwprov->source = source;
+ hwprov->phydev = phy;
+
+ return hwprov;
+}
+
+static int ethnl_set_tsconfig(struct ethnl_req_info *req_base,
+ struct genl_info *info)
+{
+ struct kernel_hwtstamp_config hwtst_config = {0};
+ bool hwprov_mod = false, config_mod = false;
+ struct hwtstamp_provider *hwprov = NULL;
+ struct net_device *dev = req_base->dev;
+ struct nlattr **tb = info->attrs;
+ int ret;
+
+ BUILD_BUG_ON(__HWTSTAMP_TX_CNT >= 32);
+ BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT >= 32);
+ BUILD_BUG_ON(__HWTSTAMP_FLAG_CNT > 32);
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+ if (tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER]) {
+ struct hwtstamp_provider_desc __hwprov_desc = {.index = -1};
+ struct hwtstamp_provider *__hwprov;
+
+ __hwprov = rtnl_dereference(dev->hwprov);
+ if (__hwprov) {
+ __hwprov_desc.index = __hwprov->desc.index;
+ __hwprov_desc.qualifier = __hwprov->desc.qualifier;
+ }
+
+ ret = ts_parse_hwtst_provider(tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER],
+ &__hwprov_desc, info->extack,
+ &hwprov_mod);
+ if (ret < 0)
+ return ret;
+
+ if (hwprov_mod) {
+ hwprov = tsconfig_set_hwprov_from_desc(dev, info,
+ &__hwprov_desc);
+ if (IS_ERR(hwprov))
+ return PTR_ERR(hwprov);
+ }
+ }
+
+ /* Get current hwtstamp config if we are not changing the
+ * hwtstamp source. It will be zeroed in the other case.
+ */
+ if (!hwprov_mod) {
+ ret = dev_get_hwtstamp_phylib(dev, &hwtst_config);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ goto err_free_hwprov;
+ }
+
+ /* Get the hwtstamp config from netlink */
+ if (tb[ETHTOOL_A_TSCONFIG_TX_TYPES]) {
+ u32 req_tx_type;
+
+ req_tx_type = BIT(hwtst_config.tx_type);
+ ret = ethnl_update_bitset32(&req_tx_type,
+ __HWTSTAMP_TX_CNT,
+ tb[ETHTOOL_A_TSCONFIG_TX_TYPES],
+ ts_tx_type_names, info->extack,
+ &config_mod);
+ if (ret < 0)
+ goto err_free_hwprov;
+
+ /* Select only one tx type at a time */
+ if (ffs(req_tx_type) != fls(req_tx_type)) {
+ ret = -EINVAL;
+ goto err_free_hwprov;
+ }
+
+ hwtst_config.tx_type = ffs(req_tx_type) - 1;
+ }
+
+ if (tb[ETHTOOL_A_TSCONFIG_RX_FILTERS]) {
+ u32 req_rx_filter;
+
+ req_rx_filter = BIT(hwtst_config.rx_filter);
+ ret = ethnl_update_bitset32(&req_rx_filter,
+ __HWTSTAMP_FILTER_CNT,
+ tb[ETHTOOL_A_TSCONFIG_RX_FILTERS],
+ ts_rx_filter_names, info->extack,
+ &config_mod);
+ if (ret < 0)
+ goto err_free_hwprov;
+
+ /* Select only one rx filter at a time */
+ if (ffs(req_rx_filter) != fls(req_rx_filter)) {
+ ret = -EINVAL;
+ goto err_free_hwprov;
+ }
+
+ hwtst_config.rx_filter = ffs(req_rx_filter) - 1;
+ }
+
+ if (tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS]) {
+ ret = ethnl_update_bitset32(&hwtst_config.flags,
+ __HWTSTAMP_FLAG_CNT,
+ tb[ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS],
+ ts_flags_names, info->extack,
+ &config_mod);
+ if (ret < 0)
+ goto err_free_hwprov;
+ }
+
+ ret = net_hwtstamp_validate(&hwtst_config);
+ if (ret)
+ goto err_free_hwprov;
+
+ if (hwprov_mod) {
+ struct kernel_hwtstamp_config zero_config = {0};
+ struct hwtstamp_provider *__hwprov;
+
+ /* Disable current time stamping if we try to enable
+ * another one
+ */
+ ret = dev_set_hwtstamp_phylib(dev, &zero_config, info->extack);
+ if (ret < 0)
+ goto err_free_hwprov;
+
+ /* Change the selected hwtstamp source */
+ __hwprov = rcu_replace_pointer_rtnl(dev->hwprov, hwprov);
+ if (__hwprov)
+ kfree_rcu(__hwprov, rcu_head);
+ }
+
+ if (config_mod) {
+ ret = dev_set_hwtstamp_phylib(dev, &hwtst_config,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (hwprov_mod || config_mod) {
+ ret = tsconfig_send_reply(dev, info);
+ if (ret && ret != -EOPNOTSUPP) {
+ NL_SET_ERR_MSG(info->extack,
+ "error while reading the new configuration set");
+ return ret;
+ }
+ }
+
+ /* tsconfig has no notification */
+ return 0;
+
+err_free_hwprov:
+ kfree(hwprov);
+
+ return ret;
+}
+
+const struct ethnl_request_ops ethnl_tsconfig_request_ops = {
+ .request_cmd = ETHTOOL_MSG_TSCONFIG_GET,
+ .reply_cmd = ETHTOOL_MSG_TSCONFIG_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_TSCONFIG_HEADER,
+ .req_info_size = sizeof(struct tsconfig_req_info),
+ .reply_data_size = sizeof(struct tsconfig_reply_data),
+
+ .prepare_data = tsconfig_prepare_data,
+ .reply_size = tsconfig_reply_size,
+ .fill_reply = tsconfig_fill_reply,
+
+ .set_validate = ethnl_set_tsconfig_validate,
+ .set = ethnl_set_tsconfig,
+};
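
ethnl_set_tsconfig() above rejects requests that would enable more than one TX type or RX filter at once by checking ffs() against fls(): for a non-zero word, the lowest and highest set bit coincide exactly when a single bit is set. A small userspace illustration of that check (not part of the patch):

#include <stdio.h>
#include <strings.h>		/* ffs() */

static int fls32(unsigned int x)	/* highest set bit, 1-based; 0 if none */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int single = 1u << 2;			/* one requested type  */
	unsigned int multi  = (1u << 1) | (1u << 2);	/* two requested types */

	printf("single bit: %s\n",
	       ffs(single) == fls32(single) ? "accepted" : "-EINVAL");
	printf("two bits:   %s\n",
	       ffs(multi) == fls32(multi) ? "accepted" : "-EINVAL");
	return 0;
}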
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
index 9daed0aab162..ad3866c5a902 100644
--- a/net/ethtool/tsinfo.c
+++ b/net/ethtool/tsinfo.c
@@ -1,39 +1,116 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy_link_topology.h>
+#include <linux/ptp_clock_kernel.h>
#include "netlink.h"
#include "common.h"
#include "bitset.h"
+#include "ts.h"
struct tsinfo_req_info {
struct ethnl_req_info base;
+ struct hwtstamp_provider_desc hwprov_desc;
};
struct tsinfo_reply_data {
struct ethnl_reply_data base;
- struct ethtool_ts_info ts_info;
+ struct kernel_ethtool_ts_info ts_info;
+ struct ethtool_ts_stats stats;
};
+#define TSINFO_REQINFO(__req_base) \
+ container_of(__req_base, struct tsinfo_req_info, base)
+
#define TSINFO_REPDATA(__reply_base) \
container_of(__reply_base, struct tsinfo_reply_data, base)
-const struct nla_policy ethnl_tsinfo_get_policy[] = {
+#define ETHTOOL_TS_STAT_CNT \
+ (__ETHTOOL_A_TS_STAT_CNT - (ETHTOOL_A_TS_STAT_UNSPEC + 1))
+
+const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1] = {
[ETHTOOL_A_TSINFO_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_stats),
+ [ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER] =
+ NLA_POLICY_NESTED(ethnl_ts_hwtst_prov_policy),
};
+int ts_parse_hwtst_provider(const struct nlattr *nest,
+ struct hwtstamp_provider_desc *hwprov_desc,
+ struct netlink_ext_ack *extack,
+ bool *mod)
+{
+ struct nlattr *tb[ARRAY_SIZE(ethnl_ts_hwtst_prov_policy)];
+ int ret;
+
+ ret = nla_parse_nested(tb,
+ ARRAY_SIZE(ethnl_ts_hwtst_prov_policy) - 1,
+ nest,
+ ethnl_ts_hwtst_prov_policy, extack);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(extack, nest, tb,
+ ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX) ||
+ NL_REQ_ATTR_CHECK(extack, nest, tb,
+ ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER))
+ return -EINVAL;
+
+ ethnl_update_u32(&hwprov_desc->index,
+ tb[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX],
+ mod);
+ ethnl_update_u32(&hwprov_desc->qualifier,
+ tb[ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER],
+ mod);
+
+ return 0;
+}
+
+static int
+tsinfo_parse_request(struct ethnl_req_info *req_base, struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct tsinfo_req_info *req = TSINFO_REQINFO(req_base);
+ bool mod = false;
+
+ req->hwprov_desc.index = -1;
+
+ if (!tb[ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER])
+ return 0;
+
+ return ts_parse_hwtst_provider(tb[ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER],
+ &req->hwprov_desc, extack, &mod);
+}
+
static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
const struct genl_info *info)
{
struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
+ struct tsinfo_req_info *req = TSINFO_REQINFO(req_base);
struct net_device *dev = reply_base->dev;
int ret;
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
+
+ if (req->hwprov_desc.index != -1) {
+ ret = ethtool_get_ts_info_by_phc(dev, &data->ts_info,
+ &req->hwprov_desc);
+ ethnl_ops_complete(dev);
+ return ret;
+ }
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS) {
+ ethtool_stats_init((u64 *)&data->stats,
+ sizeof(data->stats) / sizeof(u64));
+ if (dev->ethtool_ops->get_ts_stats)
+ dev->ethtool_ops->get_ts_stats(dev, &data->stats);
+ }
+
ret = __ethtool_get_ts_info(dev, &data->ts_info);
ethnl_ops_complete(dev);
@@ -45,7 +122,7 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base,
{
const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
- const struct ethtool_ts_info *ts_info = &data->ts_info;
+ const struct kernel_ethtool_ts_info *ts_info = &data->ts_info;
int len = 0;
int ret;
@@ -77,19 +154,61 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base,
return ret;
len += ret; /* _TSINFO_RX_FILTERS */
}
- if (ts_info->phc_index >= 0)
+ if (ts_info->phc_index >= 0) {
len += nla_total_size(sizeof(u32)); /* _TSINFO_PHC_INDEX */
+ /* _TSINFO_HWTSTAMP_PROVIDER */
+ len += nla_total_size(0) + 2 * nla_total_size(sizeof(u32));
+ }
+ if (req_base->flags & ETHTOOL_FLAG_STATS)
+ len += nla_total_size(0) + /* _TSINFO_STATS */
+ nla_total_size_64bit(sizeof(u64)) * ETHTOOL_TS_STAT_CNT;
return len;
}
+static int tsinfo_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
+{
+ if (val == ETHTOOL_STAT_NOT_SET)
+ return 0;
+ if (nla_put_uint(skb, attrtype, val))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int tsinfo_put_stats(struct sk_buff *skb,
+ const struct ethtool_ts_stats *stats)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_TSINFO_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (tsinfo_put_stat(skb, stats->tx_stats.pkts,
+ ETHTOOL_A_TS_STAT_TX_PKTS) ||
+ tsinfo_put_stat(skb, stats->tx_stats.onestep_pkts_unconfirmed,
+ ETHTOOL_A_TS_STAT_TX_ONESTEP_PKTS_UNCONFIRMED) ||
+ tsinfo_put_stat(skb, stats->tx_stats.lost,
+ ETHTOOL_A_TS_STAT_TX_LOST) ||
+ tsinfo_put_stat(skb, stats->tx_stats.err,
+ ETHTOOL_A_TS_STAT_TX_ERR))
+ goto err_cancel;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int tsinfo_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
- const struct ethtool_ts_info *ts_info = &data->ts_info;
+ const struct kernel_ethtool_ts_info *ts_info = &data->ts_info;
int ret;
if (ts_info->so_timestamping) {
@@ -116,13 +235,294 @@ static int tsinfo_fill_reply(struct sk_buff *skb,
if (ret < 0)
return ret;
}
- if (ts_info->phc_index >= 0 &&
- nla_put_u32(skb, ETHTOOL_A_TSINFO_PHC_INDEX, ts_info->phc_index))
+ if (ts_info->phc_index >= 0) {
+ struct nlattr *nest;
+
+ ret = nla_put_u32(skb, ETHTOOL_A_TSINFO_PHC_INDEX,
+ ts_info->phc_index);
+ if (ret)
+ return -EMSGSIZE;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX,
+ ts_info->phc_index) ||
+ nla_put_u32(skb,
+ ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER,
+ ts_info->phc_qualifier)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ }
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ tsinfo_put_stats(skb, &data->stats))
return -EMSGSIZE;
return 0;
}
+struct ethnl_tsinfo_dump_ctx {
+ struct tsinfo_req_info *req_info;
+ struct tsinfo_reply_data *reply_data;
+ unsigned long pos_ifindex;
+ bool netdev_dump_done;
+ unsigned long pos_phyindex;
+ enum hwtstamp_provider_qualifier pos_phcqualifier;
+};
+
+static void *ethnl_tsinfo_prepare_dump(struct sk_buff *skb,
+ struct net_device *dev,
+ struct tsinfo_reply_data *reply_data,
+ struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ void *ehdr = NULL;
+
+ ehdr = ethnl_dump_put(skb, cb,
+ ETHTOOL_MSG_TSINFO_GET_REPLY);
+ if (!ehdr)
+ return ERR_PTR(-EMSGSIZE);
+
+ reply_data = ctx->reply_data;
+ memset(reply_data, 0, sizeof(*reply_data));
+ reply_data->base.dev = dev;
+ reply_data->ts_info.cmd = ETHTOOL_GET_TS_INFO;
+ reply_data->ts_info.phc_index = -1;
+
+ return ehdr;
+}
+
+static int ethnl_tsinfo_end_dump(struct sk_buff *skb,
+ struct net_device *dev,
+ struct tsinfo_req_info *req_info,
+ struct tsinfo_reply_data *reply_data,
+ void *ehdr)
+{
+ int ret;
+
+ reply_data->ts_info.so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TSINFO_HEADER);
+ if (ret < 0)
+ return ret;
+
+ ret = tsinfo_fill_reply(skb, &req_info->base, &reply_data->base);
+ if (ret < 0)
+ return ret;
+
+ reply_data->base.dev = NULL;
+ genlmsg_end(skb, ehdr);
+
+ return ret;
+}
+
+static int ethnl_tsinfo_dump_one_phydev(struct sk_buff *skb,
+ struct net_device *dev,
+ struct phy_device *phydev,
+ struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ struct tsinfo_reply_data *reply_data;
+ struct tsinfo_req_info *req_info;
+ void *ehdr = NULL;
+ int ret = 0;
+
+ if (!phy_has_tsinfo(phydev))
+ return -EOPNOTSUPP;
+
+ reply_data = ctx->reply_data;
+ req_info = ctx->req_info;
+ ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb);
+ if (IS_ERR(ehdr))
+ return PTR_ERR(ehdr);
+
+ ret = phy_ts_info(phydev, &reply_data->ts_info);
+ if (ret < 0)
+ goto err;
+
+ ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data, ehdr);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+err:
+ genlmsg_cancel(skb, ehdr);
+ return ret;
+}
+
+static int ethnl_tsinfo_dump_one_netdev(struct sk_buff *skb,
+ struct net_device *dev,
+ struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct tsinfo_reply_data *reply_data;
+ struct tsinfo_req_info *req_info;
+ void *ehdr = NULL;
+ int ret = 0;
+
+ if (!ops->get_ts_info)
+ return -EOPNOTSUPP;
+
+ reply_data = ctx->reply_data;
+ req_info = ctx->req_info;
+ for (; ctx->pos_phcqualifier < HWTSTAMP_PROVIDER_QUALIFIER_CNT;
+ ctx->pos_phcqualifier++) {
+ if (!net_support_hwtstamp_qualifier(dev,
+ ctx->pos_phcqualifier))
+ continue;
+
+ ehdr = ethnl_tsinfo_prepare_dump(skb, dev, reply_data, cb);
+ if (IS_ERR(ehdr)) {
+ ret = PTR_ERR(ehdr);
+ goto err;
+ }
+
+ reply_data->ts_info.phc_qualifier = ctx->pos_phcqualifier;
+ ret = ops->get_ts_info(dev, &reply_data->ts_info);
+ if (ret < 0)
+ goto err;
+
+ ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data,
+ ehdr);
+ if (ret < 0)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ genlmsg_cancel(skb, ehdr);
+ return ret;
+}
+
+static int ethnl_tsinfo_dump_one_net_topo(struct sk_buff *skb,
+ struct net_device *dev,
+ struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ struct phy_device_node *pdn;
+ int ret = 0;
+
+ if (!ctx->netdev_dump_done) {
+ ret = ethnl_tsinfo_dump_one_netdev(skb, dev, cb);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ return ret;
+ ctx->netdev_dump_done = true;
+ }
+
+ if (!dev->link_topo) {
+ if (phy_has_tsinfo(dev->phydev)) {
+ ret = ethnl_tsinfo_dump_one_phydev(skb, dev,
+ dev->phydev, cb);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ xa_for_each_start(&dev->link_topo->phys, ctx->pos_phyindex, pdn,
+ ctx->pos_phyindex) {
+ if (phy_has_tsinfo(pdn->phy)) {
+ ret = ethnl_tsinfo_dump_one_phydev(skb, dev,
+ pdn->phy, cb);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+ if (ctx->req_info->base.dev) {
+ ret = ethnl_tsinfo_dump_one_net_topo(skb,
+ ctx->req_info->base.dev,
+ cb);
+ } else {
+ for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
+ ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ break;
+ ctx->pos_phyindex = 0;
+ ctx->netdev_dump_done = false;
+ ctx->pos_phcqualifier = HWTSTAMP_PROVIDER_QUALIFIER_PRECISE;
+ }
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+
+int ethnl_tsinfo_start(struct netlink_callback *cb)
+{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ struct nlattr **tb = info->info.attrs;
+ struct tsinfo_reply_data *reply_data;
+ struct tsinfo_req_info *req_info;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+
+ req_info = kzalloc(sizeof(*req_info), GFP_KERNEL);
+ if (!req_info)
+ return -ENOMEM;
+ reply_data = kzalloc(sizeof(*reply_data), GFP_KERNEL);
+ if (!reply_data) {
+ ret = -ENOMEM;
+ goto free_req_info;
+ }
+
+ ret = ethnl_parse_header_dev_get(&req_info->base,
+ tb[ETHTOOL_A_TSINFO_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
+ if (ret < 0)
+ goto free_reply_data;
+
+ ctx->req_info = req_info;
+ ctx->reply_data = reply_data;
+ ctx->pos_ifindex = 0;
+ ctx->pos_phyindex = 0;
+ ctx->netdev_dump_done = false;
+ ctx->pos_phcqualifier = HWTSTAMP_PROVIDER_QUALIFIER_PRECISE;
+
+ return 0;
+
+free_reply_data:
+ kfree(reply_data);
+free_req_info:
+ kfree(req_info);
+
+ return ret;
+}
+
+int ethnl_tsinfo_done(struct netlink_callback *cb)
+{
+ struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;
+ struct tsinfo_req_info *req_info = ctx->req_info;
+
+ ethnl_parse_header_dev_put(&req_info->base);
+ kfree(ctx->reply_data);
+ kfree(ctx->req_info);
+
+ return 0;
+}
+
const struct ethnl_request_ops ethnl_tsinfo_request_ops = {
.request_cmd = ETHTOOL_MSG_TSINFO_GET,
.reply_cmd = ETHTOOL_MSG_TSINFO_GET_REPLY,
@@ -130,6 +530,7 @@ const struct ethnl_request_ops ethnl_tsinfo_request_ops = {
.req_info_size = sizeof(struct tsinfo_req_info),
.reply_data_size = sizeof(struct tsinfo_reply_data),
+ .parse_request = tsinfo_parse_request,
.prepare_data = tsinfo_prepare_data,
.reply_size = tsinfo_reply_size,
.fill_reply = tsinfo_fill_reply,
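
The tsinfo dump added above keeps three cursors in the callback context (pos_ifindex, pos_phyindex, pos_phcqualifier) and resets the inner ones once a device has been fully walked, so a multi-part dump resumes exactly where the previous skb ran out of room. A stripped-down sketch of that nested-cursor pattern (names are illustrative only):

struct nested_pos {
	unsigned long outer;	/* device cursor, kept across dump passes */
	unsigned long inner;	/* per-device cursor */
};

static int dump_all(struct nested_pos *pos,
		    unsigned long n_outer, unsigned long n_inner,
		    int (*emit)(unsigned long o, unsigned long i))
{
	for (; pos->outer < n_outer; pos->outer++) {
		for (; pos->inner < n_inner; pos->inner++) {
			int ret = emit(pos->outer, pos->inner);

			if (ret)	/* out of room: resume from (outer, inner) */
				return ret;
		}
		pos->inner = 0;		/* next device starts from the beginning */
	}
	return 0;
}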
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index 0ed56c9ac1bc..a39d8000d808 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -137,7 +137,7 @@ ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info)
ret = dev->ethtool_ops->set_wol(dev, &wol);
if (ret)
return ret;
- dev->wol_enabled = !!wol.wolopts;
+ dev->ethtool->wol_enabled = !!wol.wolopts;
return 1;
}
diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c
index 34fd1d9b2db8..55442b2f518a 100644
--- a/net/handshake/handshake-test.c
+++ b/net/handshake/handshake-test.c
@@ -17,7 +17,7 @@
#include <uapi/linux/handshake.h>
#include "handshake.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static int test_accept_func(struct handshake_req *req, struct genl_info *info,
int fd)
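
The one-line change above follows the treewide switch of the symbol-namespace macros to string literals; the exporting side uses the same quoted form. A minimal sketch with a hypothetical symbol and namespace (not from this patch):

#include <linux/export.h>
#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_helper, "EXAMPLE_NS");

/* ...and in a module that wants to call example_helper(): */
MODULE_IMPORT_NS("EXAMPLE_NS");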
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index 89637e732866..7e46d130dce2 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -153,7 +153,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
if (!req) {
err = -EBUSY;
trace_handshake_cmd_done_err(net, req, sock->sk, err);
- fput(sock->file);
+ sockfd_put(sock);
return err;
}
@@ -164,7 +164,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
status = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_STATUS]);
handshake_complete(req, status, info);
- fput(sock->file);
+ sockfd_put(sock);
return 0;
}
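
Replacing the raw fput() with sockfd_put() pairs the release with the sockfd_lookup() that took the reference in the first place. A minimal sketch of the pattern (hypothetical caller, error handling trimmed):

#include <linux/net.h>

static int with_socket_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);	/* takes a reference on sock->file */
	if (!sock)
		return err;

	/* ... use sock ... */

	sockfd_put(sock);		/* drops that reference */
	return 0;
}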
diff --git a/net/handshake/request.c b/net/handshake/request.c
index 94d5cef3e048..274d2c89b6b2 100644
--- a/net/handshake/request.c
+++ b/net/handshake/request.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
-#include <linux/fdtable.h>
#include <linux/rhashtable.h>
#include <net/sock.h>
diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c
index d697f68c598c..d6f52839827e 100644
--- a/net/handshake/tlshd.c
+++ b/net/handshake/tlshd.c
@@ -213,7 +213,6 @@ static int tls_handshake_accept(struct handshake_req *req,
if (!hdr)
goto out_cancel;
- ret = -EMSGSIZE;
ret = nla_put_s32(msg, HANDSHAKE_A_ACCEPT_SOCKFD, fd);
if (ret < 0)
goto out_cancel;
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 1a195efc79cd..5b2cfac3b2ba 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -57,14 +57,11 @@ DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
void hsr_debugfs_rename(struct net_device *dev)
{
struct hsr_priv *priv = netdev_priv(dev);
- struct dentry *d;
+ int err;
- d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
- hsr_debugfs_root_dir, dev->name);
- if (IS_ERR(d))
+ err = debugfs_change_name(priv->node_tbl_root, "%s", dev->name);
+ if (err)
netdev_warn(dev, "failed to rename\n");
- else
- priv->node_tbl_root = d;
}
/* hsr_debugfs_init - create hsr node_table file for dumping
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 9d71b66183da..b6fb18469439 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -28,29 +28,19 @@ static bool is_slave_up(struct net_device *dev)
return dev && is_admin_up(dev) && netif_oper_up(dev);
}
-static void __hsr_set_operstate(struct net_device *dev, int transition)
-{
- write_lock(&dev_base_lock);
- if (dev->operstate != transition) {
- dev->operstate = transition;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- } else {
- write_unlock(&dev_base_lock);
- }
-}
-
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
- if (!is_admin_up(master->dev)) {
- __hsr_set_operstate(master->dev, IF_OPER_DOWN);
+ struct net_device *dev = master->dev;
+
+ if (!is_admin_up(dev)) {
+ netdev_set_operstate(dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
- __hsr_set_operstate(master->dev, IF_OPER_UP);
+ netdev_set_operstate(dev, IF_OPER_UP);
else
- __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
+ netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
@@ -71,39 +61,42 @@ static bool hsr_check_carrier(struct hsr_port *master)
return false;
}
-static void hsr_check_announce(struct net_device *hsr_dev,
- unsigned char old_operstate)
+static void hsr_check_announce(struct net_device *hsr_dev)
{
struct hsr_priv *hsr;
hsr = netdev_priv(hsr_dev);
+ if (netif_running(hsr_dev) && netif_oper_up(hsr_dev)) {
+ /* Enable announce timer and start sending supervisory frames */
+ if (!timer_pending(&hsr->announce_timer)) {
+ hsr->announce_count = 0;
+ mod_timer(&hsr->announce_timer, jiffies +
+ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
+ }
- if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
- /* Went up */
- hsr->announce_count = 0;
- mod_timer(&hsr->announce_timer,
- jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
+ if (hsr->redbox && !timer_pending(&hsr->announce_proxy_timer))
+ mod_timer(&hsr->announce_proxy_timer, jiffies +
+ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL) / 2);
+ } else {
+ /* Deactivate the announce timer */
+ timer_delete(&hsr->announce_timer);
+ if (hsr->redbox)
+ timer_delete(&hsr->announce_proxy_timer);
}
-
- if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
- /* Went down */
- del_timer(&hsr->announce_timer);
}
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
struct hsr_port *master;
- unsigned char old_operstate;
bool has_carrier;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
- old_operstate = master->dev->operstate;
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
- hsr_check_announce(master->dev, old_operstate);
+ hsr_check_announce(master->dev);
}
int hsr_get_max_mtu(struct hsr_priv *hsr)
@@ -133,7 +126,7 @@ static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -142,30 +135,32 @@ static int hsr_dev_open(struct net_device *dev)
{
struct hsr_priv *hsr;
struct hsr_port *port;
- char designation;
+ const char *designation = NULL;
hsr = netdev_priv(dev);
- designation = '\0';
hsr_for_each_port(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
case HSR_PT_SLAVE_A:
- designation = 'A';
+ designation = "Slave A";
break;
case HSR_PT_SLAVE_B:
- designation = 'B';
+ designation = "Slave B";
+ break;
+ case HSR_PT_INTERLINK:
+ designation = "Interlink";
break;
default:
- designation = '?';
+ designation = "Unknown";
}
if (!is_slave_up(port->dev))
- netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+ netdev_warn(dev, "%s (%s) is not up; please bring it up to get a fully working HSR network\n",
designation, port->dev->name);
}
- if (designation == '\0')
+ if (!designation)
netdev_warn(dev, "No slave devices configured\n");
return 0;
@@ -251,20 +246,22 @@ static const struct header_ops hsr_header_ops = {
.parse = eth_header_parse,
};
-static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+static struct sk_buff *hsr_init_skb(struct hsr_port *master, int extra)
{
struct hsr_priv *hsr = master->hsr;
struct sk_buff *skb;
int hlen, tlen;
+ int len;
hlen = LL_RESERVED_SPACE(master->dev);
tlen = master->dev->needed_tailroom;
+ len = sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload);
/* skb size is same for PRP/HSR frames, only difference
* being, for PRP it is a trailer and for HSR it is a
- * header
+ * header.
+ * RedBox might use @extra more bytes.
*/
- skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
- sizeof(struct hsr_sup_payload) + hlen + tlen);
+ skb = dev_alloc_skb(len + extra + hlen + tlen);
if (!skb)
return skb;
@@ -273,6 +270,8 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
skb->dev = master->dev;
skb->priority = TC_PRIO_CONTROL;
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
hsr->sup_multicast_addr,
skb->dev->dev_addr, skb->len) <= 0)
@@ -280,8 +279,6 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
return skb;
out:
@@ -290,14 +287,17 @@ out:
return NULL;
}
-static void send_hsr_supervision_frame(struct hsr_port *master,
- unsigned long *interval)
+static void send_hsr_supervision_frame(struct hsr_port *port,
+ unsigned long *interval,
+ const unsigned char *addr)
{
- struct hsr_priv *hsr = master->hsr;
+ struct hsr_priv *hsr = port->hsr;
__u8 type = HSR_TLV_LIFE_CHECK;
struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tlv *hsr_stlv;
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
+ int extra = 0;
*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
if (hsr->announce_count < 3 && hsr->prot_version == 0) {
@@ -306,9 +306,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr->announce_count++;
}
- skb = hsr_init_skb(master);
+ if (hsr->redbox)
+ extra = sizeof(struct hsr_sup_tlv) +
+ sizeof(struct hsr_sup_payload);
+
+ skb = hsr_init_skb(port, extra);
if (!skb) {
- netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
+ netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
return;
}
@@ -331,29 +335,41 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
sizeof(struct hsr_sup_payload) : 12;
- /* Payload: MacAddressA */
+ /* Payload: MacAddressA / SAN MAC from ProxyNodeTable */
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
- ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+ ether_addr_copy(hsr_sp->macaddress_A, addr);
+
+ if (hsr->redbox &&
+ hsr_is_node_in_db(&hsr->proxy_node_db, addr)) {
+ hsr_stlv = skb_put(skb, sizeof(struct hsr_sup_tlv));
+ hsr_stlv->HSR_TLV_type = PRP_TLV_REDBOX_MAC;
+ hsr_stlv->HSR_TLV_length = sizeof(struct hsr_sup_payload);
+
+ /* Payload: MacAddressRedBox */
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ ether_addr_copy(hsr_sp->macaddress_A, hsr->macaddress_redbox);
+ }
if (skb_put_padto(skb, ETH_ZLEN)) {
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
- hsr_forward_skb(skb, master);
+ hsr_forward_skb(skb, port);
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
static void send_prp_supervision_frame(struct hsr_port *master,
- unsigned long *interval)
+ unsigned long *interval,
+ const unsigned char *addr)
{
struct hsr_priv *hsr = master->hsr;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
- skb = hsr_init_skb(master);
+ skb = hsr_init_skb(master, 0);
if (!skb) {
netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
return;
@@ -396,7 +412,7 @@ static void hsr_announce(struct timer_list *t)
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
- hsr->proto_ops->send_sv_frame(master, &interval);
+ hsr->proto_ops->send_sv_frame(master, &interval, master->dev->dev_addr);
if (is_admin_up(master->dev))
mod_timer(&hsr->announce_timer, jiffies + interval);
@@ -404,6 +420,41 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock();
}
+/* Announce (supervision frame) timer function for RedBox
+ */
+static void hsr_proxy_announce(struct timer_list *t)
+{
+ struct hsr_priv *hsr = from_timer(hsr, t, announce_proxy_timer);
+ struct hsr_port *interlink;
+ unsigned long interval = 0;
+ struct hsr_node *node;
+
+ rcu_read_lock();
+ /* RedBox sends supervisory frames to HSR network with MAC addresses
+ * of SAN nodes stored in ProxyNodeTable.
+ */
+ interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
+ if (!interlink)
+ goto done;
+
+ list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
+ if (hsr_addr_is_redbox(hsr, node->macaddress_A))
+ continue;
+ hsr->proto_ops->send_sv_frame(interlink, &interval,
+ node->macaddress_A);
+ }
+
+ if (is_admin_up(interlink->dev)) {
+ if (!interval)
+ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+
+ mod_timer(&hsr->announce_proxy_timer, jiffies + interval);
+ }
+
+done:
+ rcu_read_unlock();
+}
+
void hsr_del_ports(struct hsr_priv *hsr)
{
struct hsr_port *port;
@@ -416,6 +467,10 @@ void hsr_del_ports(struct hsr_priv *hsr)
if (port)
hsr_del_port(port);
+ port = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
+ if (port)
+ hsr_del_port(port);
+
port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (port)
hsr_del_port(port);
@@ -467,6 +522,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
}
}
+static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ bool is_slave_a_added = false;
+ bool is_slave_b_added = false;
+ struct hsr_port *port;
+ struct hsr_priv *hsr;
+ int ret = 0;
+
+ hsr = netdev_priv(dev);
+
+ hsr_for_each_port(hsr, port) {
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK)
+ continue;
+
+ ret = vlan_vid_add(port->dev, proto, vid);
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ if (ret) {
+ /* clean up Slave-B */
+ netdev_err(dev, "add vid failed for Slave-A\n");
+ if (is_slave_b_added)
+ vlan_vid_del(port->dev, proto, vid);
+ return ret;
+ }
+
+ is_slave_a_added = true;
+ break;
+
+ case HSR_PT_SLAVE_B:
+ if (ret) {
+ /* clean up Slave-A */
+ netdev_err(dev, "add vid failed for Slave-B\n");
+ if (is_slave_a_added)
+ vlan_vid_del(port->dev, proto, vid);
+ return ret;
+ }
+
+ is_slave_b_added = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct hsr_port *port;
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+
+ hsr_for_each_port(hsr, port) {
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ case HSR_PT_SLAVE_B:
+ vlan_vid_del(port->dev, proto, vid);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static const struct net_device_ops hsr_device_ops = {
.ndo_change_mtu = hsr_dev_change_mtu,
.ndo_open = hsr_dev_open,
@@ -475,9 +601,11 @@ static const struct net_device_ops hsr_device_ops = {
.ndo_change_rx_flags = hsr_change_rx_flags,
.ndo_fix_features = hsr_fix_features,
.ndo_set_rx_mode = hsr_set_rx_mode,
+ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
};
-static struct device_type hsr_type = {
+static const struct device_type hsr_type = {
.name = "hsr",
};
@@ -510,25 +638,21 @@ void hsr_dev_setup(struct net_device *dev)
dev->netdev_ops = &hsr_device_ops;
SET_NETDEV_DEVTYPE(dev, &hsr_type);
dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
+ /* Prevent recursive tx locking */
+ dev->lltx = true;
+ /* Not sure about this. Taken from bridge code. netdevice.h says
+ * it means "Does not change network namespaces".
+ */
+ dev->netns_local = true;
dev->needs_free_netdev = true;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
dev->features = dev->hw_features;
-
- /* Prevent recursive tx locking */
- dev->features |= NETIF_F_LLTX;
- /* VLAN on top of HSR needs testing and probably some work on
- * hsr_header_create() etc.
- */
- dev->features |= NETIF_F_VLAN_CHALLENGED;
- /* Not sure about this. Taken from bridge code. netdev_features.h says
- * it means "Does not change network namespaces".
- */
- dev->features |= NETIF_F_NETNS_LOCAL;
}
/* Return true if dev is a HSR master; return false otherwise.
@@ -539,14 +663,27 @@ bool is_hsr_master(struct net_device *dev)
}
EXPORT_SYMBOL(is_hsr_master);
+struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt)
+{
+ struct hsr_priv *hsr = netdev_priv(ndev);
+ struct hsr_port *port;
+
+ hsr_for_each_port(hsr, port)
+ if (port->type == pt)
+ return port->dev;
+ return NULL;
+}
+EXPORT_SYMBOL(hsr_get_port_ndev);
+
/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version,
- struct netlink_ext_ack *extack)
+ struct net_device *interlink, unsigned char multicast_spec,
+ u8 protocol_version, struct netlink_ext_ack *extack)
{
bool unregister = false;
struct hsr_priv *hsr;
@@ -555,6 +692,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr = netdev_priv(hsr_dev);
INIT_LIST_HEAD(&hsr->ports);
INIT_LIST_HEAD(&hsr->node_db);
+ INIT_LIST_HEAD(&hsr->proxy_node_db);
spin_lock_init(&hsr->list_lock);
eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
@@ -583,6 +721,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
+ timer_setup(&hsr->prune_proxy_timer, hsr_prune_proxy_nodes, 0);
+ timer_setup(&hsr->announce_proxy_timer, hsr_proxy_announce, 0);
ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
@@ -601,6 +741,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
(slave[1]->features & NETIF_F_HW_HSR_FWD))
hsr->fwd_offloaded = true;
+ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
res = register_netdevice(hsr_dev);
if (res)
goto err_unregister;
@@ -615,6 +759,17 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
if (res)
goto err_unregister;
+ if (interlink) {
+ res = hsr_add_port(hsr, interlink, HSR_PT_INTERLINK, extack);
+ if (res)
+ goto err_unregister;
+
+ hsr->redbox = true;
+ ether_addr_copy(hsr->macaddress_redbox, interlink->dev_addr);
+ mod_timer(&hsr->prune_proxy_timer,
+ jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
+ }
+
hsr_debugfs_init(hsr, hsr_dev);
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index 9060c92168f9..655284095b78 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -16,8 +16,8 @@
void hsr_del_ports(struct hsr_priv *hsr);
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version,
- struct netlink_ext_ack *extack);
+ struct net_device *interlink, unsigned char multicast_spec,
+ u8 protocol_version, struct netlink_ext_ack *extack);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
int hsr_get_max_mtu(struct hsr_priv *hsr);
#endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5d68cb181695..a4bacf198555 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -117,6 +117,35 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
return true;
}
+static bool is_proxy_supervision_frame(struct hsr_priv *hsr,
+ struct sk_buff *skb)
+{
+ struct hsr_sup_payload *payload;
+ struct ethhdr *eth_hdr;
+ u16 total_length = 0;
+
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* Get the HSR protocol revision. */
+ if (eth_hdr->h_proto == htons(ETH_P_HSR))
+ total_length = sizeof(struct hsrv1_ethhdr_sp);
+ else
+ total_length = sizeof(struct hsrv0_ethhdr_sp);
+
+ if (!pskb_may_pull(skb, total_length + sizeof(struct hsr_sup_payload)))
+ return false;
+
+ skb_pull(skb, total_length);
+ payload = (struct hsr_sup_payload *)skb->data;
+ skb_push(skb, total_length);
+
+ /* For RedBox (HSR-SAN) check if we have received the supervision
+ * frame with MAC addresses from own ProxyNodeTable.
+ */
+ return hsr_is_node_in_db(&hsr->proxy_node_db,
+ payload->macaddress_A);
+}
+
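
is_proxy_supervision_frame() above peeks at the supervision payload without consuming it: pskb_may_pull() guarantees the bytes are in the linear area, skb_pull() advances skb->data to the payload, and skb_push() restores the original offset before returning. The same peek pattern in isolation (hypothetical helper, not from the patch):

#include <linux/skbuff.h>

static void *example_peek_payload(struct sk_buff *skb, unsigned int offset,
				  unsigned int len)
{
	void *payload;

	if (!pskb_may_pull(skb, offset + len))
		return NULL;		/* not enough linear data */

	skb_pull(skb, offset);		/* temporarily advance to the payload */
	payload = skb->data;
	skb_push(skb, offset);		/* restore the original data pointer */

	return payload;
}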
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
struct hsr_frame_info *frame)
{
@@ -251,6 +280,7 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
struct hsr_port *port, u8 proto_version)
{
struct hsr_ethhdr *hsr_ethhdr;
+ unsigned char *pc;
int lsdu_size;
/* pad to minimum packet size which is 60 + 6 (HSR tag) */
@@ -261,7 +291,18 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
if (frame->is_vlan)
lsdu_size -= 4;
- hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
+ pc = skb_mac_header(skb);
+ if (frame->is_vlan)
+ /* This 4-byte shift (size of a vlan tag) does not
+ * mean that the ethhdr starts there. But rather it
+ * provides the proper environment for accessing
+ * the fields, such as hsr_tag etc., just like
+ * when the vlan tag is not there. This is because
+ * the hsr tag is after the vlan tag.
+ */
+ hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
+ else
+ hsr_ethhdr = (struct hsr_ethhdr *)pc;
hsr_set_path_id(hsr_ethhdr, port);
set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
@@ -339,7 +380,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
- skb = skb_copy_expand(frame->skb_std, 0,
+ skb = skb_copy_expand(frame->skb_std, skb_headroom(frame->skb_std),
skb_tailroom(frame->skb_std) + HSR_HLEN,
GFP_ATOMIC);
return prp_fill_rct(skb, frame, port);
@@ -377,22 +418,79 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
*/
ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
}
+
+ /* When HSR node is used as RedBox - the frame received from HSR ring
+ * requires source MAC address (SA) replacement to one which can be
+ * recognized by SAN devices (otherwise, frames are dropped by switch)
+ */
+ if (port->type == HSR_PT_INTERLINK)
+ ether_addr_copy(eth_hdr(skb)->h_source,
+ port->hsr->macaddress_redbox);
+
return dev_queue_xmit(skb);
}
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
- port->type == HSR_PT_SLAVE_B) ||
+ port->type == HSR_PT_SLAVE_B) ||
(frame->port_rcv->type == HSR_PT_SLAVE_B &&
- port->type == HSR_PT_SLAVE_A));
+ port->type == HSR_PT_SLAVE_A));
}
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
+ struct sk_buff *skb;
+
if (port->dev->features & NETIF_F_HW_HSR_FWD)
return prp_drop_frame(frame, port);
+ /* RedBox-specific frame dropping policies
+ *
+ * Do not send HSR supervision frames to SAN devices.
+ */
+ if (frame->is_supervision && port->type == HSR_PT_INTERLINK)
+ return true;
+
+ /* Do not forward to the other HSR port (A or B) unicast frames which
+ * are addressed to the interlink port (and are in the ProxyNodeTable).
+ */
+ skb = frame->skb_hsr;
+ if (skb && prp_drop_frame(frame, port) &&
+ is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
+ hsr_is_node_in_db(&port->hsr->proxy_node_db,
+ eth_hdr(skb)->h_dest)) {
+ return true;
+ }
+
+ /* Do not forward to port C (Interlink) frames from nodes A and B
+ * if their destination address (DA) is in the NodeTable.
+ */
+ if ((frame->port_rcv->type == HSR_PT_SLAVE_A ||
+ frame->port_rcv->type == HSR_PT_SLAVE_B) &&
+ port->type == HSR_PT_INTERLINK) {
+ skb = frame->skb_hsr;
+ if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
+ hsr_is_node_in_db(&port->hsr->node_db,
+ eth_hdr(skb)->h_dest)) {
+ return true;
+ }
+ }
+
+ /* Do not forward to ports A and B unicast frames received on the
+ * interlink port if they are addressed to one of the nodes registered
+ * in the ProxyNodeTable.
+ */
+ if ((port->type == HSR_PT_SLAVE_A || port->type == HSR_PT_SLAVE_B) &&
+ frame->port_rcv->type == HSR_PT_INTERLINK) {
+ skb = frame->skb_std;
+ if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
+ hsr_is_node_in_db(&port->hsr->proxy_node_db,
+ eth_hdr(skb)->h_dest)) {
+ return true;
+ }
+ }
+
return false;
}
@@ -442,19 +540,21 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
frame->sequence_nr))
continue;
- if (frame->is_supervision && port->type == HSR_PT_MASTER) {
+ if (frame->is_supervision && port->type == HSR_PT_MASTER &&
+ !frame->is_proxy_supervision) {
hsr_handle_sup_frame(frame);
continue;
}
/* Check if frame is to be dropped. Eg. for PRP no forward
- * between ports.
+ * between ports, or sending HSR supervision to RedBox.
*/
if (hsr->proto_ops->drop_frame &&
hsr->proto_ops->drop_frame(frame, port))
continue;
- if (port->type != HSR_PT_MASTER)
+ if (port->type == HSR_PT_SLAVE_A ||
+ port->type == HSR_PT_SLAVE_B)
skb = hsr->proto_ops->create_tagged_frame(frame, port);
else
skb = hsr->proto_ops->get_untagged_frame(frame, port);
@@ -469,7 +569,9 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
hsr_deliver_master(skb, port->dev, frame->node_src);
} else {
if (!hsr_xmit(skb, port, frame))
- sent = true;
+ if (port->type == HSR_PT_SLAVE_A ||
+ port->type == HSR_PT_SLAVE_B)
+ sent = true;
}
}
}
@@ -503,10 +605,12 @@ static void handle_std_frame(struct sk_buff *skb,
frame->skb_prp = NULL;
frame->skb_std = skb;
- if (port->type != HSR_PT_MASTER) {
+ if (port->type != HSR_PT_MASTER)
frame->is_from_san = true;
- } else {
- /* Sequence nr for the master node */
+
+ if (port->type == HSR_PT_MASTER ||
+ port->type == HSR_PT_INTERLINK) {
+ /* Sequence nr for the master/interlink node */
lockdep_assert_held(&hsr->seqnr_lock);
frame->sequence_nr = hsr->sequence_nr;
hsr->sequence_nr++;
@@ -564,6 +668,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
{
struct hsr_priv *hsr = port->hsr;
struct hsr_vlan_ethhdr *vlan_hdr;
+ struct list_head *n_db;
struct ethhdr *ethhdr;
__be16 proto;
int ret;
@@ -574,9 +679,16 @@ static int fill_frame_info(struct hsr_frame_info *frame,
memset(frame, 0, sizeof(*frame));
frame->is_supervision = is_supervision_frame(port->hsr, skb);
- frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
- frame->is_supervision,
- port->type);
+ if (frame->is_supervision && hsr->redbox)
+ frame->is_proxy_supervision =
+ is_proxy_supervision_frame(port->hsr, skb);
+
+ n_db = &hsr->node_db;
+ if (port->type == HSR_PT_INTERLINK)
+ n_db = &hsr->proxy_node_db;
+
+ frame->node_src = hsr_get_node(port, n_db, skb,
+ frame->is_supervision, port->type);
if (!frame->node_src)
return -1; /* Unknown node and !is_supervision, or no mem */
@@ -588,11 +700,13 @@ static int fill_frame_info(struct hsr_frame_info *frame,
frame->is_vlan = true;
if (frame->is_vlan) {
- vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
+ /* Note: skb->mac_len might be wrong here. */
+ if (!pskb_may_pull(skb,
+ skb_mac_offset(skb) +
+ offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
+ return -EINVAL;
+ vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
- /* FIXME: */
- netdev_warn_once(skb->dev, "VLAN not yet supported");
- return -EINVAL;
}
frame->is_from_san = false;
@@ -621,7 +735,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
/* Gets called for ingress frames as well as egress from master port.
* So check and increment stats for master port only here.
*/
- if (port->type == HSR_PT_MASTER) {
+ if (port->type == HSR_PT_MASTER || port->type == HSR_PT_INTERLINK) {
port->dev->stats.tx_packets++;
port->dev->stats.tx_bytes += skb->len;
}
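
A minimal sketch (not part of the patch) of the header-peek pattern used by is_proxy_supervision_frame() above: linearize enough bytes with pskb_may_pull(), step past the supervision header with skb_pull(), read the payload, then restore skb->data with skb_push(). Struct and field names follow the hunk; treat this as an illustration only.

static bool peek_sup_macaddress_a(struct sk_buff *skb, unsigned int hdr_len,
				  unsigned char addr_a[ETH_ALEN])
{
	struct hsr_sup_payload *payload;

	/* Make sure the header and the payload are in the linear area */
	if (!pskb_may_pull(skb, hdr_len + sizeof(*payload)))
		return false;

	skb_pull(skb, hdr_len);			/* skb->data now points at the payload */
	payload = (struct hsr_sup_payload *)skb->data;
	ether_addr_copy(addr_a, payload->macaddress_A);
	skb_push(skb, hdr_len);			/* restore the original data pointer */

	return true;
}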
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 6d14d935ee82..73bc6f659812 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -36,6 +36,14 @@ static bool seq_nr_after(u16 a, u16 b)
#define seq_nr_before(a, b) seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
+bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
+{
+ if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox))
+ return false;
+
+ return ether_addr_equal(addr, hsr->macaddress_redbox);
+}
+
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
struct hsr_self_node *sn;
@@ -71,6 +79,14 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
return NULL;
}
+/* Check if a node with the given MAC address is already present in the
+ * database.
+ */
+bool hsr_is_node_in_db(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN])
+{
+ return !!find_node_by_addr_A(node_db, addr);
+}
+
/* Helper for device init; the self_node is used in hsr_rcv() to recognize
* frames from self that's been looped over the HSR ring.
*/
@@ -223,11 +239,24 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
}
}
+ /* Check if the requested node is already present in the proxy node table */
+ list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
+ return node;
+ }
+ }
+
/* Everyone may create a node entry, connected node to a HSR/PRP
* device.
*/
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return NULL;
+
/* Use the existing sequence_nr from the tag as starting point
* for filtering duplicate frames.
*/
@@ -414,6 +443,10 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
node_dst = find_node_by_addr_A(&port->hsr->node_db,
eth_hdr(skb)->h_dest);
+ if (!node_dst && port->hsr->redbox)
+ node_dst = find_node_by_addr_A(&port->hsr->proxy_node_db,
+ eth_hdr(skb)->h_dest);
+
if (!node_dst) {
if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
netdev_err(skb->dev, "%s: Unknown node\n", __func__);
@@ -557,6 +590,41 @@ void hsr_prune_nodes(struct timer_list *t)
jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}
+void hsr_prune_proxy_nodes(struct timer_list *t)
+{
+ struct hsr_priv *hsr = from_timer(hsr, t, prune_proxy_timer);
+ unsigned long timestamp;
+ struct hsr_node *node;
+ struct hsr_node *tmp;
+
+ spin_lock_bh(&hsr->list_lock);
+ list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
+ /* Don't prune RedBox node. */
+ if (hsr_addr_is_redbox(hsr, node->macaddress_A))
+ continue;
+
+ timestamp = node->time_in[HSR_PT_INTERLINK];
+
+ /* Prune old entries */
+ if (time_is_before_jiffies(timestamp +
+ msecs_to_jiffies(HSR_PROXY_NODE_FORGET_TIME))) {
+ hsr_nl_nodedown(hsr, node->macaddress_A);
+ if (!node->removed) {
+ list_del_rcu(&node->mac_list);
+ node->removed = true;
+ /* The entry is actually freed after an RCU grace period: */
+ kfree_rcu(node, rcu_head);
+ }
+ }
+ }
+
+ spin_unlock_bh(&hsr->list_lock);
+
+ /* Restart timer */
+ mod_timer(&hsr->prune_proxy_timer,
+ jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
+}
+
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
unsigned char addr[ETH_ALEN])
{
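
hsr_prune_proxy_nodes() above follows the usual self-rearming timer pattern. A sketch of the matching arm/disarm calls the rest of the series needs; placing the setup in hsr_dev_finalize() is an assumption, since that function is not shown in this hunk (the teardown side is visible in the hsr_dellink() hunk below):

	/* At device setup time (assumed to live in hsr_dev_finalize()): */
	timer_setup(&hsr->prune_proxy_timer, hsr_prune_proxy_nodes, 0);
	mod_timer(&hsr->prune_proxy_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));

	/* At teardown, matching the hsr_dellink() change further down: */
	timer_delete_sync(&hsr->prune_proxy_timer);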
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index b23556251d62..993fa950d814 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -22,6 +22,7 @@ struct hsr_frame_info {
struct hsr_node *node_src;
u16 sequence_nr;
bool is_supervision;
+ bool is_proxy_supervision;
bool is_vlan;
bool is_local_dest;
bool is_local_exclusive;
@@ -35,6 +36,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
enum hsr_port_type rx_port);
void hsr_handle_sup_frame(struct hsr_frame_info *frame);
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
+bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr);
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
@@ -46,6 +48,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
u16 sequence_nr);
void hsr_prune_nodes(struct timer_list *t);
+void hsr_prune_proxy_nodes(struct timer_list *t);
int hsr_create_self_node(struct hsr_priv *hsr,
const unsigned char addr_a[ETH_ALEN],
@@ -67,6 +70,9 @@ void prp_handle_san_frame(bool san, enum hsr_port_type port,
struct hsr_node *node);
void prp_update_san_info(struct hsr_node *node, bool is_sup);
+bool hsr_is_node_in_db(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN]);
+
struct hsr_node {
struct list_head mac_list;
/* Protect R/W access to seq_out */
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index cb83c8feb746..d7ae32473c41 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -96,7 +96,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
break; /* Handled in ndo_change_mtu() */
mtu_max = hsr_get_max_mtu(port->hsr);
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
- master->dev->mtu = mtu_max;
+ WRITE_ONCE(master->dev->mtu, mtu_max);
break;
case NETDEV_UNREGISTER:
if (!is_hsr_master(dev)) {
@@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = {
static int __init hsr_init(void)
{
- int res;
+ int err;
BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
- register_netdevice_notifier(&hsr_nb);
- res = hsr_netlink_init();
+ err = register_netdevice_notifier(&hsr_nb);
+ if (err)
+ return err;
+
+ err = hsr_netlink_init();
+ if (err) {
+ unregister_netdevice_notifier(&hsr_nb);
+ return err;
+ }
- return res;
+ return 0;
}
static void __exit hsr_exit(void)
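
hsr_init() now unwinds on failure instead of ignoring the notifier registration result. The equivalent goto-based form of the same pattern, using placeholder names (example_nb, example_netlink_init) rather than the HSR symbols, would look like this sketch:

static struct notifier_block example_nb;

static int __init example_init(void)
{
	int err;

	err = register_netdevice_notifier(&example_nb);
	if (err)
		return err;

	err = example_netlink_init();	/* placeholder for the second setup step */
	if (err)
		goto err_notifier;

	return 0;

err_notifier:
	unregister_netdevice_notifier(&example_nb);
	return err;
}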
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 18e01791ad79..7561845b8bf6 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -21,6 +21,7 @@
*/
#define HSR_LIFE_CHECK_INTERVAL 2000 /* ms */
#define HSR_NODE_FORGET_TIME 60000 /* ms */
+#define HSR_PROXY_NODE_FORGET_TIME 60000 /* ms */
#define HSR_ANNOUNCE_INTERVAL 100 /* ms */
#define HSR_ENTRY_FORGET_TIME 400 /* ms */
@@ -35,6 +36,7 @@
* HSR_NODE_FORGET_TIME?
*/
#define PRUNE_PERIOD 3000 /* ms */
+#define PRUNE_PROXY_PERIOD 3000 /* ms */
#define HSR_TLV_EOT 0 /* End of TLVs */
#define HSR_TLV_ANNOUNCE 22
#define HSR_TLV_LIFE_CHECK 23
@@ -119,15 +121,6 @@ struct hsrv1_ethhdr_sp {
struct hsr_sup_tag hsr_sup;
} __packed;
-enum hsr_port_type {
- HSR_PT_NONE = 0, /* Must be 0, used by framereg */
- HSR_PT_SLAVE_A,
- HSR_PT_SLAVE_B,
- HSR_PT_INTERLINK,
- HSR_PT_MASTER,
- HSR_PT_PORTS, /* This must be the last item in the enum */
-};
-
/* PRP Redundancy Control Trailer (RCT).
* As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr,
* LAN identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }.
@@ -161,6 +154,7 @@ struct hsr_port {
struct net_device *dev;
struct hsr_priv *hsr;
enum hsr_port_type type;
+ struct rcu_head rcu;
};
struct hsr_frame_info;
@@ -168,7 +162,8 @@ struct hsr_node;
struct hsr_proto_ops {
/* format and send supervision frame */
- void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
+ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval,
+ const unsigned char addr[ETH_ALEN]);
void (*handle_san_frame)(bool san, enum hsr_port_type port,
struct hsr_node *node);
bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
@@ -192,9 +187,12 @@ struct hsr_priv {
struct rcu_head rcu_head;
struct list_head ports;
struct list_head node_db; /* Known HSR nodes */
+ struct list_head proxy_node_db; /* RedBox HSR proxy nodes */
struct hsr_self_node __rcu *self_node; /* MACs of slaves */
struct timer_list announce_timer; /* Supervision frame dispatch */
+ struct timer_list announce_proxy_timer;
struct timer_list prune_timer;
+ struct timer_list prune_proxy_timer;
int announce_count;
u16 sequence_nr;
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
@@ -209,6 +207,8 @@ struct hsr_priv {
* of lan_id
*/
bool fwd_offloaded; /* Forwarding offloaded to HW */
+ bool redbox; /* Device supports HSR RedBox */
+ unsigned char macaddress_redbox[ETH_ALEN];
unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
/* Align to u16 boundary to avoid unaligned access
* in ether_addr_equal
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 78fe40eb9f01..b68f2f71d0e1 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,6 +23,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
+ [IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};
/* Here, it seems a netdevice has already been allocated for us, and the
@@ -35,8 +36,8 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
enum hsr_version proto_version;
unsigned char multicast_spec;
u8 proto = HSR_PROTOCOL_HSR;
- struct net_device *link[2];
+ struct net_device *link[2], *interlink = NULL;
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
@@ -67,10 +68,21 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- if (!data[IFLA_HSR_MULTICAST_SPEC])
- multicast_spec = 0;
- else
- multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+ if (data[IFLA_HSR_INTERLINK])
+ interlink = __dev_get_by_index(src_net,
+ nla_get_u32(data[IFLA_HSR_INTERLINK]));
+
+ if (interlink && interlink == link[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
+ return -EINVAL;
+ }
+
+ if (interlink && interlink == link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
+ return -EINVAL;
+ }
+
+ multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);
if (data[IFLA_HSR_PROTOCOL])
proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
@@ -96,24 +108,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
}
}
- if (proto == HSR_PROTOCOL_PRP)
+ if (proto == HSR_PROTOCOL_PRP) {
proto_version = PRP_V1;
+ if (interlink) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interlink only works with HSR");
+ return -EINVAL;
+ }
+ }
- return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
+ return hsr_dev_finalize(dev, link, interlink, multicast_spec,
+ proto_version, extack);
}
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
struct hsr_priv *hsr = netdev_priv(dev);
- del_timer_sync(&hsr->prune_timer);
- del_timer_sync(&hsr->announce_timer);
+ timer_delete_sync(&hsr->prune_timer);
+ timer_delete_sync(&hsr->prune_proxy_timer);
+ timer_delete_sync(&hsr->announce_timer);
+ timer_delete_sync(&hsr->announce_proxy_timer);
hsr_debugfs_term(hsr);
hsr_del_ports(hsr);
hsr_del_self_node(hsr);
hsr_del_nodes(&hsr->node_db);
+ hsr_del_nodes(&hsr->proxy_node_db);
unregister_netdevice_queue(dev, head);
}
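
nla_get_u8_default() (its u32 counterpart appears in the nl802154.c hunk further down) simply collapses the attribute-present test that the hunk removes:

	u8 multicast_spec;

	/* Old form, as removed above: */
	if (data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
	else
		multicast_spec = 0;

	/* New form: */
	multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);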
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index e5742f2a2d52..2a802a5de2ac 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -55,6 +55,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
protocol = eth_hdr(skb)->h_proto;
if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
+ port->type != HSR_PT_INTERLINK &&
hsr->proto_ops->invalid_dan_ingress_frame &&
hsr->proto_ops->invalid_dan_ingress_frame(protocol))
goto finish_pass;
@@ -66,7 +67,16 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
skb_reset_mac_len(skb);
- hsr_forward_skb(skb, port);
+ /* Only frames received on the interlink port are assigned a sequence
+ * number here and therefore need to be serialised against other senders.
+ */
+ if (port->type == HSR_PT_INTERLINK) {
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, port);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ hsr_forward_skb(skb, port);
+ }
finish_consume:
return RX_HANDLER_CONSUMED;
@@ -194,7 +204,6 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
}
list_add_tail_rcu(&port->port_list, &hsr->ports);
- synchronize_rcu();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
netdev_update_features(master->dev);
@@ -220,11 +229,10 @@ void hsr_del_port(struct hsr_port *port)
netdev_update_features(master->dev);
dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
netdev_rx_handler_unregister(port->dev);
- dev_set_promiscuity(port->dev, -1);
+ if (!port->hsr->fwd_offloaded)
+ dev_set_promiscuity(port->dev, -1);
netdev_upper_dev_unlink(port->dev, master->dev);
}
- synchronize_rcu();
-
- kfree(port);
+ kfree_rcu(port, rcu);
}
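
kfree_rcu() replaces the synchronize_rcu() + kfree() pair so hsr_del_port() no longer blocks for a grace period; it relies on the rcu_head member added to struct hsr_port in the hsr_main.h hunk above. A minimal sketch of the same pattern with a hypothetical structure:

struct example_entry {
	struct list_head list;
	struct rcu_head rcu;	/* storage used by kfree_rcu() for its callback */
};

static void example_remove(struct example_entry *e)
{
	list_del_rcu(&e->list);	/* unpublish; existing RCU readers may still see it */
	kfree_rcu(e, rcu);	/* the actual kfree() happens after a grace period */
}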
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 2c087b7f17c5..175efd860f7b 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -93,7 +93,7 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
static int lowpan_get_iflink(const struct net_device *dev)
{
- return lowpan_802154_dev(dev)->wdev->ifindex;
+ return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex);
}
static const struct net_device_ops lowpan_netdev_ops = {
@@ -116,7 +116,7 @@ static void lowpan_setup(struct net_device *ldev)
ldev->netdev_ops = &lowpan_netdev_ops;
ldev->header_ops = &lowpan_header_ops;
ldev->needs_free_netdev = true;
- ldev->features |= NETIF_F_NETNS_LOCAL;
+ ldev->netns_local = true;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -280,5 +280,6 @@ static void __exit lowpan_cleanup_module(void)
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
+MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 6dd960ec558c..867d637d86f0 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
goto err;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
if (frag_type == LOWPAN_DISPATCH_FRAG1)
fq->q.flags |= INET_FRAG_FIRST_IN;
@@ -338,7 +338,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
/* secret interval has been deprecated */
@@ -351,7 +350,6 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
@@ -370,10 +368,8 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
goto err_alloc;
/* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- table[0].procname = NULL;
+ if (net->user_ns != &init_user_ns)
table_size = 0;
- }
}
table[0].data = &ieee802154_lowpan->fqdir->high_thresh;
@@ -399,7 +395,7 @@ err_alloc:
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
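
The empty '{ }' sentinel entries are gone because the ctl_table length is now passed explicitly at registration time; in the unprivileged-netns case the code shrinks table_size to 0 instead of nulling the first procname. A sketch of the registration side, assuming the (not shown) call uses register_net_sysctl_sz() and using a hypothetical sysctl path and entry name:

static struct ctl_table example_table[] = {
	{
		.procname	= "example_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	/* no terminating empty entry any more */
};

static struct ctl_table_header *example_hdr;

static int __net_init example_sysctl_register(struct net *net)
{
	example_hdr = register_net_sysctl_sz(net, "net/example", example_table,
					     ARRAY_SIZE(example_table));
	return example_hdr ? 0 : -ENOMEM;
}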
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 60e8fff1347e..88adb04e4072 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -226,11 +226,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
if (!wpan_dev->netdev)
continue;
- wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = false;
err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d");
if (err)
break;
- wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = true;
}
if (err) {
@@ -242,11 +242,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
list) {
if (!wpan_dev->netdev)
continue;
- wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = false;
err = dev_change_net_namespace(wpan_dev->netdev, net,
"wpan%d");
WARN_ON(err);
- wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = true;
}
return err;
@@ -291,7 +291,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
switch (state) {
/* TODO NETDEV_DEVTYPE */
case NETDEV_REGISTER:
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
wpan_dev->identifier = ++rdev->wpan_dev_id;
list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
rdev->devlist_generation++;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 29bf97640166..74ef0a310afb 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -202,10 +202,7 @@ int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info)
addr.pan_id = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
+ page = nla_get_u8_default(info->attrs[IEEE802154_ATTR_PAGE], 0);
ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
@@ -338,10 +335,7 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
+ page = nla_get_u8_default(info->attrs[IEEE802154_ATTR_PAGE], 0);
if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
@@ -388,10 +382,7 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
+ page = nla_get_u8_default(info->attrs[IEEE802154_ATTR_PAGE], 0);
ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
page, duration);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 7eb37de3add2..5a024ca60d35 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1438,22 +1438,18 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
}
/* Use current page by default */
- if (info->attrs[NL802154_ATTR_PAGE])
- request->page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
- else
- request->page = wpan_phy->current_page;
+ request->page = nla_get_u8_default(info->attrs[NL802154_ATTR_PAGE],
+ wpan_phy->current_page);
/* Scan all supported channels by default */
- if (info->attrs[NL802154_ATTR_SCAN_CHANNELS])
- request->channels = nla_get_u32(info->attrs[NL802154_ATTR_SCAN_CHANNELS]);
- else
- request->channels = wpan_phy->supported.channels[request->page];
+ request->channels =
+ nla_get_u32_default(info->attrs[NL802154_ATTR_SCAN_CHANNELS],
+ wpan_phy->supported.channels[request->page]);
/* Use maximum duration order by default */
- if (info->attrs[NL802154_ATTR_SCAN_DURATION])
- request->duration = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_DURATION]);
- else
- request->duration = IEEE802154_MAX_SCAN_DURATION;
+ request->duration =
+ nla_get_u8_default(info->attrs[NL802154_ATTR_SCAN_DURATION],
+ IEEE802154_MAX_SCAN_DURATION);
err = rdev_trigger_scan(rdev, request);
if (err) {
@@ -1598,10 +1594,8 @@ nl802154_send_beacons(struct sk_buff *skb, struct genl_info *info)
request->wpan_phy = wpan_phy;
/* Use maximum duration order by default */
- if (info->attrs[NL802154_ATTR_BEACON_INTERVAL])
- request->interval = nla_get_u8(info->attrs[NL802154_ATTR_BEACON_INTERVAL]);
- else
- request->interval = IEEE802154_MAX_SCAN_DURATION;
+ request->interval = nla_get_u8_default(info->attrs[NL802154_ATTR_BEACON_INTERVAL],
+ IEEE802154_MAX_SCAN_DURATION);
err = rdev_send_beacons(rdev, request);
if (err) {
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 00302e8b9615..18d267921bb5 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1043,19 +1043,21 @@ static int ieee802154_create(struct net *net, struct socket *sock,
if (sk->sk_prot->hash) {
rc = sk->sk_prot->hash(sk);
- if (rc) {
- sk_common_release(sk);
- goto out;
- }
+ if (rc)
+ goto out_sk_release;
}
if (sk->sk_prot->init) {
rc = sk->sk_prot->init(sk);
if (rc)
- sk_common_release(sk);
+ goto out_sk_release;
}
out:
return rc;
+out_sk_release:
+ sk_common_release(sk);
+ sock->sk = NULL;
+ goto out;
}
static const struct net_proto_family ieee802154_family_ops = {
@@ -1137,4 +1139,5 @@ module_init(af_ieee802154_init);
module_exit(af_ieee802154_remove);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IEEE 802.15.4 socket interface");
MODULE_ALIAS_NETPROTO(PF_IEEE802154);
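
The new out_sk_release label matters because sk_common_release() already drops the socket's references; clearing sock->sk prevents the generic socket teardown from releasing the half-initialized sk a second time. The same shape is applied to inet_create() in the af_inet.c hunk below. A condensed sketch with placeholder step names:

static int example_create(struct net *net, struct socket *sock, struct sock *sk)
{
	int rc;

	rc = example_hash(sk);		/* placeholder: protocol hash step */
	if (rc)
		goto out_sk_release;

	rc = example_init_sk(sk);	/* placeholder: protocol init step */
	if (rc)
		goto out_sk_release;
out:
	return rc;

out_sk_release:
	sk_common_release(sk);	/* drops the protocol and generic references */
	sock->sk = NULL;	/* stop callers from releasing sk again */
	goto out;
}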
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index d2903933805c..6708160ebf9f 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -93,7 +93,7 @@ static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
#define WPAN_PHY_PM_OPS NULL
#endif
-struct class wpan_phy_class = {
+const struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
diff --git a/net/ieee802154/sysfs.h b/net/ieee802154/sysfs.h
index 337545b639e9..69961e166257 100644
--- a/net/ieee802154/sysfs.h
+++ b/net/ieee802154/sysfs.h
@@ -5,6 +5,6 @@
int wpan_phy_sysfs_init(void);
void wpan_phy_sysfs_exit(void);
-extern struct class wpan_phy_class;
+extern const struct class wpan_phy_class;
#endif /* __IEEE802154_SYSFS_H */
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 62aa6465253a..591ce0a16fc0 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -75,7 +75,7 @@ TRACE_EVENT(802154_rdev_add_virtual_intf,
),
TP_fast_assign(
WPAN_PHY_ASSIGN;
- __assign_str(vir_intf_name, name ? name : "<noname>");
+ __assign_str(vir_intf_name);
__entry->type = type;
__entry->extended_addr = extended_addr;
),
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e94ed7c56a0..6d2c97f8e9ef 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -661,7 +661,8 @@ config TCP_CONG_CDG
For further details see:
D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
- delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+ delay gradients." In Networking 2011. Preprint:
+ http://caia.swin.edu.au/cv/dahayes/content/networking2011-cdg-preprint.pdf
config TCP_CONG_BBR
tristate "BBR TCP"
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a5a820ee2026..21f46ee7b6e9 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,7 @@
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
+#include <net/rps.h>
#include <trace/events/sock.h>
@@ -375,32 +376,30 @@ lookup_protocol:
inet->inet_sport = htons(inet->inet_num);
/* Add to protocol hash chains. */
err = sk->sk_prot->hash(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
if (!kern) {
err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
+out_sk_release:
+ sk_common_release(sk);
+ sock->sk = NULL;
+ goto out;
}
@@ -757,7 +756,9 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
sock_rps_record_flow(newsk);
WARN_ON(!((1 << newsk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_RECV |
- TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+ TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
+ TCPF_CLOSING | TCPF_CLOSE_WAIT |
+ TCPF_CLOSE)));
if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
@@ -770,16 +771,16 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
-int inet_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+int inet_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk1 = sock->sk, *sk2;
- int err = -EINVAL;
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
- sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern);
+ arg->err = -EINVAL;
+ sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
if (!sk2)
- return err;
+ return arg->err;
lock_sock(sk2);
__inet_accept(sock, newsock, sk2);
@@ -1071,6 +1072,7 @@ const struct proto_ops inet_stream_ops = {
#endif
.splice_eof = inet_splice_eof,
.splice_read = tcp_splice_read,
+ .set_peek_off = sk_set_peek_off,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.sendmsg_locked = tcp_sendmsg_locked,
@@ -1103,7 +1105,7 @@ const struct proto_ops inet_dgram_ops = {
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
@@ -1305,10 +1307,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
int inet_sk_rebuild_header(struct sock *sk)
{
+ struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
struct inet_sock *inet = inet_sk(sk);
- struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
- __be32 daddr;
- struct ip_options_rcu *inet_opt;
struct flowi4 *fl4;
int err;
@@ -1317,17 +1317,9 @@ int inet_sk_rebuild_header(struct sock *sk)
return 0;
/* Reroute. */
- rcu_read_lock();
- inet_opt = rcu_dereference(inet->inet_opt);
- daddr = inet->inet_daddr;
- if (inet_opt && inet_opt->opt.srr)
- daddr = inet_opt->opt.faddr;
- rcu_read_unlock();
fl4 = &inet->cork.fl.u.ip4;
- rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
- inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
- sk->sk_bound_dev_if);
+ inet_sk_init_flowi4(inet, fl4);
+ rt = ip_route_output_flow(sock_net(sk), fl4, sk);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
@@ -1480,7 +1472,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
struct sk_buff *p;
unsigned int hlen;
unsigned int off;
- unsigned int id;
int flush = 1;
int proto;
@@ -1506,13 +1497,10 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
goto out;
NAPI_GRO_CB(skb)->proto = proto;
- id = ntohl(*(__be32 *)&iph->id);
- flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
- id >>= 16;
+ flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
list_for_each_entry(p, head, list) {
struct iphdr *iph2;
- u16 flush_id;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -1529,48 +1517,10 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
-
- /* All fields must match except length and checksum. */
- NAPI_GRO_CB(p)->flush |=
- (iph->ttl ^ iph2->ttl) |
- (iph->tos ^ iph2->tos) |
- ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
-
- NAPI_GRO_CB(p)->flush |= flush;
-
- /* We need to store of the IP ID check to be included later
- * when we can verify that this packet does in fact belong
- * to a given flow.
- */
- flush_id = (u16)(id - ntohs(iph2->id));
-
- /* This bit of code makes it much easier for us to identify
- * the cases where we are doing atomic vs non-atomic IP ID
- * checks. Specifically an atomic check can return IP ID
- * values 0 - 0xFFFF, while a non-atomic check can only
- * return 0 or 0xFFFF.
- */
- if (!NAPI_GRO_CB(p)->is_atomic ||
- !(iph->frag_off & htons(IP_DF))) {
- flush_id ^= NAPI_GRO_CB(p)->count;
- flush_id = flush_id ? 0xFFFF : 0;
- }
-
- /* If the previous IP ID value was based on an atomic
- * datagram we can overwrite the value and ignore it.
- */
- if (NAPI_GRO_CB(skb)->is_atomic)
- NAPI_GRO_CB(p)->flush_id = flush_id;
- else
- NAPI_GRO_CB(p)->flush_id |= flush_id;
}
- NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
NAPI_GRO_CB(skb)->flush |= flush;
- skb_set_network_header(skb, off);
- /* The above will be needed by the transport layer if there is one
- * immediately following this IP hdr.
- */
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
@@ -1751,19 +1701,6 @@ static const struct net_protocol igmp_protocol = {
};
#endif
-static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .no_policy = 1,
- .icmp_strict_tag_validation = 1,
-};
-
-static const struct net_protocol udp_protocol = {
- .handler = udp_rcv,
- .err_handler = udp_err,
- .no_policy = 1,
-};
-
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
@@ -1904,14 +1841,6 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
-static struct packet_offload ip_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IP),
- .callbacks = {
- .gso_segment = inet_gso_segment,
- .gro_receive = inet_gro_receive,
- .gro_complete = inet_gro_complete,
- },
-};
static const struct net_offload ipip_offload = {
.callbacks = {
@@ -1938,7 +1867,15 @@ static int __init ipv4_offload_init(void)
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
- dev_add_offload(&ip_packet_offload);
+ net_hotdata.ip_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IP),
+ .callbacks = {
+ .gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ip_packet_offload);
return 0;
}
@@ -1992,9 +1929,22 @@ static int __init inet_init(void)
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
- if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
+
+ net_hotdata.udp_protocol = (struct net_protocol) {
+ .handler = udp_rcv,
+ .err_handler = udp_err,
+ .no_policy = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
- if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
+
+ net_hotdata.tcp_protocol = (struct net_protocol) {
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
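
The tcp/udp net_protocol structs and ip_packet_offload move from file-static data into net_hotdata, which groups frequently accessed networking data, but the registration calls stay the same. A sketch of the offload registration pattern for illustration, using only the fields visible in the hunk:

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),		/* ethertype the callbacks handle */
	.callbacks = {
		.gso_segment	= inet_gso_segment,
		.gro_receive	= inet_gro_receive,
		.gro_complete	= inet_gro_complete,
	},
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);	/* hook into the GRO/GSO offload lists */
	return 0;
}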
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 0d0d725b46ad..814300eee39d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -456,7 +456,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
/*unsigned long now; */
struct net *net = dev_net(dev);
- rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
+ rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev),
+ RT_SCOPE_UNIVERSE);
if (IS_ERR(rt))
return 1;
if (rt->dst.dev != dev) {
@@ -658,10 +659,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb
*/
void arp_xmit(struct sk_buff *skb)
{
+ rcu_read_lock();
/* Send it off, maybe filter it using firewalling first. */
NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
- dev_net(skb->dev), NULL, skb, NULL, skb->dev,
+ dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
arp_xmit_finish);
+ rcu_read_unlock();
}
EXPORT_SYMBOL(arp_xmit);
@@ -1002,6 +1005,55 @@ out_of_mem:
* User level interface (ioctl)
*/
+static struct net_device *arp_req_dev_by_name(struct net *net, struct arpreq *r,
+ bool getarp)
+{
+ struct net_device *dev;
+
+ if (getarp)
+ dev = dev_get_by_name_rcu(net, r->arp_dev);
+ else
+ dev = __dev_get_by_name(net, r->arp_dev);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ /* Mmmm... It is wrong... ARPHRD_NETROM == 0 */
+ if (!r->arp_ha.sa_family)
+ r->arp_ha.sa_family = dev->type;
+
+ if ((r->arp_flags & ATF_COM) && r->arp_ha.sa_family != dev->type)
+ return ERR_PTR(-EINVAL);
+
+ return dev;
+}
+
+static struct net_device *arp_req_dev(struct net *net, struct arpreq *r)
+{
+ struct net_device *dev;
+ struct rtable *rt;
+ __be32 ip;
+
+ if (r->arp_dev[0])
+ return arp_req_dev_by_name(net, r, false);
+
+ if (r->arp_flags & ATF_PUBL)
+ return NULL;
+
+ ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
+
+ rt = ip_route_output(net, ip, 0, 0, 0, RT_SCOPE_LINK);
+ if (IS_ERR(rt))
+ return ERR_CAST(rt);
+
+ dev = rt->dst.dev;
+ ip_rt_put(rt);
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ return dev;
+}
+
/*
* Set (create) an ARP cache entry.
*/
@@ -1022,18 +1074,17 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
static int arp_req_set_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
- __be32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
__be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;
- if (mask && mask != htonl(0xFFFFFFFF))
- return -EINVAL;
if (!dev && (r->arp_flags & ATF_COM)) {
- dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
+ dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
r->arp_ha.sa_data);
if (!dev)
return -ENODEV;
}
if (mask) {
+ __be32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
+
if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
return -ENOBUFS;
return 0;
@@ -1042,29 +1093,20 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
return arp_req_set_proxy(net, dev, 1);
}
-static int arp_req_set(struct net *net, struct arpreq *r,
- struct net_device *dev)
+static int arp_req_set(struct net *net, struct arpreq *r)
{
- __be32 ip;
struct neighbour *neigh;
+ struct net_device *dev;
+ __be32 ip;
int err;
+ dev = arp_req_dev(net, r);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
if (r->arp_flags & ATF_PUBL)
return arp_req_set_public(net, r, dev);
- ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
- if (r->arp_flags & ATF_PERM)
- r->arp_flags |= ATF_COM;
- if (!dev) {
- struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
-
- if (IS_ERR(rt))
- return PTR_ERR(rt);
- dev = rt->dst.dev;
- ip_rt_put(rt);
- if (!dev)
- return -EINVAL;
- }
switch (dev->type) {
#if IS_ENABLED(CONFIG_FDDI)
case ARPHRD_FDDI:
@@ -1086,12 +1128,18 @@ static int arp_req_set(struct net *net, struct arpreq *r,
break;
}
+ ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
+
neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
err = PTR_ERR(neigh);
if (!IS_ERR(neigh)) {
unsigned int state = NUD_STALE;
- if (r->arp_flags & ATF_PERM)
+
+ if (r->arp_flags & ATF_PERM) {
+ r->arp_flags |= ATF_COM;
state = NUD_PERMANENT;
+ }
+
err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
r->arp_ha.sa_data : NULL, state,
NEIGH_UPDATE_F_OVERRIDE |
@@ -1115,27 +1163,40 @@ static unsigned int arp_state_to_flags(struct neighbour *neigh)
* Get an ARP cache entry.
*/
-static int arp_req_get(struct arpreq *r, struct net_device *dev)
+static int arp_req_get(struct net *net, struct arpreq *r)
{
__be32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr;
struct neighbour *neigh;
- int err = -ENXIO;
+ struct net_device *dev;
+
+ if (!r->arp_dev[0])
+ return -ENODEV;
+
+ dev = arp_req_dev_by_name(net, r, true);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
neigh = neigh_lookup(&arp_tbl, &ip, dev);
- if (neigh) {
- if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
- read_lock_bh(&neigh->lock);
- memcpy(r->arp_ha.sa_data, neigh->ha,
- min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
- r->arp_flags = arp_state_to_flags(neigh);
- read_unlock_bh(&neigh->lock);
- r->arp_ha.sa_family = dev->type;
- strscpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
- err = 0;
- }
+ if (!neigh)
+ return -ENXIO;
+
+ if (READ_ONCE(neigh->nud_state) & NUD_NOARP) {
neigh_release(neigh);
+ return -ENXIO;
}
- return err;
+
+ read_lock_bh(&neigh->lock);
+ memcpy(r->arp_ha.sa_data, neigh->ha,
+ min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
+
+ r->arp_ha.sa_family = dev->type;
+ netdev_copy_name(dev, r->arp_dev);
+
+ return 0;
}
int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
@@ -1156,7 +1217,7 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
NEIGH_UPDATE_F_ADMIN, 0);
write_lock_bh(&tbl->lock);
neigh_release(neigh);
- neigh_remove_one(neigh, tbl);
+ neigh_remove_one(neigh);
write_unlock_bh(&tbl->lock);
}
@@ -1166,36 +1227,31 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
static int arp_req_delete_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
- __be32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr;
__be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;
- if (mask == htonl(0xFFFFFFFF))
- return pneigh_delete(&arp_tbl, net, &ip, dev);
+ if (mask) {
+ __be32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
- if (mask)
- return -EINVAL;
+ return pneigh_delete(&arp_tbl, net, &ip, dev);
+ }
return arp_req_set_proxy(net, dev, 0);
}
-static int arp_req_delete(struct net *net, struct arpreq *r,
- struct net_device *dev)
+static int arp_req_delete(struct net *net, struct arpreq *r)
{
+ struct net_device *dev;
__be32 ip;
+ dev = arp_req_dev(net, r);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
if (r->arp_flags & ATF_PUBL)
return arp_req_delete_public(net, r, dev);
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
- if (!dev) {
- struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
- dev = rt->dst.dev;
- ip_rt_put(rt);
- if (!dev)
- return -EINVAL;
- }
+
return arp_invalidate(dev, ip, true);
}
@@ -1205,9 +1261,9 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
- int err;
struct arpreq r;
- struct net_device *dev = NULL;
+ __be32 *netmask;
+ int err;
switch (cmd) {
case SIOCDARP:
@@ -1230,42 +1286,34 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!(r.arp_flags & ATF_PUBL) &&
(r.arp_flags & (ATF_NETMASK | ATF_DONTPUB)))
return -EINVAL;
+
+ netmask = &((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr;
if (!(r.arp_flags & ATF_NETMASK))
- ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
- htonl(0xFFFFFFFFUL);
- rtnl_lock();
- if (r.arp_dev[0]) {
- err = -ENODEV;
- dev = __dev_get_by_name(net, r.arp_dev);
- if (!dev)
- goto out;
-
- /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
- if (!r.arp_ha.sa_family)
- r.arp_ha.sa_family = dev->type;
- err = -EINVAL;
- if ((r.arp_flags & ATF_COM) && r.arp_ha.sa_family != dev->type)
- goto out;
- } else if (cmd == SIOCGARP) {
- err = -ENODEV;
- goto out;
- }
+ *netmask = htonl(0xFFFFFFFFUL);
+ else if (*netmask && *netmask != htonl(0xFFFFFFFFUL))
+ return -EINVAL;
switch (cmd) {
case SIOCDARP:
- err = arp_req_delete(net, &r, dev);
+ rtnl_lock();
+ err = arp_req_delete(net, &r);
+ rtnl_unlock();
break;
case SIOCSARP:
- err = arp_req_set(net, &r, dev);
+ rtnl_lock();
+ err = arp_req_set(net, &r);
+ rtnl_unlock();
break;
case SIOCGARP:
- err = arp_req_get(&r, dev);
+ rcu_read_lock();
+ err = arp_req_get(net, &r);
+ rcu_read_unlock();
+
+ if (!err && copy_to_user(arg, &r, sizeof(r)))
+ err = -EFAULT;
break;
}
-out:
- rtnl_unlock();
- if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
- err = -EFAULT;
+
return err;
}
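
arp_req_dev() keeps the old behaviour of resolving the device through a route lookup when arp_dev is empty; the legacy RTO_ONLINK flag is now expressed as the RT_SCOPE_LINK argument of the extended ip_route_output() signature. Condensed from the hunk above, as an illustration only:

static struct net_device *example_dev_for_dest(struct net *net, __be32 ip)
{
	struct net_device *dev;
	struct rtable *rt;

	/* RT_SCOPE_LINK limits the lookup to directly connected routes */
	rt = ip_route_output(net, ip, 0, 0, 0, RT_SCOPE_LINK);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	ip_rt_put(rt);

	return dev ? dev : ERR_PTR(-EINVAL);
}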
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index ae8b15e6896f..554804774628 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -12,14 +12,11 @@
#include <net/bpf_sk_storage.h>
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
-extern struct bpf_struct_ops bpf_tcp_congestion_ops;
-
-static u32 unsupported_ops[] = {
- offsetof(struct tcp_congestion_ops, get_info),
-};
+static struct bpf_struct_ops bpf_tcp_congestion_ops;
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
+static const struct btf_type *tcp_congestion_ops_type;
static int bpf_tcp_ca_init(struct btf *btf)
{
@@ -36,19 +33,12 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
- return 0;
-}
-
-static bool is_unsupported(u32 member_offset)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
- if (member_offset == unsupported_ops[i])
- return true;
- }
+ type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ tcp_congestion_ops_type = btf_type_by_id(btf, type_id);
- return false;
+ return 0;
}
static bool bpf_tcp_ca_is_valid_access(int off, int size,
@@ -101,6 +91,9 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
case offsetof(struct tcp_sock, snd_cwnd_cnt):
end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
break;
+ case offsetof(struct tcp_sock, snd_cwnd_stamp):
+ end = offsetofend(struct tcp_sock, snd_cwnd_stamp);
+ break;
case offsetof(struct tcp_sock, snd_ssthresh):
end = offsetofend(struct tcp_sock, snd_ssthresh);
break;
@@ -149,7 +142,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
u32 midx;
midx = prog->expected_attach_type;
- t = bpf_tcp_congestion_ops.type;
+ t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
@@ -191,17 +184,17 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
-BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
-BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
.owner = THIS_MODULE,
@@ -242,26 +235,17 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
return 0;
}
-static int bpf_tcp_ca_check_member(const struct btf_type *t,
- const struct btf_member *member,
- const struct bpf_prog *prog)
-{
- if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
- return -ENOTSUPP;
- return 0;
-}
-
-static int bpf_tcp_ca_reg(void *kdata)
+static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
return tcp_register_congestion_control(kdata);
}
-static void bpf_tcp_ca_unreg(void *kdata)
+static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
{
tcp_unregister_congestion_control(kdata);
}
-static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
+static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
return tcp_update_congestion_control(kdata, old_kdata);
}
@@ -301,7 +285,8 @@ static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
return 0;
}
-static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs)
+static void bpf_tcp_ca_cong_control(struct sock *sk, u32 ack, int flag,
+ const struct rate_sample *rs)
{
}
@@ -339,21 +324,26 @@ static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
.release = __bpf_tcp_ca_release,
};
-struct bpf_struct_ops bpf_tcp_congestion_ops = {
+static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.verifier_ops = &bpf_tcp_ca_verifier_ops,
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
.update = bpf_tcp_ca_update,
- .check_member = bpf_tcp_ca_check_member,
.init_member = bpf_tcp_ca_init_member,
.init = bpf_tcp_ca_init,
.validate = bpf_tcp_ca_validate,
.name = "tcp_congestion_ops",
.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
+ .owner = THIS_MODULE,
};
static int __init bpf_tcp_ca_kfunc_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);
+
+ return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index d048aa833293..740af8541d2f 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -37,7 +37,7 @@
#include <net/cipso_ipv4.h>
#include <linux/atomic.h>
#include <linux/bug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* List of available DOI definitions */
/* XXX - This currently assumes a minimal number of different DOIs in use,
@@ -864,11 +864,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
net_clen_bits,
net_spot + 1,
1);
- if (net_spot < 0) {
- if (net_spot == -2)
- return -EFAULT;
+ if (net_spot < 0)
return 0;
- }
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
@@ -1813,11 +1810,35 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
return CIPSO_V4_HDR_LEN + ret_val;
}
+static int cipso_v4_get_actual_opt_len(const unsigned char *data, int len)
+{
+ int iter = 0, optlen = 0;
+
+ /* Determining the new total option length is tricky because of the
+ * padding required; the only workable approach at this point is to walk
+ * the options one by one, skipping the padding at the end, to determine
+ * the actual option size and, from that, the new total option length.
+ */
+ while (iter < len) {
+ if (data[iter] == IPOPT_END) {
+ break;
+ } else if (data[iter] == IPOPT_NOP) {
+ iter++;
+ } else {
+ iter += data[iter + 1];
+ optlen = iter;
+ }
+ }
+ return optlen;
+}
+
/**
* cipso_v4_sock_setattr - Add a CIPSO option to a socket
* @sk: the socket
* @doi_def: the CIPSO DOI to use
* @secattr: the specific security attributes of the socket
+ * @sk_locked: true if caller holds the socket lock
*
* Description:
* Set the CIPSO option on the given socket using the DOI definition and
@@ -1829,7 +1850,8 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
*/
int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
- const struct netlbl_lsm_secattr *secattr)
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked)
{
int ret_val = -EPERM;
unsigned char *buf = NULL;
@@ -1879,8 +1901,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
sk_inet = inet_sk(sk);
- old = rcu_dereference_protected(sk_inet->inet_opt,
- lockdep_sock_is_held(sk));
+ old = rcu_dereference_protected(sk_inet->inet_opt, sk_locked);
if (inet_test_bit(IS_ICSK, sk)) {
sk_conn = inet_csk(sk);
if (old)
@@ -1955,7 +1976,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
buf = NULL;
req_inet = inet_rsk(req);
- opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
+ opt = unrcu_pointer(xchg(&req_inet->ireq_opt, RCU_INITIALIZER(opt)));
if (opt)
kfree_rcu(opt, rcu);
@@ -1988,7 +2009,6 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
u8 cipso_len;
u8 cipso_off;
unsigned char *cipso_ptr;
- int iter;
int optlen_new;
cipso_off = opt->opt.cipso - sizeof(struct iphdr);
@@ -2008,19 +2028,8 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
memmove(cipso_ptr, cipso_ptr + cipso_len,
opt->opt.optlen - cipso_off - cipso_len);
- /* determining the new total option length is tricky because of
- * the padding necessary, the only thing i can think to do at
- * this point is walk the options one-by-one, skipping the
- * padding at the end to determine the actual option size and
- * from there we can determine the new total option length */
- iter = 0;
- optlen_new = 0;
- while (iter < opt->opt.optlen)
- if (opt->opt.__data[iter] != IPOPT_NOP) {
- iter += opt->opt.__data[iter + 1];
- optlen_new = iter;
- } else
- iter++;
+ optlen_new = cipso_v4_get_actual_opt_len(opt->opt.__data,
+ opt->opt.optlen);
hdr_delta = opt->opt.optlen;
opt->opt.optlen = (optlen_new + 3) & ~3;
hdr_delta -= opt->opt.optlen;
@@ -2240,7 +2249,8 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
*/
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
- int ret_val;
+ int ret_val, cipso_len, hdr_len_actual, new_hdr_len_actual, new_hdr_len,
+ hdr_len_delta;
struct iphdr *iph;
struct ip_options *opt = &IPCB(skb)->opt;
unsigned char *cipso_ptr;
@@ -2253,16 +2263,37 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
if (ret_val < 0)
return ret_val;
- /* the easiest thing to do is just replace the cipso option with noop
- * options since we don't change the size of the packet, although we
- * still need to recalculate the checksum */
-
iph = ip_hdr(skb);
cipso_ptr = (unsigned char *)iph + opt->cipso;
- memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
+ cipso_len = cipso_ptr[1];
+
+ hdr_len_actual = sizeof(struct iphdr) +
+ cipso_v4_get_actual_opt_len((unsigned char *)(iph + 1),
+ opt->optlen);
+ new_hdr_len_actual = hdr_len_actual - cipso_len;
+ new_hdr_len = (new_hdr_len_actual + 3) & ~3;
+ hdr_len_delta = (iph->ihl << 2) - new_hdr_len;
+
+ /* 1. shift any options after CIPSO to the left */
+ memmove(cipso_ptr, cipso_ptr + cipso_len,
+ new_hdr_len_actual - opt->cipso);
+ /* 2. move the whole IP header to its new place */
+ memmove((unsigned char *)iph + hdr_len_delta, iph, new_hdr_len_actual);
+ /* 3. adjust the skb layout */
+ skb_pull(skb, hdr_len_delta);
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ /* 4. re-fill new padding with IPOPT_END (may now be longer) */
+ memset((unsigned char *)iph + new_hdr_len_actual, IPOPT_END,
+ new_hdr_len - new_hdr_len_actual);
+
+ opt->optlen -= hdr_len_delta;
opt->cipso = 0;
opt->is_changed = 1;
-
+ if (hdr_len_delta != 0) {
+ iph->ihl = new_hdr_len >> 2;
+ iph_set_totlen(iph, skb->len);
+ }
ip_send_check(iph);
return 0;
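
A worked example of the new cipso_v4_skbuff_delattr() arithmetic, assuming a header whose only option is a 12-byte CIPSO option (ihl = 8, i.e. a 32-byte header):

	cipso_len          = 12
	hdr_len_actual     = 20 + 12 = 32
	new_hdr_len_actual = 32 - 12 = 20
	new_hdr_len        = (20 + 3) & ~3 = 20
	hdr_len_delta      = (8 << 2) - 20 = 12

The IP header is moved 12 bytes forward, skb_pull(skb, 12) re-aligns the skb, ihl becomes 5 and the total-length field is refreshed, instead of leaving 12 bytes of IPOPT_NOOP padding in place as the old code did.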
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 2cc50cbfc2a3..4b5bc6eb52e7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -61,15 +61,17 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
err = -EACCES;
goto out;
}
+
+ /* Update addresses before rehashing */
+ inet->inet_daddr = fl4->daddr;
+ inet->inet_dport = usin->sin_port;
if (!inet->inet_saddr)
- inet->inet_saddr = fl4->saddr; /* Update source address */
+ inet->inet_saddr = fl4->saddr;
if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = fl4->saddr;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
- inet->inet_daddr = fl4->daddr;
- inet->inet_dport = usin->sin_port;
reuseport_has_conns_set(sk);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
@@ -100,8 +102,6 @@ EXPORT_SYMBOL(ip4_datagram_connect);
void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
- const struct ip_options_rcu *inet_opt;
- __be32 daddr = inet->inet_daddr;
struct dst_entry *dst;
struct flowi4 fl4;
struct rtable *rt;
@@ -113,14 +113,9 @@ void ip4_datagram_release_cb(struct sock *sk)
rcu_read_unlock();
return;
}
- inet_opt = rcu_dereference(inet->inet_opt);
- if (inet_opt && inet_opt->opt.srr)
- daddr = inet_opt->opt.faddr;
- rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
- inet->inet_saddr, inet->inet_dport,
- inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ inet_sk_init_flowi4(inet, &fl4);
+ rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
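
Both here and in inet_sk_rebuild_header() above, the open-coded flow setup (including the source-route faddr override) is folded into inet_sk_init_flowi4(). A condensed sketch of the rerouting pattern as now written, for illustration:

static struct dst_entry *example_reroute(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 fl4;
	struct rtable *rt;

	inet_sk_init_flowi4(inet, &fl4);	/* fills addresses, ports, proto; honours SRR */
	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

	return IS_ERR(rt) ? NULL : &rt->dst;
}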
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc74f131fe4d..55b8151759bc 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -119,11 +119,9 @@ struct inet_fill_args {
#define IN4_ADDR_HSIZE_SHIFT 8
#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
-static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
-
static u32 inet_addr_hash(const struct net *net, __be32 addr)
{
- u32 val = (__force u32) addr ^ net_hash_mix(net);
+ u32 val = __ipv4_addr_hash(addr, net_hash_mix(net));
return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
}
@@ -133,13 +131,13 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
u32 hash = inet_addr_hash(net, ifa->ifa_local);
ASSERT_RTNL();
- hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+ hlist_add_head_rcu(&ifa->addr_lst, &net->ipv4.inet_addr_lst[hash]);
}
static void inet_hash_remove(struct in_ifaddr *ifa)
{
ASSERT_RTNL();
- hlist_del_init_rcu(&ifa->hash);
+ hlist_del_init_rcu(&ifa->addr_lst);
}
/**
@@ -186,9 +184,8 @@ struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
u32 hash = inet_addr_hash(net, addr);
struct in_ifaddr *ifa;
- hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
- if (ifa->ifa_local == addr &&
- net_eq(dev_net(ifa->ifa_dev->dev), net))
+ hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst)
+ if (ifa->ifa_local == addr)
return ifa;
return NULL;
@@ -216,22 +213,37 @@ static void devinet_sysctl_unregister(struct in_device *idev)
/* Locks all the inet devices. */
-static struct in_ifaddr *inet_alloc_ifa(void)
+static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev)
{
- return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL_ACCOUNT);
+ struct in_ifaddr *ifa;
+
+ ifa = kzalloc(sizeof(*ifa), GFP_KERNEL_ACCOUNT);
+ if (!ifa)
+ return NULL;
+
+ in_dev_hold(in_dev);
+ ifa->ifa_dev = in_dev;
+
+ INIT_HLIST_NODE(&ifa->addr_lst);
+
+ return ifa;
}
static void inet_rcu_free_ifa(struct rcu_head *head)
{
struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
- if (ifa->ifa_dev)
- in_dev_put(ifa->ifa_dev);
+
+ in_dev_put(ifa->ifa_dev);
kfree(ifa);
}
static void inet_free_ifa(struct in_ifaddr *ifa)
{
- call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
+ /* Our reference to ifa->ifa_dev must be dropped ASAP
+ * so that the reference to the netdev is released the same way:

+ * in_dev_put() -> in_dev_finish_destroy() -> netdev_put()
+ */
+ call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa);
}
static void in_dev_free_rcu(struct rcu_head *head)
@@ -283,17 +295,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);
- err = devinet_sysctl_register(in_dev);
- if (err) {
- in_dev->dead = 1;
- neigh_parms_release(&arp_tbl, in_dev->arp_parms);
- in_dev_put(in_dev);
- in_dev = NULL;
- goto out;
+ if (dev != blackhole_netdev) {
+ err = devinet_sysctl_register(in_dev);
+ if (err) {
+ in_dev->dead = 1;
+ neigh_parms_release(&arp_tbl, in_dev->arp_parms);
+ in_dev_put(in_dev);
+ in_dev = NULL;
+ goto out;
+ }
+ ip_mc_init_dev(in_dev);
+ if (dev->flags & IFF_UP)
+ ip_mc_up(in_dev);
}
- ip_mc_init_dev(in_dev);
- if (dev->flags & IFF_UP)
- ip_mc_up(in_dev);
/* we can receive as soon as ip_ptr is set -- do this last */
rcu_assign_pointer(dev->ip_ptr, in_dev);
@@ -332,6 +346,19 @@ static void inetdev_destroy(struct in_device *in_dev)
in_dev_put(in_dev);
}
+static int __init inet_blackhole_dev_init(void)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (!inetdev_init(blackhole_netdev))
+ err = -ENOMEM;
+ rtnl_unlock();
+
+ return err;
+}
+late_initcall(inet_blackhole_dev_init);
+
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
const struct in_ifaddr *ifa;
@@ -469,26 +496,18 @@ static void inet_del_ifa(struct in_device *in_dev,
__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}
-static void check_lifetime(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
-
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid, struct netlink_ext_ack *extack)
{
struct in_ifaddr __rcu **last_primary, **ifap;
struct in_device *in_dev = ifa->ifa_dev;
+ struct net *net = dev_net(in_dev->dev);
struct in_validator_info ivi;
struct in_ifaddr *ifa1;
int ret;
ASSERT_RTNL();
- if (!ifa->ifa_local) {
- inet_free_ifa(ifa);
- return 0;
- }
-
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
@@ -546,8 +565,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
inet_hash_insert(dev_net(in_dev->dev), ifa);
- cancel_delayed_work(&check_lifetime_work);
- queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
+ cancel_delayed_work(&net->ipv4.addr_chk_work);
+ queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0);
/* Send message first, then call notifier.
Notifier will trigger FIB update, so that
@@ -560,26 +579,21 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
static int inet_insert_ifa(struct in_ifaddr *ifa)
{
+ if (!ifa->ifa_local) {
+ inet_free_ifa(ifa);
+ return 0;
+ }
+
return __inet_insert_ifa(ifa, NULL, 0, NULL);
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
- struct in_device *in_dev = __in_dev_get_rtnl(dev);
-
- ASSERT_RTNL();
+ struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
- if (!in_dev) {
- inet_free_ifa(ifa);
- return -ENOBUFS;
- }
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
- if (ifa->ifa_dev != in_dev) {
- WARN_ON(ifa->ifa_dev);
- in_dev_hold(in_dev);
- ifa->ifa_dev = in_dev;
- }
+
if (ipv4_is_loopback(ifa->ifa_local))
ifa->ifa_scope = RT_SCOPE_HOST;
return inet_insert_ifa(ifa);
@@ -629,7 +643,7 @@ static int ip_mc_autojoin_config(struct net *net, bool join,
struct sock *sk = net->ipv4.mc_autojoin_sk;
int ret;
- ASSERT_RTNL();
+ ASSERT_RTNL_NET(net);
lock_sock(sk);
if (join)
@@ -655,22 +669,24 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct in_ifaddr *ifa;
int err;
- ASSERT_RTNL();
-
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
ifa_ipv4_policy, extack);
if (err < 0)
- goto errout;
+ goto out;
ifm = nlmsg_data(nlh);
+
+ rtnl_net_lock(net);
+
in_dev = inetdev_by_index(net, ifm->ifa_index);
if (!in_dev) {
NL_SET_ERR_MSG(extack, "ipv4: Device not found");
err = -ENODEV;
- goto errout;
+ goto unlock;
}
- for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
+ for (ifap = &in_dev->ifa_list;
+ (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] &&
ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
@@ -686,68 +702,76 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ipv4_is_multicast(ifa->ifa_address))
ip_mc_autojoin_config(net, false, ifa);
+
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
- return 0;
+ goto unlock;
}
NL_SET_ERR_MSG(extack, "ipv4: Address not found");
err = -EADDRNOTAVAIL;
-errout:
+unlock:
+ rtnl_net_unlock(net);
+out:
return err;
}
-#define INFINITY_LIFE_TIME 0xFFFFFFFF
-
static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
struct in_ifaddr *ifa;
struct hlist_node *n;
+ struct net *net;
int i;
+ net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work);
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
+ struct hlist_head *head = &net->ipv4.inet_addr_lst[i];
bool change_needed = false;
rcu_read_lock();
- hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
- unsigned long age;
-
- if (ifa->ifa_flags & IFA_F_PERMANENT)
+ hlist_for_each_entry_rcu(ifa, head, addr_lst) {
+ unsigned long age, tstamp;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u32 flags;
+
+ flags = READ_ONCE(ifa->ifa_flags);
+ if (flags & IFA_F_PERMANENT)
continue;
+ preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
+ valid_lft = READ_ONCE(ifa->ifa_valid_lft);
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
/* We try to batch several events at once. */
- age = (now - ifa->ifa_tstamp +
+ age = (now - tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
- age >= ifa->ifa_valid_lft) {
+ if (valid_lft != INFINITY_LIFE_TIME &&
+ age >= valid_lft) {
change_needed = true;
- } else if (ifa->ifa_preferred_lft ==
+ } else if (preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
- } else if (age >= ifa->ifa_preferred_lft) {
- if (time_before(ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ, next))
- next = ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ;
+ } else if (age >= preferred_lft) {
+ if (time_before(tstamp + valid_lft * HZ, next))
+ next = tstamp + valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+ if (!(flags & IFA_F_DEPRECATED))
change_needed = true;
- } else if (time_before(ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ,
+ } else if (time_before(tstamp + preferred_lft * HZ,
next)) {
- next = ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ;
+ next = tstamp + preferred_lft * HZ;
}
}
rcu_read_unlock();
if (!change_needed)
continue;
- rtnl_lock();
- hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
+
+ rtnl_net_lock(net);
+ hlist_for_each_entry_safe(ifa, n, head, addr_lst) {
unsigned long age;
if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -763,7 +787,7 @@ static void check_lifetime(struct work_struct *work)
struct in_ifaddr *tmp;
ifap = &ifa->ifa_dev->ifa_list;
- tmp = rtnl_dereference(*ifap);
+ tmp = rtnl_net_dereference(net, *ifap);
while (tmp) {
if (tmp == ifa) {
inet_del_ifa(ifa->ifa_dev,
@@ -771,7 +795,7 @@ static void check_lifetime(struct work_struct *work)
break;
}
ifap = &tmp->ifa_next;
- tmp = rtnl_dereference(*ifap);
+ tmp = rtnl_net_dereference(net, *ifap);
}
} else if (ifa->ifa_preferred_lft !=
INFINITY_LIFE_TIME &&
@@ -781,7 +805,7 @@ static void check_lifetime(struct work_struct *work)
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
}
- rtnl_unlock();
+ rtnl_net_unlock(net);
}
next_sec = round_jiffies_up(next);
@@ -796,63 +820,84 @@ static void check_lifetime(struct work_struct *work)
if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
- queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
- next_sched - now);
+ queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work,
+ next_sched - now);
}
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
__u32 prefered_lft)
{
unsigned long timeout;
+ u32 flags;
- ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
+ flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout))
- ifa->ifa_valid_lft = timeout;
+ WRITE_ONCE(ifa->ifa_valid_lft, timeout);
else
- ifa->ifa_flags |= IFA_F_PERMANENT;
+ flags |= IFA_F_PERMANENT;
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
- ifa->ifa_flags |= IFA_F_DEPRECATED;
- ifa->ifa_preferred_lft = timeout;
+ flags |= IFA_F_DEPRECATED;
+ WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
}
- ifa->ifa_tstamp = jiffies;
+ WRITE_ONCE(ifa->ifa_flags, flags);
+ WRITE_ONCE(ifa->ifa_tstamp, jiffies);
if (!ifa->ifa_cstamp)
- ifa->ifa_cstamp = ifa->ifa_tstamp;
+ WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
}
-static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
- __u32 *pvalid_lft, __u32 *pprefered_lft,
- struct netlink_ext_ack *extack)
+static int inet_validate_rtm(struct nlmsghdr *nlh, struct nlattr **tb,
+ struct netlink_ext_ack *extack,
+ __u32 *valid_lft, __u32 *prefered_lft)
{
- struct nlattr *tb[IFA_MAX+1];
- struct in_ifaddr *ifa;
- struct ifaddrmsg *ifm;
- struct net_device *dev;
- struct in_device *in_dev;
+ struct ifaddrmsg *ifm = nlmsg_data(nlh);
int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
ifa_ipv4_policy, extack);
if (err < 0)
- goto errout;
-
- ifm = nlmsg_data(nlh);
- err = -EINVAL;
+ return err;
if (ifm->ifa_prefixlen > 32) {
NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length");
- goto errout;
+ return -EINVAL;
}
if (!tb[IFA_LOCAL]) {
NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied");
- goto errout;
+ return -EINVAL;
}
+ if (tb[IFA_CACHEINFO]) {
+ struct ifa_cacheinfo *ci;
+
+ ci = nla_data(tb[IFA_CACHEINFO]);
+ if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
+ NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
+ return -EINVAL;
+ }
+
+ *valid_lft = ci->ifa_valid;
+ *prefered_lft = ci->ifa_prefered;
+ }
+
+ return 0;
+}
+
+static struct in_ifaddr *inet_rtm_to_ifa(struct net *net, struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrmsg *ifm = nlmsg_data(nlh);
+ struct in_device *in_dev;
+ struct net_device *dev;
+ struct in_ifaddr *ifa;
+ int err;
+
dev = __dev_get_by_index(net, ifm->ifa_index);
err = -ENODEV;
if (!dev) {
@@ -860,12 +905,12 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
goto errout;
}
- in_dev = __in_dev_get_rtnl(dev);
+ in_dev = __in_dev_get_rtnl_net(dev);
err = -ENOBUFS;
if (!in_dev)
goto errout;
- ifa = inet_alloc_ifa();
+ ifa = inet_alloc_ifa(in_dev);
if (!ifa)
/*
* A potential indev allocation can be left alive, it stays
@@ -875,19 +920,14 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
- in_dev_hold(in_dev);
if (!tb[IFA_ADDRESS])
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
- INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
- ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
- ifm->ifa_flags;
+ ifa->ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
ifa->ifa_scope = ifm->ifa_scope;
- ifa->ifa_dev = in_dev;
-
ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
@@ -905,76 +945,69 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
if (tb[IFA_PROTO])
ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]);
- if (tb[IFA_CACHEINFO]) {
- struct ifa_cacheinfo *ci;
-
- ci = nla_data(tb[IFA_CACHEINFO]);
- if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
- NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
- err = -EINVAL;
- goto errout_free;
- }
- *pvalid_lft = ci->ifa_valid;
- *pprefered_lft = ci->ifa_prefered;
- }
-
return ifa;
-errout_free:
- inet_free_ifa(ifa);
errout:
return ERR_PTR(err);
}
-static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
+static struct in_ifaddr *find_matching_ifa(struct net *net, struct in_ifaddr *ifa)
{
struct in_device *in_dev = ifa->ifa_dev;
struct in_ifaddr *ifa1;
- if (!ifa->ifa_local)
- return NULL;
-
- in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
+ in_dev_for_each_ifa_rtnl_net(net, ifa1, in_dev) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa) &&
ifa1->ifa_local == ifa->ifa_local)
return ifa1;
}
+
return NULL;
}
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ __u32 prefered_lft = INFINITY_LIFE_TIME;
+ __u32 valid_lft = INFINITY_LIFE_TIME;
struct net *net = sock_net(skb->sk);
- struct in_ifaddr *ifa;
struct in_ifaddr *ifa_existing;
- __u32 valid_lft = INFINITY_LIFE_TIME;
- __u32 prefered_lft = INFINITY_LIFE_TIME;
+ struct nlattr *tb[IFA_MAX + 1];
+ struct in_ifaddr *ifa;
+ int ret;
- ASSERT_RTNL();
+ ret = inet_validate_rtm(nlh, tb, extack, &valid_lft, &prefered_lft);
+ if (ret < 0)
+ return ret;
+
+ if (!nla_get_in_addr(tb[IFA_LOCAL]))
+ return 0;
- ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
- if (IS_ERR(ifa))
- return PTR_ERR(ifa);
+ rtnl_net_lock(net);
- ifa_existing = find_matching_ifa(ifa);
+ ifa = inet_rtm_to_ifa(net, nlh, tb, extack);
+ if (IS_ERR(ifa)) {
+ ret = PTR_ERR(ifa);
+ goto unlock;
+ }
+
+ ifa_existing = find_matching_ifa(net, ifa);
if (!ifa_existing) {
/* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
- int ret = ip_mc_autojoin_config(net, true, ifa);
-
+ ret = ip_mc_autojoin_config(net, true, ifa);
if (ret < 0) {
NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
inet_free_ifa(ifa);
- return ret;
+ goto unlock;
}
}
- return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
- extack);
+
+ ret = __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid, extack);
} else {
u32 new_metric = ifa->ifa_rt_priority;
u8 new_proto = ifa->ifa_proto;
@@ -984,7 +1017,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (nlh->nlmsg_flags & NLM_F_EXCL ||
!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG(extack, "ipv4: Address already assigned");
- return -EEXIST;
+ ret = -EEXIST;
+ goto unlock;
}
ifa = ifa_existing;
@@ -996,12 +1030,16 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
ifa->ifa_proto = new_proto;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
- cancel_delayed_work(&check_lifetime_work);
+ cancel_delayed_work(&net->ipv4.addr_chk_work);
queue_delayed_work(system_power_efficient_wq,
- &check_lifetime_work, 0);
+ &net->ipv4.addr_chk_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
}
- return 0;
+
+unlock:
+ rtnl_net_unlock(net);
+
+ return ret;
}
/*
@@ -1088,7 +1126,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
goto out;
}
- rtnl_lock();
+ rtnl_net_lock(net);
ret = -ENODEV;
dev = __dev_get_by_name(net, ifr->ifr_name);
@@ -1098,7 +1136,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
if (colon)
*colon = ':';
- in_dev = __in_dev_get_rtnl(dev);
+ in_dev = __in_dev_get_rtnl_net(dev);
if (in_dev) {
if (tryaddrmatch) {
/* Matthias Andree */
@@ -1108,7 +1146,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
This is checked above. */
for (ifap = &in_dev->ifa_list;
- (ifa = rtnl_dereference(*ifap)) != NULL;
+ (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
ifap = &ifa->ifa_next) {
if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
@@ -1122,7 +1160,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
comparing just the label */
if (!ifa) {
for (ifap = &in_dev->ifa_list;
- (ifa = rtnl_dereference(*ifap)) != NULL;
+ (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
ifap = &ifa->ifa_next)
if (!strcmp(ifr->ifr_name, ifa->ifa_label))
break;
@@ -1164,6 +1202,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
inet_del_ifa(in_dev, ifap, 1);
break;
}
+
+ /* NETDEV_UP/DOWN/CHANGE could touch a peer dev */
+ ASSERT_RTNL();
ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
break;
@@ -1174,10 +1215,12 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
if (!ifa) {
ret = -ENOBUFS;
- ifa = inet_alloc_ifa();
+ if (!in_dev)
+ break;
+ ifa = inet_alloc_ifa(in_dev);
if (!ifa)
break;
- INIT_HLIST_NODE(&ifa->hash);
+
if (colon)
memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
else
@@ -1263,14 +1306,14 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
break;
}
done:
- rtnl_unlock();
+ rtnl_net_unlock(net);
out:
return ret;
}
int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
{
- struct in_device *in_dev = __in_dev_get_rtnl(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
const struct in_ifaddr *ifa;
struct ifreq ifr;
int done = 0;
@@ -1281,7 +1324,7 @@ int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
if (!in_dev)
goto out;
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
+ in_dev_for_each_ifa_rtnl_net(dev_net(dev), ifa, in_dev) {
if (!buf) {
done += size;
continue;
@@ -1312,7 +1355,7 @@ static __be32 in_dev_select_addr(const struct in_device *in_dev,
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope)
@@ -1328,10 +1371,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
__be32 addr = 0;
unsigned char localnet_scope = RT_SCOPE_HOST;
struct in_device *in_dev;
- struct net *net = dev_net(dev);
+ struct net *net;
int master_idx;
rcu_read_lock();
+ net = dev_net_rcu(dev);
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto no_in_dev;
@@ -1340,7 +1384,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
localnet_scope = RT_SCOPE_LINK;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (min(ifa->ifa_scope, localnet_scope) > scope)
continue;
@@ -1576,16 +1620,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (!inetdev_valid_mtu(dev->mtu))
break;
if (dev->flags & IFF_LOOPBACK) {
- struct in_ifaddr *ifa = inet_alloc_ifa();
+ struct in_ifaddr *ifa = inet_alloc_ifa(in_dev);
if (ifa) {
- INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
ifa->ifa_mask = inet_make_mask(8);
- in_dev_hold(in_dev);
- ifa->ifa_dev = in_dev;
ifa->ifa_scope = RT_SCOPE_HOST;
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
@@ -1671,12 +1712,14 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
-static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
+static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
+ unsigned long tstamp;
u32 preferred, valid;
+ u32 flags;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
args->flags);
@@ -1686,7 +1729,13 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
- ifm->ifa_flags = ifa->ifa_flags;
+
+ flags = READ_ONCE(ifa->ifa_flags);
+ /* Warning: ifm->ifa_flags is an __u8, it holds only 8 bits.
+ * The 32-bit value is given in the IFA_FLAGS attribute.
+ */
+ ifm->ifa_flags = (__u8)flags;
+
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -1694,11 +1743,12 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto nla_put_failure;
- if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
- preferred = ifa->ifa_preferred_lft;
- valid = ifa->ifa_valid_lft;
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
+ if (!(flags & IFA_F_PERMANENT)) {
+ preferred = READ_ONCE(ifa->ifa_preferred_lft);
+ valid = READ_ONCE(ifa->ifa_valid_lft);
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->ifa_tstamp) / HZ;
+ long tval = (jiffies - tstamp) / HZ;
if (preferred > tval)
preferred -= tval;
@@ -1725,10 +1775,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
(ifa->ifa_proto &&
nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
- nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
+ nla_put_u32(skb, IFA_FLAGS, flags) ||
(ifa->ifa_rt_priority &&
nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
- put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
+ put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
preferred, valid))
goto nla_put_failure;
@@ -1798,15 +1848,15 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
}
static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet_fill_args *fillargs)
{
struct in_ifaddr *ifa;
int ip_idx = 0;
int err;
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ip_idx < s_ip_idx) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ip_idx < *s_ip_idx) {
ip_idx++;
continue;
}
@@ -1818,9 +1868,9 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
ip_idx++;
}
err = 0;
-
+ ip_idx = 0;
done:
- cb->args[2] = ip_idx;
+ *s_ip_idx = ip_idx;
return err;
}
@@ -1830,7 +1880,7 @@ done:
static u32 inet_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -1852,75 +1902,52 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
};
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
- int h, s_h;
- int idx, s_idx;
- int s_ip_idx;
- struct net_device *dev;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct in_device *in_dev;
- struct hlist_head *head;
+ struct net_device *dev;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
- err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
if (!dev) {
err = -ENODEV;
- goto put_tgt_net;
- }
-
- in_dev = __in_dev_get_rtnl(dev);
- if (in_dev) {
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
+ goto done;
}
- goto put_tgt_net;
- }
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(tgt_net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
- goto cont;
-
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- if (err < 0) {
- rcu_read_unlock();
goto done;
- }
-cont:
- idx++;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ goto done;
}
- rcu_read_unlock();
}
+ cb->seq = inet_base_seq(tgt_net);
+
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
+ }
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
-
- return skb->len ? : err;
+ rcu_read_unlock();
+ return err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
@@ -1952,8 +1979,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
static size_t inet_get_link_af_size(const struct net_device *dev,
@@ -1982,7 +2008,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
- ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+ ((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
return 0;
}
@@ -2068,9 +2094,9 @@ static int inet_netconf_msgsize_devconf(int type)
}
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
- struct ipv4_devconf *devconf, u32 portid,
- u32 seq, int event, unsigned int flags,
- int type)
+ const struct ipv4_devconf *devconf,
+ u32 portid, u32 seq, int event,
+ unsigned int flags, int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
@@ -2095,27 +2121,28 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
if ((all || type == NETCONFA_FORWARDING) &&
nla_put_s32(skb, NETCONFA_FORWARDING,
- IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_RP_FILTER) &&
nla_put_s32(skb, NETCONFA_RP_FILTER,
- IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+ IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_MC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
- IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_BC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_BC_FORWARDING,
- IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
- IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+ IPV4_DEVCONF_RO(*devconf,
+ IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
goto nla_put_failure;
out:
@@ -2148,8 +2175,7 @@ void inet_netconf_notify_devconf(struct net *net, int event, int type,
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
}
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
@@ -2204,21 +2230,20 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
- struct nlattr *tb[NETCONFA_MAX+1];
+ struct nlattr *tb[NETCONFA_MAX + 1];
+ const struct ipv4_devconf *devconf;
+ struct in_device *in_dev = NULL;
+ struct net_device *dev = NULL;
struct sk_buff *skb;
- struct ipv4_devconf *devconf;
- struct in_device *in_dev;
- struct net_device *dev;
int ifindex;
int err;
err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err)
- goto errout;
+ return err;
- err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
- goto errout;
+ return -EINVAL;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
@@ -2229,10 +2254,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
devconf = net->ipv4.devconf_dflt;
break;
default:
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
- goto errout;
- in_dev = __in_dev_get_rtnl(dev);
+ err = -ENODEV;
+ dev = dev_get_by_index(net, ifindex);
+ if (dev)
+ in_dev = in_dev_get(dev);
if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
@@ -2256,6 +2281,9 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
+ if (in_dev)
+ in_dev_put(in_dev);
+ dev_put(dev);
return err;
}
@@ -2264,11 +2292,13 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
+ const struct in_device *in_dev;
struct net_device *dev;
- struct in_device *in_dev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -2285,64 +2315,45 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- goto cont;
-
- if (inet_netconf_fill_devconf(skb, dev->ifindex,
- &in_dev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = inet_netconf_fill_devconf(skb, dev->ifindex,
+ &in_dev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv4.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv4.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv4.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv4.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -2385,7 +2396,7 @@ static void inet_forward_change(struct net *net)
if (on)
dev_disable_lro(dev);
- in_dev = __in_dev_get_rtnl(dev);
+ in_dev = __in_dev_get_rtnl_net(dev);
if (in_dev) {
IN_DEV_CONF_SET(in_dev, FORWARDING, on);
inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
@@ -2408,7 +2419,7 @@ static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
}
}
-static int devinet_conf_proc(struct ctl_table *ctl, int write,
+static int devinet_conf_proc(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int old_value = *(int *)ctl->data;
@@ -2460,7 +2471,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
return ret;
}
-static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
+static int devinet_sysctl_forward(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -2476,7 +2487,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
if (write && *valp != val) {
if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
- if (!rtnl_trylock()) {
+ if (!rtnl_net_trylock(net)) {
/* Restore the original values before restarting */
*valp = val;
*ppos = pos;
@@ -2495,7 +2506,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
idev->dev->ifindex,
cnf);
}
- rtnl_unlock();
+ rtnl_net_unlock(net);
rt_cache_flush(net);
} else
inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
@@ -2507,7 +2518,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
return ret;
}
-static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
+static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -2546,7 +2557,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
static struct devinet_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
+ struct ctl_table devinet_vars[IPV4_DEVCONF_MAX];
} devinet_sysctl = {
.devinet_vars = {
DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -2609,7 +2620,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
if (!t)
goto out;
- for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
+ for (i = 0; i < ARRAY_SIZE(t->devinet_vars); i++) {
t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
t->devinet_vars[i].extra1 = p;
t->devinet_vars[i].extra2 = net;
@@ -2683,20 +2694,26 @@ static struct ctl_table ctl_forward_entry[] = {
.extra1 = &ipv4_devconf,
.extra2 = &init_net,
},
- { },
};
#endif
static __net_init int devinet_init_net(struct net *net)
{
- int err;
- struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
- struct ctl_table *tbl;
struct ctl_table_header *forw_hdr;
+ struct ctl_table *tbl;
#endif
+ struct ipv4_devconf *all, *dflt;
+ int err;
+ int i;
err = -ENOMEM;
+ net->ipv4.inet_addr_lst = kmalloc_array(IN4_ADDR_HSIZE,
+ sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!net->ipv4.inet_addr_lst)
+ goto err_alloc_hash;
+
all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
if (!all)
goto err_alloc_all;
@@ -2757,6 +2774,11 @@ static __net_init int devinet_init_net(struct net *net)
net->ipv4.forw_hdr = forw_hdr;
#endif
+ for (i = 0; i < IN4_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&net->ipv4.inet_addr_lst[i]);
+
+ INIT_DEFERRABLE_WORK(&net->ipv4.addr_chk_work, check_lifetime);
+
net->ipv4.devconf_all = all;
net->ipv4.devconf_dflt = dflt;
return 0;
@@ -2774,14 +2796,20 @@ err_alloc_ctl:
err_alloc_dflt:
kfree(all);
err_alloc_all:
+ kfree(net->ipv4.inet_addr_lst);
+err_alloc_hash:
return err;
}
static __net_exit void devinet_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
- struct ctl_table *tbl;
+ const struct ctl_table *tbl;
+#endif
+
+ cancel_delayed_work_sync(&net->ipv4.addr_chk_work);
+#ifdef CONFIG_SYSCTL
tbl = net->ipv4.forw_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.forw_hdr);
__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
@@ -2792,6 +2820,7 @@ static __net_exit void devinet_exit_net(struct net *net)
#endif
kfree(net->ipv4.devconf_dflt);
kfree(net->ipv4.devconf_all);
+ kfree(net->ipv4.inet_addr_lst);
}
static __net_initdata struct pernet_operations devinet_ops = {
@@ -2807,23 +2836,25 @@ static struct rtnl_af_ops inet_af_ops __read_mostly = {
.set_link_af = inet_set_link_af,
};
+static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = {
+ {.protocol = PF_INET, .msgtype = RTM_NEWADDR, .doit = inet_rtm_newaddr,
+ .flags = RTNL_FLAG_DOIT_PERNET},
+ {.protocol = PF_INET, .msgtype = RTM_DELADDR, .doit = inet_rtm_deladdr,
+ .flags = RTNL_FLAG_DOIT_PERNET},
+ {.protocol = PF_INET, .msgtype = RTM_GETADDR, .dumpit = inet_dump_ifaddr,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
+ {.protocol = PF_INET, .msgtype = RTM_GETNETCONF,
+ .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
+};
+
void __init devinet_init(void)
{
- int i;
-
- for (i = 0; i < IN4_ADDR_HSIZE; i++)
- INIT_HLIST_HEAD(&inet_addr_lst[i]);
-
register_pernet_subsys(&devinet_ops);
register_netdevice_notifier(&ip_netdev_notifier);
- queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
-
- rtnl_af_register(&inet_af_ops);
+ if (rtnl_af_register(&inet_af_ops))
+ panic("Unable to register inet_af_ops\n");
- rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
- rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
- rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
- rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
- inet_netconf_dump_devconf, 0);
+ rtnl_register_many(devinet_rtnl_msg_handlers);
}
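The devinet.c changes above move the IPv4 address hash table and the lifetime-check work into struct net, so every namespace hashes addresses with its own salt. A userspace sketch of the bucket computation (GOLDEN_RATIO_32 matches the kernel constant; the salt value is made up, and the XOR mixing below is a simplified stand-in for the jhash-based helper the kernel uses):

/* Userspace sketch of per-netns address hashing: mix the address with a
 * per-namespace salt, then fold into IN4_ADDR_HSIZE buckets with the usual
 * 32-bit multiplicative hash_32(). */
#include <stdint.h>
#include <stdio.h>

#define IN4_ADDR_HSIZE_SHIFT 8
#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t addr = 0xc0a80001;      /* 192.168.0.1, host order for the sketch */
	uint32_t net_salt = 0x5eed1234;  /* stands in for net_hash_mix(net) */
	uint32_t bucket = hash_32(addr ^ net_salt, IN4_ADDR_HSIZE_SHIFT);

	printf("bucket %u of %u\n", bucket, 1u << IN4_ADDR_HSIZE_SHIFT);
	return 0;
}

Because the salt differs per namespace, the same address lands in different buckets in different netns, which is why inet_lookup_ifaddr_rcu() no longer needs the explicit net_eq() check.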
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4dd9e5040672..0e4076866c0a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -20,6 +20,7 @@
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>
+#include <linux/skbuff_ref.h>
#include <linux/highmem.h>
@@ -95,7 +96,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
int extralen = 0;
@@ -114,7 +115,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- put_page(sg_page(sg));
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET_ESPINTCP
@@ -238,8 +240,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
- kfree_skb(skb);
-
+ WARN_ON(1);
return -EOPNOTSUPP;
}
#endif
@@ -260,7 +261,7 @@ static void esp_output_done(void *data, int err)
}
tmp = ESP_SKB_CB(skb)->tmp;
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
kfree(tmp);
if (xo && (xo->flags & XFRM_DEV_RESUME)) {
@@ -278,7 +279,7 @@ static void esp_output_done(void *data, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb->sk, skb, err);
+ xfrm_output_resume(skb_to_full_sk(skb), skb, err);
}
}
@@ -347,8 +348,8 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
__be16 dport)
{
struct udphdr *uh;
- __be32 *udpdata32;
unsigned int len;
+ struct xfrm_offload *xo = xfrm_offload(skb);
len = skb->len + esp->tailen - skb_transport_offset(skb);
if (len + sizeof(struct iphdr) > IP_MAX_MTU)
@@ -360,13 +361,12 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
uh->len = htons(len);
uh->check = 0;
- *skb_mac_header(skb) = IPPROTO_UDP;
-
- if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
- udpdata32 = (__be32 *)(uh + 1);
- udpdata32[0] = udpdata32[1] = 0;
- return (struct ip_esp_hdr *)(udpdata32 + 2);
- }
+ /* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
+ * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
+ * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
+ */
+ if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
+ *skb_mac_header(skb) = IPPROTO_UDP;
return (struct ip_esp_hdr *)(uh + 1);
}
@@ -423,7 +423,6 @@ static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
switch (encap_type) {
default:
case UDP_ENCAP_ESPINUDP:
- case UDP_ENCAP_ESPINUDP_NON_IKE:
esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
break;
case TCP_ENCAP_ESPINTCP:
@@ -639,7 +638,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
}
if (sg != dsg)
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
err = esp_output_tail_tcp(x, skb);
@@ -775,7 +774,6 @@ int esp_input_done2(struct sk_buff *skb, int err)
source = th->source;
break;
case UDP_ENCAP_ESPINUDP:
- case UDP_ENCAP_ESPINUDP_NON_IKE:
source = uh->source;
break;
default:
@@ -818,7 +816,8 @@ int esp_input_done2(struct sk_buff *skb, int err)
}
skb_pull_rcsum(skb, hlen);
- if (x->props.mode == XFRM_MODE_TUNNEL)
+ if (x->props.mode == XFRM_MODE_TUNNEL ||
+ x->props.mode == XFRM_MODE_IPTFS)
skb_reset_transport_header(skb);
else
skb_set_transport_header(skb, -ihl);
@@ -1179,9 +1178,6 @@ static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
case UDP_ENCAP_ESPINUDP:
x->props.header_len += sizeof(struct udphdr);
break;
- case UDP_ENCAP_ESPINUDP_NON_IKE:
- x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
- break;
#ifdef CONFIG_INET_ESPINTCP
case TCP_ENCAP_ESPINTCP:
/* only the length field, TCP encap is done by
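The esp4.c hunks drop the legacy UDP_ENCAP_ESPINUDP_NON_IKE encapsulation, leaving only the plain ESP-in-UDP format in the header-length accounting. A small sketch of the per-packet header budget involved (wire sizes only; the structs below are stand-ins, not the kernel definitions):

/* Userspace sketch of the ESP-in-UDP header budget after removal of the
 * legacy NON_IKE variant: only the 8-byte UDP header sits in front of the
 * ESP header. */
#include <stdio.h>

struct udphdr_wire     { unsigned char b[8]; };
struct ip_esp_hdr_wire { unsigned char spi[4], seq_no[4]; };

int main(void)
{
	unsigned int espinudp = sizeof(struct udphdr_wire);          /* 8 */
	unsigned int non_ike  = sizeof(struct udphdr_wire) + 2 * 4;  /* 16, now removed */

	printf("UDP_ENCAP_ESPINUDP adds %u bytes before ESP\n", espinudp);
	printf("dropped NON_IKE variant added %u (8-byte UDP + 8 zero bytes)\n", non_ike);
	printf("ESP header itself: %zu bytes (SPI + sequence number)\n",
	       sizeof(struct ip_esp_hdr_wire));
	return 0;
}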
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index b3271957ad9a..e0d94270da28 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -53,9 +53,16 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
if (sp->len == XFRM_MAX_DEPTH)
goto out_reset;
- x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
- (xfrm_address_t *)&ip_hdr(skb)->daddr,
- spi, IPPROTO_ESP, AF_INET);
+ x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
+ (xfrm_address_t *)&ip_hdr(skb)->daddr,
+ spi, IPPROTO_ESP, AF_INET);
+
+ if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ /* non-offload path will record the error and audit log */
+ xfrm_state_put(x);
+ x = NULL;
+ }
+
if (!x)
goto out_reset;
@@ -264,6 +271,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
struct esp_info esp;
bool hw_offload = true;
__u32 seq;
+ int encap_type = 0;
esp.inplace = true;
@@ -296,8 +304,10 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.esph = ip_esp_hdr(skb);
+ if (x->encap)
+ encap_type = x->encap->encap_type;
- if (!hw_offload || !skb_is_gso(skb)) {
+ if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
esp.nfrags = esp_output_head(x, skb, &esp);
if (esp.nfrags < 0)
return esp.nfrags;
@@ -324,6 +334,18 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+ if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
+ /* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
+ * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
+ * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
+ * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
+ * Therefore, the protocol field needs to be corrected.
+ */
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
+
+ esph->seq_no = htonl(seq);
+ }
+
ip_hdr(skb)->tot_len = htons(skb->len);
ip_send_check(ip_hdr(skb));
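The esp4_offload.c changes add an SA direction check on the GRO input path and still build the UDP encapsulation in software when crypto is offloaded for ESP-in-UDP. A compact userspace sketch of those two decisions (enum values and helper names are hypothetical, but the truth tables mirror the conditions in the hunks above):

/* Userspace sketch: (1) an SA found on input must not be an output-only SA,
 * and (2) software encapsulation is still needed for ESP-in-UDP even when
 * the device offloads crypto. */
#include <stdbool.h>
#include <stdio.h>

enum sa_dir { SA_DIR_UNSPEC, SA_DIR_IN, SA_DIR_OUT };

static bool accept_gro_state(enum sa_dir dir)
{
	/* mirrors: x->dir && x->dir != XFRM_SA_DIR_IN  ->  reject */
	return dir == SA_DIR_UNSPEC || dir == SA_DIR_IN;
}

static bool need_sw_encap(bool hw_offload, bool gso, bool espinudp)
{
	/* mirrors: !hw_offload || !skb_is_gso(skb) || (hw_offload && espinudp) */
	return !hw_offload || !gso || espinudp;
}

int main(void)
{
	printf("output-only SA accepted on input? %s\n",
	       accept_gro_state(SA_DIR_OUT) ? "yes" : "no");          /* no */
	printf("hw offload + GSO + ESP-in-UDP -> sw encap? %s\n",
	       need_sw_encap(true, true, true) ? "yes" : "no");        /* yes */
	printf("hw offload + GSO, plain ESP  -> sw encap? %s\n",
	       need_sw_encap(true, true, false) ? "yes" : "no");       /* no */
	return 0;
}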
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 390f4be7f7be..272e42d81323 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -293,7 +293,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
- .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
+ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(skb))),
.flowi4_scope = scope,
.flowi4_mark = vmark ? skb->mark : 0,
};
@@ -342,10 +342,11 @@ EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev);
* called with rcu_read_lock()
*/
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
+ dscp_t dscp, int oif, struct net_device *dev,
int rpf, struct in_device *idev, u32 *itag)
{
struct net *net = dev_net(dev);
+ enum skb_drop_reason reason;
struct flow_keys flkeys;
int ret, no_addr;
struct fib_result res;
@@ -357,7 +358,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
fl4.daddr = src;
fl4.saddr = dst;
- fl4.flowi4_tos = tos;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
@@ -377,9 +378,15 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
if (fib_lookup(net, &fl4, &res, 0))
goto last_resort;
- if (res.type != RTN_UNICAST &&
- (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
- goto e_inval;
+ if (res.type != RTN_UNICAST) {
+ if (res.type != RTN_LOCAL) {
+ reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
+ goto e_inval;
+ } else if (!IN_DEV_ACCEPT_LOCAL(idev)) {
+ reason = SKB_DROP_REASON_IP_LOCAL_SOURCE;
+ goto e_inval;
+ }
+ }
fib_combine_itag(itag, &res);
dev_match = fib_info_nh_uses_dev(res.fi, dev);
@@ -412,14 +419,14 @@ last_resort:
return 0;
e_inval:
- return -EINVAL;
+ return -reason;
e_rpf:
- return -EXDEV;
+ return -SKB_DROP_REASON_IP_RPFILTER;
}
/* Ignore rp_filter for packets protected by IPsec. */
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
+ dscp_t dscp, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag)
{
int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
@@ -440,7 +447,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
* and the same host but different containers are not.
*/
if (inet_lookup_ifaddr_rcu(net, src))
- return -EINVAL;
+ return -SKB_DROP_REASON_IP_LOCAL_SOURCE;
ok:
*itag = 0;
@@ -448,7 +455,8 @@ ok:
}
full_check:
- return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
+ return __fib_validate_source(skb, src, dst, dscp, oif, dev, r, idev,
+ itag);
}
static inline __be32 sk_extract_addr(struct sockaddr *addr)
@@ -916,7 +924,8 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct rtmsg *rtm;
int err, i;
- ASSERT_RTNL();
+ if (filter->rtnl_held)
+ ASSERT_RTNL();
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
@@ -961,7 +970,10 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
break;
case RTA_OIF:
ifindex = nla_get_u32(tb[i]);
- filter->dev = __dev_get_by_index(net, ifindex);
+ if (filter->rtnl_held)
+ filter->dev = __dev_get_by_index(net, ifindex);
+ else
+ filter->dev = dev_get_by_index_rcu(net, ifindex);
if (!filter->dev)
return -ENODEV;
break;
@@ -983,20 +995,24 @@ EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = { .dump_routes = true,
- .dump_exceptions = true };
+ struct fib_dump_filter filter = {
+ .dump_routes = true,
+ .dump_exceptions = true,
+ .rtnl_held = false,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0, err;
+ int dumped = 0, err = 0;
+ rcu_read_lock();
if (cb->strict_check) {
err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
if (err < 0)
- return err;
+ goto unlock;
} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(nlh);
@@ -1005,29 +1021,26 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* ipv4 does not use prefix flag */
if (filter.flags & RTM_F_PREFIX)
- return skb->len;
+ goto unlock;
if (filter.table_id) {
tb = fib_get_table(net, filter.table_id);
if (!tb) {
if (rtnl_msg_family(cb->nlh) != PF_INET)
- return skb->len;
+ goto unlock;
NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
- return -ENOENT;
+ err = -ENOENT;
+ goto unlock;
}
-
- rcu_read_lock();
err = fib_table_dump(tb, skb, cb, &filter);
- rcu_read_unlock();
- return skb->len ? : err;
+ goto unlock;
}
s_h = cb->args[0];
s_e = cb->args[1];
- rcu_read_lock();
-
+ err = 0;
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
@@ -1038,25 +1051,20 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
err = fib_table_dump(tb, skb, cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
+ if (err < 0)
+ goto out;
dumped = 1;
next:
e++;
}
}
out:
- err = skb->len;
-out_err:
- rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
+unlock:
+ rcu_read_unlock();
return err;
}
@@ -1343,7 +1351,7 @@ static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
struct flowi4 fl4 = {
.flowi4_mark = frn->fl_mark,
.daddr = frn->fl_addr,
- .flowi4_tos = frn->fl_tos,
+ .flowi4_tos = frn->fl_tos & INET_DSCP_MASK,
.flowi4_scope = frn->fl_scope,
};
struct fib_table *tb;
@@ -1648,6 +1656,15 @@ static struct pernet_operations fib_net_ops = {
.exit_batch = fib_net_exit_batch,
};
+static const struct rtnl_msg_handler fib_rtnl_msg_handlers[] __initconst = {
+ {.protocol = PF_INET, .msgtype = RTM_NEWROUTE,
+ .doit = inet_rtm_newroute},
+ {.protocol = PF_INET, .msgtype = RTM_DELROUTE,
+ .doit = inet_rtm_delroute},
+ {.protocol = PF_INET, .msgtype = RTM_GETROUTE, .dumpit = inet_dump_fib,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
+};
+
void __init ip_fib_init(void)
{
fib_trie_init();
@@ -1657,7 +1674,5 @@ void __init ip_fib_init(void)
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0);
+ rtnl_register_many(fib_rtnl_msg_handlers);
}
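In fib_frontend.c, __fib_validate_source() now returns negated SKB drop reasons instead of generic -EINVAL/-EXDEV, so callers can report a precise reason when they free the packet. A userspace sketch of the convention (the numeric reason values below are made up):

/* Userspace sketch of the error convention: return the negated drop reason
 * on failure, 0 on success; the caller negates it back for reporting. */
#include <stdio.h>

enum drop_reason {
	DROP_IP_INVALID_SOURCE = 100,   /* hypothetical numeric values */
	DROP_IP_LOCAL_SOURCE,
	DROP_IP_RPFILTER,
};

static int validate_source(int route_type, int accept_local)
{
	if (route_type != 1 /* RTN_UNICAST */) {
		if (route_type != 2 /* RTN_LOCAL */)
			return -DROP_IP_INVALID_SOURCE;
		if (!accept_local)
			return -DROP_IP_LOCAL_SOURCE;
	}
	return 0;
}

int main(void)
{
	int err = validate_source(3, 0);

	if (err < 0)
		printf("drop packet, reason code %d\n", -err);
	else
		printf("source ok\n");
	return 0;
}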
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
index 0e23ade74493..b1551c26554b 100644
--- a/net/ipv4/fib_notifier.c
+++ b/net/ipv4/fib_notifier.c
@@ -22,15 +22,15 @@ int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
ASSERT_RTNL();
info->family = AF_INET;
- net->ipv4.fib_seq++;
+ /* Paired with READ_ONCE() in fib4_seq_read() */
+ WRITE_ONCE(net->ipv4.fib_seq, net->ipv4.fib_seq + 1);
return call_fib_notifiers(net, event_type, info);
}
-static unsigned int fib4_seq_read(struct net *net)
+static unsigned int fib4_seq_read(const struct net *net)
{
- ASSERT_RTNL();
-
- return net->ipv4.fib_seq + fib4_rules_seq_read(net);
+ /* Paired with WRITE_ONCE() in call_fib4_notifiers() */
+ return READ_ONCE(net->ipv4.fib_seq) + fib4_rules_seq_read(net);
}
static int fib4_dump(struct net *net, struct notifier_block *nb,
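The fib_notifier.c hunk replaces the RTNL assertion on the read side with a READ_ONCE()/WRITE_ONCE() pair on net->ipv4.fib_seq, so the sequence can be sampled without holding the lock. A userspace sketch of the pattern, using C11 relaxed atomics as a stand-in for READ_ONCE()/WRITE_ONCE():

/* Userspace sketch of the lockless sequence counter: the writer (still
 * serialized by its own lock in the kernel) publishes increments with a
 * relaxed store; readers sample with a relaxed load and need no lock. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int fib_seq;

static void notifier_event(void)
{
	atomic_store_explicit(&fib_seq,
			      atomic_load_explicit(&fib_seq, memory_order_relaxed) + 1,
			      memory_order_relaxed);
}

static unsigned int seq_read(void)
{
	return atomic_load_explicit(&fib_seq, memory_order_relaxed);
}

int main(void)
{
	notifier_event();
	notifier_event();
	printf("sequence now %u\n", seq_read());
	return 0;
}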
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 5bdd1c016009..9517b8667e00 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -37,6 +37,7 @@ struct fib4_rule {
u8 dst_len;
u8 src_len;
dscp_t dscp;
+ u8 dscp_full:1; /* DSCP or TOS selector */
__be32 src;
__be32 srcmask;
__be32 dst;
@@ -73,7 +74,7 @@ int fib4_rules_dump(struct net *net, struct notifier_block *nb,
return fib_rules_dump(net, nb, AF_INET, extack);
}
-unsigned int fib4_rules_seq_read(struct net *net)
+unsigned int fib4_rules_seq_read(const struct net *net)
{
return fib_rules_seq_read(net, AF_INET);
}
@@ -186,7 +187,15 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule,
((daddr ^ r->dst) & r->dstmask))
return 0;
- if (r->dscp && r->dscp != inet_dsfield_to_dscp(fl4->flowi4_tos))
+ /* When DSCP selector is used we need to match on the entire DSCP field
+ * in the flow information structure. When TOS selector is used we need
+ * to mask the upper three DSCP bits prior to matching to maintain
+ * legacy behavior.
+ */
+ if (r->dscp_full && r->dscp != inet_dsfield_to_dscp(fl4->flowi4_tos))
+ return 0;
+ else if (!r->dscp_full && r->dscp &&
+ !fib_dscp_masked_match(r->dscp, fl4))
return 0;
if (rule->ip_proto && (rule->ip_proto != fl4->flowi4_proto))
@@ -217,6 +226,20 @@ static struct fib_table *fib_empty_table(struct net *net)
return NULL;
}
+static int fib4_nl2rule_dscp(const struct nlattr *nla, struct fib4_rule *rule4,
+ struct netlink_ext_ack *extack)
+{
+ if (rule4->dscp) {
+ NL_SET_ERR_MSG(extack, "Cannot specify both TOS and DSCP");
+ return -EINVAL;
+ }
+
+ rule4->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2);
+ rule4->dscp_full = true;
+
+ return 0;
+}
+
static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -226,6 +249,12 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
int err = -EINVAL;
struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+ if (tb[FRA_FLOWLABEL] || tb[FRA_FLOWLABEL_MASK]) {
+ NL_SET_ERR_MSG(extack,
+ "Flow label cannot be specified for IPv4 FIB rules");
+ goto errout;
+ }
+
if (!inet_validate_dscp(frh->tos)) {
NL_SET_ERR_MSG(extack,
"Invalid dsfield (tos): ECN bits must be 0");
@@ -238,6 +267,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
rule4->dscp = inet_dsfield_to_dscp(frh->tos);
+ if (tb[FRA_DSCP] &&
+ fib4_nl2rule_dscp(tb[FRA_DSCP], rule4, extack) < 0)
+ goto errout;
+
/* split local/main if they are not already split */
err = fib_unmerge(net);
if (err)
@@ -320,9 +353,19 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->dst_len && (rule4->dst_len != frh->dst_len))
return 0;
- if (frh->tos && inet_dscp_to_dsfield(rule4->dscp) != frh->tos)
+ if (frh->tos &&
+ (rule4->dscp_full ||
+ inet_dscp_to_dsfield(rule4->dscp) != frh->tos))
return 0;
+ if (tb[FRA_DSCP]) {
+ dscp_t dscp;
+
+ dscp = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP]) << 2);
+ if (!rule4->dscp_full || rule4->dscp != dscp)
+ return 0;
+ }
+
#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
return 0;
@@ -344,7 +387,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->dst_len = rule4->dst_len;
frh->src_len = rule4->src_len;
- frh->tos = inet_dscp_to_dsfield(rule4->dscp);
+
+ if (rule4->dscp_full) {
+ frh->tos = 0;
+ if (nla_put_u8(skb, FRA_DSCP,
+ inet_dscp_to_dsfield(rule4->dscp) >> 2))
+ goto nla_put_failure;
+ } else {
+ frh->tos = inet_dscp_to_dsfield(rule4->dscp);
+ }
if ((rule4->dst_len &&
nla_put_in_addr(skb, FRA_DST, rule4->dst)) ||
@@ -366,7 +417,8 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(4) /* dst */
+ nla_total_size(4) /* src */
- + nla_total_size(4); /* flow */
+ + nla_total_size(4) /* flow */
+ + nla_total_size(1); /* dscp */
}
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
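The fib_rules.c changes introduce a dedicated FRA_DSCP selector next to the legacy TOS one: netlink carries the 6-bit DSCP value, while the rule stores it shifted into dsfield position. A userspace sketch of the conversions (the legacy mask shown follows the comment in the hunk above and is illustrative only):

/* Userspace sketch of the two DSCP encodings: FRA_DSCP carries the 6-bit
 * DSCP, the rule keeps the dsfield form (DSCP in bits 7..2, ECN in 1..0);
 * legacy TOS matching ignores the upper three DSCP bits. */
#include <stdint.h>
#include <stdio.h>

static uint8_t dscp_to_dsfield(uint8_t dscp6) { return dscp6 << 2; }
static uint8_t dsfield_to_dscp(uint8_t dsf)   { return dsf >> 2; }

int main(void)
{
	uint8_t nl_dscp = 46;                         /* e.g. EF, as sent in FRA_DSCP */
	uint8_t dsfield = dscp_to_dsfield(nl_dscp);   /* 0xb8 */

	printf("FRA_DSCP %u -> dsfield 0x%02x -> back to %u\n",
	       nl_dscp, dsfield, dsfield_to_dscp(dsfield));

	/* legacy-visible part: lower three DSCP bits, i.e. dsfield & 0x1c */
	printf("legacy-visible part of dsfield: 0x%02x\n", dsfield & 0x1c);
	return 0;
}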
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5eb1b8d302bb..d2cee5c314f5 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -50,17 +50,12 @@
#include "fib_lookup.h"
-static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_hash_bits;
static unsigned int fib_info_cnt;
-#define DEVINDEX_HASHBITS 8
-#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
-static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
-
/* for_nexthops and change_nexthops only used when nexthop object
* is not set in a fib_info. The logic within can reference fib_nh.
*/
@@ -254,18 +249,17 @@ void free_fib_info(struct fib_info *fi)
return;
}
- call_rcu(&fi->rcu, free_fib_info_rcu);
+ call_rcu_hurry(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
- spin_lock_bh(&fib_info_lock);
+ ASSERT_RTNL();
if (fi && refcount_dec_and_test(&fi->fib_treeref)) {
hlist_del(&fi->fib_hash);
- /* Paired with READ_ONCE() in fib_create_info(). */
- WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1);
+ fib_info_cnt--;
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
@@ -275,14 +269,13 @@ void fib_release_info(struct fib_info *fi)
change_nexthops(fi) {
if (!nexthop_nh->fib_nh_dev)
continue;
- hlist_del(&nexthop_nh->nh_hash);
+ hlist_del_rcu(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
}
/* Paired with READ_ONCE() from fib_table_lookup() */
WRITE_ONCE(fi->fib_dead, 1);
fib_info_put(fi);
}
- spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
@@ -322,17 +315,9 @@ static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
return 0;
}
-static inline unsigned int fib_devindex_hashfn(unsigned int val)
-{
- return hash_32(val, DEVINDEX_HASHBITS);
-}
-
-static struct hlist_head *
-fib_info_devhash_bucket(const struct net_device *dev)
+static struct hlist_head *fib_nh_head(struct net_device *dev)
{
- u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex;
-
- return &fib_info_devhash[fib_devindex_hashfn(val)];
+ return &dev->fib_nh_head;
}
static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope,
@@ -347,11 +332,10 @@ static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope,
return val;
}
-static unsigned int fib_info_hashfn_result(unsigned int val)
+static unsigned int fib_info_hashfn_result(const struct net *net,
+ unsigned int val)
{
- unsigned int mask = (fib_info_hash_size - 1);
-
- return (val ^ (val >> 7) ^ (val >> 12)) & mask;
+ return hash_32(val ^ net_hash_mix(net), fib_info_hash_bits);
}
static inline unsigned int fib_info_hashfn(struct fib_info *fi)
@@ -363,14 +347,14 @@ static inline unsigned int fib_info_hashfn(struct fib_info *fi)
fi->fib_priority);
if (fi->nh) {
- val ^= fib_devindex_hashfn(fi->nh->id);
+ val ^= fi->nh->id;
} else {
for_nexthops(fi) {
- val ^= fib_devindex_hashfn(nh->fib_nh_oif);
+ val ^= nh->fib_nh_oif;
} endfor_nexthops(fi)
}
- return fib_info_hashfn_result(val);
+ return fib_info_hashfn_result(fi->fib_net, val);
}
/* no metrics, only nexthop id */
@@ -381,11 +365,11 @@ static struct fib_info *fib_find_info_nh(struct net *net,
struct fib_info *fi;
unsigned int hash;
- hash = fib_info_hashfn_1(fib_devindex_hashfn(cfg->fc_nh_id),
+ hash = fib_info_hashfn_1(cfg->fc_nh_id,
cfg->fc_protocol, cfg->fc_scope,
(__force u32)cfg->fc_prefsrc,
cfg->fc_priority);
- hash = fib_info_hashfn_result(hash);
+ hash = fib_info_hashfn_result(net, hash);
head = &fib_info_hash[hash];
hlist_for_each_entry(fi, head, fib_hash) {
@@ -437,28 +421,23 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
}
/* Check, that the gateway is already configured.
- * Used only by redirect accept routine.
+ * Used only by redirect accept routine, under rcu_read_lock();
*/
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
struct hlist_head *head;
struct fib_nh *nh;
- spin_lock(&fib_info_lock);
-
- head = fib_info_devhash_bucket(dev);
+ head = fib_nh_head(dev);
- hlist_for_each_entry(nh, head, nh_hash) {
- if (nh->fib_nh_dev == dev &&
- nh->fib_nh_gw4 == gw &&
+ hlist_for_each_entry_rcu(nh, head, nh_hash) {
+ DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev);
+ if (nh->fib_nh_gw4 == gw &&
!(nh->fib_nh_flags & RTNH_F_DEAD)) {
- spin_unlock(&fib_info_lock);
return 0;
}
}
- spin_unlock(&fib_info_lock);
-
return -1;
}
@@ -543,8 +522,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
info->nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
+ rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
@@ -1030,7 +1008,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
bool ecn_ca = false;
nla_strscpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
} else {
if (nla_len(nla) != sizeof(u32))
return false;
@@ -1278,7 +1256,7 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
unsigned int old_size = fib_info_hash_size;
unsigned int i;
- spin_lock_bh(&fib_info_lock);
+ ASSERT_RTNL();
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
fib_info_hash_size = new_size;
@@ -1315,8 +1293,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
}
}
- spin_unlock_bh(&fib_info_lock);
-
kvfree(old_info_hash);
kvfree(old_laddrhash);
}
@@ -1392,6 +1368,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
int nhs = 1;
struct net *net = cfg->fc_nlinfo.nl_net;
+ ASSERT_RTNL();
if (cfg->fc_type > RTN_MAX)
goto err_inval;
@@ -1434,8 +1411,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
err = -ENOBUFS;
- /* Paired with WRITE_ONCE() in fib_release_info() */
- if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) {
+ if (fib_info_cnt >= fib_info_hash_size) {
unsigned int new_size = fib_info_hash_size << 1;
struct hlist_head *new_info_hash;
struct hlist_head *new_laddrhash;
@@ -1459,8 +1435,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
if (!fi)
goto failure;
- fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
- cfg->fc_mx_len, extack);
+ fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack);
if (IS_ERR(fi->fib_metrics)) {
err = PTR_ERR(fi->fib_metrics);
kfree(fi);
@@ -1595,7 +1570,7 @@ link_it:
refcount_set(&fi->fib_treeref, 1);
refcount_set(&fi->fib_clntref, 1);
- spin_lock_bh(&fib_info_lock);
+
fib_info_cnt++;
hlist_add_head(&fi->fib_hash,
&fib_info_hash[fib_info_hashfn(fi)]);
@@ -1613,11 +1588,10 @@ link_it:
if (!nexthop_nh->fib_nh_dev)
continue;
- head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev);
- hlist_add_head(&nexthop_nh->nh_hash, head);
+ head = fib_nh_head(nexthop_nh->fib_nh_dev);
+ hlist_add_head_rcu(&nexthop_nh->nh_hash, head);
} endfor_nexthops(fi)
}
- spin_unlock_bh(&fib_info_lock);
return fi;
err_inval:
@@ -1967,12 +1941,12 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig)
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
- struct hlist_head *head = fib_info_devhash_bucket(dev);
+ struct hlist_head *head = fib_nh_head(dev);
struct fib_nh *nh;
hlist_for_each_entry(nh, head, nh_hash) {
- if (nh->fib_nh_dev == dev)
- fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu);
+ DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev);
+ fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu);
}
}
@@ -1986,7 +1960,7 @@ void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
*/
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
- struct hlist_head *head = fib_info_devhash_bucket(dev);
+ struct hlist_head *head = fib_nh_head(dev);
struct fib_info *prev_fi = NULL;
int scope = RT_SCOPE_NOWHERE;
struct fib_nh *nh;
@@ -2000,7 +1974,8 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
int dead;
BUG_ON(!fi->fib_nhs);
- if (nh->fib_nh_dev != dev || fi == prev_fi)
+ DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev);
+ if (fi == prev_fi)
continue;
prev_fi = fi;
dead = 0;
@@ -2067,8 +2042,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
if (fa->fa_slen != slen)
continue;
- if (fa->fa_dscp &&
- fa->fa_dscp != inet_dsfield_to_dscp(flp->flowi4_tos))
+ if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp))
continue;
if (fa->tb_id != tb->tb_id)
continue;
@@ -2151,7 +2125,7 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
}
prev_fi = NULL;
- head = fib_info_devhash_bucket(dev);
+ head = fib_nh_head(dev);
ret = 0;
hlist_for_each_entry(nh, head, nh_hash) {
@@ -2159,7 +2133,8 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
int alive;
BUG_ON(!fi->fib_nhs);
- if (nh->fib_nh_dev != dev || fi == prev_fi)
+ DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev);
+ if (fi == prev_fi)
continue;
prev_fi = fi;
@@ -2270,6 +2245,15 @@ void fib_select_path(struct net *net, struct fib_result *res,
fib_select_default(fl4, res);
check_saddr:
- if (!fl4->saddr)
- fl4->saddr = fib_result_prefsrc(net, res);
+ if (!fl4->saddr) {
+ struct net_device *l3mdev;
+
+ l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev);
+
+ if (!l3mdev ||
+ l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev)
+ fl4->saddr = fib_result_prefsrc(net, res);
+ else
+ fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK);
+ }
}
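
In fib_semantics.c the fib_info hash and the per-device nexthop list move from the fib_info_lock spinlock to RTNL plus RCU, nexthops now hang off dev->fib_nh_head instead of a global devhash, and fib_info_hashfn_result() is salted with net_hash_mix() and computed with hash_32(). A rough sketch of the old and new bucket computations, assuming the kernel's 32-bit multiplicative hash constant; the salt passed in main() is a made-up value standing in for net_hash_mix().

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u	/* multiplier used by the kernel's hash_32() */

/* old scheme: fold the value, then mask it to the table size */
static unsigned int bucket_old(uint32_t val, unsigned int table_size)
{
	return (val ^ (val >> 7) ^ (val >> 12)) & (table_size - 1);
}

/* new scheme: multiplicative hash over a bit count, salted per namespace */
static unsigned int bucket_new(uint32_t val, uint32_t net_mix, unsigned int bits)
{
	return (uint32_t)((val ^ net_mix) * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t val = 0x12345678;

	printf("old: %u\n", bucket_old(val, 256));
	printf("new: %u\n", bucket_new(val, 0x9e3779b9, 8));	/* illustrative salt */
	return 0;
}

The salt matters because fib_info entries from different network namespaces now share the hash table less predictably, and expressing the size as a bit count lets the same helper serve both the resize path and fib_find_info_nh().
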
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3ff35f811765..d6411ac81096 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -292,15 +292,9 @@ static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;
-static void __alias_free_mem(struct rcu_head *head)
-{
- struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
- kmem_cache_free(fn_alias_kmem, fa);
-}
-
static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
- call_rcu(&fa->rcu, __alias_free_mem);
+ kfree_rcu(fa, rcu);
}
#define TNODE_VMALLOC_MAX \
@@ -501,7 +495,7 @@ static void tnode_free(struct key_vector *tn)
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
- synchronize_rcu();
+ synchronize_net();
}
}
@@ -1580,8 +1574,7 @@ found:
if (index >= (1ul << fa->fa_slen))
continue;
}
- if (fa->fa_dscp &&
- inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
+ if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp))
continue;
/* Paired with WRITE_ONCE() in fib_release_info() */
if (READ_ONCE(fi->fib_dead))
@@ -1629,6 +1622,7 @@ set_result:
res->nhc = nhc;
res->type = fa->fa_type;
res->scope = fi->fib_scope;
+ res->dscp = fa->fa_dscp;
res->fi = fi;
res->table = tb;
res->fa_head = &n->leaf;
@@ -2368,7 +2362,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
- return skb->len;
+ return 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
@@ -2394,7 +2388,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
cb->args[3] = key;
cb->args[2] = count;
- return skb->len;
+ return 0;
}
void __init fib_trie_init(void)
@@ -3005,7 +2999,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%s\t%08X\t%08X\t%04X\t%d\t%u\t"
- "%d\t%08X\t%d\t%u\t%u",
+ "%u\t%08X\t%d\t%u\t%u",
nhc->nhc_dev ? nhc->nhc_dev->name : "*",
prefix, gw, flags, 0, 0,
fi->fib_priority,
@@ -3017,7 +3011,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
} else {
seq_printf(seq,
"*\t%08X\t%08X\t%04X\t%d\t%u\t"
- "%d\t%08X\t%d\t%u\t%u",
+ "%u\t%08X\t%d\t%u\t%u",
prefix, 0, flags, 0, 0, 0,
mask, 0, 0, 0);
}
diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c
index 4da03bf45c9b..54984f3170a8 100644
--- a/net/ipv4/fou_bpf.c
+++ b/net/ipv4/fou_bpf.c
@@ -64,7 +64,7 @@ __bpf_kfunc int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
info->encap.type = TUNNEL_ENCAP_NONE;
}
- if (info->key.tun_flags & TUNNEL_CSUM)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
info->encap.flags |= TUNNEL_ENCAP_FLAG_CSUM;
info->encap.sport = encap->sport;
@@ -100,10 +100,10 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fou_kfunc_set)
+BTF_KFUNCS_START(fou_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
BTF_ID_FLAGS(func, bpf_skb_get_fou_encap)
-BTF_SET8_END(fou_kfunc_set)
+BTF_KFUNCS_END(fou_kfunc_set)
static const struct btf_kfunc_id_set fou_bpf_kfunc_set = {
.owner = THIS_MODULE,
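
The TUNNEL_CSUM check in fou_bpf.c changes because tunnel flags are now carried in a bitmap indexed by bit number (IP_TUNNEL_CSUM_BIT) rather than in a 16-bit flags word, so the test becomes test_bit() instead of a bitwise AND. A self-contained sketch of the two representations; the bit number and bitmap width here are illustrative, not the kernel's definitions.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define TUNNEL_CSUM_OLD 0x0001u		/* old: one bit in a 16-bit flags word */

enum { TUNNEL_CSUM_BIT = 0, TUNNEL_FLAG_BITS = 32 };	/* new: bit numbers into a bitmap */

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS	((TUNNEL_FLAG_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_flag(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool test_flag(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned short old_flags = TUNNEL_CSUM_OLD;
	unsigned long new_flags[BITMAP_WORDS] = { 0 };

	set_flag(new_flags, TUNNEL_CSUM_BIT);

	printf("old check: %d, new check: %d\n",
	       !!(old_flags & TUNNEL_CSUM_OLD),
	       test_flag(new_flags, TUNNEL_CSUM_BIT));
	return 0;
}
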
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 0c41076e31ed..3e30745e2c09 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -50,7 +50,7 @@ struct fou_net {
static inline struct fou *fou_from_sock(struct sock *sk)
{
- return sk->sk_user_data;
+ return rcu_dereference_sk_user_data(sk);
}
static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
struct sk_buff *skb)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
+ u8 proto;
+
+ if (!fou)
+ goto out;
+
+ proto = fou->protocol;
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
- int err = -ENOSYS;
+ u8 proto;
+ int err;
+
+ if (!fou) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ proto = fou->protocol;
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
- if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
+ err = -ENOSYS;
goto out;
+ }
err = ops->callbacks.gro_complete(skb, nhoff);
@@ -322,6 +338,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
skb_gro_remcsum_init(&grc);
+ if (!fou)
+ goto out;
+
off = skb_gro_offset(skb);
len = off + sizeof(*guehdr);
@@ -351,7 +370,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
optlen = guehdr->hlen << 2;
len += optlen;
- if (skb_gro_header_hard(skb, len)) {
+ if (!skb_gro_may_pull(skb, len)) {
guehdr = skb_gro_header_slow(skb, len, off);
if (unlikely(!guehdr))
goto out;
@@ -433,7 +452,7 @@ next_proto:
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
- if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
+ if (!ops || !ops->callbacks.gro_receive)
goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c
index 98b90107b5ab..3d9614609b2d 100644
--- a/net/ipv4/fou_nl.c
+++ b/net/ipv4/fou_nl.c
@@ -12,7 +12,7 @@
/* Global operation policy for fou */
const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
- [FOU_ATTR_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_PORT] = { .type = NLA_BE16, },
[FOU_ATTR_AF] = { .type = NLA_U8, },
[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
[FOU_ATTR_TYPE] = { .type = NLA_U8, },
@@ -21,7 +21,7 @@ const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
[FOU_ATTR_LOCAL_V6] = { .len = 16, },
[FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
[FOU_ATTR_PEER_V6] = { .len = 16, },
- [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_PEER_PORT] = { .type = NLA_BE16, },
[FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
};
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index cbb2b4bb0dfa..6701a98d9a9f 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -73,7 +73,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
- tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+ gre_flags_to_tnl_flags(tpi->flags, greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, nhs + hdr_len))
@@ -217,5 +217,5 @@ module_init(gre_init);
module_exit(gre_exit);
MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
-MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 311e70bfce40..5028c72d494a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -174,7 +174,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
grehlen += GRE_HEADER_SECTION;
hlen = off + grehlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e63a3bf99617..5482edb5aade 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -92,6 +92,10 @@
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
+#include <net/addrconf.h>
+#include <net/inet_dscp.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/icmp.h>
/*
* Build xmit assembly blocks
@@ -217,61 +221,56 @@ static inline void icmp_xmit_unlock(struct sock *sk)
spin_unlock(&sk->sk_lock.slock);
}
-int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-int sysctl_icmp_msgs_burst __read_mostly = 50;
-
-static struct {
- spinlock_t lock;
- u32 credit;
- u32 stamp;
-} icmp_global = {
- .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
-};
-
/**
* icmp_global_allow - Are we allowed to send one more ICMP message ?
+ * @net: network namespace
*
* Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and can not send another packet.
- * Note: called with BH disabled
+ * Works in tandem with icmp_global_consume().
*/
-bool icmp_global_allow(void)
+bool icmp_global_allow(struct net *net)
{
- u32 credit, delta, incr = 0, now = (u32)jiffies;
- bool rc = false;
+ u32 delta, now, oldstamp;
+ int incr, new, old;
- /* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock. The READ_ONCE() are paired
- * with the following WRITE_ONCE() in this same function.
+ /* Note: many cpus could find this condition true.
+ * Then later icmp_global_consume() could consume more credits,
+ * this is an acceptable race.
*/
- if (!READ_ONCE(icmp_global.credit)) {
- delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
- if (delta < HZ / 50)
- return false;
- }
+ if (atomic_read(&net->ipv4.icmp_global_credit) > 0)
+ return true;
- spin_lock(&icmp_global.lock);
- delta = min_t(u32, now - icmp_global.stamp, HZ);
- if (delta >= HZ / 50) {
- incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
- if (incr)
- WRITE_ONCE(icmp_global.stamp, now);
- }
- credit = min_t(u32, icmp_global.credit + incr,
- READ_ONCE(sysctl_icmp_msgs_burst));
- if (credit) {
- /* We want to use a credit of one in average, but need to randomize
- * it for security reasons.
- */
- credit = max_t(int, credit - get_random_u32_below(3), 0);
- rc = true;
+ now = jiffies;
+ oldstamp = READ_ONCE(net->ipv4.icmp_global_stamp);
+ delta = min_t(u32, now - oldstamp, HZ);
+ if (delta < HZ / 50)
+ return false;
+
+ incr = READ_ONCE(net->ipv4.sysctl_icmp_msgs_per_sec) * delta / HZ;
+ if (!incr)
+ return false;
+
+ if (cmpxchg(&net->ipv4.icmp_global_stamp, oldstamp, now) == oldstamp) {
+ old = atomic_read(&net->ipv4.icmp_global_credit);
+ do {
+ new = min(old + incr, READ_ONCE(net->ipv4.sysctl_icmp_msgs_burst));
+ } while (!atomic_try_cmpxchg(&net->ipv4.icmp_global_credit, &old, new));
}
- WRITE_ONCE(icmp_global.credit, credit);
- spin_unlock(&icmp_global.lock);
- return rc;
+ return true;
}
EXPORT_SYMBOL(icmp_global_allow);
+void icmp_global_consume(struct net *net)
+{
+ int credits = get_random_u32_below(3);
+
+ /* Note: this might make icmp_global.credit negative. */
+ if (credits)
+ atomic_sub(credits, &net->ipv4.icmp_global_credit);
+}
+EXPORT_SYMBOL(icmp_global_consume);
+
static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
if (type > NR_ICMP_TYPES)
@@ -288,14 +287,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code)
return false;
}
-static bool icmpv4_global_allow(struct net *net, int type, int code)
+static bool icmpv4_global_allow(struct net *net, int type, int code,
+ bool *apply_ratelimit)
{
if (icmpv4_mask_allow(net, type, code))
return true;
- if (icmp_global_allow())
+ if (icmp_global_allow(net)) {
+ *apply_ratelimit = true;
return true;
-
+ }
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -305,29 +306,31 @@ static bool icmpv4_global_allow(struct net *net, int type, int code)
*/
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
- struct flowi4 *fl4, int type, int code)
+ struct flowi4 *fl4, int type, int code,
+ bool apply_ratelimit)
{
struct dst_entry *dst = &rt->dst;
struct inet_peer *peer;
bool rc = true;
- int vif;
- if (icmpv4_mask_allow(net, type, code))
- goto out;
+ if (!apply_ratelimit)
+ return true;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;
- vif = l3mdev_master_ifindex(dst->dev);
- peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
+ rcu_read_lock();
+ peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
+ l3mdev_master_ifindex_rcu(dst->dev));
rc = inet_peer_xrlim_allow(peer,
READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
- if (peer)
- inet_putpeer(peer);
+ rcu_read_unlock();
out:
if (!rc)
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
+ else
+ icmp_global_consume(net);
return rc;
}
@@ -396,9 +399,10 @@ static void icmp_push_reply(struct sock *sk,
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
- struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
- struct net *net = dev_net(rt->dst.dev);
+ struct net *net = dev_net_rcu(rt->dst.dev);
+ bool apply_ratelimit = false;
+ struct ipcm_cookie ipc;
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
@@ -410,11 +414,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
return;
- /* Needed by both icmp_global_allow and icmp_xmit_lock */
+ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
local_bh_disable();
- /* global icmp_msgs_per_sec */
- if (!icmpv4_global_allow(net, type, code))
+ /* is global icmp_msgs_per_sec exhausted ? */
+ if (!icmpv4_global_allow(net, type, code, &apply_ratelimit))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
@@ -440,14 +444,14 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = sock_net_uid(net, NULL);
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(skb)));
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
- if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
+ if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
@@ -473,15 +477,14 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
return route_lookup_dev;
}
-static struct rtable *icmp_route_lookup(struct net *net,
- struct flowi4 *fl4,
+static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
struct sk_buff *skb_in,
- const struct iphdr *iph,
- __be32 saddr, u8 tos, u32 mark,
- int type, int code,
- struct icmp_bxm *param)
+ const struct iphdr *iph, __be32 saddr,
+ dscp_t dscp, u32 mark, int type,
+ int code, struct icmp_bxm *param)
{
struct net_device *route_lookup_dev;
+ struct dst_entry *dst, *dst2;
struct rtable *rt, *rt2;
struct flowi4 fl4_dec;
int err;
@@ -492,7 +495,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
- fl4->flowi4_tos = RT_TOS(tos);
+ fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
@@ -507,16 +510,20 @@ static struct rtable *icmp_route_lookup(struct net *net,
/* No need to clone since we're just using its address. */
rt2 = rt;
- rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
- flowi4_to_flowi(fl4), NULL, 0);
- if (!IS_ERR(rt)) {
+ dst = xfrm_lookup(net, &rt->dst,
+ flowi4_to_flowi(fl4), NULL, 0);
+ rt = dst_rtable(dst);
+ if (!IS_ERR(dst)) {
if (rt != rt2)
return rt;
- } else if (PTR_ERR(rt) == -EPERM) {
+ if (inet_addr_type_dev_table(net, route_lookup_dev,
+ fl4->daddr) == RTN_LOCAL)
+ return rt;
+ } else if (PTR_ERR(dst) == -EPERM) {
rt = NULL;
- } else
+ } else {
return rt;
-
+ }
err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
if (err)
goto relookup_failed;
@@ -540,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
orefdst = skb_in->_skb_refdst; /* save old refdst */
skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
- RT_TOS(tos), rt2->dst.dev);
+ dscp, rt2->dst.dev) ? -EINVAL : 0;
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
@@ -550,19 +557,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
- flowi4_to_flowi(&fl4_dec), NULL,
- XFRM_LOOKUP_ICMP);
- if (!IS_ERR(rt2)) {
+ dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL,
+ XFRM_LOOKUP_ICMP);
+ rt2 = dst_rtable(dst2);
+ if (!IS_ERR(dst2)) {
dst_release(&rt->dst);
memcpy(fl4, &fl4_dec, sizeof(*fl4));
rt = rt2;
- } else if (PTR_ERR(rt2) == -EPERM) {
+ } else if (PTR_ERR(dst2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
- err = PTR_ERR(rt2);
+ err = PTR_ERR(dst2);
goto relookup_failed;
}
return rt;
@@ -591,6 +598,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
+ bool apply_ratelimit = false;
struct ipcm_cookie ipc;
struct flowi4 fl4;
__be32 saddr;
@@ -600,12 +608,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
struct sock *sk;
if (!rt)
- goto out;
+ return;
+
+ rcu_read_lock();
if (rt->dst.dev)
- net = dev_net(rt->dst.dev);
+ net = dev_net_rcu(rt->dst.dev);
else if (skb_in->dev)
- net = dev_net(skb_in->dev);
+ net = dev_net_rcu(skb_in->dev);
else
goto out;
@@ -672,7 +682,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
}
}
- /* Needed by both icmp_global_allow and icmp_xmit_lock */
+ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
@@ -680,7 +690,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
* loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
*/
if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
- !icmpv4_global_allow(net, type, code))
+ !icmpv4_global_allow(net, type, code, &apply_ratelimit))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
@@ -733,13 +743,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
ipc.opt = &icmp_param.replyopts.opt;
ipc.sockc.mark = mark;
- rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
- type, code, &icmp_param);
+ rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr,
+ inet_dsfield_to_dscp(tos), mark, type, code,
+ &icmp_param);
if (IS_ERR(rt))
goto out_unlock;
/* peer icmp_ratelimit */
- if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
+ if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */
@@ -767,6 +778,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
if (!fl4.saddr)
fl4.saddr = htonl(INADDR_DUMMY);
+ trace_icmp_send(skb_in, type, code);
+
icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
@@ -774,7 +787,8 @@ out_unlock:
icmp_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
-out:;
+out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL(__icmp_send);
@@ -823,7 +837,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
* avoid additional coding at protocol handlers.
*/
if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
- __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
+ __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
return;
}
@@ -857,7 +871,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
struct net *net;
u32 info = 0;
- net = dev_net(skb_dst(skb)->dev);
+ net = dev_net_rcu(skb_dst(skb)->dev);
/*
* Incomplete header ?
@@ -968,7 +982,7 @@ out_err:
static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
{
if (skb->len < sizeof(struct iphdr)) {
- __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
+ __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
return SKB_DROP_REASON_PKT_TOO_SMALL;
}
@@ -1000,7 +1014,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
struct icmp_bxm icmp_param;
struct net *net;
- net = dev_net(skb_dst(skb)->dev);
+ net = dev_net_rcu(skb_dst(skb)->dev);
/* should there be an ICMP stat for ignored echos? */
if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
return SKB_NOT_DROPPED_YET;
@@ -1029,9 +1043,11 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
{
+ struct net *net = dev_net_rcu(skb->dev);
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
struct icmp_ext_echo_iio *iio, _iio;
- struct net *net = dev_net(skb->dev);
+ struct inet6_dev *in6_dev;
+ struct in_device *in_dev;
struct net_device *dev;
char buff[IFNAMSIZ];
u16 ident_len;
@@ -1115,10 +1131,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
status |= ICMP_EXT_ECHOREPLY_ACTIVE;
- if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
+
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && rcu_access_pointer(in_dev->ifa_list))
status |= ICMP_EXT_ECHOREPLY_IPV4;
- if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
+
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev && !list_empty(&in6_dev->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;
+
dev_put(dev);
icmphdr->un.echo.sequence |= htons(status);
return true;
@@ -1163,7 +1184,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
return SKB_NOT_DROPPED_YET;
out_err:
- __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+ __ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
return SKB_DROP_REASON_PKT_TOO_SMALL;
}
@@ -1180,7 +1201,7 @@ int icmp_rcv(struct sk_buff *skb)
{
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct rtable *rt = skb_rtable(skb);
- struct net *net = dev_net(rt->dst.dev);
+ struct net *net = dev_net_rcu(rt->dst.dev);
struct icmphdr *icmph;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
@@ -1353,9 +1374,9 @@ int icmp_err(struct sk_buff *skb, u32 info)
struct iphdr *iph = (struct iphdr *)skb->data;
int offset = iph->ihl<<2;
struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
+ struct net *net = dev_net_rcu(skb->dev);
int type = icmp_hdr(skb)->type;
int code = icmp_hdr(skb)->code;
- struct net *net = dev_net(skb->dev);
/*
* Use ping_err to handle all icmp errors except those
@@ -1473,6 +1494,8 @@ static int __net_init icmp_sk_init(struct net *net)
net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
net->ipv4.sysctl_icmp_ratemask = 0x1818;
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
+ net->ipv4.sysctl_icmp_msgs_per_sec = 1000;
+ net->ipv4.sysctl_icmp_msgs_burst = 50;
return 0;
}
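
The icmp.c changes replace the single spinlock-protected global token bucket with per-netns atomics, split allowance from consumption (icmp_global_allow() only checks and refills credit, icmp_global_consume() charges a random 0-2 credits once a packet is really sent), and turn the limits into per-netns sysctls. A lockless userspace sketch of the same bucket shape using C11 atomics; the rate constants and the tick source are illustrative, not the kernel's HZ or sysctl values.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MSGS_PER_SEC	1000	/* stand-in for sysctl_icmp_msgs_per_sec */
#define MSGS_BURST	50	/* stand-in for sysctl_icmp_msgs_burst */
#define TICKS_PER_SEC	1000	/* stand-in for HZ */

static atomic_int  credit;	/* may dip slightly negative, like the kernel's counter */
static atomic_uint stamp;	/* time of the last refill, in ticks */

static unsigned int now_ticks(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned int)(ts.tv_sec * TICKS_PER_SEC +
			      ts.tv_nsec / (1000000000L / TICKS_PER_SEC));
}

/* May we send one more message?  Refill lazily, without a spinlock. */
static bool global_allow(void)
{
	unsigned int now, old_stamp, delta;
	int incr, old, new;

	if (atomic_load(&credit) > 0)
		return true;

	now = now_ticks();
	old_stamp = atomic_load(&stamp);
	delta = now - old_stamp;
	if (delta > TICKS_PER_SEC)
		delta = TICKS_PER_SEC;
	if (delta < TICKS_PER_SEC / 50)		/* refill at most every 20ms */
		return false;

	incr = MSGS_PER_SEC * delta / TICKS_PER_SEC;
	if (!incr)
		return false;

	/* Only the thread that wins the stamp update adds the new credits. */
	if (atomic_compare_exchange_strong(&stamp, &old_stamp, now)) {
		old = atomic_load(&credit);
		do {
			new = old + incr;
			if (new > MSGS_BURST)
				new = MSGS_BURST;
		} while (!atomic_compare_exchange_weak(&credit, &old, new));
	}
	return true;
}

/* Charge 0-2 credits after actually sending, to randomize the average cost. */
static void global_consume(void)
{
	int credits = rand() % 3;

	if (credits)
		atomic_fetch_sub(&credit, credits);
}

int main(void)
{
	int sent = 0;

	for (int i = 0; i < 10000; i++)
		if (global_allow()) {
			global_consume();
			sent++;
		}
	printf("allowed %d of 10000 attempts\n", sent);
	return 0;
}

Several threads may find credit positive and consume at once; that is the acceptable race the new kernel comment describes, and consuming only on successful transmission is why a failed route lookup or xfrm rejection no longer burns credit.
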
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index efeeca2b1328..3da126cea884 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -88,6 +88,8 @@
#include <linux/byteorder/generic.h>
#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/addrconf.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -120,12 +122,12 @@
*/
#define IGMP_V1_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
@@ -1430,6 +1432,65 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
*mc_hash = im->next_hash;
}
+static int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev,
+ const struct ip_mc_list *im, int event)
+{
+ struct ifa_cacheinfo ci;
+ struct ifaddrmsg *ifm;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct ifaddrmsg), 0);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ifm = nlmsg_data(nlh);
+ ifm->ifa_family = AF_INET;
+ ifm->ifa_prefixlen = 32;
+ ifm->ifa_flags = IFA_F_PERMANENT;
+ ifm->ifa_scope = RT_SCOPE_UNIVERSE;
+ ifm->ifa_index = dev->ifindex;
+
+ ci.cstamp = (READ_ONCE(im->mca_cstamp) - INITIAL_JIFFIES) * 100UL / HZ;
+ ci.tstamp = ci.cstamp;
+ ci.ifa_prefered = INFINITY_LIFE_TIME;
+ ci.ifa_valid = INFINITY_LIFE_TIME;
+
+ if (nla_put_in_addr(skb, IFA_MULTICAST, im->multiaddr) < 0 ||
+ nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci) < 0) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static void inet_ifmcaddr_notify(struct net_device *dev,
+ const struct ip_mc_list *im, int event)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOMEM;
+
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ nla_total_size(sizeof(__be32)) +
+ nla_total_size(sizeof(struct ifa_cacheinfo)),
+ GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ err = inet_fill_ifmcaddr(skb, dev, im, event);
+ if (err < 0) {
+ WARN_ON_ONCE(err == -EMSGSIZE);
+ nlmsg_free(skb);
+ goto error;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MCADDR, NULL, GFP_KERNEL);
+ return;
+error:
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_MCADDR, err);
+}
/*
* A socket has joined a multicast group on device dev.
@@ -1437,18 +1498,34 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
unsigned int mode, gfp_t gfp)
{
+ struct ip_mc_list __rcu **mc_hash;
struct ip_mc_list *im;
ASSERT_RTNL();
- for_each_pmc_rtnl(in_dev, im) {
- if (im->multiaddr == addr) {
- im->users++;
- ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
- goto out;
+ mc_hash = rtnl_dereference(in_dev->mc_hash);
+ if (mc_hash) {
+ u32 hash = hash_32((__force u32)addr, MC_HASH_SZ_LOG);
+
+ for (im = rtnl_dereference(mc_hash[hash]);
+ im;
+ im = rtnl_dereference(im->next_hash)) {
+ if (im->multiaddr == addr)
+ break;
+ }
+ } else {
+ for_each_pmc_rtnl(in_dev, im) {
+ if (im->multiaddr == addr)
+ break;
}
}
+ if (im) {
+ im->users++;
+ ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
+ goto out;
+ }
+
im = kzalloc(sizeof(*im), gfp);
if (!im)
goto out;
@@ -1457,6 +1534,8 @@ static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
im->interface = in_dev;
in_dev_hold(in_dev);
im->multiaddr = addr;
+ im->mca_cstamp = jiffies;
+ im->mca_tstamp = im->mca_cstamp;
/* initial mode is (EX, empty) */
im->sfmode = mode;
im->sfcount[mode] = 1;
@@ -1476,6 +1555,7 @@ static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
igmpv3_del_delrec(in_dev, im);
#endif
igmp_group_added(im);
+ inet_ifmcaddr_notify(in_dev->dev, im, RTM_NEWMULTICAST);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
out:
@@ -1689,6 +1769,8 @@ void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
*ip = i->next_rcu;
in_dev->mc_count--;
__igmp_group_dropped(i, gfp);
+ inet_ifmcaddr_notify(in_dev->dev, i,
+ RTM_DELMULTICAST);
ip_mc_clear_src(i);
if (!in_dev->dead)
@@ -1842,7 +1924,8 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
if (!dev) {
struct rtable *rt = ip_route_output(net,
imr->imr_multiaddr.s_addr,
- 0, 0, 0);
+ 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
dev = rt->dst.dev;
ip_rt_put(rt);
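
The inet_fill_ifmcaddr()/inet_ifmcaddr_notify() additions let userspace learn about IPv4 multicast joins and leaves through RTM_NEWMULTICAST/RTM_DELMULTICAST messages on the RTNLGRP_IPV4_MCADDR group, instead of polling /proc/net/igmp. A minimal listener sketch; it assumes uapi headers recent enough to define RTNLGRP_IPV4_MCADDR and only prints the event type, without parsing the IFA_MULTICAST attribute the kernel attaches.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
	int group = RTNLGRP_IPV4_MCADDR;	/* needs headers that already define it */
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("netlink");
		return 1;
	}
	/* groups above 31 must be joined with a setsockopt(), not the bind mask */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &group, sizeof(group)) < 0) {
		perror("membership");
		return 1;
	}
	for (;;) {
		int len = (int)recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; len > 0 && NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
			if (nlh->nlmsg_type == RTM_NEWMULTICAST ||
			    nlh->nlmsg_type == RTM_DELMULTICAST)
				printf("IPv4 multicast address %s\n",
				       nlh->nlmsg_type == RTM_NEWMULTICAST ?
				       "added" : "removed");
	}
}
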
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 459af1f89739..e4decfb270fa 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -203,8 +203,15 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
- if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
- return false;
+ if (ipv6_only_sock(sk2)) {
+ if (sk->sk_family == AF_INET)
+ return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ return false;
+#endif
+ }
return inet_bind_conflict(sk, sk2, sk_uid, relax,
reuseport_cb_ok, reuseport_ok);
@@ -229,7 +236,7 @@ static bool inet_bhash2_conflict(const struct sock *sk,
#define sk_for_each_bound_bhash(__sk, __tb2, __tb) \
hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node) \
- sk_for_each_bound(sk2, &(__tb2)->owners)
+ sk_for_each_bound((__sk), &(__tb2)->owners)
/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
@@ -287,6 +294,7 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
+ bool conflict = false;
bool reuseport_cb_ok;
rcu_read_lock();
@@ -299,18 +307,20 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
spin_lock(&head2->lock);
- inet_bind_bucket_for_each(tb2, &head2->chain)
- if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
- break;
+ inet_bind_bucket_for_each(tb2, &head2->chain) {
+ if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
+ continue;
- if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
- reuseport_ok)) {
- spin_unlock(&head2->lock);
- return true;
+ if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
+ continue;
+
+ conflict = true;
+ break;
}
spin_unlock(&head2->lock);
- return false;
+
+ return conflict;
}
/*
@@ -651,7 +661,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
/*
* This will accept the next outstanding connection.
*/
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
+struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
@@ -670,7 +680,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
/* Find already established connection */
if (reqsk_queue_empty(queue)) {
- long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
/* If this is a non blocking socket don't sleep */
error = -EAGAIN;
@@ -682,6 +692,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
goto out_err;
}
req = reqsk_queue_remove(queue, sk);
+ arg->is_empty = reqsk_queue_empty(queue);
newsk = req->sk;
if (sk->sk_protocol == IPPROTO_TCP &&
@@ -703,6 +714,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
out:
release_sock(sk);
if (newsk && mem_cgroup_sockets_enabled) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
int amt = 0;
/* atomically get the memory usage, set and charge the
@@ -720,8 +732,8 @@ out:
}
if (amt)
- mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
- GFP_KERNEL | __GFP_NOFAIL);
+ mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
+ kmem_cache_charge(newsk, gfp);
release_sock(newsk);
}
@@ -735,7 +747,7 @@ out:
out_err:
newsk = NULL;
req = NULL;
- *err = error;
+ arg->err = error;
goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
@@ -763,7 +775,8 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_pending = icsk->icsk_ack.pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
+ smp_store_release(&icsk->icsk_ack.pending, 0);
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
@@ -771,6 +784,21 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+void inet_csk_clear_xmit_timers_sync(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* ongoing timer handlers need to acquire socket lock. */
+ sock_not_owned_by_me(sk);
+
+ smp_store_release(&icsk->icsk_pending, 0);
+ smp_store_release(&icsk->icsk_ack.pending, 0);
+
+ sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
+ sk_stop_timer_sync(sk, &sk->sk_timer);
+}
+
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
@@ -886,6 +914,64 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
+static struct request_sock *
+reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
+ bool attach_listener)
+{
+ struct request_sock *req;
+
+ req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+ if (!req)
+ return NULL;
+ req->rsk_listener = NULL;
+ if (attach_listener) {
+ if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
+ kmem_cache_free(ops->slab, req);
+ return NULL;
+ }
+ req->rsk_listener = sk_listener;
+ }
+ req->rsk_ops = ops;
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
+ req->syncookie = 0;
+ req->timeout = 0;
+ req->num_timeout = 0;
+ req->num_retrans = 0;
+ req->sk = NULL;
+ refcount_set(&req->rsk_refcnt, 0);
+
+ return req;
+}
+#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener)
+{
+ struct request_sock *req = reqsk_alloc(ops, sk_listener,
+ attach_listener);
+
+ if (req) {
+ struct inet_request_sock *ireq = inet_rsk(req);
+
+ ireq->ireq_opt = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ ireq->pktopts = NULL;
+#endif
+ atomic64_set(&ireq->ir_cookie, 0);
+ ireq->ireq_state = TCP_NEW_SYN_RECV;
+ write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+ ireq->ireq_family = sk_listener->sk_family;
+ req->timeout = TCP_TIMEOUT_INIT;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
static struct request_sock *inet_reqsk_clone(struct request_sock *req,
struct sock *sk)
{
@@ -906,8 +992,9 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
memcpy(nreq_sk, req_sk,
offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
- req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
+ req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see above */);
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
@@ -960,21 +1047,31 @@ static bool reqsk_queue_unlink(struct request_sock *req)
found = __sk_nulls_del_node_init_rcu(sk);
spin_unlock(lock);
}
- if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
- reqsk_put(req);
+
return found;
}
-bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
+ struct request_sock *req,
+ bool from_timer)
{
bool unlinked = reqsk_queue_unlink(req);
+ if (!from_timer && timer_delete_sync(&req->rsk_timer))
+ reqsk_put(req);
+
if (unlinked) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
reqsk_put(req);
}
+
return unlinked;
}
+
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+{
+ return __inet_csk_reqsk_queue_drop(sk, req, false);
+}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
@@ -1067,7 +1164,7 @@ static void reqsk_timer_handler(struct timer_list *t)
if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
/* delete timer */
- inet_csk_reqsk_queue_drop(sk_listener, nreq);
+ __inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
goto no_ownership;
}
@@ -1093,28 +1190,38 @@ no_ownership:
}
drop:
- inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
+ __inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
+ reqsk_put(oreq);
}
-static void reqsk_queue_hash_req(struct request_sock *req,
+static bool reqsk_queue_hash_req(struct request_sock *req,
unsigned long timeout)
{
+ bool found_dup_sk = false;
+
+ if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
+ return false;
+
+ /* The timer needs to be setup after a successful insertion. */
timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
mod_timer(&req->rsk_timer, jiffies + timeout);
- inet_ehash_insert(req_to_sk(req), NULL, NULL);
/* before letting lookups find us, make sure all req fields
* are committed to memory and refcnt initialized.
*/
smp_wmb();
refcount_set(&req->rsk_refcnt, 2 + 1);
+ return true;
}
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
- reqsk_queue_hash_req(req, timeout);
+ if (!reqsk_queue_hash_req(req, timeout))
+ return false;
+
inet_csk_reqsk_queue_added(sk);
+ return true;
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
@@ -1454,20 +1561,13 @@ EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
const struct inet_sock *inet = inet_sk(sk);
- const struct ip_options_rcu *inet_opt;
- __be32 daddr = inet->inet_daddr;
struct flowi4 *fl4;
struct rtable *rt;
rcu_read_lock();
- inet_opt = rcu_dereference(inet->inet_opt);
- if (inet_opt && inet_opt->opt.srr)
- daddr = inet_opt->opt.faddr;
fl4 = &fl->u.ip4;
- rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
- inet->inet_saddr, inet->inet_dport,
- inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ inet_sk_init_flowi4(inet, fl4);
+ rt = ip_route_output_flow(sock_net(sk), fl4, sk);
if (IS_ERR(rt))
rt = NULL;
if (rt)
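
inet_csk_clear_xmit_timers() now clears icsk_pending and icsk_ack.pending with smp_store_release(), which pairs with the smp_load_acquire() added in inet_diag.c so a diag dump can read the timer state without taking the socket lock. A generic userspace illustration of the release/acquire publication idiom follows; the field names are stand-ins, and the example publishes a value rather than clearing one, but the ordering guarantee is the same.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uchar pending;	/* stand-in for icsk_pending: 0 == no timer armed */
static unsigned long expires;	/* payload written before the flag is published */

static void *timer_side(void *arg)
{
	(void)arg;
	expires = 1234;						/* plain store ... */
	atomic_store_explicit(&pending, 1, memory_order_release);	/* ... published by the flag */
	return NULL;
}

static void *diag_side(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&pending, memory_order_acquire))
		printf("timer pending, expires=%lu\n", expires);	/* guaranteed to see 1234 */
	else
		printf("no timer pending\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, timer_side, NULL);
	pthread_create(&b, NULL, diag_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
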
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e6b6aa0579e..321acc8abf17 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -32,7 +32,7 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct inet_diag_handler **inet_diag_table;
+static const struct inet_diag_handler __rcu **inet_diag_table;
struct inet_diag_entry {
const __be32 *saddr;
@@ -48,28 +48,28 @@ struct inet_diag_entry {
#endif
};
-static DEFINE_MUTEX(inet_diag_table_mutex);
-
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
- if (proto < 0 || proto >= IPPROTO_MAX) {
- mutex_lock(&inet_diag_table_mutex);
- return ERR_PTR(-ENOENT);
- }
+ const struct inet_diag_handler *handler;
- if (!inet_diag_table[proto])
+ if (proto < 0 || proto >= IPPROTO_MAX)
+ return NULL;
+
+ if (!READ_ONCE(inet_diag_table[proto]))
sock_load_diag_module(AF_INET, proto);
- mutex_lock(&inet_diag_table_mutex);
- if (!inet_diag_table[proto])
- return ERR_PTR(-ENOENT);
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[proto]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
- return inet_diag_table[proto];
+ return handler;
}
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
- mutex_unlock(&inet_diag_table_mutex);
+ module_put(handler->owner);
}
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
@@ -104,9 +104,12 @@ static size_t inet_sk_attr_size(struct sock *sk,
const struct inet_diag_handler *handler;
size_t aux = 0;
- handler = inet_diag_table[req->sdiag_protocol];
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[req->sdiag_protocol]);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
if (handler && handler->idiag_get_aux_size)
aux = handler->idiag_get_aux_size(sk, net_admin);
+ rcu_read_unlock();
return nla_total_size(sizeof(struct tcp_info))
+ nla_total_size(sizeof(struct inet_diag_msg))
@@ -244,10 +247,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
+ u8 icsk_pending;
+ int protocol;
cb_data = cb->data;
- handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
- BUG_ON(!handler);
+ protocol = inet_diag_get_protocol(req, cb_data);
+
+ /* inet_diag_lock_handler() made sure inet_diag_table[] is stable. */
+ handler = rcu_dereference_protected(inet_diag_table[protocol], 1);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
+ if (!handler)
+ return -ENXIO;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
@@ -298,14 +308,15 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto out;
}
- if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ icsk_pending = smp_load_acquire(&icsk->icsk_pending);
+ if (icsk_pending == ICSK_TIME_RETRANS ||
+ icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+ icsk_pending == ICSK_TIME_LOSS_PROBE) {
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
r->idiag_expires =
jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
- } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+ } else if (icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
@@ -433,7 +444,7 @@ static int inet_twsk_diag_fill(struct sock *sk,
inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
- r->idiag_state = tw->tw_substate;
+ r->idiag_state = READ_ONCE(tw->tw_substate);
r->idiag_timer = 3;
tmo = tw->tw_timer.expires - jiffies;
r->idiag_expires = jiffies_delta_to_msecs(tmo);
@@ -605,9 +616,10 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
protocol = inet_diag_get_protocol(req, &dump_data);
handler = inet_diag_lock_handler(protocol);
- if (IS_ERR(handler)) {
- err = PTR_ERR(handler);
- } else if (cmd == SOCK_DIAG_BY_FAMILY) {
+ if (!handler)
+ return -ENOENT;
+
+ if (cmd == SOCK_DIAG_BY_FAMILY) {
struct netlink_callback cb = {
.nlh = nlh,
.skb = in_skb,
@@ -1035,6 +1047,10 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
num = 0;
ilb = &hashinfo->lhash2[i];
+ if (hlist_nulls_empty(&ilb->nulls_head)) {
+ s_num = 0;
+ continue;
+ }
spin_lock(&ilb->lock);
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
@@ -1099,6 +1115,10 @@ resume_bind_walk:
accum = 0;
ibb = &hashinfo->bhash2[i];
+ if (hlist_empty(&ibb->chain)) {
+ s_num = 0;
+ continue;
+ }
spin_lock_bh(&ibb->lock);
inet_bind_bucket_for_each(tb2, &ibb->chain) {
if (!net_eq(ib2_net(tb2), net))
@@ -1191,7 +1211,7 @@ next_chunk:
if (num < s_num)
goto next_normal;
state = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_substate : sk->sk_state;
+ READ_ONCE(inet_twsk(sk)->tw_substate) : sk->sk_state;
if (!(idiag_states & (1 << state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
@@ -1259,12 +1279,12 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
again:
prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(protocol);
- if (!IS_ERR(handler))
+ if (handler) {
handler->dump(skb, cb, r);
- else
- err = PTR_ERR(handler);
- inet_diag_unlock_handler(handler);
-
+ inet_diag_unlock_handler(handler);
+ } else {
+ err = -ENOENT;
+ }
/* The skb is not large enough to fit one sk info and
* inet_sk_diag_fill() has requested for a larger skb.
*/
@@ -1365,6 +1385,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
+ req.pad = 0;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
@@ -1380,6 +1401,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
req.sdiag_family = rc->idiag_family;
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
+ req.pad = 0;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
@@ -1457,10 +1479,9 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
handler = inet_diag_lock_handler(sk->sk_protocol);
- if (IS_ERR(handler)) {
- inet_diag_unlock_handler(handler);
+ if (!handler) {
nlmsg_cancel(skb, nlh);
- return PTR_ERR(handler);
+ return -ENOENT;
}
attr = handler->idiag_info_size
@@ -1479,6 +1500,7 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
static const struct sock_diag_handler inet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1486,6 +1508,7 @@ static const struct sock_diag_handler inet_diag_handler = {
};
static const struct sock_diag_handler inet6_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET6,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1495,20 +1518,12 @@ static const struct sock_diag_handler inet6_diag_handler = {
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
- int err = -EINVAL;
if (type >= IPPROTO_MAX)
- goto out;
+ return -EINVAL;
- mutex_lock(&inet_diag_table_mutex);
- err = -EEXIST;
- if (!inet_diag_table[type]) {
- inet_diag_table[type] = h;
- err = 0;
- }
- mutex_unlock(&inet_diag_table_mutex);
-out:
- return err;
+ return !cmpxchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL, h) ? 0 : -EEXIST;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
@@ -1519,12 +1534,16 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
if (type >= IPPROTO_MAX)
return;
- mutex_lock(&inet_diag_table_mutex);
- inet_diag_table[type] = NULL;
- mutex_unlock(&inet_diag_table_mutex);
+ xchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
+static const struct sock_diag_inet_compat inet_diag_compat = {
+ .owner = THIS_MODULE,
+ .fn = inet_diag_rcv_msg_compat,
+};
+
static int __init inet_diag_init(void)
{
const int inet_diag_table_size = (IPPROTO_MAX *
@@ -1543,7 +1562,7 @@ static int __init inet_diag_init(void)
if (err)
goto out_free_inet;
- sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_register_inet_compat(&inet_diag_compat);
out:
return err;
@@ -1558,7 +1577,7 @@ static void __exit inet_diag_exit(void)
{
sock_diag_unregister(&inet6_diag_handler);
sock_diag_unregister(&inet_diag_handler);
- sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_unregister_inet_compat(&inet_diag_compat);
kfree(inet_diag_table);
}
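
The inet_diag.c conversion drops inet_diag_table_mutex entirely: handlers are published with cmpxchg() and removed with xchg(), and lookups run under rcu_read_lock() while pinning the module with try_module_get(). The registration half reduces to a compare-and-swap on a NULL slot, roughly as in this userspace sketch (the struct, slot count, and -1 error value are illustrative, not the kernel types).

#include <stdatomic.h>
#include <stdio.h>

#define PROTO_SLOTS 256

struct diag_handler { const char *name; };

/* one RCU-protected slot per protocol; NULL means "not registered" */
static _Atomic(struct diag_handler *) diag_table[PROTO_SLOTS];

/* registration succeeds only if the slot was empty, like cmpxchg(..., NULL, h) */
static int diag_register(int proto, struct diag_handler *h)
{
	struct diag_handler *expected = NULL;

	return atomic_compare_exchange_strong(&diag_table[proto], &expected, h) ? 0 : -1;
}

static void diag_unregister(int proto)
{
	atomic_exchange(&diag_table[proto], (struct diag_handler *)NULL);
}

int main(void)
{
	struct diag_handler tcp = { "tcp" }, other = { "other" };

	printf("%d\n", diag_register(6, &tcp));	/* 0: slot was free */
	printf("%d\n", diag_register(6, &other));	/* -1: already taken */
	diag_unregister(6);
	printf("%d\n", diag_register(6, &other));	/* 0 again */
	return 0;
}
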
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 7072fc0783ef..d179a2c84222 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -24,6 +24,8 @@
#include <net/ip.h>
#include <net/ipv6.h>
+#include "../core/sock_destructor.h"
+
/* Use skb->cb to track consecutive/adjacent fragments coming at
* the end of the queue. Nodes in the rb-tree queue will
* contain "runs" of one or more adjacent fragments.
@@ -39,6 +41,7 @@ struct ipfrag_skb_cb {
};
struct sk_buff *next_frag;
int frag_run_len;
+ int ip_defrag_offset;
};
#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
@@ -172,7 +175,7 @@ static void fqdir_free_fn(struct work_struct *work)
}
}
-static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);
+static DECLARE_DELAYED_WORK(fqdir_free_work, fqdir_free_fn);
static void fqdir_work_fn(struct work_struct *work)
{
@@ -181,7 +184,7 @@ static void fqdir_work_fn(struct work_struct *work)
rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
if (llist_add(&fqdir->free_list, &fqdir_free_list))
- queue_work(system_wq, &fqdir_free_work);
+ queue_delayed_work(system_wq, &fqdir_free_work, HZ);
}
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
@@ -396,12 +399,12 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
*/
if (!last)
fragrun_create(q, skb); /* First fragment. */
- else if (last->ip_defrag_offset + last->len < end) {
+ else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
/* This is the common case: skb goes to the end. */
/* Detect and discard overlaps. */
- if (offset < last->ip_defrag_offset + last->len)
+ if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
return IPFRAG_OVERLAP;
- if (offset == last->ip_defrag_offset + last->len)
+ if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
fragrun_append_to_last(q, skb);
else
fragrun_create(q, skb);
@@ -418,13 +421,13 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
parent = *rbn;
curr = rb_to_skb(parent);
- curr_run_end = curr->ip_defrag_offset +
+ curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
FRAG_CB(curr)->frag_run_len;
- if (end <= curr->ip_defrag_offset)
+ if (end <= FRAG_CB(curr)->ip_defrag_offset)
rbn = &parent->rb_left;
else if (offset >= curr_run_end)
rbn = &parent->rb_right;
- else if (offset >= curr->ip_defrag_offset &&
+ else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
end <= curr_run_end)
return IPFRAG_DUP;
else
@@ -438,7 +441,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
rb_insert_color(&skb->rbnode, &q->rb_fragments);
}
- skb->ip_defrag_offset = offset;
+ FRAG_CB(skb)->ip_defrag_offset = offset;
return IPFRAG_OK;
}
@@ -448,13 +451,28 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
struct sk_buff *parent)
{
struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
- struct sk_buff **nextp;
+ void (*destructor)(struct sk_buff *);
+ unsigned int orig_truesize = 0;
+ struct sk_buff **nextp = NULL;
+ struct sock *sk = skb->sk;
int delta;
+ if (sk && is_skb_wmem(skb)) {
+ /* TX: skb->sk might have been passed as argument to
+ * dst->output and must remain valid until tx completes.
+ *
+ * Move sk to reassembled skb and fix up wmem accounting.
+ */
+ orig_truesize = skb->truesize;
+ destructor = skb->destructor;
+ }
+
if (head != skb) {
fp = skb_clone(skb, GFP_ATOMIC);
- if (!fp)
- return NULL;
+ if (!fp) {
+ head = skb;
+ goto out_restore_sk;
+ }
FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
if (RB_EMPTY_NODE(&skb->rbnode))
FRAG_CB(parent)->next_frag = fp;
@@ -463,6 +481,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
&q->rb_fragments);
if (q->fragments_tail == skb)
q->fragments_tail = fp;
+
+ if (orig_truesize) {
+ /* prevent skb_morph from releasing sk */
+ skb->sk = NULL;
+ skb->destructor = NULL;
+ }
skb_morph(skb, head);
FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
rb_replace_node(&head->rbnode, &skb->rbnode,
@@ -470,13 +494,13 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
consume_skb(head);
head = skb;
}
- WARN_ON(head->ip_defrag_offset != 0);
+ WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);
delta = -head->truesize;
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
- return NULL;
+ goto out_restore_sk;
delta += head->truesize;
if (delta)
@@ -492,7 +516,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
- return NULL;
+ goto out_restore_sk;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
@@ -509,6 +533,21 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
nextp = &skb_shinfo(head)->frag_list;
}
+out_restore_sk:
+ if (orig_truesize) {
+ int ts_delta = head->truesize - orig_truesize;
+
+ /* if this reassembled skb is fragmented later,
+ * fraglist skbs will get skb->sk assigned from head->sk,
+ * and each frag skb will be released via sock_wfree.
+ *
+ * Update sk_wmem_alloc.
+ */
+ head->sk = sk;
+ head->destructor = destructor;
+ refcount_add(ts_delta, &sk->sk_wmem_alloc);
+ }
+
return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);
@@ -516,6 +555,8 @@ EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
void *reasm_data, bool try_coalesce)
{
+ struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
+ const unsigned int head_truesize = head->truesize;
struct sk_buff **nextp = reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
@@ -578,7 +619,10 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
skb_mark_not_on_list(head);
head->prev = NULL;
head->tstamp = q->stamp;
- head->mono_delivery_time = q->mono_delivery_time;
+ head->tstamp_type = q->tstamp_type;
+
+ if (sk)
+ refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
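
The ip_defrag_offset change above moves per-fragment state out of a dedicated sk_buff field and into the per-layer control block (skb->cb) behind the FRAG_CB() cast. A stripped-down userspace illustration of that control-block pattern follows; the struct and field names are invented for the example, and only the 48-byte cb size matches the real sk_buff:

#include <stdio.h>
#include <string.h>

/* Generic buffer with an opaque scratch area, like sk_buff::cb. */
struct buf {
	unsigned char cb[48];	/* owned by whatever layer currently holds buf */
	unsigned int len;
};

/* Reassembly-private view of the scratch area, like struct ipfrag_skb_cb. */
struct frag_cb {
	struct buf *next_frag;
	int frag_run_len;
	int defrag_offset;	/* what used to be a dedicated skb field */
};

#define FRAG_CB(b) ((struct frag_cb *)((b)->cb))

int main(void)
{
	struct buf b;

	/* The private struct must fit in the opaque area. */
	_Static_assert(sizeof(struct frag_cb) <= sizeof(((struct buf *)0)->cb),
		       "frag_cb too large for cb[]");

	memset(&b, 0, sizeof(b));
	b.len = 100;
	FRAG_CB(&b)->defrag_offset = 1480;	/* this fragment starts at 1480 */

	/* End of the fragment run, as used by the overlap checks above. */
	printf("run end = %d\n", FRAG_CB(&b)->defrag_offset + (int)b.len);
	return 0;
}
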
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 308ff34002ea..9bfcfd016e18 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,7 @@
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
@@ -32,8 +33,6 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
- static u32 inet_ehash_secret __read_mostly;
-
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
@@ -311,7 +310,7 @@ inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
return inet_lhash2_bucket(h, hash);
}
-static inline int compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, const struct net *net,
const unsigned short hnum, const __be32 daddr,
const int dif, const int sdif)
{
@@ -349,7 +348,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
* Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
* the selected sock or an error.
*/
-struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
@@ -375,7 +374,7 @@ EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
*/
/* called with rcu_read_lock() : No refcount taken on the socket */
-static struct sock *inet_lhash2_lookup(struct net *net,
+static struct sock *inet_lhash2_lookup(const struct net *net,
struct inet_listen_hashbucket *ilb2,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
@@ -402,7 +401,7 @@ static struct sock *inet_lhash2_lookup(struct net *net,
return result;
}
-struct sock *inet_lookup_run_sk_lookup(struct net *net,
+struct sock *inet_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
@@ -424,7 +423,7 @@ struct sock *inet_lookup_run_sk_lookup(struct net *net,
return sk;
}
-struct sock *__inet_lookup_listener(struct net *net,
+struct sock *__inet_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
@@ -489,7 +488,7 @@ void sock_edemux(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_edemux);
-struct sock *__inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
@@ -566,7 +565,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
- if (twsk_unique(sk, sk2, twp))
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ tcp_twsk_unique(sk, sk2, twp))
break;
}
goto not_unique;
@@ -1136,7 +1136,7 @@ error:
sock_prot_inuse_add(net, sk->sk_prot, -1);
spin_lock(lock);
- sk_nulls_del_node_init_rcu(sk);
+ __sk_nulls_del_node_init_rcu(sk);
spin_unlock(lock);
sk->sk_hash = 0;
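
The inet_ehashfn() hunk above drops the local static secret (it moves behind the new net/hotdata include) while keeping the net_get_random_once() call. The one-shot lazy initialization it relies on can be modelled in plain C with pthread_once(); the hash mix below is a placeholder, not the kernel's jhash-based __inet_ehashfn():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t ehash_secret;
static pthread_once_t ehash_once = PTHREAD_ONCE_INIT;

/* Runs exactly once, no matter how many threads race into the lookup path. */
static void ehash_secret_init(void)
{
	srandom((unsigned int)time(NULL));
	ehash_secret = (uint32_t)random();
}

/* Toy stand-in for inet_ehashfn(): mix addresses/ports with the secret. */
static uint32_t ehashfn(uint32_t laddr, uint16_t lport,
			uint32_t faddr, uint16_t fport)
{
	pthread_once(&ehash_once, ehash_secret_init);

	/* Placeholder mixing only; the kernel uses __inet_ehashfn(). */
	return (laddr ^ faddr ^ ehash_secret) * 2654435761u
	       ^ ((uint32_t)lport << 16 | fport);
}

int main(void)
{
	printf("bucket = %u\n",
	       ehashfn(0x0a000001, 80, 0x0a000002, 51515) & 255);
	return 0;
}
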
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 5befa4de5b24..337390ba85b4 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -92,13 +92,22 @@ static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
hlist_nulls_add_head_rcu(&tw->tw_node, list);
}
+static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, false);
+}
+
/*
- * Enter the time wait state. This is called with locally disabled BH.
+ * Enter the time wait state.
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
+ *
+ * The caller must not access @tw anymore after this function returns.
*/
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo)
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -114,6 +123,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
hashinfo->bhash_size)];
bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
+ local_bh_disable();
spin_lock(&bhead->lock);
spin_lock(&bhead2->lock);
@@ -129,26 +139,34 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock);
+ /* Step 2: Hash TW into tcp ehash chain */
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- spin_unlock(lock);
+ /* Ensure above writes are committed into memory before updating the
+ * refcount.
+ * Provides ordering vs later refcount_inc().
+ */
+ smp_wmb();
/* tw_refcnt is set to 3 because we have :
* - one reference for bhash chain.
* - one reference for ehash chain.
* - one reference for timer.
- * We can use atomic_set() because prior spin_lock()/spin_unlock()
- * committed into memory all tw fields.
* Also note that after this point, we lost our implicit reference
* so we are not allowed to use tw anymore.
*/
refcount_set(&tw->tw_refcnt, 3);
+
+ inet_twsk_schedule(tw, timeo);
+
+ spin_unlock(lock);
+ local_bh_enable();
}
-EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
static void tw_timer_handler(struct timer_list *t)
{
@@ -192,7 +210,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
- timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
+ timer_setup(&tw->tw_timer, tw_timer_handler, 0);
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
@@ -217,7 +235,34 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
*/
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
- if (del_timer_sync(&tw->tw_timer))
+ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+
+ /* inet_twsk_purge() walks over all sockets, including tw ones,
+ * and removes them via inet_twsk_deschedule_put() after a
+ * refcount_inc_not_zero().
+ *
+ * inet_twsk_hashdance_schedule() must (re)init the refcount before
+ * arming the timer, i.e. inet_twsk_purge can obtain a reference to
+ * a twsk that did not yet schedule the timer.
+ *
+ * The ehash lock synchronizes these two:
+ * After acquiring the lock, the timer is always scheduled (else
+ * timer_shutdown returns false), because hashdance_schedule releases
+ * the ehash lock only after completing the timer initialization.
+ *
+ * Without grabbing the ehash lock, we get:
+ * 1) cpu x sets twsk refcount to 3
+ * 2) cpu y bumps refcount to 4
+ * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down
+ * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown
+ * -> timer refcount is never decremented.
+ */
+ spin_lock(lock);
+ /* Makes sure hashdance_schedule() has completed */
+ spin_unlock(lock);
+
+ if (timer_shutdown_sync(&tw->tw_timer))
inet_twsk_kill(tw);
inet_twsk_put(tw);
}
@@ -263,52 +308,51 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
+void inet_twsk_purge(struct inet_hashinfo *hashinfo)
{
- struct inet_timewait_sock *tw;
- struct sock *sk;
+ struct inet_ehash_bucket *head = &hashinfo->ehash[0];
+ unsigned int ehash_mask = hashinfo->ehash_mask;
struct hlist_nulls_node *node;
unsigned int slot;
+ struct sock *sk;
+
+ for (slot = 0; slot <= ehash_mask; slot++, head++) {
+ if (hlist_nulls_empty(&head->chain))
+ continue;
- for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
- struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
cond_resched();
rcu_read_lock();
restart:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
- if (sk->sk_state != TCP_TIME_WAIT) {
- /* A kernel listener socket might not hold refcnt for net,
- * so reqsk_timer_handler() could be fired after net is
- * freed. Userspace listener and reqsk never exist here.
- */
- if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
- hashinfo->pernet)) {
- struct request_sock *req = inet_reqsk(sk);
-
- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
- }
+ int state = inet_sk_state_load(sk);
+ if ((1 << state) & ~(TCPF_TIME_WAIT |
+ TCPF_NEW_SYN_RECV))
continue;
- }
- tw = inet_twsk(sk);
- if ((tw->tw_family != family) ||
- refcount_read(&twsk_net(tw)->ns.count))
+ if (refcount_read(&sock_net(sk)->ns.count))
continue;
- if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+ if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
continue;
- if (unlikely((tw->tw_family != family) ||
- refcount_read(&twsk_net(tw)->ns.count))) {
- inet_twsk_put(tw);
+ if (refcount_read(&sock_net(sk)->ns.count)) {
+ sock_gen_put(sk);
goto restart;
}
rcu_read_unlock();
local_bh_disable();
- inet_twsk_deschedule_put(tw);
+ if (state == TCP_TIME_WAIT) {
+ inet_twsk_deschedule_put(inet_twsk(sk));
+ } else {
+ struct request_sock *req = inet_reqsk(sk);
+
+ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+ req);
+ }
local_bh_enable();
goto restart_rcu;
}
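
The deschedule path above takes and immediately releases the ehash lock purely to wait out a concurrent inet_twsk_hashdance_schedule() before calling timer_shutdown_sync(). The same "empty lock/unlock as a completion barrier" idiom can be sketched with pthreads; the publish and teardown helpers below are illustrative stand-ins, not kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_armed;

/* Publisher: inserts the object and arms its timer under the lock,
 * mirroring inet_twsk_hashdance_schedule(). */
static void publish_and_arm(void)
{
	pthread_mutex_lock(&hash_lock);
	/* ... insert into hash table ... */
	timer_armed = true;		/* arm the timer before unlocking */
	pthread_mutex_unlock(&hash_lock);
}

/* Teardown: must not run concurrently with the arming step, otherwise it
 * could shut the timer down before it was ever armed and leak a reference.
 * Taking and dropping the same lock guarantees any in-flight publisher has
 * finished before we proceed. */
static void wait_then_shutdown(void)
{
	pthread_mutex_lock(&hash_lock);		/* barrier only */
	pthread_mutex_unlock(&hash_lock);

	if (timer_armed) {
		timer_armed = false;	/* stands in for timer_shutdown_sync() */
		printf("timer shut down after it was armed\n");
	}
}

int main(void)
{
	publish_and_arm();
	wait_then_shutdown();
	return 0;
}
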
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e9fed83e9b3c..b8b23a77ceb4 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -81,10 +81,7 @@ void __init inet_initpeers(void)
inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
- peer_cachep = kmem_cache_create("inet_peer_cache",
- sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
/* Called with rcu_read_lock() or base->lock held */
@@ -98,6 +95,7 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
{
struct rb_node **pp, *parent, *next;
struct inet_peer *p;
+ u32 now;
pp = &base->rb_root.rb_node;
parent = NULL;
@@ -111,8 +109,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
p = rb_entry(parent, struct inet_peer, rb_node);
cmp = inetpeer_addr_cmp(daddr, &p->daddr);
if (cmp == 0) {
- if (!refcount_inc_not_zero(&p->refcnt))
- break;
+ now = jiffies;
+ if (READ_ONCE(p->dtime) != now)
+ WRITE_ONCE(p->dtime, now);
return p;
}
if (gc_stack) {
@@ -131,11 +130,6 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
return NULL;
}
-static void inetpeer_free_rcu(struct rcu_head *head)
-{
- kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
-}
-
/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
struct inet_peer *gc_stack[],
@@ -158,9 +152,6 @@ static void inet_peer_gc(struct inet_peer_base *base,
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
- /* The READ_ONCE() pairs with the WRITE_ONCE()
- * in inet_putpeer()
- */
delta = (__u32)jiffies - READ_ONCE(p->dtime);
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
@@ -171,36 +162,28 @@ static void inet_peer_gc(struct inet_peer_base *base,
if (p) {
rb_erase(&p->rb_node, &base->rb_root);
base->total--;
- call_rcu(&p->rcu, inetpeer_free_rcu);
+ kfree_rcu(p, rcu);
}
}
}
+/* Must be called under RCU : No refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
- const struct inetpeer_addr *daddr,
- int create)
+ const struct inetpeer_addr *daddr)
{
struct inet_peer *p, *gc_stack[PEER_MAX_GC];
struct rb_node **pp, *parent;
unsigned int gc_cnt, seq;
- int invalidated;
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
- rcu_read_lock();
seq = read_seqbegin(&base->lock);
p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
- invalidated = read_seqretry(&base->lock, seq);
- rcu_read_unlock();
if (p)
return p;
- /* If no writer did a change during our lookup, we can return early. */
- if (!create && !invalidated)
- return NULL;
-
/* retry an exact lookup, taking the lock before.
* At least, nodes should be hot in our cache.
*/
@@ -209,12 +192,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
gc_cnt = 0;
p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
- if (!p && create) {
+ if (!p) {
p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
if (p) {
p->daddr = *daddr;
p->dtime = (__u32)jiffies;
- refcount_set(&p->refcnt, 2);
+ refcount_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
@@ -239,15 +222,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
- /* The WRITE_ONCE() pairs with itself (we run lockless)
- * and the READ_ONCE() in inet_peer_gc()
- */
- WRITE_ONCE(p->dtime, (__u32)jiffies);
-
if (refcount_dec_and_test(&p->refcnt))
- call_rcu(&p->rcu, inetpeer_free_rcu);
+ kfree_rcu(p, rcu);
}
-EXPORT_SYMBOL_GPL(inet_putpeer);
/*
* Check transmit rate limitation for given message.
@@ -269,23 +246,27 @@ EXPORT_SYMBOL_GPL(inet_putpeer);
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
- unsigned long now, token;
+ unsigned long now, token, otoken, delta;
bool rc = false;
if (!peer)
return true;
- token = peer->rate_tokens;
+ token = otoken = READ_ONCE(peer->rate_tokens);
now = jiffies;
- token += now - peer->rate_last;
- peer->rate_last = now;
- if (token > XRLIM_BURST_FACTOR * timeout)
- token = XRLIM_BURST_FACTOR * timeout;
+ delta = now - READ_ONCE(peer->rate_last);
+ if (delta) {
+ WRITE_ONCE(peer->rate_last, now);
+ token += delta;
+ if (token > XRLIM_BURST_FACTOR * timeout)
+ token = XRLIM_BURST_FACTOR * timeout;
+ }
if (token >= timeout) {
token -= timeout;
rc = true;
}
- peer->rate_tokens = token;
+ if (token != otoken)
+ WRITE_ONCE(peer->rate_tokens, token);
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
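
inet_peer_xrlim_allow() above is a small token bucket that now avoids writing rate_last and rate_tokens back when nothing changed. A self-contained userspace version of the same bucket logic, using seconds instead of jiffies and plain loads/stores instead of READ_ONCE/WRITE_ONCE, might be:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define XRLIM_BURST_FACTOR 6

struct peer {
	long rate_tokens;	/* accumulated credit, in seconds */
	time_t rate_last;	/* last time credit was added */
};

/* Allow one message if the peer has earned at least 'timeout' seconds of
 * credit; credit accrues with wall-clock time and is capped at a burst. */
static bool xrlim_allow(struct peer *p, long timeout)
{
	long token, otoken;
	time_t now = time(NULL);
	long delta = (long)(now - p->rate_last);
	bool ok = false;

	token = otoken = p->rate_tokens;
	if (delta) {			/* only touch rate_last when time moved */
		p->rate_last = now;
		token += delta;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout;
	}
	if (token >= timeout) {
		token -= timeout;	/* spend credit for this message */
		ok = true;
	}
	if (token != otoken)		/* skip the write-back when unchanged */
		p->rate_tokens = token;
	return ok;
}

int main(void)
{
	struct peer p = { .rate_tokens = 0, .rate_last = time(NULL) - 10 };

	/* Credit is capped at the burst factor (6), so roughly the first six
	 * messages pass and the rest are dropped until more time elapses. */
	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, xrlim_allow(&p, 1) ? "send" : "drop");
	return 0;
}
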
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a4941f53b523..7a435746a22d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -82,15 +82,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
struct ipq *qp = container_of(q, struct ipq, q);
- struct net *net = q->fqdir->net;
-
const struct frag_v4_compare_key *key = a;
+ struct net *net = q->fqdir->net;
+ struct inet_peer *p = NULL;
q->key.v4 = *key;
qp->ecn = 0;
- qp->peer = q->fqdir->max_dist ?
- inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
- NULL;
+ if (q->fqdir->max_dist) {
+ rcu_read_lock();
+ p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif);
+ if (p && !refcount_inc_not_zero(&p->refcnt))
+ p = NULL;
+ rcu_read_unlock();
+ }
+ qp->peer = p;
}
static void ip4_frag_free(struct inet_frag_queue *q)
@@ -132,12 +137,12 @@ static bool frag_expire_skip_icmp(u32 user)
*/
static void ip_expire(struct timer_list *t)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
struct inet_frag_queue *frag = from_timer(frag, t, timer);
const struct iphdr *iph;
struct sk_buff *head = NULL;
struct net *net;
struct ipq *qp;
- int err;
qp = container_of(frag, struct ipq, q);
net = qp->q.fqdir->net;
@@ -175,14 +180,15 @@ static void ip_expire(struct timer_list *t)
/* skb has no dst, perform route lookup again */
iph = ip_hdr(head);
- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
- iph->tos, head->dev);
- if (err)
+ reason = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), head->dev);
+ if (reason)
goto out;
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
+ reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL))
goto out;
@@ -195,7 +201,7 @@ out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
- kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
+ kfree_skb_reason(head, reason);
ipq_put(qp);
}
@@ -355,7 +361,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
qp->iif = dev->ifindex;
qp->q.stamp = skb->tstamp;
- qp->q.mono_delivery_time = skb->mono_delivery_time;
+ qp->q.tstamp_type = skb->tstamp_type;
qp->q.meat += skb->len;
qp->ecn |= ecn;
add_frag_mem_limit(qp->q.fqdir, skb->truesize);
@@ -384,6 +390,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
}
skb_dst_drop(skb);
+ skb_orphan(skb);
return -EINPROGRESS;
insert_error:
@@ -487,7 +494,6 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
struct ipq *qp;
__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
- skb_orphan(skb);
/* Lookup (or create) queue header */
qp = ip_find(net, ip_hdr(skb), user, vif);
@@ -580,7 +586,6 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &dist_min,
},
- { }
};
/* secret interval has been deprecated */
@@ -593,7 +598,6 @@ static struct ctl_table ip4_frags_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
@@ -632,7 +636,7 @@ err_alloc:
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->ipv4.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.frags_hdr);
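
With inet_getpeer() now returning a peer without changing its refcount, ip4_frag_init() above takes its own reference with refcount_inc_not_zero() inside the RCU section. The core of that "only acquire if still alive" pattern can be sketched with C11 atomics; the object and helpers below are stand-ins, not the kernel refcount_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct peer {
	atomic_int refcnt;	/* 0 means the object is already being freed */
	const char *name;
};

/* Take a reference only if the count is still non-zero, like
 * refcount_inc_not_zero(): a CAS loop that refuses to revive 0. */
static bool peer_tryget(struct peer *p)
{
	int old = atomic_load(&p->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&p->refcnt, &old, old + 1))
			return true;	/* reference acquired */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;			/* object is dying, do not use it */
}

static void peer_put(struct peer *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) == 1)
		printf("last reference dropped, %s would be freed\n", p->name);
}

int main(void)
{
	struct peer alive = { .refcnt = 1, .name = "alive" };
	struct peer dying = { .refcnt = 0, .name = "dying" };

	printf("alive: %s\n", peer_tryget(&alive) ? "got ref" : "skipped");
	printf("dying: %s\n", peer_tryget(&dying) ? "got ref" : "skipped");
	peer_put(&alive);	/* drop the ref we just took */
	peer_put(&alive);	/* drop the initial ref; object would be freed */
	return 0;
}
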
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6b9cf5a24c19..ed1b6b44faf8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,6 +44,7 @@
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
+#include <net/inet_dscp.h>
/*
Problems & solutions
@@ -265,6 +266,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
struct erspan_base_hdr *ershdr;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@@ -272,18 +274,25 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int ver;
int len;
+ ip_tunnel_flags_copy(flags, tpi->flags);
+
itn = net_generic(net, erspan_net_id);
iph = ip_hdr(skb);
if (is_erspan_type1(gre_hdr_len)) {
ver = 0;
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
- tpi->flags | TUNNEL_NO_KEY,
+ __set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, 0);
} else {
+ if (unlikely(!pskb_may_pull(skb,
+ gre_hdr_len + sizeof(*ershdr))))
+ return PACKET_REJECT;
+
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
ver = ershdr->ver;
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
- tpi->flags | TUNNEL_KEY,
+ iph = ip_hdr(skb);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, tpi->key);
}
@@ -307,10 +316,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct ip_tunnel_info *info;
unsigned char *gh;
__be64 tun_id;
- __be16 flags;
- tpi->flags |= TUNNEL_KEY;
- flags = tpi->flags;
+ __set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
+ ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags,
@@ -333,7 +341,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
ERSPAN_V2_MDSIZE);
info = &tun_dst->u.tun_info;
- info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ info->key.tun_flags);
info->options_len = sizeof(*md);
}
@@ -376,10 +385,13 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
tnl_params = &tunnel->parms.iph;
if (tunnel->collect_md || tnl_params->daddr == 0) {
- __be16 flags;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
__be64 tun_id;
- flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
+ __set_bit(IP_TUNNEL_CSUM_BIT, flags);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ ip_tunnel_flags_and(flags, tpi->flags, flags);
+
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
if (!tun_dst)
@@ -459,12 +471,15 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- __be16 flags = tunnel->parms.o_flags;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
+
+ ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
/* Push GRE header. */
gre_build_header(skb, tunnel->tun_hlen,
flags, proto, tunnel->parms.o_key,
- (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
+ test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
+ htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
@@ -478,10 +493,10 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
int tunnel_hlen;
- __be16 flags;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -495,14 +510,19 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
goto err_free_skb;
/* Push Tunnel header. */
- if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
+ if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ tunnel->parms.o_flags)))
goto err_free_skb;
- flags = tun_info->key.tun_flags &
- (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
+ __set_bit(IP_TUNNEL_CSUM_BIT, flags);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ __set_bit(IP_TUNNEL_SEQ_BIT, flags);
+ ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
+
gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
+ test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
+ htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -516,6 +536,7 @@ err_free_skb:
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
@@ -531,7 +552,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_skb;
key = &tun_info->key;
- if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
goto err_free_skb;
if (tun_info->options_len < sizeof(*md))
goto err_free_skb;
@@ -584,8 +605,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_skb;
}
- gre_build_header(skb, 8, TUNNEL_SEQ,
- proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
+ __set_bit(IP_TUNNEL_SEQ_BIT, flags);
+ gre_build_header(skb, 8, flags, proto, 0,
+ htonl(atomic_fetch_inc(&tunnel->o_seqno)));
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -640,11 +662,11 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto free_skb;
- tnl_params = (const struct iphdr *)skb->data;
-
- if (!pskb_network_may_pull(skb, pull_len))
+ if (!pskb_may_pull(skb, pull_len))
goto free_skb;
+ tnl_params = (const struct iphdr *)skb->data;
+
/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
skb_pull(skb, pull_len);
skb_reset_mac_header(skb);
@@ -659,7 +681,8 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
- if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+ if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ tunnel->parms.o_flags)))
goto free_skb;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
@@ -701,7 +724,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
/* Push ERSPAN header */
if (tunnel->erspan_ver == 0) {
proto = htons(ETH_P_ERSPAN);
- tunnel->parms.o_flags &= ~TUNNEL_SEQ;
+ __clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
} else if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
@@ -716,7 +739,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
goto free_skb;
}
- tunnel->parms.o_flags &= ~TUNNEL_KEY;
+ __clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
@@ -739,7 +762,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+ if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ tunnel->parms.o_flags)))
goto free_skb;
if (skb_cow_head(skb, dev->needed_headroom))
@@ -757,7 +781,6 @@ free_skb:
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- __be16 flags;
int len;
len = tunnel->tun_hlen;
@@ -771,12 +794,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
dev->needed_headroom += len;
if (set_mtu)
- dev->mtu = max_t(int, dev->mtu - len, 68);
-
- flags = tunnel->parms.o_flags;
+ WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
- if (flags & TUNNEL_SEQ ||
- (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
+ if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
+ (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
+ tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
dev->features &= ~NETIF_F_GSO_SOFTWARE;
dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
} else {
@@ -785,20 +807,29 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
}
}
-static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
+static int ipgre_tunnel_ctl(struct net_device *dev,
+ struct ip_tunnel_parm_kern *p,
int cmd)
{
+ __be16 i_flags, o_flags;
int err;
+ if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
+ !ip_tunnel_flags_is_be16_compat(p->o_flags))
+ return -EOVERFLOW;
+
+ i_flags = ip_tunnel_flags_to_be16(p->i_flags);
+ o_flags = ip_tunnel_flags_to_be16(p->o_flags);
+
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
- ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
+ ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
}
- p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
- p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
+ gre_flags_to_tnl_flags(p->i_flags, i_flags);
+ gre_flags_to_tnl_flags(p->o_flags, o_flags);
err = ip_tunnel_ctl(dev, p, cmd);
if (err)
@@ -807,15 +838,18 @@ static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
if (cmd == SIOCCHGTUNNEL) {
struct ip_tunnel *t = netdev_priv(dev);
- t->parms.i_flags = p->i_flags;
- t->parms.o_flags = p->o_flags;
+ ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
+ ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
ipgre_link_update(dev, true);
}
- p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
- p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
+ i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
+ ip_tunnel_flags_from_be16(p->i_flags, i_flags);
+ o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
+ ip_tunnel_flags_from_be16(p->o_flags, o_flags);
+
return 0;
}
@@ -890,15 +924,18 @@ static int ipgre_open(struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
if (ipv4_is_multicast(t->parms.iph.daddr)) {
- struct flowi4 fl4;
+ struct flowi4 fl4 = {
+ .flowi4_oif = t->parms.link,
+ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(&t->parms.iph)),
+ .flowi4_scope = RT_SCOPE_UNIVERSE,
+ .flowi4_proto = IPPROTO_GRE,
+ .saddr = t->parms.iph.saddr,
+ .daddr = t->parms.iph.daddr,
+ .fl4_gre_key = t->parms.o_key,
+ };
struct rtable *rt;
- rt = ip_route_output_gre(t->net, &fl4,
- t->parms.iph.daddr,
- t->parms.iph.saddr,
- t->parms.o_key,
- RT_TOS(t->parms.iph.tos),
- t->parms.link);
+ rt = ip_route_output_key(t->net, &fl4);
if (IS_ERR(rt))
return -EADDRNOTAVAIL;
dev = rt->dst.dev;
@@ -955,7 +992,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
- __be16 flags;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
@@ -964,21 +1000,22 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
- dev->features |= GRE_FEATURES | NETIF_F_LLTX;
+ dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
- flags = tunnel->parms.o_flags;
-
/* TCP offload with GRE SEQ is not supported, nor can we support 2
* levels of outer headers requiring an update.
*/
- if (flags & TUNNEL_SEQ)
+ if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
return;
- if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
+ tunnel->encap.type != TUNNEL_ENCAP_NONE)
return;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->lltx = true;
}
static int ipgre_tunnel_init(struct net_device *dev)
@@ -1025,14 +1062,16 @@ static int __net_init ipgre_init_net(struct net *net)
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
-static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
+ ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
- .exit_batch = ipgre_exit_batch_net,
+ .exit_batch_rtnl = ipgre_exit_batch_rtnl,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1129,7 +1168,7 @@ static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
static int ipgre_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
- struct ip_tunnel_parm *parms,
+ struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
@@ -1145,10 +1184,12 @@ static int ipgre_netlink_parms(struct net_device *dev,
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
- parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
+ gre_flags_to_tnl_flags(parms->i_flags,
+ nla_get_be16(data[IFLA_GRE_IFLAGS]));
if (data[IFLA_GRE_OFLAGS])
- parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
+ gre_flags_to_tnl_flags(parms->o_flags,
+ nla_get_be16(data[IFLA_GRE_OFLAGS]));
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@@ -1196,7 +1237,7 @@ static int ipgre_netlink_parms(struct net_device *dev,
static int erspan_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
- struct ip_tunnel_parm *parms,
+ struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
@@ -1355,7 +1396,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_parm p;
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
int err;
@@ -1373,7 +1414,7 @@ static int erspan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_parm p;
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
int err;
@@ -1392,8 +1433,8 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
- struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
@@ -1408,8 +1449,8 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
if (err < 0)
return err;
- t->parms.i_flags = p.i_flags;
- t->parms.o_flags = p.o_flags;
+ ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
+ ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
ipgre_link_update(dev, !tb[IFLA_MTU]);
@@ -1421,8 +1462,8 @@ static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
- struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
@@ -1437,8 +1478,8 @@ static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err < 0)
return err;
- t->parms.i_flags = p.i_flags;
- t->parms.o_flags = p.o_flags;
+ ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
+ ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
return 0;
}
@@ -1494,8 +1535,10 @@ static size_t ipgre_get_size(const struct net_device *dev)
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct ip_tunnel_parm *p = &t->parms;
- __be16 o_flags = p->o_flags;
+ struct ip_tunnel_parm_kern *p = &t->parms;
+ IP_TUNNEL_DECLARE_FLAGS(o_flags);
+
+ ip_tunnel_flags_copy(o_flags, p->o_flags);
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1543,7 +1586,7 @@ static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (t->erspan_ver <= 2) {
if (t->erspan_ver != 0 && !t->collect_md)
- t->parms.o_flags |= TUNNEL_KEY;
+ __set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
goto nla_put_failure;
@@ -1697,14 +1740,16 @@ static int __net_init ipgre_tap_init_net(struct net *net)
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
-static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
+ ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
- .exit_batch = ipgre_tap_exit_batch_net,
+ .exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1715,14 +1760,16 @@ static int __net_init erspan_init_net(struct net *net)
&erspan_link_ops, "erspan0");
}
-static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
+static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
+ ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
+ dev_to_kill);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
- .exit_batch = erspan_exit_batch_net,
+ .exit_batch_rtnl = erspan_exit_batch_rtnl,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};
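
Most of the ip_gre.c churn above comes from the tunnel flags changing representation: the __be16 masks become a bitmap declared with IP_TUNNEL_DECLARE_FLAGS() and manipulated with __set_bit()/test_bit(), so more than 16 flag bits fit. A minimal userspace bitmap with the same shape follows; the macro and bit names here are invented for the example:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Flag bits are indices rather than single-bit masks, so there can be >16. */
enum { TNL_CSUM_BIT, TNL_KEY_BIT, TNL_SEQ_BIT, TNL_ERSPAN_OPT_BIT, TNL_NUM_BITS };

/* Declares "unsigned long name[...]", like IP_TUNNEL_DECLARE_FLAGS(). */
#define DECLARE_TNL_FLAGS(name) unsigned long name[BITS_TO_LONGS(TNL_NUM_BITS)]

static void set_bit_(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit_(unsigned int nr, const unsigned long *addr)
{
	return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	DECLARE_TNL_FLAGS(flags) = { 0 };

	set_bit_(TNL_KEY_BIT, flags);
	set_bit_(TNL_SEQ_BIT, flags);

	printf("key: %d, csum: %d, seq: %d\n",
	       test_bit_(TNL_KEY_BIT, flags),
	       test_bit_(TNL_CSUM_BIT, flags),
	       test_bit_(TNL_SEQ_BIT, flags));
	return 0;
}
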
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 5e9c8156656a..30a5e9460d00 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -314,7 +314,7 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
int tcp_v4_early_demux(struct sk_buff *skb);
int udp_v4_early_demux(struct sk_buff *skb);
-static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+static int ip_rcv_finish_core(struct net *net,
struct sk_buff *skb, struct net_device *dev,
const struct sk_buff *hint)
{
@@ -322,15 +322,14 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
int err, drop_reason;
struct rtable *rt;
- drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
-
if (ip_can_use_hint(skb, iph, hint)) {
- err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
- dev, hint);
- if (unlikely(err))
+ drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), dev, hint);
+ if (unlikely(drop_reason))
goto drop_error;
}
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
!skb_dst(skb) &&
!skb->sk &&
@@ -362,10 +361,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
* how the packet travels inside Linux networking.
*/
if (!skb_valid_dst(skb)) {
- err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, dev);
- if (unlikely(err))
+ drop_reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), dev);
+ if (unlikely(drop_reason))
goto drop_error;
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
} else {
struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -425,10 +425,8 @@ drop:
return NET_RX_DROP;
drop_error:
- if (err == -EXDEV) {
- drop_reason = SKB_DROP_REASON_IP_RPFILTER;
+ if (drop_reason == SKB_DROP_REASON_IP_RPFILTER)
__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
- }
goto drop;
}
@@ -444,7 +442,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
if (!skb)
return NET_RX_SUCCESS;
- ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
+ ret = ip_rcv_finish_core(net, skb, dev, NULL);
if (ret != NET_RX_DROP)
ret = dst_input(skb);
return ret;
@@ -591,14 +589,12 @@ static struct sk_buff *ip_extract_route_hint(const struct net *net,
return skb;
}
-static void ip_list_rcv_finish(struct net *net, struct sock *sk,
- struct list_head *head)
+static void ip_list_rcv_finish(struct net *net, struct list_head *head)
{
struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct dst_entry *dst;
@@ -610,13 +606,13 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
skb = l3mdev_ip_rcv(skb);
if (!skb)
continue;
- if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
+ if (ip_rcv_finish_core(net, skb, dev, hint) == NET_RX_DROP)
continue;
dst = skb_dst(skb);
if (curr_dst != dst) {
hint = ip_extract_route_hint(net, skb,
- ((struct rtable *)dst)->rt_type);
+ dst_rtable(dst)->rt_type);
/* dispatch old sublist */
if (!list_empty(&sublist))
@@ -636,7 +632,7 @@ static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
{
NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
head, dev, NULL, ip_rcv_finish);
- ip_list_rcv_finish(net, NULL, head);
+ ip_list_rcv_finish(net, head);
}
/* Receive a list of IP packets */
@@ -646,9 +642,8 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *curr_dev = NULL;
struct net *curr_net = NULL;
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
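
In the ip_input.c hunks above, the route lookup helpers now hand back an skb drop reason directly instead of an errno that the caller has to translate (the old err == -EXDEV check becomes a plain SKB_DROP_REASON_IP_RPFILTER test). A tiny userspace sketch of that "return a reason, not an errno" style, with invented names and a much smaller reason enum than the kernel's, could be:

#include <stdbool.h>
#include <stdio.h>

/* A few illustrative reasons; the kernel's enum skb_drop_reason is larger. */
enum drop_reason {
	NOT_DROPPED = 0,	/* success, like SKB_NOT_DROPPED_YET */
	DROP_NOT_SPECIFIED,
	DROP_IP_RPFILTER,
	DROP_FRAG_REASM_TIMEOUT,
	DROP_MAX,
};

static unsigned long drop_stats[DROP_MAX];

/* Stand-in for ip_route_input_noref(): zero means "routed", anything else
 * is a reason the caller can hand straight to the drop accounting. */
static enum drop_reason route_input(unsigned int saddr, bool rpf_ok)
{
	(void)saddr;
	if (!rpf_ok)
		return DROP_IP_RPFILTER;	/* reverse-path filter rejected it */
	return NOT_DROPPED;
}

static void drop_packet(enum drop_reason reason)
{
	drop_stats[reason]++;
	printf("dropped, reason=%d\n", reason);
}

int main(void)
{
	enum drop_reason reason = route_input(0x0a000001, false);

	if (reason)
		drop_packet(reason);	/* precise reason, not just -EXDEV */
	printf("rpfilter drops: %lu\n", drop_stats[DROP_IP_RPFILTER]);
	return 0;
}
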
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a9e22a098872..e3321932bec0 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
@@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+ err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph),
+ dev) ? -EINVAL : 0;
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67d846622365..ea7a260bec8a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -77,6 +77,7 @@
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
+#include <net/inet_dscp.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
@@ -198,7 +199,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = (struct rtable *)dst;
+ struct rtable *rt = dst_rtable(dst);
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
@@ -475,26 +476,18 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
goto packet_routed;
/* Make sure we can route this packet. */
- rt = (struct rtable *)__sk_dst_check(sk, 0);
+ rt = dst_rtable(__sk_dst_check(sk, 0));
if (!rt) {
- __be32 daddr;
+ inet_sk_init_flowi4(inet, fl4);
- /* Use correct destination address if we have options. */
- daddr = inet->inet_daddr;
- if (inet_opt && inet_opt->opt.srr)
- daddr = inet_opt->opt.faddr;
+ /* sctp_v4_xmit() uses its own DSCP value */
+ fl4->flowi4_tos = tos & INET_DSCP_MASK;
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
* itself out.
*/
- rt = ip_route_output_ports(net, fl4, sk,
- daddr, inet->inet_saddr,
- inet->inet_dport,
- inet->inet_sport,
- sk->sk_protocol,
- RT_CONN_FLAGS_TOS(sk, tos),
- sk->sk_bound_dev_if);
+ rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
@@ -764,7 +757,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
{
struct iphdr *iph;
struct sk_buff *skb2;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
struct rtable *rt = skb_rtable(skb);
unsigned int mtu, hlen, ll_rs;
struct ip_fraglist_iter iter;
@@ -856,7 +849,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
}
}
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, skb);
if (!err)
@@ -912,7 +905,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, skb2);
if (err)
goto fail;
@@ -971,8 +964,8 @@ static int __ip_append_data(struct sock *sk,
bool zc = false;
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
- struct rtable *rt = (struct rtable *)cork->dst;
- bool paged, hold_tskey, extra_uref = false;
+ struct rtable *rt = dst_rtable(cork->dst);
+ bool paged, hold_tskey = false, extra_uref = false;
unsigned int wmem_alloc_delta = 0;
u32 tskey = 0;
@@ -1048,10 +1041,15 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
- hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
- READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
- if (hold_tskey)
- tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
+ if (cork->flags & IPCORK_TS_OPT_ID) {
+ tskey = cork->ts_opt_id;
+ } else {
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ hold_tskey = true;
+ }
+ }
/* So, what's going on in the loop below?
*
@@ -1163,7 +1161,10 @@ alloc_new_skb:
/* [!] NOTE: copy will be negative if pagedlen>0
* because then the equation reduces to -fraggap.
*/
- if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
@@ -1207,8 +1208,9 @@ alloc_new_skb:
unsigned int off;
off = skb->len;
- if (getfrag(from, skb_put(skb, copy),
- offset, copy, off, skb) < 0) {
+ if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from, skb_put(skb, copy),
+ offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
@@ -1246,7 +1248,8 @@ alloc_new_skb:
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- if (getfrag(from,
+ if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
@@ -1322,10 +1325,14 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->ttl = ipc->ttl;
cork->tos = ipc->tos;
cork->mark = ipc->sockc.mark;
- cork->priority = ipc->priority;
+ cork->priority = ipc->sockc.priority;
cork->transmit_time = ipc->sockc.transmit_time;
cork->tx_flags = 0;
- sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
+ sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags);
+ if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) {
+ cork->flags |= IPCORK_TS_OPT_ID;
+ cork->ts_opt_id = ipc->sockc.ts_opt_id;
+ }
return 0;
}
@@ -1390,7 +1397,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
- struct rtable *rt = (struct rtable *)cork->dst;
+ struct rtable *rt = dst_rtable(cork->dst);
struct iphdr *iph;
u8 pmtudisc, ttl;
__be16 df = 0;
@@ -1455,9 +1462,12 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
ip_options_build(skb, opt, cork->addr, rt);
}
- skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
+ skb->priority = cork->priority;
skb->mark = cork->mark;
- skb->tstamp = cork->transmit_time;
+ if (sk_is_tcp(sk))
+ skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
+ else
+ skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
@@ -1473,7 +1483,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* by icmp_hdr(skb)->type.
*/
if (sk->sk_type == SOCK_RAW &&
- !inet_test_bit(HDRINCL, sk))
+ !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
icmp_type = fl4->fl4_icmp_type;
else
icmp_type = icmp_hdr(skb)->type;
@@ -1583,7 +1593,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
* Generic function to send a packet as reply to another packet.
* Used to send some TCP resets/acks so far.
*/
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
+ struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -1618,7 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
- RT_TOS(arg->tos),
+ arg->tos & INET_DSCP_MASK,
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
daddr, saddr,
@@ -1649,7 +1660,10 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- nskb->mono_delivery_time = !!transmit_time;
+ if (orig_sk)
+ skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
+ if (transmit_time)
+ nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
if (txhash)
skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
ip_push_pending_frames(sk, &fl4);
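
The getfrag call sites in __ip_append_data() above are wrapped in INDIRECT_CALL_1() so the common ip_generic_getfrag() case becomes a direct call instead of an indirect one. A simplified userspace rendition of that wrapper follows; the callback type and its arguments are invented for the example, and the macro is a reduced version of the kernel's:

#include <stdio.h>
#include <string.h>

/* If the function pointer matches the expected hot target, call it
 * directly; otherwise fall back to the indirect call. */
#define INDIRECT_CALL_1(f, f1, ...) \
	((f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

typedef int (*getfrag_t)(const void *from, char *to, int offset, int len);

/* The common copier, analogous to ip_generic_getfrag(). */
static int generic_getfrag(const void *from, char *to, int offset, int len)
{
	memcpy(to, (const char *)from + offset, (size_t)len);
	return 0;
}

/* A less common copier, e.g. one that also folds a checksum. */
static int checksum_getfrag(const void *from, char *to, int offset, int len)
{
	memcpy(to, (const char *)from + offset, (size_t)len);
	return 1;	/* pretend we also computed a checksum */
}

static int copy_chunk(getfrag_t getfrag, const void *from, char *to, int len)
{
	/* Becomes a direct call whenever getfrag == generic_getfrag. */
	return INDIRECT_CALL_1(getfrag, generic_getfrag, from, to, 0, len);
}

int main(void)
{
	char dst[8];

	printf("%d\n", copy_chunk(generic_getfrag, "payload", dst, 7));
	printf("%d\n", copy_chunk(checksum_getfrag, "payload", dst, 7));
	return 0;
}
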
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 21d2ffa919e9..6d9c5c20b1c4 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -128,20 +128,20 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
- char *secdata;
- u32 seclen, secid;
+ struct lsm_context ctx;
+ u32 secid;
int err;
err = security_socket_getpeersec_dgram(NULL, skb, &secid);
if (err)
return;
- err = security_secid_to_secctx(secid, &secdata, &seclen);
- if (err)
+ err = security_secid_to_secctx(secid, &ctx);
+ if (err < 0)
return;
- put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
- security_release_secctx(secdata, seclen);
+ put_cmsg(msg, SOL_IP, SCM_SECURITY, ctx.len, ctx.context);
+ security_release_secctx(&ctx);
}
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
@@ -315,7 +315,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
if (val < 0 || val > 255)
return -EINVAL;
ipc->tos = val;
- ipc->priority = rt_tos2priority(ipc->tos);
+ ipc->sockc.priority = rt_tos2priority(ipc->tos);
break;
case IP_PROTOCOL:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
@@ -894,7 +894,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
- int val = 0, err;
+ int val = 0, err, retv;
bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
@@ -938,8 +938,12 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
/* If optlen==0, it is equivalent to val == 0 */
- if (optname == IP_ROUTER_ALERT)
- return ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (optname == IP_ROUTER_ALERT) {
+ retv = ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (retv == 0)
+ inet_assign_bit(RTALERT, sk, val);
+ return retv;
+ }
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
@@ -1575,6 +1579,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_BIND_ADDRESS_NO_PORT:
val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
goto copyval;
+ case IP_ROUTER_ALERT:
+ val = inet_test_bit(RTALERT, sk);
+ goto copyval;
case IP_TTL:
val = READ_ONCE(inet->uc_ttl);
if (val < 0)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 1b6981de3f29..09b73acf037a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -43,6 +43,7 @@
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>
+#include <net/inet_dscp.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -56,17 +57,13 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
IP_TNL_HASH_BITS);
}
-static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
- __be16 flags, __be32 key)
+static bool ip_tunnel_key_match(const struct ip_tunnel_parm_kern *p,
+ const unsigned long *flags, __be32 key)
{
- if (p->i_flags & TUNNEL_KEY) {
- if (flags & TUNNEL_KEY)
- return key == p->i_key;
- else
- /* key expected, none present */
- return false;
- } else
- return !(flags & TUNNEL_KEY);
+ if (!test_bit(IP_TUNNEL_KEY_BIT, flags))
+ return !test_bit(IP_TUNNEL_KEY_BIT, p->i_flags);
+
+ return test_bit(IP_TUNNEL_KEY_BIT, p->i_flags) && p->i_key == key;
}
/* Fallback tunnel: no source, no destination, no key, no options
@@ -81,7 +78,7 @@ static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
Given src, dst and key, find appropriate for input tunnel.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
- int link, __be16 flags,
+ int link, const unsigned long *flags,
__be32 remote, __be32 local,
__be32 key)
{
@@ -102,10 +99,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else
- cand = t;
+ cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
@@ -117,9 +113,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -137,22 +133,23 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
- if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
+ if ((!test_bit(IP_TUNNEL_NO_KEY_BIT, flags) &&
+ t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -172,7 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm_kern *parms)
{
unsigned int h;
__be32 remote;
@@ -183,7 +180,8 @@ static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
else
remote = 0;
- if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
+ if (!test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags) &&
+ test_bit(IP_TUNNEL_VTI_BIT, parms->i_flags))
i_key = 0;
h = ip_tunnel_hash(i_key, remote);
@@ -207,21 +205,23 @@ static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
- struct ip_tunnel_parm *parms,
+ struct ip_tunnel_parm_kern *parms,
int type)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
__be32 key = parms->i_key;
- __be16 flags = parms->i_flags;
int link = parms->link;
struct ip_tunnel *t = NULL;
struct hlist_head *head = ip_bucket(itn, parms);
- hlist_for_each_entry_rcu(t, head, hash_node) {
+ ip_tunnel_flags_copy(flags, parms->i_flags);
+
+ hlist_for_each_entry_rcu(t, head, hash_node, lockdep_rtnl_is_held()) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
- link == t->parms.link &&
+ link == READ_ONCE(t->parms.link) &&
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
@@ -231,7 +231,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
static struct net_device *__ip_tunnel_create(struct net *net,
const struct rtnl_link_ops *ops,
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm_kern *parms)
{
int err;
struct ip_tunnel *tunnel;
@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- RT_TOS(iph->tos), dev_net(dev),
+ iph->tos & INET_DSCP_MASK, tunnel->net,
tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -327,7 +327,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
static struct ip_tunnel *ip_tunnel_create(struct net *net,
struct ip_tunnel_net *itn,
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm_kern *parms)
{
struct ip_tunnel *nt;
struct net_device *dev;
@@ -378,7 +378,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
bool log_ecn_error)
{
const struct iphdr *iph = ip_hdr(skb);
- int err;
+ int nh, err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
@@ -387,15 +387,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
}
#endif
- if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
- ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
+ test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
DEV_STATS_INC(tunnel->dev, rx_crc_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
- if (tunnel->parms.i_flags&TUNNEL_SEQ) {
- if (!(tpi->flags&TUNNEL_SEQ) ||
+ if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
+ if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
(tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
@@ -404,8 +404,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+ iph = (struct iphdr *)(skb->head + nh);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -531,7 +544,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rt6_info *rt6;
__be32 daddr;
- rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
+ rt6 = skb_valid_dst(skb) ? dst_rt6_info(skb_dst(skb)) :
NULL;
daddr = md ? dst : tunnel->parms.iph.daddr;
@@ -597,9 +610,9 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
- tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
- dev_net(dev), 0, skb->mark, skb_get_hash(skb),
- key->flow_flags);
+ tunnel_id_to_key32(key->tun_id),
+ tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
+ skb_get_hash(skb), key->flow_flags);
if (!tunnel_hlen)
tunnel_hlen = ip_encap_hlen(&tun_info->encap);
@@ -626,7 +639,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags))
df = htons(IP_DF);
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
key->u.ipv4.dst, true)) {
@@ -760,8 +773,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
- tunnel->parms.o_key, RT_TOS(tos),
- dev_net(dev), tunnel->parms.link,
+ tunnel->parms.o_key, tos & INET_DSCP_MASK,
+ tunnel->net, READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
@@ -859,7 +872,7 @@ EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
static void ip_tunnel_update(struct ip_tunnel_net *itn,
struct ip_tunnel *t,
struct net_device *dev,
- struct ip_tunnel_parm *p,
+ struct ip_tunnel_parm_kern *p,
bool set_mtu,
__u32 fwmark)
{
@@ -881,17 +894,18 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
- t->parms.link = p->link;
+ WRITE_ONCE(t->parms.link, p->link);
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
}
dst_cache_reset(&t->dst_cache);
netdev_state_change(dev);
}
-int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
+ int cmd)
{
int err = 0;
struct ip_tunnel *t = netdev_priv(dev);
@@ -915,10 +929,10 @@ int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
goto done;
if (p->iph.ttl)
p->iph.frag_off |= htons(IP_DF);
- if (!(p->i_flags & VTI_ISVTI)) {
- if (!(p->i_flags & TUNNEL_KEY))
+ if (!test_bit(IP_TUNNEL_VTI_BIT, p->i_flags)) {
+ if (!test_bit(IP_TUNNEL_KEY_BIT, p->i_flags))
p->i_key = 0;
- if (!(p->o_flags & TUNNEL_KEY))
+ if (!test_bit(IP_TUNNEL_KEY_BIT, p->o_flags))
p->o_key = 0;
}
@@ -993,16 +1007,58 @@ done:
}
EXPORT_SYMBOL_GPL(ip_tunnel_ctl);
+bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
+ const void __user *data)
+{
+ struct ip_tunnel_parm p;
+
+ if (copy_from_user(&p, data, sizeof(p)))
+ return false;
+
+ strscpy(kp->name, p.name);
+ kp->link = p.link;
+ ip_tunnel_flags_from_be16(kp->i_flags, p.i_flags);
+ ip_tunnel_flags_from_be16(kp->o_flags, p.o_flags);
+ kp->i_key = p.i_key;
+ kp->o_key = p.o_key;
+ memcpy(&kp->iph, &p.iph, min(sizeof(kp->iph), sizeof(p.iph)));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_parm_from_user);
+
+bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp)
+{
+ struct ip_tunnel_parm p;
+
+ if (!ip_tunnel_flags_is_be16_compat(kp->i_flags) ||
+ !ip_tunnel_flags_is_be16_compat(kp->o_flags))
+ return false;
+
+ memset(&p, 0, sizeof(p));
+
+ strscpy(p.name, kp->name);
+ p.link = kp->link;
+ p.i_flags = ip_tunnel_flags_to_be16(kp->i_flags);
+ p.o_flags = ip_tunnel_flags_to_be16(kp->o_flags);
+ p.i_key = kp->i_key;
+ p.o_key = kp->o_key;
+ memcpy(&p.iph, &kp->iph, min(sizeof(p.iph), sizeof(kp->iph)));
+
+ return !copy_to_user(data, &p, sizeof(p));
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_parm_to_user);
+
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
- struct ip_tunnel_parm p;
+ struct ip_tunnel_parm_kern p;
int err;
- if (copy_from_user(&p, data, sizeof(p)))
+ if (!ip_tunnel_parm_from_user(&p, data))
return -EFAULT;
err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
- if (!err && copy_to_user(data, &p, sizeof(p)))
+ if (!err && !ip_tunnel_parm_to_user(data, &p))
return -EFAULT;
return err;
}
@@ -1027,7 +1083,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
new_mtu = max_mtu;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
@@ -1044,7 +1100,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
gro_cells_destroy(&tunnel->gro_cells);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1065,15 +1120,15 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- return tunnel->net;
+ return READ_ONCE(tunnel->net);
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
- struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct ip_tunnel *tunnel = netdev_priv(dev);
- return tunnel->parms.link;
+ return READ_ONCE(tunnel->parms.link);
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
@@ -1081,7 +1136,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
- struct ip_tunnel_parm parms;
+ struct ip_tunnel_parm_kern parms;
unsigned int i;
itn->rtnl_link_ops = ops;
@@ -1107,7 +1162,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
* Allowing to move it to another netns is clearly unsafe.
*/
if (!IS_ERR(itn->fb_tunnel_dev)) {
- itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ itn->fb_tunnel_dev->netns_local = true;
itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
itn->type = itn->fb_tunnel_dev->type;
@@ -1144,24 +1199,22 @@ static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
- struct rtnl_link_ops *ops)
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill)
{
struct ip_tunnel_net *itn;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
- ip_tunnel_destroy(net, itn, &list, ops);
+ ip_tunnel_destroy(net, itn, dev_to_kill, ops);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p, __u32 fwmark)
+ struct ip_tunnel_parm_kern *p, __u32 fwmark)
{
struct ip_tunnel *nt;
struct net *net = dev_net(dev);
@@ -1215,7 +1268,7 @@ err_register_netdevice:
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p, __u32 fwmark)
+ struct ip_tunnel_parm_kern *p, __u32 fwmark)
{
struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1260,31 +1313,27 @@ int ip_tunnel_init(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = ip_tunnel_dev_free;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
return err;
}
tunnel->dev = dev;
tunnel->net = dev_net(dev);
- strcpy(tunnel->parms.name, dev->name);
+ strscpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
if (tunnel->collect_md)
netif_keep_dst(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
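
The hunks above convert the tunnel flag word from a packed __be16 of TUNNEL_* bits into a bitmap inside struct ip_tunnel_parm_kern, so call sites build and test flags with ordinary bitops. A caller-side sketch of the pattern, mirroring the converted lookup paths (illustrative only; it assumes the <net/ip_tunnel.h> definitions of IP_TUNNEL_DECLARE_FLAGS() and the IP_TUNNEL_*_BIT constants used above):

/* Sketch only: how a receive path now asks for a keyless tunnel lookup. */
static struct ip_tunnel *example_lookup_no_key(struct ip_tunnel_net *itn,
						const struct sk_buff *skb,
						const struct iphdr *iph)
{
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };	/* all flag bits clear */

	__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);

	/* ip_tunnel_lookup() takes the bitmap instead of a __be16 mask. */
	return ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
				iph->saddr, iph->daddr, 0);
}
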
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 80ccd6661aa3..a3676155be78 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -125,6 +125,7 @@ EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags)
{
+ IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { };
struct metadata_dst *res;
struct ip_tunnel_info *dst, *src;
@@ -144,10 +145,10 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
sizeof(struct in6_addr));
else
dst->key.u.ipv4.dst = src->key.u.ipv4.src;
- dst->key.tun_flags = src->key.tun_flags;
+ ip_tunnel_flags_copy(dst->key.tun_flags, src->key.tun_flags);
dst->mode = src->mode | IP_TUNNEL_INFO_TX;
ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
- src->options_len, 0);
+ src->options_len, tun_flags);
return res;
}
@@ -497,7 +498,7 @@ static int ip_tun_parse_opts_geneve(struct nlattr *attr,
opt->opt_class = nla_get_be16(attr);
attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
opt->type = nla_get_u8(attr);
- info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct geneve_opt) + data_len;
@@ -525,7 +526,7 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
md->gbp = nla_get_u32(attr);
md->gbp &= VXLAN_GBP_MASK;
- info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct vxlan_metadata);
@@ -574,7 +575,7 @@ static int ip_tun_parse_opts_erspan(struct nlattr *attr,
set_hwid(&md->u.md2, nla_get_u8(attr));
}
- info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct erspan_metadata);
@@ -585,7 +586,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
{
int err, rem, opt_len, opts_len = 0;
struct nlattr *nla;
- __be16 type = 0;
+ u32 type = 0;
if (!attr)
return 0;
@@ -598,7 +599,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
switch (nla_type(nla)) {
case LWTUNNEL_IP_OPTS_GENEVE:
- if (type && type != TUNNEL_GENEVE_OPT)
+ if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
return -EINVAL;
opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
extack);
@@ -607,7 +608,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
opts_len += opt_len;
if (opts_len > IP_TUNNEL_OPTS_MAX)
return -EINVAL;
- type = TUNNEL_GENEVE_OPT;
+ type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
case LWTUNNEL_IP_OPTS_VXLAN:
if (type)
@@ -617,7 +618,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
- type = TUNNEL_VXLAN_OPT;
+ type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case LWTUNNEL_IP_OPTS_ERSPAN:
if (type)
@@ -627,7 +628,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
- type = TUNNEL_ERSPAN_OPT;
+ type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
default:
return -EINVAL;
@@ -705,10 +706,16 @@ static int ip_tun_build_state(struct net *net, struct nlattr *attr,
if (tb[LWTUNNEL_IP_TOS])
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
- if (tb[LWTUNNEL_IP_FLAGS])
- tun_info->key.tun_flags |=
- (nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
- ~TUNNEL_OPTIONS_PRESENT);
+ if (tb[LWTUNNEL_IP_FLAGS]) {
+ IP_TUNNEL_DECLARE_FLAGS(flags);
+
+ ip_tunnel_flags_from_be16(flags,
+ nla_get_be16(tb[LWTUNNEL_IP_FLAGS]));
+ ip_tunnel_clear_options_present(flags);
+
+ ip_tunnel_flags_or(tun_info->key.tun_flags,
+ tun_info->key.tun_flags, flags);
+ }
tun_info->mode = IP_TUNNEL_INFO_TX;
tun_info->options_len = opt_len;
@@ -812,18 +819,18 @@ static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
struct nlattr *nest;
int err = 0;
- if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ if (!ip_tunnel_is_options_present(tun_info->key.tun_flags))
return 0;
nest = nla_nest_start_noflag(skb, type);
if (!nest)
return -ENOMEM;
- if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
- else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
+ else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
- else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
+ else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_erspan(skb, tun_info);
if (err) {
@@ -846,7 +853,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
+ nla_put_be16(skb, LWTUNNEL_IP_FLAGS,
+ ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) ||
ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
return -ENOMEM;
@@ -857,11 +865,11 @@ static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
{
int opt_len;
- if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ if (!ip_tunnel_is_options_present(info->key.tun_flags))
return 0;
opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */
- if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) {
struct geneve_opt *opt;
int offset = 0;
@@ -874,10 +882,10 @@ static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
/* OPT_GENEVE_DATA */
offset += sizeof(*opt) + opt->length * 4;
}
- } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */
+ nla_total_size(4); /* OPT_VXLAN_GBP */
- } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+ } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) {
struct erspan_metadata *md = ip_tunnel_info_opts(info);
opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */
@@ -984,10 +992,17 @@ static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
if (tb[LWTUNNEL_IP6_TC])
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
- if (tb[LWTUNNEL_IP6_FLAGS])
- tun_info->key.tun_flags |=
- (nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
- ~TUNNEL_OPTIONS_PRESENT);
+ if (tb[LWTUNNEL_IP6_FLAGS]) {
+ IP_TUNNEL_DECLARE_FLAGS(flags);
+ __be16 data;
+
+ data = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
+ ip_tunnel_flags_from_be16(flags, data);
+ ip_tunnel_clear_options_present(flags);
+
+ ip_tunnel_flags_or(tun_info->key.tun_flags,
+ tun_info->key.tun_flags, flags);
+ }
tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
tun_info->options_len = opt_len;
@@ -1008,7 +1023,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
+ nla_put_be16(skb, LWTUNNEL_IP6_FLAGS,
+ ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) ||
ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
return -ENOMEM;
@@ -1116,7 +1132,7 @@ bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms);
void ip_tunnel_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm_kern *parms)
{
if (data[IFLA_IPTUN_LINK])
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
@@ -1139,8 +1155,12 @@ void ip_tunnel_netlink_parms(struct nlattr *data[],
if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
- if (data[IFLA_IPTUN_FLAGS])
- parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
+ if (data[IFLA_IPTUN_FLAGS]) {
+ __be16 flags;
+
+ flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
+ ip_tunnel_flags_from_be16(parms->i_flags, flags);
+ }
if (data[IFLA_IPTUN_PROTO])
parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d1d6bb28ed6e..f0b4419cef34 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -51,8 +51,11 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
const struct iphdr *iph = ip_hdr(skb);
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ __set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, 0);
if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -167,7 +170,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
struct flowi *fl)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct ip_tunnel_parm *parms = &tunnel->parms;
+ struct ip_tunnel_parm_kern *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
int pkt_len = skb->len;
@@ -322,8 +325,11 @@ static int vti4_err(struct sk_buff *skb, u32 info)
const struct iphdr *iph = (const struct iphdr *)skb->data;
int protocol = iph->protocol;
struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+
+ __set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->daddr, iph->saddr, 0);
if (!tunnel)
return -1;
@@ -373,8 +379,9 @@ static int vti4_err(struct sk_buff *skb, u32 info)
}
static int
-vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
int err = 0;
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
@@ -383,20 +390,26 @@ vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
return -EINVAL;
}
- if (!(p->i_flags & GRE_KEY))
+ if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
+ !ip_tunnel_flags_is_be16_compat(p->o_flags))
+ return -EOVERFLOW;
+
+ if (!(ip_tunnel_flags_to_be16(p->i_flags) & GRE_KEY))
p->i_key = 0;
- if (!(p->o_flags & GRE_KEY))
+ if (!(ip_tunnel_flags_to_be16(p->o_flags) & GRE_KEY))
p->o_key = 0;
- p->i_flags = VTI_ISVTI;
+ __set_bit(IP_TUNNEL_VTI_BIT, flags);
+ ip_tunnel_flags_copy(p->i_flags, flags);
err = ip_tunnel_ctl(dev, p, cmd);
if (err)
return err;
if (cmd != SIOCDELTUNNEL) {
- p->i_flags |= GRE_KEY;
- p->o_flags |= GRE_KEY;
+ ip_tunnel_flags_from_be16(flags, GRE_KEY);
+ ip_tunnel_flags_or(p->i_flags, p->i_flags, flags);
+ ip_tunnel_flags_or(p->o_flags, p->o_flags, flags);
}
return 0;
}
@@ -430,7 +443,7 @@ static int vti_tunnel_init(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
return ip_tunnel_init(dev);
@@ -510,14 +523,16 @@ static int __net_init vti_init_net(struct net *net)
return 0;
}
-static void __net_exit vti_exit_batch_net(struct list_head *list_net)
+static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
+ ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
+ dev_to_kill);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
- .exit_batch = vti_exit_batch_net,
+ .exit_batch_rtnl = vti_exit_batch_rtnl,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -529,7 +544,7 @@ static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void vti_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms,
+ struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@@ -539,7 +554,7 @@ static void vti_netlink_parms(struct nlattr *data[],
if (!data)
return;
- parms->i_flags = VTI_ISVTI;
+ __set_bit(IP_TUNNEL_VTI_BIT, parms->i_flags);
if (data[IFLA_VTI_LINK])
parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
@@ -564,7 +579,7 @@ static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_parm parms;
+ struct ip_tunnel_parm_kern parms;
__u32 fwmark = 0;
vti_netlink_parms(data, &parms, &fwmark);
@@ -576,8 +591,8 @@ static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
- struct ip_tunnel_parm p;
vti_netlink_parms(data, &p, &fwmark);
return ip_tunnel_changelink(dev, tb, &p, fwmark);
@@ -604,7 +619,7 @@ static size_t vti_get_size(const struct net_device *dev)
static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct ip_tunnel_parm *p = &t->parms;
+ struct ip_tunnel_parm_kern *p = &t->parms;
if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 03afa3871efc..dc0db5895e0e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,13 +130,16 @@ static int ipip_err(struct sk_buff *skb, u32 info)
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err = 0;
- t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
- iph->daddr, iph->saddr, 0);
+ __set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
+
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->daddr,
+ iph->saddr, 0);
if (!t) {
err = -ENOENT;
goto out;
@@ -213,13 +216,16 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
+ __set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
+
iph = ip_hdr(skb);
- tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
- iph->saddr, iph->daddr, 0);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->saddr,
+ iph->daddr, 0);
if (tunnel) {
const struct tnl_ptk_info *tpi;
@@ -238,7 +244,9 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
if (tunnel->collect_md) {
- tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
+ ip_tunnel_flags_zero(flags);
+
+ tun_dst = ip_tun_rx_dst(skb, flags, 0, 0);
if (!tun_dst)
return 0;
ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info);
@@ -330,7 +338,7 @@ static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
}
static int
-ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p->iph.version != 4 ||
@@ -340,7 +348,8 @@ ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
}
p->i_key = p->o_key = 0;
- p->i_flags = p->o_flags = 0;
+ ip_tunnel_flags_zero(p->i_flags);
+ ip_tunnel_flags_zero(p->o_flags);
return ip_tunnel_ctl(dev, p, cmd);
}
@@ -369,7 +378,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->type = ARPHRD_TUNNEL;
dev->flags = IFF_NOARP;
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
dev->features |= IPIP_FEATURES;
@@ -405,8 +414,8 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void ipip_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms, bool *collect_md,
- __u32 *fwmark)
+ struct ip_tunnel_parm_kern *parms,
+ bool *collect_md, __u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@@ -432,8 +441,8 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
@@ -452,8 +461,8 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ struct ip_tunnel_parm_kern p;
bool collect_md;
__u32 fwmark = t->fwmark;
@@ -510,7 +519,7 @@ static size_t ipip_get_size(const struct net_device *dev)
static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct ip_tunnel_parm *parm = &tunnel->parms;
+ struct ip_tunnel_parm_kern *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
@@ -592,14 +601,16 @@ static int __net_init ipip_init_net(struct net *net)
return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}
-static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
+ ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
- .exit_batch = ipip_exit_batch_net,
+ .exit_batch_rtnl = ipip_exit_batch_rtnl,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
};
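
Both vti and ipip switch their pernet teardown from .exit_batch, which took RTNL itself and called unregister_netdevice_many(), to .exit_batch_rtnl, where the core already holds RTNL for the whole batch and supplies a shared dev_to_kill list that is unregistered once afterwards. A sketch of the resulting shape for a tunnel driver (illustrative; the example_* names are hypothetical placeholders, while the helpers are the ones used above):

/* Hypothetical tunnel driver wiring for the RTNL-held batched netns exit. */
static unsigned int example_net_id __read_mostly;	/* hypothetical */
static struct rtnl_link_ops example_link_ops;		/* hypothetical */

static int __net_init example_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, example_net_id, &example_link_ops,
				  "exmpl0");
}

static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	ASSERT_RTNL();	/* the core takes RTNL once for the whole batch */
	ip_tunnel_delete_nets(net_list, example_net_id, &example_link_ops,
			      dev_to_kill);
}

static struct pernet_operations example_net_ops = {
	.init		 = example_init_net,
	.exit_batch_rtnl = example_exit_batch_rtnl,
	.id		 = &example_net_id,
	.size		 = sizeof(struct ip_tunnel_net),
};
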
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 362229836510..21ae7594a852 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -62,6 +62,7 @@
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>
+#include <net/inet_dscp.h>
#include <linux/nospec.h>
@@ -119,6 +120,11 @@ static void ipmr_expire_process(struct timer_list *t);
lockdep_rtnl_is_held() || \
list_empty(&net->ipv4.mr_tables))
+static bool ipmr_can_free_table(struct net *net)
+{
+ return !check_net(net) || !net_initialized(net);
+}
+
static struct mr_table *ipmr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -136,7 +142,7 @@ static struct mr_table *ipmr_mr_table_iter(struct net *net,
return ret;
}
-static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+static struct mr_table *__ipmr_get_table(struct net *net, u32 id)
{
struct mr_table *mrt;
@@ -147,6 +153,16 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
return NULL;
}
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+
+ rcu_read_lock();
+ mrt = __ipmr_get_table(net, id);
+ rcu_read_unlock();
+ return mrt;
+}
+
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
struct mr_table **mrt)
{
@@ -188,7 +204,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
arg->table = fib_rule_get_table(rule, arg);
- mrt = ipmr_get_table(rule->fr_net, arg->table);
+ mrt = __ipmr_get_table(rule->fr_net, arg->table);
if (!mrt)
return -EAGAIN;
res->mrt = mrt;
@@ -287,7 +303,7 @@ static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
}
-static unsigned int ipmr_rules_seq_read(struct net *net)
+static unsigned int ipmr_rules_seq_read(const struct net *net)
{
return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}
@@ -301,6 +317,11 @@ EXPORT_SYMBOL(ipmr_rule_default);
#define ipmr_for_each_table(mrt, net) \
for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
+static bool ipmr_can_free_table(struct net *net)
+{
+ return !check_net(net);
+}
+
static struct mr_table *ipmr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -314,6 +335,8 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
return net->ipv4.mrt;
}
+#define __ipmr_get_table ipmr_get_table
+
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
struct mr_table **mrt)
{
@@ -345,7 +368,7 @@ static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
return 0;
}
-static unsigned int ipmr_rules_seq_read(struct net *net)
+static unsigned int ipmr_rules_seq_read(const struct net *net)
{
return 0;
}
@@ -402,7 +425,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
if (id != RT_TABLE_DEFAULT && id >= 1000000000)
return ERR_PTR(-EINVAL);
- mrt = ipmr_get_table(net, id);
+ mrt = __ipmr_get_table(net, id);
if (mrt)
return mrt;
@@ -412,6 +435,10 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
+ struct net *net = read_pnet(&mrt->net);
+
+ WARN_ON_ONCE(!ipmr_can_free_table(net));
+
timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
@@ -441,7 +468,7 @@ static bool ipmr_init_vif_indev(const struct net_device *dev)
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
struct net_device *tunnel_dev, *new_dev;
- struct ip_tunnel_parm p = { };
+ struct ip_tunnel_parm_kern p = { };
int err;
tunnel_dev = __dev_get_by_name(net, "tunl0");
@@ -536,7 +563,7 @@ static void reg_vif_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
dev->needs_free_netdev = true;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
@@ -804,7 +831,7 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
cache->mfc_un.res.maxvif = vifi + 1;
}
}
- cache->mfc_un.res.lastuse = jiffies;
+ WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
}
static int vif_add(struct net *net, struct mr_table *mrt,
@@ -1373,7 +1400,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
goto out_unlock;
}
- mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (!mrt) {
ret = -ENOENT;
goto out_unlock;
@@ -1603,9 +1630,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
return -EFAULT;
- olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
+
+ olr = min_t(unsigned int, olr, sizeof(int));
+
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &val, olr))
@@ -1652,9 +1681,9 @@ int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
rcu_read_lock();
c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
if (c) {
- sr->pktcnt = c->_c.mfc_un.res.pkt;
- sr->bytecnt = c->_c.mfc_un.res.bytes;
- sr->wrong_if = c->_c.mfc_un.res.wrong_if;
+ sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
+ sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
+ sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
rcu_read_unlock();
return 0;
}
@@ -1724,9 +1753,9 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
- sr.pktcnt = c->_c.mfc_un.res.pkt;
- sr.bytecnt = c->_c.mfc_un.res.bytes;
- sr.wrong_if = c->_c.mfc_un.res.wrong_if;
+ sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
+ sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
+ sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
@@ -1866,7 +1895,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
vif->remote, vif->local,
0, 0,
IPPROTO_IPIP,
- RT_TOS(iph->tos), vif->link);
+ iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
goto out_free;
encap = sizeof(struct iphdr);
@@ -1874,7 +1903,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
0, 0,
IPPROTO_IPIP,
- RT_TOS(iph->tos), vif->link);
+ iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
goto out_free;
}
@@ -1959,9 +1988,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
int vif, ct;
vif = c->_c.mfc_parent;
- c->_c.mfc_un.res.pkt++;
- c->_c.mfc_un.res.bytes += skb->len;
- c->_c.mfc_un.res.lastuse = jiffies;
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
struct mfc_cache *cache_proxy;
@@ -1992,7 +2021,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
goto dont_forward;
}
- c->_c.mfc_un.res.wrong_if++;
+ atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
@@ -2078,7 +2107,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = RT_TOS(iph->tos),
+ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
.flowi4_oif = (rt_is_output_route(rt) ?
skb->dev->ifindex : 0),
.flowi4_iif = (rt_is_output_route(rt) ?
@@ -2259,11 +2288,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
struct mr_table *mrt;
int err;
- mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
- if (!mrt)
+ rcu_read_lock();
+ mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt) {
+ rcu_read_unlock();
return -ENOENT;
+ }
- rcu_read_lock();
cache = ipmr_cache_find(mrt, saddr, daddr);
if (!cache && skb->dev) {
int vif = ipmr_find_vif(mrt, skb->dev);
@@ -2404,8 +2435,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
errout:
kfree_skb(skb);
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
@@ -2544,11 +2574,11 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout;
- src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
- grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
- tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
+ src = nla_get_in_addr_default(tb[RTA_SRC], 0);
+ grp = nla_get_in_addr_default(tb[RTA_DST], 0);
+ tableid = nla_get_u32_default(tb[RTA_TABLE], 0);
- mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
+ mrt = __ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
if (!mrt) {
err = -ENOENT;
goto errout_free;
@@ -2587,7 +2617,9 @@ errout_free:
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
@@ -2600,7 +2632,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (filter.table_id) {
struct mr_table *mrt;
- mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
+ mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id);
if (!mrt) {
if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
return skb->len;
@@ -2708,7 +2740,7 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
break;
}
}
- mrt = ipmr_get_table(net, tblid);
+ mrt = __ipmr_get_table(net, tblid);
if (!mrt) {
ret = -ENOENT;
goto out;
@@ -2916,13 +2948,15 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
struct net *net = seq_file_net(seq);
struct mr_table *mrt;
- mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
- if (!mrt)
+ rcu_read_lock();
+ mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt) {
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
+ }
iter->mrt = mrt;
- rcu_read_lock();
return mr_vif_seq_start(seq, pos);
}
@@ -2995,9 +3029,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
if (it->cache != &mrt->mfc_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
- mfc->_c.mfc_un.res.pkt,
- mfc->_c.mfc_un.res.bytes,
- mfc->_c.mfc_un.res.wrong_if);
+ atomic_long_read(&mfc->_c.mfc_un.res.pkt),
+ atomic_long_read(&mfc->_c.mfc_un.res.bytes),
+ atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
for (n = mfc->_c.mfc_un.res.minvif;
n < mfc->_c.mfc_un.res.maxvif; n++) {
if (VIF_EXISTS(mrt, n) &&
@@ -3031,11 +3065,9 @@ static const struct net_protocol pim_protocol = {
};
#endif
-static unsigned int ipmr_seq_read(struct net *net)
+static unsigned int ipmr_seq_read(const struct net *net)
{
- ASSERT_RTNL();
-
- return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
+ return READ_ONCE(net->ipv4.ipmr_seq) + ipmr_rules_seq_read(net);
}
static int ipmr_dump(struct net *net, struct notifier_block *nb,
@@ -3135,14 +3167,22 @@ static struct pernet_operations ipmr_net_ops = {
.exit_batch = ipmr_net_exit_batch,
};
+static const struct rtnl_msg_handler ipmr_rtnl_msg_handlers[] __initconst = {
+ {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETLINK,
+ .dumpit = ipmr_rtm_dumplink},
+ {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_NEWROUTE,
+ .doit = ipmr_rtm_route},
+ {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_DELROUTE,
+ .doit = ipmr_rtm_route},
+ {.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETROUTE,
+ .doit = ipmr_rtm_getroute, .dumpit = ipmr_rtm_dumproute},
+};
+
int __init ip_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip_mrt_cache",
- sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
@@ -3158,15 +3198,8 @@ int __init ip_mr_init(void)
goto add_proto_fail;
}
#endif
- rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
- ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
- rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
- ipmr_rtm_route, NULL, 0);
- rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
- ipmr_rtm_route, NULL, 0);
-
- rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
- NULL, ipmr_rtm_dumplink, 0);
+ rtnl_register_many(ipmr_rtnl_msg_handlers);
+
return 0;
#ifdef CONFIG_IP_PIMSM_V2
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 271dc03fc6db..28d77d454d44 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -263,9 +263,9 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
lastuse = READ_ONCE(c->mfc_un.res.lastuse);
lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
- mfcs.mfcs_packets = c->mfc_un.res.pkt;
- mfcs.mfcs_bytes = c->mfc_un.res.bytes;
- mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
+ mfcs.mfcs_packets = atomic_long_read(&c->mfc_un.res.pkt);
+ mfcs.mfcs_bytes = atomic_long_read(&c->mfc_un.res.bytes);
+ mfcs.mfcs_wrong_if = atomic_long_read(&c->mfc_un.res.wrong_if);
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
@@ -310,7 +310,8 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
if (filter->filter_set)
flags |= NLM_F_DUMP_FILTERED;
- list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list,
+ lockdep_rtnl_is_held()) {
if (e < s_e)
goto next_entry;
if (filter->dev &&
@@ -329,9 +330,6 @@ next_entry:
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
if (e < s_e)
goto next_entry2;
- if (filter->dev &&
- !mr_mfc_uses_dev(mrt, mfc, filter->dev))
- goto next_entry2;
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
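
The mfc statistics (pkt, bytes, wrong_if) become atomic_long_t so the forwarding path can bump them without a lock shared with readers, while dump and ioctl paths snapshot them with atomic_long_read(); lastuse keeps the plain-word WRITE_ONCE()/READ_ONCE() pairing. The update/read pattern in isolation (illustrative only, field names as in struct mr_mfc after the conversion above):

/* Forwarding-path update and reader-side snapshot, as converted above. */
static void example_account_packet(struct mr_mfc *c, unsigned int len)
{
	atomic_long_inc(&c->mfc_un.res.pkt);
	atomic_long_add(len, &c->mfc_un.res.bytes);
	WRITE_ONCE(c->mfc_un.res.lastuse, jiffies);
}

static void example_read_counters(const struct mr_mfc *c,
				  unsigned long *pkt, unsigned long *bytes)
{
	*pkt   = atomic_long_read(&c->mfc_un.res.pkt);
	*bytes = atomic_long_read(&c->mfc_un.res.bytes);
}
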
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 0e3ee1532848..8ddac1f595ed 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -7,7 +7,7 @@
#include <net/net_namespace.h>
#include <net/tcp.h>
-static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+static int ip_metrics_convert(struct nlattr *fc_mx,
int fc_mx_len, u32 *metrics,
struct netlink_ext_ack *extack)
{
@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
char tmp[TCP_CA_NAME_MAX];
nla_strscpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
if (val == TCP_CA_UNSPEC) {
NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
return -EINVAL;
@@ -63,7 +63,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
return 0;
}
-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx,
int fc_mx_len,
struct netlink_ext_ack *extack)
{
@@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
if (unlikely(!fib_metrics))
return ERR_PTR(-ENOMEM);
- err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
+ err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics,
extack);
if (!err) {
refcount_set(&fib_metrics->refcnt, 1);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 591a2737808e..08bc3f2c0078 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -14,6 +14,7 @@
#include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/inet_dscp.h>
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
@@ -43,7 +44,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
*/
fl4.daddr = iph->daddr;
fl4.saddr = saddr;
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f71a7e9a7de6..ef8009281da5 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -10,6 +10,16 @@ config NF_DEFRAG_IPV4
tristate
default n
+# old sockopt interface and eval loop
+config IP_NF_IPTABLES_LEGACY
+ tristate "Legacy IP tables support"
+ default n
+ select NETFILTER_XTABLES
+ help
+ iptables is a legacy packet classifier.
+ This is not needed if you are using iptables over nftables
+ (iptables-nft).
+
config NF_SOCKET_IPV4
tristate "IPv4 socket lookup support"
help
@@ -152,7 +162,7 @@ config IP_NF_MATCH_ECN
config IP_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP_NF_MANGLE || IP_NF_RAW
+ depends on IP_NF_MANGLE || IP_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -173,6 +183,7 @@ config IP_NF_MATCH_TTL
config IP_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -182,7 +193,7 @@ config IP_NF_FILTER
config IP_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP_NF_FILTER
+ depends on IP_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV4
default m if NETFILTER_ADVANCED=n
help
@@ -212,6 +223,7 @@ config IP_NF_NAT
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NETFILTER_XT_NAT
+ select IP_NF_IPTABLES_LEGACY
help
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
@@ -252,6 +264,7 @@ endif # IP_NF_NAT
config IP_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -261,7 +274,7 @@ config IP_NF_MANGLE
config IP_NF_TARGET_ECN
tristate "ECN target support"
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `ECN' target, which can be used in the iptables mangle
@@ -286,6 +299,7 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -299,6 +313,7 @@ config IP_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -309,36 +324,42 @@ endif # IP_NF_IPTABLES
# ARP tables
config IP_NF_ARPTABLES
- tristate "ARP tables support"
- select NETFILTER_XTABLES
- select NETFILTER_FAMILY_ARP
- depends on NETFILTER_ADVANCED
+ tristate "Legacy ARPTABLES support"
+ depends on NETFILTER_XTABLES
+ default n
help
- arptables is a general, extensible packet identification framework.
- The ARP packet filtering and mangling (manipulation)subsystems
- use this: say Y or M here if you want to use either of those.
-
- To compile it as a module, choose M here. If unsure, say N.
+ arptables is a legacy packet classifier.
+ This is not needed if you are using arptables over nftables
+ (iptables-nft).
-if IP_NF_ARPTABLES
+config NFT_COMPAT_ARP
+ tristate
+ depends on NF_TABLES_ARP && NFT_COMPAT
+ default m if NFT_COMPAT=m
+ default y if NFT_COMPAT=y
config IP_NF_ARPFILTER
- tristate "ARP packet filtering"
+ tristate "arptables-legacy packet filtering support"
+ select IP_NF_ARPTABLES
+ select NETFILTER_FAMILY_ARP
+ depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
rules for simple ARP packet filtering at local input and
- local output. On a bridge, you can also specify filtering rules
- for forwarded ARP packets. See the man page for arptables(8).
+ local output. This is only needed for arptables-legacy(8).
+ Neither arptables-nft nor nftables need this to work.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_ARP_MANGLE
tristate "ARP payload mangling"
+ depends on IP_NF_ARPTABLES || NFT_COMPAT_ARP
help
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
-endif # IP_NF_ARPTABLES
+ This option is needed by both arptables-legacy and arptables-nft.
+ It is not used by nftables.
endmenu
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5a26f9de1ab9..85502d4dfbb4 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# generic IP tables
-obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
+obj-$(CONFIG_IP_NF_IPTABLES_LEGACY) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 2407066b0fec..1cdd9c28ab2d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -826,7 +826,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
- strcpy(info.name, name);
+ strscpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
@@ -956,6 +956,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct arpt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -964,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1254,6 +1258,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct arpt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1262,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1539,7 +1547,7 @@ int arpt_register_table(struct net *net,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7da1df4997d0..3d101613f27f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -981,7 +981,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
- strcpy(info.name, name);
+ strscpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
@@ -1108,6 +1108,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ipt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1116,6 +1118,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1492,6 +1496,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ipt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1500,6 +1506,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1759,7 +1767,7 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index ded5bef02f77..a27782d7653e 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/inet_dscp.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip_fib.h>
@@ -75,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
+ flow.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index b9062f4552ac..3ab908b74795 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -44,7 +44,7 @@ static int iptable_filter_table_init(struct net *net)
return -ENOMEM;
/* Entry 1 is the FORWARD hook */
((struct ipt_standard *)repl->entries)[1].target.verdict =
- forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
+ forward ? -NF_ACCEPT - 1 : NF_DROP - 1;
err = ipt_register_table(net, &packet_filter, repl, filter_ops);
kfree(repl);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 4d42d0756fd7..a5db7c67d61b 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
static int __init iptable_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv4_table,
- iptable_nat_table_init);
+ int ret;
+ /* net->gen->ptr[iptable_nat_net_id] must be allocated
+ * before calling iptable_nat_table_init().
+ */
+ ret = register_pernet_subsys(&iptable_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&iptable_nat_net_ops);
- if (ret < 0) {
- xt_unregister_template(&nf_nat_ipv4_table);
- return ret;
- }
+ ret = xt_register_template(&nf_nat_ipv4_table,
+ iptable_nat_table_init);
+ if (ret < 0)
+ unregister_pernet_subsys(&iptable_nat_net_ops);
return ret;
}
static void __exit iptable_nat_exit(void)
{
- unregister_pernet_subsys(&iptable_nat_net_ops);
xt_unregister_template(&nf_nat_ipv4_table);
+ unregister_pernet_subsys(&iptable_nat_net_ops);
}
module_init(iptable_nat_init);
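
Editorial note: the iptable_nat hunk is an ordering fix, spelled out in the new comment, so the per-netns storage exists before xt_register_template() can call iptable_nat_table_init(); the exit path and the error path unwind in the reverse order. A generic userspace sketch of that register-in-dependency-order, unwind-in-reverse pattern; the helpers below are stand-ins, not kernel APIs:

#include <stdio.h>

static int storage_ready;

/* Stand-in for register_pernet_subsys(): makes the backing storage usable. */
static int register_storage(void)
{
	storage_ready = 1;
	return 0;
}

static void unregister_storage(void)
{
	storage_ready = 0;
}

/* Stand-in for xt_register_template(): its init callback needs the storage. */
static int register_consumer(void)
{
	if (!storage_ready) {
		fprintf(stderr, "consumer registered before its storage\n");
		return -1;
	}
	return 0;
}

static void unregister_consumer(void)
{
}

static int init_sketch(void)
{
	int ret;

	ret = register_storage();        /* dependency first */
	if (ret)
		return ret;

	ret = register_consumer();       /* user of the dependency second */
	if (ret)
		unregister_storage();    /* unwind in reverse on failure */
	return ret;
}

static void exit_sketch(void)
{
	unregister_consumer();           /* reverse order of init */
	unregister_storage();
}

int main(void)
{
	if (init_sketch() == 0) {
		puts("init ok");
		exit_sketch();
	}
	return 0;
}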
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index 6cc5743c553a..25e1e8eb18dd 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -15,6 +15,7 @@
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/inet_dscp.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
@@ -32,7 +33,7 @@ static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
fl4.flowi4_oif = oif;
fl4.daddr = gw->s_addr;
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
@@ -52,8 +53,9 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
{
struct iphdr *iph;
+ local_bh_disable();
if (this_cpu_read(nf_skb_duplicated))
- return;
+ goto out;
/*
* Copy the skb, and route the copy. Will later return %XT_CONTINUE for
* the original skb, which should continue on its way as if nothing has
@@ -61,7 +63,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
*/
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
- return;
+ goto out;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Avoid counting cloned packets towards the original connection. */
@@ -90,6 +92,8 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
} else {
kfree_skb(skb);
}
+out:
+ local_bh_enable();
}
EXPORT_SYMBOL_GPL(nf_dup_ipv4);
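
Editorial note: the nf_dup_ipv4 hunks wrap the function in local_bh_disable()/local_bh_enable() so the per-CPU nf_skb_duplicated flag is tested and cleared on one CPU without interference, and the early returns become "goto out" so that critical section always closes. A rough userspace analogue using a thread-local re-entry flag in place of the per-CPU one; all names below are invented for the sketch:

#include <stdio.h>

static _Thread_local int duplicating;   /* stand-in for nf_skb_duplicated */
static int bh_depth;                    /* stand-in for BH-disable nesting */

static void send_copy(const char *pkt)
{
	printf("sending duplicate of %s\n", pkt);
}

static void duplicate_packet(const char *pkt)
{
	bh_depth++;                 /* local_bh_disable() in the real code  */

	if (duplicating)
		goto out;           /* already duplicating: avoid recursion */

	duplicating = 1;
	send_copy(pkt);             /* this path could re-enter us          */
	duplicating = 0;
out:
	bh_depth--;                 /* local_bh_enable(): runs on every path */
}

int main(void)
{
	duplicate_packet("skb#1");
	return 0;
}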
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 04504b2b51df..87fd945a0d27 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -239,9 +239,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct sk_buff *nskb;
- struct iphdr *niph;
const struct tcphdr *oth;
+ struct sk_buff *nskb;
struct tcphdr _oth;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
@@ -266,14 +265,12 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
- ip4_dst_hoplimit(skb_dst(nskb)));
+ nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
goto free_nskb;
- niph = ip_hdr(nskb);
-
/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
goto free_nskb;
@@ -290,6 +287,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct iphdr *niph = ip_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 69e331799604..73e66a088e25 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
laddr = 0;
indev = __in_dev_get_rcu(skb->dev);
+ if (!indev)
+ return daddr;
in_dev_for_each_ifa_rcu(ifa, indev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index a522c3a3be52..ef5dd88107dd 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -40,13 +40,13 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
sizeof(struct in_addr));
if (err < 0)
return err;
if (tb[NFTA_DUP_SREG_DEV])
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV],
&priv->sreg_dev, sizeof(int));
return err;
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 9eee535c64dd..625adbc42037 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -10,6 +10,8 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
+#include <net/inet_dscp.h>
+#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>
@@ -22,8 +24,6 @@ static __be32 get_saddr(__be32 addr)
return addr;
}
-#define DSCP_BITS 0xfc
-
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
@@ -66,6 +66,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
.flowi4_scope = RT_SCOPE_UNIVERSE,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
+ .flowi4_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
};
const struct net_device *oif;
const struct net_device *found;
@@ -84,9 +85,6 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
else
oif = NULL;
- if (priv->flags & NFTA_FIB_F_IIF)
- fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif);
-
if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
nft_fib_store_result(dest, priv, nft_in(pkt));
@@ -110,7 +108,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
if (priv->flags & NFTA_FIB_F_MARK)
fl4.flowi4_mark = pkt->skb->mark;
- fl4.flowi4_tos = iph->tos & DSCP_BITS;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
if (priv->flags & NFTA_FIB_F_DADDR) {
fl4.daddr = iph->daddr;
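
Editorial note: several hunks in these files replace open-coded "iph->tos & mask" with inet_dscp_to_dsfield(ip4h_dscp(iph)). The bit operation is unchanged, keeping the 6 DSCP bits of the tos byte and dropping the 2 ECN bits (the removed DSCP_BITS define was 0xfc), but it now goes through the typed dscp_t helpers. A plain-C illustration of that bit layout; the helper here is a local stand-in:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DSCP_MASK 0xfc          /* upper 6 bits of the tos/dsfield byte */
#define ECN_MASK  0x03          /* lower 2 bits */

/* Stand-in for ip4h_dscp()/inet_dscp_to_dsfield(): strip ECN, keep DSCP. */
static uint8_t dscp_from_tos(uint8_t tos)
{
	return tos & DSCP_MASK;
}

int main(void)
{
	uint8_t tos = 0xb9;         /* DSCP 46 (EF) with ECN bits set to 01 */

	assert(dscp_from_tos(tos) == 0xb8);
	assert((dscp_from_tos(tos) >> 2) == 46);       /* DSCP codepoint */
	printf("tos=0x%02x dscp_field=0x%02x ecn=0x%02x\n",
	       tos, dscp_from_tos(tos), tos & ECN_MASK);
	return 0;
}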
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index bbff68b5b5d4..09a3d73b45ba 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -26,6 +26,9 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
#define NH_DEV_HASHBITS 8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
+#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
+ NHA_OP_FLAG_DUMP_HW_STATS)
+
static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ID] = { .type = NLA_U32 },
[NHA_GROUP] = { .type = NLA_BINARY },
@@ -37,10 +40,17 @@ static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ENCAP] = { .type = NLA_NESTED },
[NHA_FDB] = { .type = NLA_FLAG },
[NHA_RES_GROUP] = { .type = NLA_NESTED },
+ [NHA_HW_STATS_ENABLE] = NLA_POLICY_MAX(NLA_U32, true),
};
static const struct nla_policy rtm_nh_policy_get[] = {
[NHA_ID] = { .type = NLA_U32 },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
+};
+
+static const struct nla_policy rtm_nh_policy_del[] = {
+ [NHA_ID] = { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
@@ -48,6 +58,8 @@ static const struct nla_policy rtm_nh_policy_dump[] = {
[NHA_GROUPS] = { .type = NLA_FLAG },
[NHA_MASTER] = { .type = NLA_U32 },
[NHA_FDB] = { .type = NLA_FLAG },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_res_policy_new[] = {
@@ -92,6 +104,7 @@ __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
else if (nh_info->gw_family == AF_INET6)
nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
+ nh_info->id = nhi->nh_parent->id;
nh_info->is_reject = nhi->reject_nh;
nh_info->is_fdb = nhi->fdb_nh;
nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
@@ -131,13 +144,13 @@ static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
info->nh_grp->num_nh = num_nh;
info->nh_grp->is_fdb = nhg->fdb_nh;
+ info->nh_grp->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh; i++) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
struct nh_info *nhi;
nhi = rtnl_dereference(nhge->nh->nh_info);
- info->nh_grp->nh_entries[i].id = nhge->nh->id;
info->nh_grp->nh_entries[i].weight = nhge->weight;
__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
nhi);
@@ -162,6 +175,7 @@ static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
return -ENOMEM;
info->nh_res_table->num_nh_buckets = num_nh_buckets;
+ info->nh_res_table->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh_buckets; i++) {
struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
@@ -393,6 +407,7 @@ static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
struct nh_notifier_info info = {
.net = net,
.extack = extack,
+ .id = nh->id,
};
struct nh_group *nhg;
int err;
@@ -474,6 +489,7 @@ static void nexthop_free_group(struct nexthop *nh)
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
WARN_ON(!list_empty(&nhge->nh_list));
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
}
@@ -654,14 +670,213 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
+{
+ struct nh_grp_entry_stats *cpu_stats;
+
+ cpu_stats = get_cpu_ptr(nhge->stats);
+ u64_stats_update_begin(&cpu_stats->syncp);
+ u64_stats_inc(&cpu_stats->packets);
+ u64_stats_update_end(&cpu_stats->syncp);
+ put_cpu_ptr(cpu_stats);
+}
+
+static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
+ u64 *ret_packets)
+{
+ int i;
+
+ *ret_packets = 0;
+
+ for_each_possible_cpu(i) {
+ struct nh_grp_entry_stats *cpu_stats;
+ unsigned int start;
+ u64 packets;
+
+ cpu_stats = per_cpu_ptr(nhge->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ packets = u64_stats_read(&cpu_stats->packets);
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ *ret_packets += packets;
+ }
+}
+
+static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ info->id = nh->id;
+ info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
+ info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
+ stats, nhg->num_nh),
+ GFP_KERNEL);
+ if (!info->nh_grp_hw_stats)
+ return -ENOMEM;
+
+ info->nh_grp_hw_stats->num_nh = nhg->num_nh;
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
+ }
+
+ return 0;
+}
+
+static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh_grp_hw_stats);
+}
+
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets)
+{
+ info->hw_stats_used = true;
+ info->stats[nh_idx].packets += delta_packets;
+}
+EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
+
+static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
+ }
+}
+
+static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
+{
+ struct nh_notifier_info info = {
+ .net = nh->net,
+ };
+ struct net *net = nh->net;
+ int err;
+
+ if (nexthop_notifiers_is_empty(net)) {
+ *hw_stats_used = false;
+ return 0;
+ }
+
+ err = nh_notifier_grp_hw_stats_init(&info, nh);
+ if (err)
+ return err;
+
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
+ &info);
+
+ /* Cache whatever we got, even if there was an error, otherwise the
+ * successful stats retrievals would get lost.
+ */
+ nh_grp_hw_stats_apply_update(nh, &info);
+ *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
+
+ nh_notifier_grp_hw_stats_fini(&info);
+ return notifier_to_errno(err);
+}
+
+static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
+ struct nh_grp_entry *nhge,
+ u32 op_flags)
+{
+ struct nlattr *nest;
+ u64 packets;
+
+ nh_grp_entry_stats_read(nhge, &packets);
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
+ packets + nhge->packets_hw))
+ goto nla_put_failure;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+ nhge->packets_hw))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nlattr *nest;
+ bool hw_stats_used;
+ int err;
+ int i;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
+ goto err_out;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nhg->hw_stats) {
+ err = nh_grp_hw_stats_update(nh, &hw_stats_used);
+ if (err)
+ goto out;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
+ goto err_out;
+ }
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS);
+ if (!nest)
+ goto err_out;
+
+ for (i = 0; i < nhg->num_nh; i++)
+ if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
+ op_flags))
+ goto cancel_out;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+cancel_out:
+ nla_nest_cancel(skb, nest);
+err_out:
+ err = -EMSGSIZE;
+out:
+ return err;
+}
+
+static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags, u32 *resp_op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p;
size_t len = nhg->num_nh * sizeof(*p);
struct nlattr *nla;
u16 group_type = 0;
+ u16 weight;
int i;
+ *resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;
+
if (nhg->hash_threshold)
group_type = NEXTHOP_GRP_TYPE_MPATH;
else if (nhg->resilient)
@@ -676,14 +891,23 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
p = nla_data(nla);
for (i = 0; i < nhg->num_nh; ++i) {
- p->id = nhg->nh_entries[i].nh->id;
- p->weight = nhg->nh_entries[i].weight - 1;
- p += 1;
+ weight = nhg->nh_entries[i].weight - 1;
+
+ *p++ = (struct nexthop_grp) {
+ .id = nhg->nh_entries[i].nh->id,
+ .weight = weight,
+ .weight_high = weight >> 8,
+ };
}
if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
goto nla_put_failure;
+ if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
+ (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
+ nla_put_nh_group_stats(skb, nh, op_flags)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -691,7 +915,8 @@ nla_put_failure:
}
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
- int event, u32 portid, u32 seq, unsigned int nlflags)
+ int event, u32 portid, u32 seq, unsigned int nlflags,
+ u32 op_flags)
{
struct fib6_nh *fib6_nh;
struct fib_nh *fib_nh;
@@ -715,10 +940,12 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nh->is_group) {
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ u32 resp_op_flags = 0;
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure;
- if (nla_put_nh_group(skb, nhg))
+ if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
+ nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
goto nla_put_failure;
goto out;
}
@@ -831,7 +1058,9 @@ static size_t nh_nlmsg_size(struct nexthop *nh)
sz += nla_total_size(4); /* NHA_ID */
if (nh->is_group)
- sz += nh_nlmsg_size_grp(nh);
+ sz += nh_nlmsg_size_grp(nh) +
+ nla_total_size(4) + /* NHA_OP_FLAGS */
+ 0;
else
sz += nh_nlmsg_size_single(nh);
@@ -849,7 +1078,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
if (!skb)
goto errout;
- err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
+ err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -861,8 +1090,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
info->nlh, gfp_any());
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
+ rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
@@ -982,8 +1210,7 @@ static void nexthop_bucket_notify(struct nh_res_table *res_table,
rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
+ rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
@@ -1061,11 +1288,14 @@ static int nh_check_attr_group(struct net *net,
nhg = nla_data(tb[NHA_GROUP]);
for (i = 0; i < len; ++i) {
- if (nhg[i].resvd1 || nhg[i].resvd2) {
- NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
+ if (nhg[i].resvd2) {
+ NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
return -EINVAL;
}
- if (nhg[i].weight > 254) {
+ if (nexthop_grp_weight(&nhg[i]) == 0) {
+ /* 0xffff got passed in, representing weight of 0x10000,
+ * which is too heavy.
+ */
NL_SET_ERR_MSG(extack, "Invalid value for weight");
return -EINVAL;
}
@@ -1104,6 +1334,7 @@ static int nh_check_attr_group(struct net *net,
if (!tb[i])
continue;
switch (i) {
+ case NHA_HW_STATS_ENABLE:
case NHA_FDB:
continue;
case NHA_RES_GROUP:
@@ -1176,6 +1407,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1185,7 +1417,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
- struct nexthop *rc = NULL;
+ struct nh_grp_entry *nhge0 = NULL;
int i;
if (nhg->fdb_nh)
@@ -1200,16 +1432,20 @@ static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
if (!nexthop_is_good_nh(nhge->nh))
continue;
- if (!rc)
- rc = nhge->nh;
+ if (!nhge0)
+ nhge0 = nhge;
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
- return rc ? : nhg->nh_entries[0].nh;
+ if (!nhge0)
+ nhge0 = &nhg->nh_entries[0];
+ nh_grp_entry_stats_inc(nhge0);
+ return nhge0->nh;
}
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
@@ -1225,6 +1461,7 @@ static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
bucket = &res_table->nh_buckets[bucket_index];
nh_res_bucket_set_busy(bucket);
nhge = rcu_dereference(bucket->nh_entry);
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
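
Editorial note: the path-selection hunks above add nh_grp_entry_stats_inc() at each point where a group member is chosen. The counter is per-CPU, so the hot path only touches CPU-local memory, and nh_grp_entry_stats_read() folds all CPUs together when the statistics are dumped. A simplified userspace analogue with a plain per-slot array; the real code additionally uses u64_stats_sync so 64-bit counters read consistently on 32-bit hosts:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct entry_stats {
	uint64_t packets[NR_CPUS];   /* one slot per CPU, written locally */
};

/* Hot path: bump only the current CPU's slot (no shared cache line). */
static void stats_inc(struct entry_stats *s, int cpu)
{
	s->packets[cpu]++;
}

/* Dump path: fold all per-CPU slots into one total. */
static uint64_t stats_read(const struct entry_stats *s)
{
	uint64_t total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		total += s->packets[cpu];
	return total;
}

int main(void)
{
	struct entry_stats s = { { 0 } };

	stats_inc(&s, 0);
	stats_inc(&s, 0);
	stats_inc(&s, 3);
	printf("total packets: %llu\n", (unsigned long long)stats_read(&s));
	return 0;
}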
@@ -1654,9 +1891,9 @@ static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
static void nh_res_group_rebalance(struct nh_group *nhg,
struct nh_res_table *res_table)
{
- int prev_upper_bound = 0;
- int total = 0;
- int w = 0;
+ u16 prev_upper_bound = 0;
+ u32 total = 0;
+ u32 w = 0;
int i;
INIT_LIST_HEAD(&res_table->uw_nh_entries);
@@ -1666,11 +1903,12 @@ static void nh_res_group_rebalance(struct nh_group *nhg,
for (i = 0; i < nhg->num_nh; ++i) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
- int upper_bound;
+ u16 upper_bound;
+ u64 btw;
w += nhge->weight;
- upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
- total);
+ btw = ((u64)res_table->num_nh_buckets) * w;
+ upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
nhge->res.wants_buckets = upper_bound - prev_upper_bound;
prev_upper_bound = upper_bound;
@@ -1736,8 +1974,8 @@ static void replace_nexthop_grp_res(struct nh_group *oldg,
static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
- int total = 0;
- int w = 0;
+ u32 total = 0;
+ u32 w = 0;
int i;
for (i = 0; i < nhg->num_nh; ++i)
@@ -1745,7 +1983,7 @@ static void nh_hthr_group_rebalance(struct nh_group *nhg)
for (i = 0; i < nhg->num_nh; ++i) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
- int upper_bound;
+ u32 upper_bound;
w += nhge->weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
@@ -1798,6 +2036,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
newg->has_v4 = true;
list_del(&nhges[i].nh_list);
+ new_nhges[j].stats = nhges[i].stats;
new_nhges[j].nh_parent = nhges[i].nh_parent;
new_nhges[j].nh = nhges[i].nh;
new_nhges[j].weight = nhges[i].weight;
@@ -1813,6 +2052,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
rcu_assign_pointer(nhp->nh_grp, newg);
list_del(&nhge->nh_list);
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
/* Removal of a NH from a resilient group is notified through
@@ -2477,8 +2717,16 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (nhi->family == AF_INET)
nhg->has_v4 = true;
+ nhg->nh_entries[i].stats =
+ netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
+ if (!nhg->nh_entries[i].stats) {
+ err = -ENOMEM;
+ nexthop_put(nhe);
+ goto out_no_nh;
+ }
nhg->nh_entries[i].nh = nhe;
- nhg->nh_entries[i].weight = entry[i].weight + 1;
+ nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
+
list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
nhg->nh_entries[i].nh_parent = nh;
}
@@ -2509,6 +2757,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (cfg->nh_fdb)
nhg->fdb_nh = 1;
+ if (cfg->nh_hw_stats)
+ nhg->hw_stats = true;
+
rcu_assign_pointer(nh->nh_grp, nhg);
return nh;
@@ -2516,6 +2767,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
out_no_nh:
for (i--; i >= 0; --i) {
list_del(&nhg->nh_entries[i].nh_list);
+ free_percpu(nhg->nh_entries[i].stats);
nexthop_put(nhg->nh_entries[i].nh);
}
@@ -2850,6 +3102,9 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
cfg, extack);
+ if (tb[NHA_HW_STATS_ENABLE])
+ cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
+
/* no other attributes should be set */
goto out;
}
@@ -2941,6 +3196,10 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
goto out;
}
+ if (tb[NHA_HW_STATS_ENABLE]) {
+ NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
+ goto out;
+ }
err = 0;
out:
@@ -2966,9 +3225,9 @@ static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
- struct nlattr **tb, u32 *id,
- struct netlink_ext_ack *extack)
+static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u32 *id, u32 *op_flags,
+ struct netlink_ext_ack *extack)
{
struct nhmsg *nhm = nlmsg_data(nlh);
@@ -2988,28 +3247,17 @@ static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- return 0;
-}
-
-static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
- int err;
-
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_get) - 1,
- rtm_nh_policy_get, extack);
- if (err < 0)
- return err;
+ if (op_flags)
+ *op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
- return __nh_valid_get_del_req(nlh, tb, id, extack);
+ return 0;
}
/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
struct net *net = sock_net(skb->sk);
struct nl_info nlinfo = {
.nlh = nlh,
@@ -3020,7 +3268,13 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
if (err)
return err;
@@ -3037,13 +3291,21 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
struct net *net = sock_net(in_skb->sk);
struct sk_buff *skb = NULL;
struct nexthop *nh;
+ u32 op_flags;
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
if (err)
return err;
@@ -3058,7 +3320,7 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_free;
err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ nlh->nlmsg_seq, 0, op_flags);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
goto errout_free;
@@ -3079,6 +3341,7 @@ struct nh_dump_filter {
bool group_filter;
bool fdb_filter;
u32 res_bucket_nh_id;
+ u32 op_flags;
};
static bool nh_dump_filtered(struct nexthop *nh,
@@ -3166,6 +3429,8 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
+ filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
+
return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}
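
Editorial note: both the get and dump paths read NHA_OP_FLAGS through nla_get_u32_default(attr, 0), i.e. use the attribute's value when it is present in the message, otherwise fall back to 0. A minimal analogue of that accessor pattern; the types are simplified stand-ins, not the kernel's struct nlattr layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified attribute: a NULL pointer means "not present in the message". */
struct attr {
	uint32_t value;
};

static uint32_t get_u32_default(const struct attr *a, uint32_t def)
{
	return a ? a->value : def;
}

int main(void)
{
	struct attr op_flags = { .value = 0x3 };

	printf("present: 0x%x\n", get_u32_default(&op_flags, 0));   /* 0x3 */
	printf("absent:  0x%x\n", get_u32_default(NULL, 0));        /* 0x0 */
	return 0;
}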
@@ -3223,7 +3488,7 @@ static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}
/* rtnl */
@@ -3241,10 +3506,6 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
&rtm_dump_nexthop_cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3439,11 +3700,6 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
&rtm_dump_nexthop_bucket_cb, &dd);
}
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
-
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return err;
@@ -3483,7 +3739,7 @@ static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
- err = __nh_valid_get_del_req(nlh, tb, id, extack);
+ err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
if (err)
return err;
@@ -3631,17 +3887,24 @@ unlock:
}
EXPORT_SYMBOL(register_nexthop_notifier);
-int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
int err;
- rtnl_lock();
err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
nb);
- if (err)
- goto unlock;
- nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
-unlock:
+ if (!err)
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+ return err;
+}
+EXPORT_SYMBOL(__unregister_nexthop_notifier);
+
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = __unregister_nexthop_notifier(net, nb);
rtnl_unlock();
return err;
}
@@ -3737,16 +4000,20 @@ out:
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
-static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
+static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list) {
+ ASSERT_RTNL();
+ list_for_each_entry(net, net_list, exit_list)
flush_all_nexthops(net);
- kfree(net->nexthop.devhash);
- }
- rtnl_unlock();
+}
+
+static void __net_exit nexthop_net_exit(struct net *net)
+{
+ kfree(net->nexthop.devhash);
+ net->nexthop.devhash = NULL;
}
static int __net_init nexthop_net_init(struct net *net)
@@ -3764,7 +4031,25 @@ static int __net_init nexthop_net_init(struct net *net)
static struct pernet_operations nexthop_net_ops = {
.init = nexthop_net_init,
- .exit_batch = nexthop_net_exit_batch,
+ .exit = nexthop_net_exit,
+ .exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
+};
+
+static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop},
+ {.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop},
+ {.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop,
+ .dumpit = rtm_dump_nexthop},
+ {.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket,
+ .dumpit = rtm_dump_nexthop_bucket},
+ {.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP,
+ .doit = rtm_new_nexthop},
+ {.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP,
+ .dumpit = rtm_dump_nexthop},
+ {.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP,
+ .doit = rtm_new_nexthop},
+ {.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP,
+ .dumpit = rtm_dump_nexthop},
};
static int __init nexthop_init(void)
@@ -3773,19 +4058,7 @@ static int __init nexthop_init(void)
register_netdevice_notifier(&nh_netdev_notifier);
- rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
- rtm_dump_nexthop, 0);
-
- rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
- rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
-
- rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
- rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
-
- rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
- rtm_dump_nexthop_bucket, 0);
+ rtnl_register_many(nexthop_rtnl_msg_handlers);
return 0;
}
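
Editorial note on the nexthop.c weight change: group member weights are now carried as a 16-bit value split across the old 8-bit weight field and the repurposed reserved byte (weight_high), still using the long-standing store-as-weight-minus-one convention. Decoding 0xffff would mean a weight of 0x10000, which no longer fits, hence the new "decoded weight == 0" rejection in nh_check_attr_group(). A small sketch of that encode/decode arithmetic; the struct and helpers mirror the idea but are not the exact uapi definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: low byte in 'weight', high byte in 'weight_high'. */
struct grp_entry {
	uint8_t weight;
	uint8_t weight_high;
};

/* Encode an effective weight (1..65536); the stored value is weight - 1. */
static void encode_weight(struct grp_entry *e, uint32_t weight)
{
	uint32_t stored = weight - 1;

	e->weight      = stored & 0xff;
	e->weight_high = stored >> 8;
}

/* Decode back; the out-of-range stored value 0xffff (weight 0x10000) wraps to 0. */
static uint16_t decode_weight(const struct grp_entry *e)
{
	return (uint16_t)(((uint16_t)e->weight_high << 8) + e->weight + 1);
}

int main(void)
{
	struct grp_entry e;

	encode_weight(&e, 1);             /* legacy default weight */
	assert(e.weight == 0 && e.weight_high == 0 && decode_weight(&e) == 1);

	encode_weight(&e, 256);           /* needs the high byte */
	assert(e.weight == 0xff && e.weight_high == 0 && decode_weight(&e) == 256);

	e.weight = 0xff;
	e.weight_high = 0xff;             /* 0xffff stored: weight 0x10000 */
	assert(decode_weight(&e) == 0);   /* rejected as "too heavy" */

	printf("weight encoding round-trips; 0xffff decodes to 0 and is rejected\n");
	return 0;
}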
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 823306487a82..619ddc087957 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -946,7 +946,7 @@ static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
inet_sk(sk), inet_sk(sk)->inet_num, skb);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
pr_debug("ping_queue_rcv_skb -> failed\n");
return reason;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5f4654ebff48..affd21a0f572 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -33,6 +33,7 @@
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/mptcp.h>
+#include <net/proto_memory.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/bottom_half.h>
@@ -43,7 +44,7 @@
#include <net/sock.h>
#include <net/raw.h>
-#define TCPUDP_MIB_MAX max_t(u32, UDP_MIB_MAX, TCP_MIB_MAX)
+#define TCPUDP_MIB_MAX MAX_T(u32, UDP_MIB_MAX, TCP_MIB_MAX)
/*
* Report socket allocation statistics [mea@utu.fi]
@@ -188,6 +189,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
+ SNMP_MIB_ITEM("PAWSOldAck", LINUX_MIB_PAWS_OLD_ACK),
SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED),
SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST),
@@ -395,7 +397,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
- IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
+ IPV4_DEVCONF_ALL_RO(net, FORWARDING) ? 1 : 2,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index aea89326c697..4304a68d1db0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -175,6 +175,13 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = 1;
if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
@@ -294,7 +301,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
ipv4_pktinfo_prepare(sk, skb, true);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NET_RX_DROP;
}
@@ -305,12 +312,12 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
nf_reset_ct(skb);
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
raw_rcv_skb(sk, skb);
return 0;
@@ -350,9 +357,10 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
goto error;
skb_reserve(skb, hlen);
- skb->priority = READ_ONCE(sk->sk_priority);
+ skb->protocol = htons(ETH_P_IP);
+ skb->priority = sockc->priority;
skb->mark = sockc->mark;
- skb->tstamp = sockc->transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -362,7 +370,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->ip_summed = CHECKSUM_NONE;
- skb_setup_tx_timestamp(skb, sockc->tsflags);
+ skb_setup_tx_timestamp(skb, sockc);
if (flags & MSG_CONFIRM)
skb_set_dst_pending_confirm(skb, 1);
@@ -604,6 +612,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
+ fl4.fl4_icmp_type = 0;
+ fl4.fl4_icmp_code = 0;
+
if (!hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
@@ -815,7 +826,7 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
out: return ret;
}
-static int do_raw_setsockopt(struct sock *sk, int level, int optname,
+static int do_raw_setsockopt(struct sock *sk, int optname,
sockptr_t optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
@@ -832,11 +843,11 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
- return do_raw_setsockopt(sk, level, optname, optval, optlen);
+ return do_raw_setsockopt(sk, optname, optval, optlen);
}
-static int do_raw_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+static int do_raw_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
@@ -852,7 +863,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
- return do_raw_getsockopt(sk, level, optname, optval, optlen);
+ return do_raw_getsockopt(sk, optname, optval, optlen);
}
static int raw_ioctl(struct sock *sk, int cmd, int *karg)
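
Editorial note: the raw_v4_input() hunk adds an early admission check: if the socket's receive queue already holds sk_rcvbuf worth of data, the packet is counted as a drop for that socket and the clone-and-queue work is skipped entirely. A small sketch of that admission test; the names and the byte accounting are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct raw_sock {
	unsigned int rmem_alloc;   /* bytes currently queued */
	unsigned int rcvbuf;       /* receive buffer limit */
	unsigned long drops;
};

/* Admit packets only while the queue is below the limit; once at or over
 * it, count a cheap early drop instead of cloning and queuing.
 */
static bool raw_admit(struct raw_sock *sk, unsigned int pkt_len)
{
	if (sk->rmem_alloc >= sk->rcvbuf) {
		sk->drops++;
		return false;
	}
	sk->rmem_alloc += pkt_len;       /* queue it */
	return true;
}

int main(void)
{
	struct raw_sock sk = { .rcvbuf = 4096 };

	for (int i = 0; i < 5; i++)
		printf("packet %d admitted=%d queued=%u drops=%lu\n",
		       i, raw_admit(&sk, 1500), sk.rmem_alloc, sk.drops);
	return 0;
}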
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index fe2140c8375c..cc793bd8de25 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -213,6 +213,7 @@ static int raw_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler raw_diag_handler = {
+ .owner = THIS_MODULE,
.dump = raw_diag_dump,
.dump_one = raw_diag_dump_one,
.idiag_get_info = raw_diag_get_info,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 16615d107cf0..753704f75b2c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -106,9 +106,6 @@
#include "fib_lookup.h"
-#define RT_FL_TOS(oldflp4) \
- ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-
#define RT_GC_TIMEOUT (300*HZ)
#define DEFAULT_MIN_PMTU (512 + 20 + 20)
@@ -132,7 +129,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
@@ -392,7 +390,13 @@ static inline int ip_rt_proc_init(void)
static inline bool rt_is_expired(const struct rtable *rth)
{
- return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
+ bool res;
+
+ rcu_read_lock();
+ res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
+ rcu_read_unlock();
+
+ return res;
}
void rt_cache_flush(struct net *net)
@@ -498,15 +502,6 @@ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
}
EXPORT_SYMBOL(__ip_select_ident);
-static void ip_rt_fix_tos(struct flowi4 *fl4)
-{
- __u8 tos = RT_FL_TOS(fl4);
-
- fl4->flowi4_tos = tos & IPTOS_RT_MASK;
- if (tos & RTO_ONLINK)
- fl4->flowi4_scope = RT_SCOPE_LINK;
-}
-
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
const struct sock *sk, const struct iphdr *iph,
int oif, __u8 tos, u8 prot, u32 mark,
@@ -523,7 +518,7 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
sk->sk_protocol;
}
- flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
+ flowi4_init_output(fl4, oif, mark, tos & INET_DSCP_MASK, scope,
prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
@@ -552,7 +547,7 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
- ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
+ ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk),
inet_test_bit(HDRINCL, sk) ?
IPPROTO_RAW : sk->sk_protocol,
@@ -831,28 +826,21 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
u32 mark = skb->mark;
__u8 tos = iph->tos;
- rt = (struct rtable *) dst;
+ rt = dst_rtable(dst);
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *)dst;
- struct dst_entry *ret = dst;
+ struct rtable *rt = dst_rtable(dst);
- if (rt) {
- if (dst->obsolete > 0) {
- ip_rt_put(rt);
- ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- rt->dst.expires) {
- ip_rt_put(rt);
- ret = NULL;
- }
- }
- return ret;
+ if ((dst->obsolete > 0) ||
+ (rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires)
+ sk_dst_reset(sk);
}
/*
@@ -888,11 +876,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
}
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
- rcu_read_unlock();
net = dev_net(rt->dst.dev);
- peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
if (!peer) {
+ rcu_read_unlock();
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
rt_nexthop(rt, ip_hdr(skb)->daddr));
return;
@@ -911,7 +899,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
*/
if (peer->n_redirects >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
- goto out_put_peer;
+ goto out_unlock;
}
/* Check for load limit; set rate_last to the latest sent
@@ -926,16 +914,14 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->n_redirects;
-#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (log_martians &&
+ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
-#endif
}
-out_put_peer:
- inet_putpeer(peer);
+out_unlock:
+ rcu_read_unlock();
}
static int ip_error(struct sk_buff *skb)
@@ -995,9 +981,9 @@ static int ip_error(struct sk_buff *skb)
break;
}
+ rcu_read_lock();
peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
- l3mdev_master_ifindex(skb->dev), 1);
-
+ l3mdev_master_ifindex_rcu(skb->dev));
send = true;
if (peer) {
now = jiffies;
@@ -1009,8 +995,9 @@ static int ip_error(struct sk_buff *skb)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
- inet_putpeer(peer);
}
+ rcu_read_unlock();
+
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1021,9 +1008,9 @@ out: kfree_skb_reason(skb, reason);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
- struct net *net = dev_net(dst->dev);
struct fib_result res;
bool lock = false;
+ struct net *net;
u32 old_mtu;
if (ip_mtu_locked(dst))
@@ -1033,6 +1020,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (old_mtu < mtu)
return;
+ rcu_read_lock();
+ net = dev_net_rcu(dst->dev);
if (mtu < net->ipv4.ip_rt_min_pmtu) {
lock = true;
mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
@@ -1040,17 +1029,29 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
- return;
+ goto out;
- rcu_read_lock();
if (fib_lookup(net, fl4, &res, 0) == 0) {
struct fib_nh_common *nhc;
fib_select_path(net, &res, fl4, NULL);
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ if (fib_info_num_path(res.fi) > 1) {
+ int nhsel;
+
+ for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) {
+ nhc = fib_info_nhc(res.fi, nhsel);
+ update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+ jiffies + net->ipv4.ip_rt_mtu_expires);
+ }
+ goto out;
+ }
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
jiffies + net->ipv4.ip_rt_mtu_expires);
}
+out:
rcu_read_unlock();
}
@@ -1058,7 +1059,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
- struct rtable *rt = (struct rtable *) dst;
+ struct rtable *rt = dst_rtable(dst);
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
@@ -1129,7 +1130,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
- rt = (struct rtable *)odst;
+ rt = dst_rtable(odst);
if (odst->obsolete && !odst->ops->check(odst, 0)) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
@@ -1138,7 +1139,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
new = true;
}
- __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
+ __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
@@ -1195,7 +1196,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
u32 cookie)
{
- struct rtable *rt = (struct rtable *) dst;
+ struct rtable *rt = dst_rtable(dst);
/* All IPV4 dsts are created with ->obsolete set to the value
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
@@ -1283,7 +1284,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = RT_TOS(iph->tos),
+ .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
.flowi4_oif = rt->dst.dev->ifindex,
.flowi4_iif = skb->dev->ifindex,
.flowi4_mark = skb->mark,
@@ -1313,10 +1314,15 @@ static void set_class_tag(struct rtable *rt, u32 tag)
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
- struct net *net = dev_net(dst->dev);
unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
- unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
- net->ipv4.ip_rt_min_advmss);
+ unsigned int advmss;
+ struct net *net;
+
+ rcu_read_lock();
+ net = dev_net_rcu(dst->dev);
+ advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
+ net->ipv4.ip_rt_min_advmss);
+ rcu_read_unlock();
return min(advmss, IPV4_MAX_PMTU - header_size);
}
@@ -1501,7 +1507,6 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
struct uncached_list {
spinlock_t lock;
struct list_head head;
- struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1530,10 +1535,8 @@ void rt_del_uncached_list(struct rtable *rt)
static void ipv4_dst_destroy(struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *)dst;
-
ip_dst_metrics_put(dst);
- rt_del_uncached_list(rt);
+ rt_del_uncached_list(dst_rtable(dst));
}
void rt_flush_dev(struct net_device *dev)
@@ -1554,7 +1557,7 @@ void rt_flush_dev(struct net_device *dev)
rt->dst.dev = blackhole_netdev;
netdev_ref_replace(dev, blackhole_netdev,
&rt->dst.dev_tracker, GFP_ATOMIC);
- list_move(&rt->dst.rt_uncached, &ul->quarantine);
+ list_del_init(&rt->dst.rt_uncached);
}
spin_unlock_bh(&ul->lock);
}
@@ -1688,49 +1691,54 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
-int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- struct in_device *in_dev, u32 *itag)
+enum skb_drop_reason
+ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag)
{
- int err;
+ enum skb_drop_reason reason;
/* Primary sanity checks. */
if (!in_dev)
- return -EINVAL;
+ return SKB_DROP_REASON_NOT_SPECIFIED;
- if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
- skb->protocol != htons(ETH_P_IP))
- return -EINVAL;
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ return SKB_DROP_REASON_IP_INVALID_SOURCE;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return SKB_DROP_REASON_INVALID_PROTO;
if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
- return -EINVAL;
+ return SKB_DROP_REASON_IP_LOCALNET;
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr) &&
ip_hdr(skb)->protocol != IPPROTO_IGMP)
- return -EINVAL;
+ return SKB_DROP_REASON_IP_INVALID_SOURCE;
} else {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
- in_dev, itag);
- if (err < 0)
- return err;
+ reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
+ dev, in_dev, itag);
+ if (reason)
+ return reason;
}
- return 0;
+ return SKB_NOT_DROPPED_YET;
}
/* called in rcu_read_lock() section */
-static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, int our)
+static enum skb_drop_reason
+ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev, int our)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
unsigned int flags = RTCF_MULTICAST;
+ enum skb_drop_reason reason;
struct rtable *rth;
u32 itag = 0;
- int err;
- err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
- if (err)
- return err;
+ reason = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev,
+ &itag);
+ if (reason)
+ return reason;
if (our)
flags |= RTCF_LOCAL;
@@ -1741,7 +1749,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
false);
if (!rth)
- return -ENOBUFS;
+ return SKB_DROP_REASON_NOMEM;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
@@ -1757,7 +1765,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb_dst_drop(skb);
skb_dst_set(skb, &rth->dst);
- return 0;
+ return SKB_NOT_DROPPED_YET;
}
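
Editorial note: the route.c conversions in this area replace int error codes with enum skb_drop_reason, where the zero value (SKB_NOT_DROPPED_YET) means success, so validation results can be tested with a bare "if (reason)" and passed up unchanged. A compact illustration of that convention; the enum values below are invented for the sketch:

#include <stdio.h>

/* Zero means "keep going"; anything else names why the packet was dropped. */
enum drop_reason {
	NOT_DROPPED_YET = 0,
	DROP_INVALID_SOURCE,
	DROP_LOCALNET,
};

static enum drop_reason validate_source(unsigned int saddr)
{
	if (saddr == 0xffffffff)
		return DROP_INVALID_SOURCE;   /* limited broadcast as source */
	if ((saddr >> 24) == 127)
		return DROP_LOCALNET;         /* loopback source on the wire */
	return NOT_DROPPED_YET;
}

static enum drop_reason input_path(unsigned int saddr)
{
	enum drop_reason reason;

	reason = validate_source(saddr);
	if (reason)                           /* nonzero means drop: pass it up */
		return reason;
	return NOT_DROPPED_YET;               /* continue processing */
}

int main(void)
{
	printf("10.0.0.1  -> %d\n", input_path(0x0a000001));
	printf("127.0.0.1 -> %d\n", input_path(0x7f000001));
	printf("broadcast -> %d\n", input_path(0xffffffff));
	return 0;
}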
@@ -1787,11 +1795,12 @@ static void ip_handle_martian_source(struct net_device *dev,
}
/* called in rcu_read_lock() section */
-static int __mkroute_input(struct sk_buff *skb,
- const struct fib_result *res,
- struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos)
+static enum skb_drop_reason
+__mkroute_input(struct sk_buff *skb, const struct fib_result *res,
+ struct in_device *in_dev, __be32 daddr,
+ __be32 saddr, dscp_t dscp)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
struct net_device *dev = nhc->nhc_dev;
struct fib_nh_exception *fnhe;
@@ -1805,12 +1814,13 @@ static int __mkroute_input(struct sk_buff *skb,
out_dev = __in_dev_get_rcu(dev);
if (!out_dev) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
- return -EINVAL;
+ return reason;
}
- err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
+ err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res),
in_dev->dev, in_dev, &itag);
if (err < 0) {
+ reason = -err;
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
@@ -1838,7 +1848,7 @@ static int __mkroute_input(struct sk_buff *skb,
*/
if (out_dev == in_dev &&
IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
- err = -EINVAL;
+ reason = SKB_DROP_REASON_ARP_PVLAN_DISABLE;
goto cleanup;
}
}
@@ -1861,7 +1871,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth = rt_dst_alloc(out_dev->dev, 0, res->type,
IN_DEV_ORCONF(out_dev, NOXFRM));
if (!rth) {
- err = -ENOBUFS;
+ reason = SKB_DROP_REASON_NOMEM;
goto cleanup;
}
@@ -1875,9 +1885,9 @@ static int __mkroute_input(struct sk_buff *skb,
lwtunnel_set_redirect(&rth->dst);
skb_dst_set(skb, &rth->dst);
out:
- err = 0;
- cleanup:
- return err;
+ reason = SKB_NOT_DROPPED_YET;
+cleanup:
+ return reason;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -1946,7 +1956,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net,
hash_keys.ports.dst = keys.ports.dst;
*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 fib_multipath_custom_hash_inner(const struct net *net,
@@ -1995,7 +2005,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 fib_multipath_custom_hash_skb(const struct net *net,
@@ -2032,7 +2042,7 @@ static u32 fib_multipath_custom_hash_fl4(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = fl4->fl4_dport;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
/* if skb is set it will be used and fl4 can be NULL */
@@ -2053,7 +2063,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 1:
/* skb is currently provided only when forwarding */
@@ -2087,7 +2097,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.ports.dst = fl4->fl4_dport;
hash_keys.basic.ip_proto = fl4->flowi4_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -2118,7 +2128,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 3:
if (skb)
@@ -2135,11 +2145,10 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
-static int ip_mkroute_input(struct sk_buff *skb,
- struct fib_result *res,
- struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos,
- struct flow_keys *hkeys)
+static enum skb_drop_reason
+ip_mkroute_input(struct sk_buff *skb, struct fib_result *res,
+ struct in_device *in_dev, __be32 daddr,
+ __be32 saddr, dscp_t dscp, struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && fib_info_num_path(res->fi) > 1) {
@@ -2151,47 +2160,57 @@ static int ip_mkroute_input(struct sk_buff *skb,
#endif
/* create a routing cache entry */
- return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
+ return __mkroute_input(skb, res, in_dev, daddr, saddr, dscp);
}
/* Implements all the saddr-related checks as ip_route_input_slow(),
* assuming daddr is valid and the destination is not a local broadcast one.
* Uses the provided hint instead of performing a route lookup.
*/
-int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- const struct sk_buff *hint)
+enum skb_drop_reason
+ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ const struct sk_buff *hint)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct rtable *rt = skb_rtable(hint);
struct net *net = dev_net(dev);
- int err = -EINVAL;
u32 tag = 0;
- if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ if (!in_dev)
+ return reason;
+
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
+ reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
goto martian_source;
+ }
- if (ipv4_is_zeronet(saddr))
+ if (ipv4_is_zeronet(saddr)) {
+ reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
goto martian_source;
+ }
- if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
+ if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
+ reason = SKB_DROP_REASON_IP_LOCALNET;
goto martian_source;
+ }
if (rt->rt_type != RTN_LOCAL)
goto skip_validate_source;
- tos &= IPTOS_RT_MASK;
- err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
- if (err < 0)
+ reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev,
+ in_dev, &tag);
+ if (reason)
goto martian_source;
skip_validate_source:
skb_dst_copy(skb, hint);
- return 0;
+ return SKB_NOT_DROPPED_YET;
martian_source:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
- return err;
+ return reason;
}
/* get device for dst_alloc with local routes */
@@ -2220,10 +2239,12 @@ static struct net_device *ip_rt_get_dev(struct net *net,
* called with rcu_read_lock()
*/
-static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- struct fib_result *res)
+static enum skb_drop_reason
+ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct fib_result *res)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct flow_keys *flkeys = NULL, _flkeys;
struct net *net = dev_net(dev);
@@ -2251,8 +2272,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.flowi4_tun_key.tun_id = 0;
skb_dst_drop(skb);
- if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
+ reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
goto martian_source;
+ }
res->fi = NULL;
res->table = NULL;
@@ -2262,21 +2285,29 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
/* Accept zero addresses only to limited broadcast;
* I even do not know to fix it or not. Waiting for complains :-)
*/
- if (ipv4_is_zeronet(saddr))
+ if (ipv4_is_zeronet(saddr)) {
+ reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
goto martian_source;
+ }
- if (ipv4_is_zeronet(daddr))
+ if (ipv4_is_zeronet(daddr)) {
+ reason = SKB_DROP_REASON_IP_INVALID_DEST;
goto martian_destination;
+ }
/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
* and call it once if daddr or/and saddr are loopback addresses
*/
if (ipv4_is_loopback(daddr)) {
- if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
+ if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
+ reason = SKB_DROP_REASON_IP_LOCALNET;
goto martian_destination;
+ }
} else if (ipv4_is_loopback(saddr)) {
- if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
+ if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
+ reason = SKB_DROP_REASON_IP_LOCALNET;
goto martian_source;
+ }
}
/*
@@ -2286,7 +2317,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.flowi4_oif = 0;
fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
- fl4.flowi4_tos = tos;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
fl4.daddr = daddr;
@@ -2313,15 +2344,16 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
/* not do cache if bc_forwarding is enabled */
- if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
+ err = -EINVAL;
if (res->type == RTN_LOCAL) {
- err = fib_validate_source(skb, saddr, daddr, tos,
- 0, dev, in_dev, &itag);
- if (err < 0)
+ reason = fib_validate_source_reason(skb, saddr, daddr, dscp,
+ 0, dev, in_dev, &itag);
+ if (reason)
goto martian_source;
goto local_input;
}
@@ -2330,21 +2362,28 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
err = -EHOSTUNREACH;
goto no_route;
}
- if (res->type != RTN_UNICAST)
+ if (res->type != RTN_UNICAST) {
+ reason = SKB_DROP_REASON_IP_INVALID_DEST;
goto martian_destination;
+ }
make_route:
- err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
-out: return err;
+ reason = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp,
+ flkeys);
+
+out:
+ return reason;
brd_input:
- if (skb->protocol != htons(ETH_P_IP))
- goto e_inval;
+ if (skb->protocol != htons(ETH_P_IP)) {
+ reason = SKB_DROP_REASON_INVALID_PROTO;
+ goto out;
+ }
if (!ipv4_is_zeronet(saddr)) {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
- in_dev, &itag);
- if (err < 0)
+ reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
+ dev, in_dev, &itag);
+ if (reason)
goto martian_source;
}
flags |= RTCF_BROADCAST;
@@ -2362,7 +2401,7 @@ local_input:
rth = rcu_dereference(nhc->nhc_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
- err = 0;
+ reason = SKB_NOT_DROPPED_YET;
goto out;
}
}
@@ -2399,7 +2438,7 @@ local_input:
rt_add_uncached_list(rth);
}
skb_dst_set(skb, &rth->dst);
- err = 0;
+ reason = SKB_NOT_DROPPED_YET;
goto out;
no_route:
@@ -2419,13 +2458,10 @@ martian_destination:
net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
-
-e_inval:
- err = -EINVAL;
goto out;
e_nobufs:
- err = -ENOBUFS;
+ reason = SKB_DROP_REASON_NOMEM;
goto out;
martian_source:
@@ -2434,8 +2470,10 @@ martian_source:
}
/* called with rcu_read_lock held */
-static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, struct fib_result *res)
+static enum skb_drop_reason
+ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct fib_result *res)
{
/* Multicast recognition logic is moved from route cache to here.
* The problem was that too many Ethernet cards have broken/missing
@@ -2449,12 +2487,13 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
* route cache entry is created eventually.
*/
if (ipv4_is_multicast(daddr)) {
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct in_device *in_dev = __in_dev_get_rcu(dev);
int our = 0;
- int err = -EINVAL;
if (!in_dev)
- return err;
+ return reason;
+
our = ip_check_mc_rcu(in_dev, daddr, saddr,
ip_hdr(skb)->protocol);
@@ -2475,27 +2514,27 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
IN_DEV_MFORWARD(in_dev))
#endif
) {
- err = ip_route_input_mc(skb, daddr, saddr,
- tos, dev, our);
+ reason = ip_route_input_mc(skb, daddr, saddr, dscp,
+ dev, our);
}
- return err;
+ return reason;
}
- return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
+ return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res);
}
-int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+enum skb_drop_reason ip_route_input_noref(struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, dscp_t dscp,
+ struct net_device *dev)
{
+ enum skb_drop_reason reason;
struct fib_result res;
- int err;
- tos &= IPTOS_RT_MASK;
rcu_read_lock();
- err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
+ reason = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res);
rcu_read_unlock();
- return err;
+ return reason;
}
EXPORT_SYMBOL(ip_route_input_noref);
@@ -2638,7 +2677,7 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
struct rtable *rth;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
- ip_rt_fix_tos(fl4);
+ fl4->flowi4_tos &= INET_DSCP_MASK;
rcu_read_lock();
rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
@@ -2831,7 +2870,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rtable *ort = (struct rtable *) dst_orig;
+ struct rtable *ort = dst_rtable(dst_orig);
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
@@ -2876,9 +2915,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
if (flp4->flowi4_proto) {
flp4->flowi4_oif = rt->dst.dev->ifindex;
- rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
- flowi4_to_flowi(flp4),
- sk, 0);
+ rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
+ flowi4_to_flowi(flp4),
+ sk, 0));
}
return rt;
@@ -2887,9 +2926,9 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
- struct rtable *rt, u32 table_id, struct flowi4 *fl4,
- struct sk_buff *skb, u32 portid, u32 seq,
- unsigned int flags)
+ struct rtable *rt, u32 table_id, dscp_t dscp,
+ struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
+ u32 seq, unsigned int flags)
{
struct rtmsg *r;
struct nlmsghdr *nlh;
@@ -2905,7 +2944,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
- r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
+ r->rtm_tos = inet_dscp_to_dsfield(dscp);
r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
@@ -2993,7 +3032,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) &&
!ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, portid);
@@ -3055,7 +3094,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
goto next;
err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
- table_id, NULL, skb,
+ table_id, 0, NULL, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, flags);
if (err)
@@ -3243,6 +3282,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct flowi4 fl4 = {};
__be32 dst = 0;
__be32 src = 0;
+ dscp_t dscp;
kuid_t uid;
u32 iif;
int err;
@@ -3253,10 +3293,11 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
return err;
rtm = nlmsg_data(nlh);
- src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
- dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
- iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
- mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+ src = nla_get_in_addr_default(tb[RTA_SRC], 0);
+ dst = nla_get_in_addr_default(tb[RTA_DST], 0);
+ iif = nla_get_u32_default(tb[RTA_IIF], 0);
+ mark = nla_get_u32_default(tb[RTA_MARK], 0);
+ dscp = inet_dsfield_to_dscp(rtm->rtm_tos);
if (tb[RTA_UID])
uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
else
@@ -3281,8 +3322,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.daddr = dst;
fl4.saddr = src;
- fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
- fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4.flowi4_oif = nla_get_u32_default(tb[RTA_OIF], 0);
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
if (sport)
@@ -3305,9 +3346,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.flowi4_iif = iif; /* for rt_fill_info */
skb->dev = dev;
skb->mark = mark;
- err = ip_route_input_rcu(skb, dst, src,
- rtm->rtm_tos & IPTOS_RT_MASK, dev,
- &res);
+ err = ip_route_input_rcu(skb, dst, src, dscp, dev,
+ &res) ? -EINVAL : 0;
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
@@ -3351,7 +3391,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fri.tb_id = table_id;
fri.dst = res.prefix;
fri.dst_len = res.prefixlen;
- fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
+ fri.dscp = res.dscp;
fri.type = rt->rt_type;
fri.offload = 0;
fri.trap = 0;
@@ -3378,8 +3418,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
} else {
- err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
- NETLINK_CB(in_skb).portid,
+ err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4,
+ skb, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
}
if (err < 0)
@@ -3408,7 +3448,7 @@ static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
-static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
+static int ipv4_sysctl_rtcache_flush(const struct ctl_table *__ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)__ctl->extra1;
@@ -3509,7 +3549,6 @@ static struct ctl_table ipv4_route_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
static const char ipv4_route_flush_procname[] = "flush";
@@ -3543,7 +3582,6 @@ static struct ctl_table ipv4_route_netns_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { },
};
static __net_init int sysctl_route_net_init(struct net *net)
@@ -3561,16 +3599,14 @@ static __net_init int sysctl_route_net_init(struct net *net)
/* Don't export non-whitelisted sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
- if (tbl[0].procname != ipv4_route_flush_procname) {
- tbl[0].procname = NULL;
+ if (tbl[0].procname != ipv4_route_flush_procname)
table_size = 0;
- }
}
/* Update the variables to point into the current struct net
* except for the first element flush
*/
- for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
+ for (i = 1; i < table_size; i++)
tbl[i].data += (void *)net - (void *)&init_net;
}
tbl[0].extra1 = net;
@@ -3590,7 +3626,7 @@ err_dup:
static __net_exit void sysctl_route_net_exit(struct net *net)
{
- struct ctl_table *tbl;
+ const struct ctl_table *tbl;
tbl = net->ipv4.route_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.route_hdr);
@@ -3658,6 +3694,11 @@ static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
+static const struct rtnl_msg_handler ip_rt_rtnl_msg_handlers[] __initconst = {
+ {.protocol = PF_INET, .msgtype = RTM_GETROUTE,
+ .doit = inet_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+};
+
int __init ip_rt_init(void)
{
void *idents_hash;
@@ -3684,7 +3725,6 @@ int __init ip_rt_init(void)
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
- INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -3693,9 +3733,8 @@ int __init ip_rt_init(void)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
- ipv4_dst_ops.kmem_cachep =
- kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
@@ -3717,8 +3756,7 @@ int __init ip_rt_init(void)
xfrm_init();
xfrm4_init();
#endif
- rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
+ rtnl_register_many(ip_rt_rtnl_msg_handlers);
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 61f1c96cfe63..1948d15f1f28 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -51,15 +51,6 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
count, &syncookie_secret[c]);
}
-/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
-static u64 tcp_ns_to_ts(bool usec_ts, u64 val)
-{
- if (usec_ts)
- return div_u64(val, NSEC_PER_USEC);
-
- return div_u64(val, NSEC_PER_MSEC);
-}
-
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
@@ -304,6 +295,24 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
return 0;
}
+#if IS_ENABLED(CONFIG_BPF)
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct request_sock *req = inet_reqsk(skb->sk);
+
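+	/* The request socket was attached to skb->sk by the BPF syncookie
+	 * path (see cookie_bpf_ok()); detach it from the skb before
+	 * initializing it here.
+	 */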
+ skb->sk = NULL;
+ skb->destructor = NULL;
+
+ if (cookie_tcp_reqsk_init(sk, skb, req)) {
+ reqsk_free(req);
+ req = NULL;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(cookie_bpf_check);
+#endif
+
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *tcp_opt,
@@ -399,16 +408,23 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
@@ -420,8 +436,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
*/
RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
tcp_ao_syncookie(sk, skb, req, AF_INET);
@@ -438,11 +456,14 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
+ if (IS_ERR(rt)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
/* Try to redo what tcp_v4_send_synack did. */
- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :
+ dst_metric(&rt->dst, RTAX_WINDOW);
/* limit the window selection if the user enforce a smaller rx buffer */
full_space = tcp_full_space(sk);
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
@@ -454,19 +475,27 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+	/* req->syncookie is set to true only if the ACK was validated
+	 * by a BPF kfunc; in that case, rcv_wscale is already configured.
+ */
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being set up
* Normal sockets get it right from inet_csk_route_child_sock()
*/
- if (ret)
- inet_sk(ret)->cork.fl.u.ip4 = fl4;
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
+ inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ sk_skb_reason_drop(sk, skb, reason);
return NULL;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e4f16a7dcc1..42cb5dc9cb24 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -45,6 +45,7 @@ static unsigned int tcp_child_ehash_entries_max = 16 * 1024 * 1024;
static unsigned int udp_child_hash_entries_max = UDP_HTABLE_SIZE_MAX;
static int tcp_plb_max_rounds = 31;
static int tcp_plb_max_cong_thresh = 256;
+static unsigned int tcp_tw_reuse_delay_max = TCP_PAWS_MSL * MSEC_PER_SEC;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -62,7 +63,7 @@ static void set_local_port_range(struct net *net, unsigned int low, unsigned int
}
/* Validate changes from /proc interface. */
-static int ipv4_local_port_range(struct ctl_table *table, int write,
+static int ipv4_local_port_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = table->data;
@@ -96,7 +97,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
}
/* Validate changes from /proc interface. */
-static int ipv4_privileged_ports(struct ctl_table *table, int write,
+static int ipv4_privileged_ports(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -130,7 +131,8 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
return ret;
}
-static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
+static void inet_get_ping_group_range_table(const struct ctl_table *table,
+ kgid_t *low, kgid_t *high)
{
kgid_t *data = table->data;
struct net *net =
@@ -145,7 +147,8 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
}
/* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
+static void set_ping_group_range(const struct ctl_table *table,
+ kgid_t low, kgid_t high)
{
kgid_t *data = table->data;
struct net *net =
@@ -157,7 +160,7 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
}
/* Validate changes from /proc interface. */
-static int ipv4_ping_group_range(struct ctl_table *table, int write,
+static int ipv4_ping_group_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct user_namespace *user_ns = current_user_ns();
@@ -192,7 +195,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
return ret;
}
-static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
+static int ipv4_fwd_update_priority(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -208,7 +211,7 @@ static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
return ret;
}
-static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
+static int proc_tcp_congestion_control(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(ctl->data, struct net,
@@ -228,7 +231,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
+static int proc_tcp_available_congestion_control(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -244,7 +247,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
return ret;
}
-static int proc_allowed_congestion_control(struct ctl_table *ctl,
+static int proc_allowed_congestion_control(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -281,7 +284,7 @@ static int sscanf_key(char *buf, __le32 *key)
return ret;
}
-static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
+static int proc_tcp_fastopen_key(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -352,7 +355,7 @@ bad_key:
return ret;
}
-static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
+static int proc_tfo_blackhole_detect_timeout(const struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -367,7 +370,7 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
return ret;
}
-static int proc_tcp_available_ulp(struct ctl_table *ctl,
+static int proc_tcp_available_ulp(const struct ctl_table *ctl,
int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -384,7 +387,7 @@ static int proc_tcp_available_ulp(struct ctl_table *ctl,
return ret;
}
-static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
+static int proc_tcp_ehash_entries(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -408,7 +411,7 @@ static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}
-static int proc_udp_hash_entries(struct ctl_table *table, int write,
+static int proc_udp_hash_entries(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(table->data, struct net,
@@ -432,7 +435,7 @@ static int proc_udp_hash_entries(struct ctl_table *table, int write,
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
+static int proc_fib_multipath_hash_policy(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -447,7 +450,7 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
return ret;
}
-static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
+static int proc_fib_multipath_hash_fields(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -462,6 +465,61 @@ static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
return ret;
}
+
+static u32 proc_fib_multipath_hash_rand_seed __ro_after_init;
+
+static void proc_fib_multipath_hash_init_rand_seed(void)
+{
+ get_random_bytes(&proc_fib_multipath_hash_rand_seed,
+ sizeof(proc_fib_multipath_hash_rand_seed));
+}
+
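+/* Install the multipath hash seed for a netns; a user-supplied seed of 0
+ * falls back to the random seed generated at init time.
+ */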
+static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+{
+ struct sysctl_fib_multipath_hash_seed new = {
+ .user_seed = user_seed,
+ .mp_seed = (user_seed ? user_seed :
+ proc_fib_multipath_hash_rand_seed),
+ };
+
+ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
+}
+
+static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct sysctl_fib_multipath_hash_seed *mphs;
+ struct net *net = table->data;
+ struct ctl_table tmp;
+ u32 user_seed;
+ int ret;
+
+ mphs = &net->ipv4.sysctl_fib_multipath_hash_seed;
+ user_seed = mphs->user_seed;
+
+ tmp = *table;
+ tmp.data = &user_seed;
+
+ ret = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+ if (write && ret == 0) {
+ proc_fib_multipath_hash_set_seed(net, user_seed);
+ call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);
+ }
+
+ return ret;
+}
+#else
+
+static void proc_fib_multipath_hash_init_rand_seed(void)
+{
+}
+
+static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+{
+}
+
#endif
static struct ctl_table ipv4_table[] = {
@@ -544,22 +602,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_tcp_available_ulp,
},
{
- .procname = "icmp_msgs_per_sec",
- .data = &sysctl_icmp_msgs_per_sec,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
- .procname = "icmp_msgs_burst",
- .data = &sysctl_icmp_msgs_burst,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -575,7 +617,6 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &sysctl_fib_sync_mem_min,
.extra2 = &sysctl_fib_sync_mem_max,
},
- { }
};
static struct ctl_table ipv4_net_table[] = {
@@ -646,6 +687,22 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "icmp_msgs_per_sec",
+ .data = &init_net.ipv4.sysctl_icmp_msgs_per_sec,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "icmp_msgs_burst",
+ .data = &init_net.ipv4.sysctl_icmp_msgs_burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
.procname = "ping_group_range",
.data = &init_net.ipv4.ping_group_range.range,
.maxlen = sizeof(gid_t)*2,
@@ -1010,6 +1067,15 @@ static struct ctl_table ipv4_net_table[] = {
.extra2 = SYSCTL_TWO,
},
{
+ .procname = "tcp_tw_reuse_delay",
+ .data = &init_net.ipv4.sysctl_tcp_tw_reuse_delay,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &tcp_tw_reuse_delay_max,
+ },
+ {
.procname = "tcp_max_syn_backlog",
.data = &init_net.ipv4.sysctl_max_syn_backlog,
.maxlen = sizeof(int),
@@ -1071,6 +1137,13 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &fib_multipath_hash_fields_all_mask,
},
+ {
+ .procname = "fib_multipath_hash_seed",
+ .data = &init_net,
+ .maxlen = sizeof(u32),
+ .mode = 0644,
+ .proc_handler = proc_fib_multipath_hash_seed,
+ },
#endif
{
.procname = "ip_unprivileged_port_start",
@@ -1502,11 +1575,19 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ONE,
},
- { }
+ {
+ .procname = "tcp_rto_min_us",
+ .data = &init_net.ipv4.sysctl_tcp_rto_min_us,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
};
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
+ size_t table_size = ARRAY_SIZE(ipv4_net_table);
struct ctl_table *table;
table = ipv4_net_table;
@@ -1517,7 +1598,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
if (!table)
goto err_alloc;
- for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
+ for (i = 0; i < table_size; i++) {
if (table[i].data) {
/* Update the variables to point into
* the current struct net
@@ -1533,7 +1614,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
}
net->ipv4.ipv4_hdr = register_net_sysctl_sz(net, "net/ipv4", table,
- ARRAY_SIZE(ipv4_net_table));
+ table_size);
if (!net->ipv4.ipv4_hdr)
goto err_reg;
@@ -1541,6 +1622,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
if (!net->ipv4.sysctl_local_reserved_ports)
goto err_ports;
+ proc_fib_multipath_hash_set_seed(net, 0);
+
return 0;
err_ports:
@@ -1554,7 +1637,7 @@ err_alloc:
static __net_exit void ipv4_sysctl_exit_net(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
kfree(net->ipv4.sysctl_local_reserved_ports);
table = net->ipv4.ipv4_hdr->ctl_table_arg;
@@ -1575,6 +1658,8 @@ static __init int sysctl_ipv4_init(void)
if (!hdr)
return -ENOMEM;
+ proc_fib_multipath_hash_init_rand_seed();
+
if (register_pernet_subsys(&ipv4_sysctl_ops)) {
unregister_net_sysctl_table(hdr);
return -ENOMEM;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c82dc42f57c6..57df7c1d2faa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -272,13 +272,20 @@
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
+#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>
+#include <net/rstreason.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include <net/hotdata.h>
+#include <trace/events/tcp.h>
+#include <net/rps.h>
+
+#include "../core/devmem.h"
/* Track pending CMSGs. */
enum {
@@ -289,6 +296,9 @@ enum {
DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
+DEFINE_PER_CPU(u32, tcp_tw_isn);
+EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);
+
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@ -413,6 +423,7 @@ void tcp_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ int rto_min_us;
tp->out_of_order_queue = RB_ROOT;
sk->tcp_rtx_queue = RB_ROOT;
@@ -421,7 +432,8 @@ void tcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&tp->tsorted_sent_queue);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
- icsk->icsk_rto_min = TCP_RTO_MIN;
+ rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
+ icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
icsk->icsk_delack_max = TCP_DELACK_MAX;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
@@ -461,18 +473,20 @@ void tcp_init_sock(struct sock *sk)
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
sk_sockets_allocated_inc(sk);
+ xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1);
}
EXPORT_SYMBOL(tcp_init_sock);
-static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
+static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
+ u32 tsflags = sockc->tsflags;
if (tsflags && skb) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
- sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
+ sock_tx_timestamp(sk, sockc, &shinfo->tx_flags);
if (tsflags & SOF_TIMESTAMPING_TX_ACK)
tcb->txstamp_ack = 1;
if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
@@ -591,7 +605,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
mask |= EPOLLOUT | EPOLLWRNORM;
}
- /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
smp_rmb();
if (READ_ONCE(sk->sk_err) ||
!skb_queue_empty_lockless(&sk->sk_error_queue))
@@ -974,7 +988,7 @@ int tcp_wmem_schedule(struct sock *sk, int copy)
* Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
* to guarantee some progress.
*/
- left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+ left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
if (left > 0)
sk_forced_mem_schedule(sk, min(left, copy));
return min(copy, sk->sk_forward_alloc);
@@ -1158,6 +1172,9 @@ new_segment:
process_backlog++;
+#ifdef CONFIG_SKB_DECRYPTED
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
tcp_skb_entail(sk, skb);
copy = size_goal;
@@ -1183,7 +1200,7 @@ new_segment:
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i >= READ_ONCE(sysctl_max_skb_frags)) {
+ if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1305,7 +1322,7 @@ wait_for_space:
out:
if (copied) {
- tcp_tx_timestamp(sk, sockc.tsflags);
+ tcp_tx_timestamp(sk, &sockc);
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
}
out_nopush:
@@ -1415,8 +1432,6 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
struct sk_buff *skb;
int copied = 0, err = 0;
- /* XXX -- need to support SO_PEEK_OFF */
-
skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
if (err)
@@ -1550,12 +1565,13 @@ EXPORT_SYMBOL(tcp_recv_skb);
* or for 'peeking' the socket using this routine
* (although both would be easy to implement).
*/
-int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor)
+static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor, bool noack,
+ u32 *copied_seq)
{
struct sk_buff *skb;
struct tcp_sock *tp = tcp_sk(sk);
- u32 seq = tp->copied_seq;
+ u32 seq = *copied_seq;
u32 offset;
int copied = 0;
@@ -1609,9 +1625,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
tcp_eat_recv_skb(sk, skb);
if (!desc->count)
break;
- WRITE_ONCE(tp->copied_seq, seq);
+ WRITE_ONCE(*copied_seq, seq);
}
- WRITE_ONCE(tp->copied_seq, seq);
+ WRITE_ONCE(*copied_seq, seq);
+
+ if (noack)
+ goto out;
tcp_rcv_space_adjust(sk);
@@ -1620,10 +1639,25 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
tcp_recv_skb(sk, seq, &offset);
tcp_cleanup_rbuf(sk, copied);
}
+out:
return copied;
}
+
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor)
+{
+ return __tcp_read_sock(sk, desc, recv_actor, false,
+ &tcp_sk(sk)->copied_seq);
+}
EXPORT_SYMBOL(tcp_read_sock);
+int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor, bool noack,
+ u32 *copied_seq)
+{
+ return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq);
+}
+
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
struct sk_buff *skb;
@@ -1720,7 +1754,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
space = tcp_space_from_win(sk, val);
if (space > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, space);
- tcp_sk(sk)->window_clamp = val;
+ WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
}
return 0;
}
@@ -2149,6 +2183,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
skb = tcp_recv_skb(sk, seq, &offset);
}
+ if (!skb_frags_readable(skb))
+ break;
+
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, tss);
zc->msg_flags |= TCP_CMSG_TS;
@@ -2166,6 +2203,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
break;
}
page = skb_frag_page(frags);
+ if (WARN_ON_ONCE(!page))
+ break;
+
prefetchw(page);
pages[pages_to_map++] = page;
length += PAGE_SIZE;
@@ -2224,6 +2264,7 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss)
{
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
bool has_timestamping = false;
if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
@@ -2263,14 +2304,18 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
}
}
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[2] = (struct timespec64) {0};
@@ -2306,6 +2351,214 @@ static int tcp_inq_hint(struct sock *sk)
return inq;
}
+/* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */
+struct tcp_xa_pool {
+ u8 max; /* max <= MAX_SKB_FRAGS */
+ u8 idx; /* idx <= max */
+ __u32 tokens[MAX_SKB_FRAGS];
+ netmem_ref netmems[MAX_SKB_FRAGS];
+};
+
+static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p)
+{
+ int i;
+
+ /* Commit part that has been copied to user space. */
+ for (i = 0; i < p->idx; i++)
+ __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY,
+ (__force void *)p->netmems[i], GFP_KERNEL);
+ /* Rollback what has been pre-allocated and is no longer needed. */
+ for (; i < p->max; i++)
+ __xa_erase(&sk->sk_user_frags, p->tokens[i]);
+
+ p->max = 0;
+ p->idx = 0;
+}
+
+static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p)
+{
+ if (!p->max)
+ return;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ xa_unlock_bh(&sk->sk_user_frags);
+}
+
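+/* Make sure the pool has pre-allocated tokens left: commit what has been
+ * consumed so far and reserve up to @max_frags fresh xarray entries.
+ */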
+static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p,
+ unsigned int max_frags)
+{
+ int err, k;
+
+ if (p->idx < p->max)
+ return 0;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ for (k = 0; k < max_frags; k++) {
+ err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k],
+ XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
+ if (err)
+ break;
+ }
+
+ xa_unlock_bh(&sk->sk_user_frags);
+
+ p->max = k;
+ p->idx = 0;
+ return k ? 0 : err;
+}
+
+/* On error, returns a negative errno. On success, returns the number of bytes
+ * sent to the user. May not consume all of @remaining_len.
+ */
+static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ unsigned int offset, struct msghdr *msg,
+ int remaining_len)
+{
+ struct dmabuf_cmsg dmabuf_cmsg = { 0 };
+ struct tcp_xa_pool tcp_xa_pool;
+ unsigned int start;
+ int i, copy, n;
+ int sent = 0;
+ int err = 0;
+
+ tcp_xa_pool.max = 0;
+ tcp_xa_pool.idx = 0;
+ do {
+ start = skb_headlen(skb);
+
+ if (skb_frags_readable(skb)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Copy header. */
+ copy = start - offset;
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ n = copy_to_iter(skb->data + offset, copy,
+ &msg->msg_iter);
+ if (n != copy) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ offset += copy;
+ remaining_len -= copy;
+
+			/* First, a dmabuf_cmsg for the number of bytes copied
+			 * to the user buffer.
+ */
+ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ dmabuf_cmsg.frag_size = copy;
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_LINEAR,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
+ goto out;
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+
+		/* After that, send information about the dmabuf pages through a
+		 * sequence of cmsgs
+ */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct net_iov *niov;
+ u64 frag_offset;
+ int end;
+
+ /* !skb_frags_readable() should indicate that ALL the
+ * frags in this skb are dmabuf net_iovs. We're checking
+ * for that flag above, but also check individual frags
+ * here. If the tcp stack is not setting
+ * skb_frags_readable() correctly, we still don't want
+ * to crash here.
+ */
+ if (!skb_frag_net_iov(frag)) {
+ net_err_ratelimited("Found non-dmabuf skb with net_iov");
+ err = -ENODEV;
+ goto out;
+ }
+
+ niov = skb_frag_net_iov(frag);
+ end = start + skb_frag_size(frag);
+ copy = end - offset;
+
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ frag_offset = net_iov_virtual_addr(niov) +
+ skb_frag_off(frag) + offset -
+ start;
+ dmabuf_cmsg.frag_offset = frag_offset;
+ dmabuf_cmsg.frag_size = copy;
+ err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
+ skb_shinfo(skb)->nr_frags - i);
+ if (err)
+ goto out;
+
+ /* Will perform the exchange later */
+ dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx];
+ dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov);
+
+ offset += copy;
+ remaining_len -= copy;
+
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_DMABUF,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
+ goto out;
+
+ atomic_long_inc(&niov->pp_ref_count);
+ tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+ start = end;
+ }
+
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!remaining_len)
+ goto out;
+
+ /* if remaining_len is not satisfied yet, we need to go to the
+ * next frag in the frag_list to satisfy remaining_len.
+ */
+ skb = skb_shinfo(skb)->frag_list ?: skb->next;
+
+ offset = offset - start;
+ } while (skb);
+
+ if (remaining_len) {
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!sent)
+ sent = err;
+
+ return sent;
+}
+
/*
* This routine copies from a sock struct into the user buffer.
*
@@ -2319,6 +2572,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
int *cmsg_flags)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int last_copied_dmabuf = -1; /* uninitialized */
int copied = 0;
u32 peek_seq;
u32 *seq;
@@ -2327,6 +2581,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
int target; /* Read at least this many bytes */
long timeo;
struct sk_buff *skb, *last;
+ u32 peek_offset = 0;
u32 urg_hole = 0;
err = -ENOTCONN;
@@ -2360,7 +2615,8 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
seq = &tp->copied_seq;
if (flags & MSG_PEEK) {
- peek_seq = tp->copied_seq;
+ peek_offset = max(sk_peek_offset(sk, flags), 0);
+ peek_seq = tp->copied_seq + peek_offset;
seq = &peek_seq;
}
@@ -2463,11 +2719,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
}
if ((flags & MSG_PEEK) &&
- (peek_seq - copied - urg_hole != tp->copied_seq)) {
+ (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) {
net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
current->comm,
task_pid_nr(current));
- peek_seq = tp->copied_seq;
+ peek_seq = tp->copied_seq + peek_offset;
}
continue;
@@ -2496,19 +2752,51 @@ found_ok_skb:
}
if (!(flags & MSG_TRUNC)) {
- err = skb_copy_datagram_msg(skb, offset, msg, used);
- if (err) {
- /* Exception. Bailout! */
- if (!copied)
- copied = -EFAULT;
+ if (last_copied_dmabuf != -1 &&
+ last_copied_dmabuf != !skb_frags_readable(skb))
break;
+
+ if (skb_frags_readable(skb)) {
+ err = skb_copy_datagram_msg(skb, offset, msg,
+ used);
+ if (err) {
+ /* Exception. Bailout! */
+ if (!copied)
+ copied = -EFAULT;
+ break;
+ }
+ } else {
+ if (!(flags & MSG_SOCK_DEVMEM)) {
+ /* dmabuf skbs can only be received
+ * with the MSG_SOCK_DEVMEM flag.
+ */
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+
+ err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
+ used);
+ if (err <= 0) {
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+ used = err;
}
}
+ last_copied_dmabuf = !skb_frags_readable(skb);
+
WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
-
+ if (flags & MSG_PEEK)
+ sk_peek_offset_fwd(sk, used);
+ else
+ sk_peek_offset_bwd(sk, used);
tcp_rcv_space_adjust(sk);
skip_copy:
@@ -2636,6 +2924,10 @@ void tcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
break;
+ case TCP_CLOSE_WAIT:
+ if (oldstate == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2647,7 +2939,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
fallthrough;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
@@ -2709,7 +3001,7 @@ void tcp_shutdown(struct sock *sk, int how)
/* If we've already sent a FIN, or it's a closed state, skip this. */
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_SENT |
- TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
+ TCPF_CLOSE_WAIT)) {
/* Clear out any half completed packets. FIN if needed. */
if (tcp_close_state(sk))
tcp_send_fin(sk);
@@ -2743,7 +3035,15 @@ static bool tcp_too_many_orphans(int shift)
READ_ONCE(sysctl_tcp_max_orphans);
}
-bool tcp_check_oom(struct sock *sk, int shift)
+static bool tcp_out_of_memory(const struct sock *sk)
+{
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
+ return true;
+ return false;
+}
+
+bool tcp_check_oom(const struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
@@ -2804,7 +3104,8 @@ void __tcp_close(struct sock *sk, long timeout)
/* Unread data was tossed, zap the connection. */
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
- tcp_send_active_reset(sk, sk->sk_allocation);
+ tcp_send_active_reset(sk, sk->sk_allocation,
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
@@ -2818,7 +3119,7 @@ void __tcp_close(struct sock *sk, long timeout)
* machine. State transitions:
*
* TCP_ESTABLISHED -> TCP_FIN_WAIT1
- * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
+ * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult)
* TCP_CLOSE_WAIT -> TCP_LAST_ACK
*
* are legal only when FIN has been sent (i.e. in window),
@@ -2878,7 +3179,8 @@ adjudge_to_death:
struct tcp_sock *tp = tcp_sk(sk);
if (READ_ONCE(tp->linger2) < 0) {
tcp_set_state(sk, TCP_CLOSE);
- tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_TCP_ABORT_ON_LINGER);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONLINGER);
} else {
@@ -2896,7 +3198,8 @@ adjudge_to_death:
if (sk->sk_state != TCP_CLOSE) {
if (tcp_check_oom(sk, 0)) {
tcp_set_state(sk, TCP_CLOSE);
- tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONMEMORY);
} else if (!check_net(sock_net(sk))) {
@@ -2930,6 +3233,8 @@ void tcp_close(struct sock *sk, long timeout)
lock_sock(sk);
__tcp_close(sk, timeout);
release_sock(sk);
+ if (!sk->sk_net_refcnt)
+ inet_csk_clear_xmit_timers_sync(sk);
sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
@@ -2992,13 +3297,16 @@ int tcp_disconnect(struct sock *sk, int flags)
inet_csk_listen_stop(sk);
} else if (unlikely(tp->repair)) {
WRITE_ONCE(sk->sk_err, ECONNABORTED);
- } else if (tcp_need_reset(old_state) ||
- (tp->snd_nxt != tp->write_seq &&
- (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
+ } else if (tcp_need_reset(old_state)) {
+ tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE);
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
+ } else if (tp->snd_nxt != tp->write_seq &&
+ (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
/* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
- tcp_send_active_reset(sk, gfp_any());
+ tcp_send_active_reset(sk, gfp_any(),
+ SK_RST_REASON_TCP_DISCONNECT_WITH_DATA);
WRITE_ONCE(sk->sk_err, ECONNRESET);
} else if (old_state == TCP_SYN_SENT)
WRITE_ONCE(sk->sk_err, ECONNRESET);
@@ -3007,6 +3315,7 @@ int tcp_disconnect(struct sock *sk, int flags)
__skb_queue_purge(&sk->sk_receive_queue);
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
WRITE_ONCE(tp->urg_data, 0);
+ sk_set_peek_off(sk, -1);
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
@@ -3040,7 +3349,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
- if (icsk->icsk_ca_ops->release)
+ if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
icsk->icsk_ca_initialized = 0;
@@ -3055,7 +3364,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
+ dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
@@ -3376,7 +3685,7 @@ int tcp_set_window_clamp(struct sock *sk, int val)
if (!val) {
if (sk->sk_state != TCP_CLOSE)
return -EINVAL;
- tp->window_clamp = 0;
+ WRITE_ONCE(tp->window_clamp, 0);
} else {
u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
@@ -3385,7 +3694,7 @@ int tcp_set_window_clamp(struct sock *sk, int val)
if (new_window_clamp == old_window_clamp)
return 0;
- tp->window_clamp = new_window_clamp;
+ WRITE_ONCE(tp->window_clamp, new_window_clamp);
if (new_window_clamp < old_window_clamp) {
/* need to apply the reserved mem provisioning only
* when shrinking the window clamp
@@ -4010,11 +4319,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
if (copy_from_sockptr(&len, optlen, sizeof(int)))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
@@ -4054,7 +4363,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
- val = tp->window_clamp;
+ val = READ_ONCE(tp->window_clamp);
break;
case TCP_INFO: {
struct tcp_info info;
@@ -4339,6 +4648,9 @@ zerocopy_rcv_out:
return err;
}
+ case TCP_IS_MPTCP:
+ val = 0;
+ break;
default:
return -ENOPROTOOPT;
}
@@ -4427,7 +4739,7 @@ int tcp_md5_hash_key(struct tcp_sigpool *hp,
EXPORT_SYMBOL(tcp_md5_hash_key);
/* Called with rcu_read_lock() */
-enum skb_drop_reason
+static enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
int family, int l3index, const __u8 *hash_location)
@@ -4447,7 +4759,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
if (!key && hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- tcp_hash_fail("Unexpected MD5 Hash found", family, skb, "");
+ trace_tcp_hash_md5_unexpected(sk, skb);
return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
}
@@ -4462,29 +4774,90 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
- if (family == AF_INET) {
- tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d",
- genhash ? "tcp_v4_calc_md5_hash failed"
- : "", l3index);
- } else {
- if (genhash) {
- tcp_hash_fail("MD5 Hash failed",
- AF_INET6, skb, "L3 index %d",
- l3index);
- } else {
- tcp_hash_fail("MD5 Hash mismatch",
- AF_INET6, skb, "L3 index %d",
- l3index);
- }
- }
+ trace_tcp_hash_md5_mismatch(sk, skb);
return SKB_DROP_REASON_TCP_MD5FAILURE;
}
return SKB_NOT_DROPPED_YET;
}
-EXPORT_SYMBOL(tcp_inbound_md5_hash);
+#else
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int l3index, const __u8 *hash_location)
+{
+ return SKB_NOT_DROPPED_YET;
+}
#endif
+/* Called with rcu_read_lock() */
+enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_ao_hdr *aoh;
+ const __u8 *md5_location;
+ int l3index;
+
+	/* Invalid option, or any of the auth options appears twice */
+ if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
+ trace_tcp_hash_bad_header(sk, skb);
+ return SKB_DROP_REASON_TCP_AUTH_HDR;
+ }
+
+ if (req) {
+ if (tcp_rsk_used_ao(req) != !!aoh) {
+ u8 keyid, rnext, maclen;
+
+ if (aoh) {
+ keyid = aoh->keyid;
+ rnext = aoh->rnext_keyid;
+ maclen = tcp_ao_hdr_maclen(aoh);
+ } else {
+ keyid = rnext = maclen = 0;
+ }
+
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+ trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+ }
+
+	/* If sdif is set, the packet ingressed via a device in an L3
+	 * domain and dif is set to the l3mdev
+ */
+ l3index = sdif ? dif : 0;
+
+ /* Fast path: unsigned segments */
+ if (likely(!md5_location && !aoh)) {
+		/* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
+		 * for the remote peer. On an established TCP-AO connection
+		 * the last key cannot be removed, so there's
+		 * always at least one current_key.
+ */
+ if (tcp_ao_required(sk, saddr, family, l3index, true)) {
+ trace_tcp_hash_ao_required(sk, skb);
+ return SKB_DROP_REASON_TCP_AONOTFOUND;
+ }
+ if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+ trace_tcp_hash_md5_required(sk, skb);
+ return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ if (aoh)
+ return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
+
+ return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+ l3index, md5_location);
+}
+EXPORT_SYMBOL_GPL(tcp_inbound_hash);
+
void tcp_done(struct sock *sk)
{
struct request_sock *req;
@@ -4539,6 +4912,13 @@ int tcp_abort(struct sock *sk, int err)
/* Don't race with userspace socket closes such as tcp_close. */
lock_sock(sk);
+ /* Avoid closing the same socket twice. */
+ if (sk->sk_state == TCP_CLOSE) {
+ if (!has_current_bpf_ctx())
+ release_sock(sk);
+ return -ENOENT;
+ }
+
if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
inet_csk_listen_stop(sk);
@@ -4548,19 +4928,13 @@ int tcp_abort(struct sock *sk, int err)
local_bh_disable();
bh_lock_sock(sk);
- if (!sock_flag(sk, SOCK_DEAD)) {
- WRITE_ONCE(sk->sk_err, err);
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
- sk_error_report(sk);
- if (tcp_need_reset(sk->sk_state))
- tcp_send_active_reset(sk, GFP_ATOMIC);
- tcp_done(sk);
- }
+ if (tcp_need_reset(sk->sk_state))
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_TCP_STATE);
+ tcp_done_with_error(sk, err);
bh_unlock_sock(sk);
local_bh_enable();
- tcp_write_queue_purge(sk);
if (!has_current_bpf_ctx())
release_sock(sk);
return 0;
@@ -4645,16 +5019,16 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 113);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89);
/* TXRX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una);
@@ -4667,7 +5041,11 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76);
+
+ /* 32bit arches with 8byte alignment on u64 fields might need padding
+ * before tcp_clock_cache.
+ */
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
/* RX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 87db432c6bb4..bbb8d5f0eae7 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -16,6 +16,7 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/icmp.h>
+#include <trace/events/tcp.h>
DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ);
@@ -108,12 +109,13 @@ bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code)
* it's known that the keys in ao_info are matching peer's
* family/address/VRF/etc.
*/
-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
+ struct tcp_ao_info *ao,
int sndid, int rcvid)
{
struct tcp_ao_key *key;
- hlist_for_each_entry_rcu(key, &ao->head, node) {
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
if ((sndid >= 0 && key->sndid != sndid) ||
(rcvid >= 0 && key->rcvid != rcvid))
continue;
@@ -204,7 +206,7 @@ static struct tcp_ao_key *__tcp_ao_do_lookup(const struct sock *sk, int l3index,
if (!ao)
return NULL;
- hlist_for_each_entry_rcu(key, &ao->head, node) {
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
u8 prefixlen = min(prefix, key->prefixlen);
if (!tcp_ao_key_cmp(key, l3index, addr, prefixlen,
@@ -266,32 +268,49 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head)
kfree_sensitive(key);
}
-void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+static void tcp_ao_info_free_rcu(struct rcu_head *head)
{
- struct tcp_ao_info *ao;
+ struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
struct tcp_ao_key *key;
struct hlist_node *n;
+ hlist_for_each_entry_safe(key, n, &ao->head, node) {
+ hlist_del(&key->node);
+ tcp_sigpool_release(key->tcp_sigpool_id);
+ kfree_sensitive(key);
+ }
+ kfree(ao);
+ static_branch_slow_dec_deferred(&tcp_ao_needed);
+}
+
+static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao)
+{
+ size_t total_ao_sk_mem = 0;
+ struct tcp_ao_key *key;
+
+ hlist_for_each_entry(key, &ao->head, node)
+ total_ao_sk_mem += tcp_ao_sizeof_key(key);
+ atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc);
+}
+
+void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+{
+ struct tcp_ao_info *ao;
+
if (twsk) {
ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1);
- tcp_twsk(sk)->ao_info = NULL;
+ rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL);
} else {
ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1);
- tcp_sk(sk)->ao_info = NULL;
+ rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL);
}
if (!ao || !refcount_dec_and_test(&ao->refcnt))
return;
- hlist_for_each_entry_safe(key, n, &ao->head, node) {
- hlist_del_rcu(&key->node);
- if (!twsk)
- atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
- call_rcu(&key->rcu, tcp_ao_key_free_rcu);
- }
-
- kfree_rcu(ao, rcu);
- static_branch_slow_dec_deferred(&tcp_ao_needed);
+ if (!twsk)
+ tcp_ao_sk_omem_free(sk, ao);
+ call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
}
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
@@ -509,9 +528,9 @@ static int tcp_ao_hash_header(struct tcp_sigpool *hp,
bool exclude_options, u8 *hash,
int hash_offset, int hash_len)
{
- int err, len = th->doff << 2;
struct scatterlist sg;
u8 *hdr = hp->scratch;
+ int err, len;
/* We are not allowed to change tcphdr, make a local copy */
if (exclude_options) {
@@ -775,7 +794,7 @@ int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
if (!ao_info)
return -ENOENT;
- *key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
+ *key = tcp_ao_established_key(sk, ao_info, aoh->rnext_keyid, -1);
if (!*key)
return -ENOENT;
*traffic_key = snd_other_key(*key);
@@ -884,17 +903,16 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key,
u8 *traffic_key, u8 *phash, u32 sne, int l3index)
{
- u8 maclen = aoh->length - sizeof(struct tcp_ao_hdr);
const struct tcphdr *th = tcp_hdr(skb);
+ u8 maclen = tcp_ao_hdr_maclen(aoh);
void *hash_buf = NULL;
if (maclen != tcp_ao_maclen(key)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
atomic64_inc(&info->counters.pkt_bad);
atomic64_inc(&key->pkt_bad);
- tcp_hash_fail("AO hash wrong length", family, skb,
- "%u != %d L3index: %d", maclen,
- tcp_ao_maclen(key), l3index);
+ trace_tcp_ao_wrong_maclen(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
@@ -909,8 +927,8 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
atomic64_inc(&info->counters.pkt_bad);
atomic64_inc(&key->pkt_bad);
- tcp_hash_fail("AO hash mismatch", family, skb,
- "L3index: %d", l3index);
+ trace_tcp_ao_mismatch(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
kfree(hash_buf);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
@@ -927,19 +945,21 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
int l3index, const struct tcp_ao_hdr *aoh)
{
const struct tcphdr *th = tcp_hdr(skb);
+ u8 maclen = tcp_ao_hdr_maclen(aoh);
u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */
struct tcp_ao_info *info;
enum skb_drop_reason ret;
struct tcp_ao_key *key;
__be32 sisn, disn;
u8 *traffic_key;
+ int state;
u32 sne = 0;
info = rcu_dereference(tcp_sk(sk)->ao_info);
if (!info) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
- tcp_hash_fail("AO key not found", family, skb,
- "keyid: %u L3index: %d", aoh->keyid, l3index);
+ trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOUNEXPECTED;
}
@@ -948,8 +968,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
disn = 0;
}
+ state = READ_ONCE(sk->sk_state);
/* Fast-path */
- if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+ if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
enum skb_drop_reason err;
struct tcp_ao_key *current_key;
@@ -959,7 +980,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
*/
key = READ_ONCE(info->rnext_key);
if (key->rcvid != aoh->keyid) {
- key = tcp_ao_established_key(info, -1, aoh->keyid);
+ key = tcp_ao_established_key(sk, info, -1, aoh->keyid);
if (!key)
goto key_not_found;
}
@@ -979,8 +1000,11 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
current_key = READ_ONCE(info->current_key);
/* Key rotation: the peer asks us to use new key (RNext) */
if (unlikely(aoh->rnext_keyid != current_key->sndid)) {
+ trace_tcp_ao_rnext_request(sk, skb, current_key->sndid,
+ aoh->rnext_keyid,
+ tcp_ao_hdr_maclen(aoh));
/* If the key is not found we do nothing. */
- key = tcp_ao_established_key(info, aoh->rnext_keyid, -1);
+ key = tcp_ao_established_key(sk, info, aoh->rnext_keyid, -1);
if (key)
/* pairs with tcp_ao_del_cmd */
WRITE_ONCE(info->current_key, key);
@@ -988,6 +1012,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
return SKB_NOT_DROPPED_YET;
}
+ if (unlikely(state == TCP_CLOSE))
+ return SKB_DROP_REASON_TCP_CLOSE;
+
/* Lookup key based on peer address and keyid.
* current_key and rnext_key must not be used on tcp listen
* sockets as otherwise:
@@ -1001,7 +1028,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
if (th->syn && !th->ack)
goto verify_hash;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+ if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
/* Make the initial syn the likely case here */
if (unlikely(req)) {
sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1045,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
/* no way to figure out initial sisn/disn - drop */
return SKB_DROP_REASON_TCP_FLAGS;
}
- } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
disn = info->lisn;
if (th->syn || th->rst)
sisn = th->seq;
else
sisn = info->risn;
} else {
- WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+ WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
verify_hash:
@@ -1041,8 +1068,8 @@ verify_hash:
key_not_found:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
atomic64_inc(&info->counters.key_not_found);
- tcp_hash_fail("Requested by the peer AO key id not found",
- family, skb, "L3index: %d", l3index);
+ trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOKEYNOTFOUND;
}
@@ -1068,6 +1095,7 @@ void tcp_ao_connect_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_ao_info *ao_info;
+ struct hlist_node *next;
union tcp_ao_addr *addr;
struct tcp_ao_key *key;
int family, l3index;
@@ -1090,7 +1118,7 @@ void tcp_ao_connect_init(struct sock *sk)
l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
sk->sk_bound_dev_if);
- hlist_for_each_entry_rcu(key, &ao_info->head, node) {
+ hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
continue;
@@ -1136,7 +1164,7 @@ void tcp_ao_established(struct sock *sk)
if (!ao)
return;
- hlist_for_each_entry_rcu(key, &ao->head, node)
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
tcp_ao_cache_traffic_keys(sk, ao, key);
}
@@ -1153,7 +1181,7 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
ao->rcv_sne = 0;
- hlist_for_each_entry_rcu(key, &ao->head, node)
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
tcp_ao_cache_traffic_keys(sk, ao, key);
}
@@ -1229,14 +1257,14 @@ int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
key_head = rcu_dereference(hlist_first_rcu(&new_ao->head));
first_key = hlist_entry_safe(key_head, struct tcp_ao_key, node);
- key = tcp_ao_established_key(new_ao, tcp_rsk(req)->ao_keyid, -1);
+ key = tcp_ao_established_key(req_to_sk(req), new_ao, tcp_rsk(req)->ao_keyid, -1);
if (key)
new_ao->current_key = key;
else
new_ao->current_key = first_key;
/* set rnext_key */
- key = tcp_ao_established_key(new_ao, -1, tcp_rsk(req)->ao_rcv_next);
+ key = tcp_ao_established_key(req_to_sk(req), new_ao, -1, tcp_rsk(req)->ao_rcv_next);
if (key)
new_ao->rnext_key = key;
else
@@ -1830,12 +1858,12 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
* if there's any.
*/
if (cmd.set_current) {
- new_current = tcp_ao_established_key(ao_info, cmd.current_key, -1);
+ new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
if (!new_current)
return -ENOENT;
}
if (cmd.set_rnext) {
- new_rnext = tcp_ao_established_key(ao_info, -1, cmd.rnext);
+ new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
if (!new_rnext)
return -ENOENT;
}
@@ -1875,7 +1903,8 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
* "It is presumed that an MKT affecting a particular
* connection cannot be destroyed during an active connection"
*/
- hlist_for_each_entry_rcu(key, &ao_info->head, node) {
+ hlist_for_each_entry_rcu(key, &ao_info->head, node,
+ lockdep_sock_is_held(sk)) {
if (cmd.sndid != key->sndid ||
cmd.rcvid != key->rcvid)
continue;
@@ -1962,8 +1991,10 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
first = true;
}
- if (cmd.ao_required && tcp_ao_required_verify(sk))
- return -EKEYREJECTED;
+ if (cmd.ao_required && tcp_ao_required_verify(sk)) {
+ err = -EKEYREJECTED;
+ goto out;
+ }
/* For sockets in TCP_CLOSED it's possible set keys that aren't
* matching the future peer (address/port/VRF/etc),
@@ -1971,14 +2002,14 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
* if there's any.
*/
if (cmd.set_current) {
- new_current = tcp_ao_established_key(ao_info, cmd.current_key, -1);
+ new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
if (!new_current) {
err = -ENOENT;
goto out;
}
}
if (cmd.set_rnext) {
- new_rnext = tcp_ao_established_key(ao_info, -1, cmd.rnext);
+ new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
if (!new_rnext) {
err = -ENOENT;
goto out;
@@ -2072,7 +2103,8 @@ int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen)
* The layout of the fields in the user and kernel structures is expected to
* be the same (including in the 32bit vs 64bit case).
*/
-static int tcp_ao_copy_mkts_to_user(struct tcp_ao_info *ao_info,
+static int tcp_ao_copy_mkts_to_user(const struct sock *sk,
+ struct tcp_ao_info *ao_info,
sockptr_t optval, sockptr_t optlen)
{
struct tcp_ao_getsockopt opt_in, opt_out;
@@ -2200,7 +2232,8 @@ static int tcp_ao_copy_mkts_to_user(struct tcp_ao_info *ao_info,
/* May change in RX, while we're dumping, pre-fetch it */
current_key = READ_ONCE(ao_info->current_key);
- hlist_for_each_entry_rcu(key, &ao_info->head, node) {
+ hlist_for_each_entry_rcu(key, &ao_info->head, node,
+ lockdep_sock_is_held(sk)) {
if (opt_in.get_all)
goto match;
@@ -2280,7 +2313,7 @@ int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen)
if (!ao_info)
return -ENOENT;
- return tcp_ao_copy_mkts_to_user(ao_info, optval, optlen);
+ return tcp_ao_copy_mkts_to_user(sk, ao_info, optval, optlen);
}
int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
@@ -2367,7 +2400,7 @@ int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen)
WRITE_ONCE(ao->snd_sne, cmd.snd_sne);
WRITE_ONCE(ao->rcv_sne, cmd.rcv_sne);
- hlist_for_each_entry_rcu(key, &ao->head, node)
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
tcp_ao_cache_traffic_keys(sk, ao, key);
return 0;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 22358032dd48..760941e55153 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -1024,7 +1024,7 @@ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
bbr_update_gains(sk);
}
-__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
+__bpf_kfunc static void bbr_main(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs)
{
struct bbr *bbr = inet_csk_ca(sk);
u32 bw;
@@ -1155,9 +1155,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state,
};
-BTF_SET8_START(tcp_bbr_check_kfunc_ids)
-#ifdef CONFIG_X86
-#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids)
BTF_ID_FLAGS(func, bbr_init)
BTF_ID_FLAGS(func, bbr_main)
BTF_ID_FLAGS(func, bbr_sndbuf_expand)
@@ -1166,9 +1164,7 @@ BTF_ID_FLAGS(func, bbr_cwnd_event)
BTF_ID_FLAGS(func, bbr_ssthresh)
BTF_ID_FLAGS(func, bbr_min_tso_segs)
BTF_ID_FLAGS(func, bbr_set_state)
-#endif
-#endif
-BTF_SET8_END(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 53b0d62fd2c2..ba581785adb4 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -30,7 +30,7 @@ void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
}
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
- struct sk_msg *msg, u32 apply_bytes, int flags)
+ struct sk_msg *msg, u32 apply_bytes)
{
bool apply = apply_bytes;
struct scatterlist *sge;
@@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
sge = sk_msg_elem(msg, i);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
- if (!sk_wmem_schedule(sk, size)) {
+ if (!__sk_rmem_schedule(sk, size, false)) {
if (!copied)
ret = -ENOMEM;
break;
}
sk_mem_charge(sk, size);
+ atomic_add(size, &sk->sk_rmem_alloc);
sk_msg_xfer(tmp, msg, i, size);
copied += size;
if (sge->length)
@@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
if (!ret) {
msg->sg.start = i;
- sk_psock_queue_msg(psock, tmp);
+ if (!sk_psock_queue_msg(psock, tmp))
+ atomic_sub(copied, &sk->sk_rmem_alloc);
sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
@@ -167,7 +169,7 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
if (unlikely(!psock))
return -EPIPE;
- ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
+ ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes) :
tcp_bpf_push_locked(sk, msg, bytes, flags, false);
sk_psock_put(sk, psock);
return ret;
@@ -221,11 +223,11 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
int flags,
int *addr_len)
{
- struct tcp_sock *tcp = tcp_sk(sk);
int peek = flags & MSG_PEEK;
- u32 seq = tcp->copied_seq;
struct sk_psock *psock;
+ struct tcp_sock *tcp;
int copied = 0;
+ u32 seq;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
@@ -238,7 +240,8 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
return tcp_recvmsg(sk, msg, len, flags, addr_len);
lock_sock(sk);
-
+ tcp = tcp_sk(sk);
+ seq = tcp->copied_seq;
/* We may have received data on the sk_receive_queue pre-accept and
* then we can not use read_skb in this context because we haven't
* assigned a sk_socket yet so have no link to the ops. The work-around
@@ -440,7 +443,6 @@ more_data:
cork = true;
psock->cork = NULL;
}
- sk_msg_return(sk, msg, tosend);
release_sock(sk);
origsize = msg->sg.size;
@@ -452,8 +454,9 @@ more_data:
sock_put(sk_redir);
lock_sock(sk);
+ sk_mem_uncharge(sk, sent);
if (unlikely(ret < 0)) {
- int free = sk_msg_free_nocharge(sk, msg);
+ int free = sk_msg_free(sk, msg);
if (!cork)
*copied -= free;
@@ -467,7 +470,7 @@ more_data:
break;
case __SK_DROP:
default:
- sk_msg_free_partial(sk, msg, tosend);
+ sk_msg_free(sk, msg);
sk_msg_apply_bytes(psock, tosend);
*copied -= (tosend + delta);
return -EACCES;
@@ -483,11 +486,8 @@ more_data:
}
if (msg &&
msg->sg.data[msg->sg.start].page_link &&
- msg->sg.data[msg->sg.start].length) {
- if (eval == __SK_REDIRECT)
- sk_mem_charge(sk, tosend - sent);
+ msg->sg.data[msg->sg.start].length)
goto more_data;
- }
}
return ret;
}
@@ -495,7 +495,7 @@ more_data:
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
- int copied = 0, err = 0;
+ int copied = 0, err = 0, ret = 0;
struct sk_psock *psock;
long timeo;
int flags;
@@ -538,14 +538,14 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
copy = msg_tx->sg.size - osize;
}
- err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
copy);
- if (err < 0) {
+ if (ret < 0) {
sk_msg_trim(sk, msg_tx, osize);
goto out_err;
}
- copied += copy;
+ copied += ret;
if (psock->cork_bytes) {
if (size > psock->cork_bytes)
psock->cork_bytes = 0;
@@ -577,7 +577,7 @@ out_err:
err = sk_stream_error(sk, msg->msg_flags, err);
release_sock(sk);
sk_psock_put(sk, psock);
- return copied ? copied : err;
+ return copied > 0 ? copied : err;
}
enum {
@@ -646,6 +646,42 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}
+#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor)
+{
+ struct sock *sk = strp->sk;
+ struct sk_psock *psock;
+ struct tcp_sock *tp;
+ int copied = 0;
+
+ tp = tcp_sk(sk);
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (WARN_ON_ONCE(!psock)) {
+ desc->error = -EINVAL;
+ goto out;
+ }
+
+ psock->ingress_bytes = 0;
+ copied = tcp_read_sock_noack(sk, desc, recv_actor, true,
+ &psock->copied_seq);
+ if (copied < 0)
+ goto out;
+ /* recv_actor may redirect skb to another socket (SK_REDIRECT) or
+ * just put skb into ingress queue of current socket (SK_PASS).
+ * For SK_REDIRECT, we need to ack the frame immediately but for
+ * SK_PASS, we want to delay the ack until tcp_bpf_recvmsg_parser().
+ */
+ tp->copied_seq = psock->copied_seq - psock->ingress_bytes;
+ tcp_rcv_space_adjust(sk);
+ __tcp_cleanup_rbuf(sk, copied - psock->ingress_bytes);
+out:
+ rcu_read_unlock();
+ return copied;
+}
+#endif /* CONFIG_BPF_STREAM_PARSER */
+
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1b34050a7538..df758adbb445 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
}
/* Must be called with rcu lock held */
-static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
- const char *name)
+static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
{
struct tcp_congestion_ops *ca = tcp_ca_find(name);
@@ -146,11 +145,7 @@ EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
struct tcp_congestion_ops *existing;
- int ret;
-
- ret = tcp_validate_congestion_control(ca);
- if (ret)
- return ret;
+ int ret = 0;
ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
@@ -182,7 +177,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong
return ret;
}
-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
const struct tcp_congestion_ops *ca;
u32 key = TCP_CA_UNSPEC;
@@ -190,7 +185,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
might_sleep();
rcu_read_lock();
- ca = tcp_ca_find_autoload(net, name);
+ ca = tcp_ca_find_autoload(name);
if (ca) {
key = ca->key;
*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
@@ -207,9 +202,10 @@ char *tcp_ca_get_name_by_key(u32 key, char *buffer)
rcu_read_lock();
ca = tcp_ca_find_key(key);
- if (ca)
- ret = strncpy(buffer, ca->name,
- TCP_CA_NAME_MAX);
+ if (ca) {
+ strscpy(buffer, ca->name, TCP_CA_NAME_MAX);
+ ret = buffer;
+ }
rcu_read_unlock();
return ret;
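
The change above swaps strncpy() for strscpy() so the returned congestion-control name is always NUL-terminated and truncation is detectable. A user-space stand-in for that contract (not the kernel implementation) looks like:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define TCP_CA_NAME_MAX 16

static ssize_t my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (!size)
		return -1;		/* kernel strscpy() returns -E2BIG */
	if (len == size) {		/* does not fit: truncate, keep NUL */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;
	}
	memcpy(dst, src, len + 1);	/* copy includes the NUL */
	return (ssize_t)len;
}

int main(void)
{
	char buf[TCP_CA_NAME_MAX];

	my_strscpy(buf, "cubic", sizeof(buf));
	printf("%s\n", buf);		/* always NUL-terminated */
	return 0;
}
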
@@ -274,8 +270,9 @@ void tcp_cleanup_congestion_control(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- if (icsk->icsk_ca_ops->release)
+ if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
+ icsk->icsk_ca_initialized = 0;
bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}
@@ -287,7 +284,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
int ret;
rcu_read_lock();
- ca = tcp_ca_find_autoload(net, name);
+ ca = tcp_ca_find_autoload(name);
if (!ca) {
ret = -ENOENT;
} else if (!bpf_try_module_get(ca, ca->owner)) {
@@ -342,7 +339,7 @@ void tcp_get_default_congestion_control(struct net *net, char *name)
rcu_read_lock();
ca = rcu_dereference(net->ipv4.tcp_congestion_control);
- strncpy(name, ca->name, TCP_CA_NAME_MAX);
+ strscpy(name, ca->name, TCP_CA_NAME_MAX);
rcu_read_unlock();
}
@@ -425,7 +422,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
if (!load)
ca = tcp_ca_find(name);
else
- ca = tcp_ca_find_autoload(sock_net(sk), name);
+ ca = tcp_ca_find_autoload(name);
/* No change asking for existing value */
if (ca == icsk->icsk_ca_ops) {
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0fd78ecb67e7..76c23675ae50 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -392,6 +392,10 @@ static void hystart_update(struct sock *sk, u32 delay)
if (after(tp->snd_una, ca->end_seq))
bictcp_hystart_reset(sk);
+ /* hystart triggers when cwnd is larger than some threshold */
+ if (tcp_snd_cwnd(tp) < hystart_low_window)
+ return;
+
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock_us(sk);
@@ -467,9 +471,7 @@ __bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample
if (ca->delay_min == 0 || ca->delay_min > delay)
ca->delay_min = delay;
- /* hystart triggers when cwnd is larger than some threshold */
- if (!ca->found && tcp_in_slow_start(tp) && hystart &&
- tcp_snd_cwnd(tp) >= hystart_low_window)
+ if (!ca->found && tcp_in_slow_start(tp) && hystart)
hystart_update(sk, delay);
}
@@ -485,18 +487,14 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic",
};
-BTF_SET8_START(tcp_cubic_check_kfunc_ids)
-#ifdef CONFIG_X86
-#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids)
BTF_ID_FLAGS(func, cubictcp_init)
BTF_ID_FLAGS(func, cubictcp_recalc_ssthresh)
BTF_ID_FLAGS(func, cubictcp_cong_avoid)
BTF_ID_FLAGS(func, cubictcp_state)
BTF_ID_FLAGS(func, cubictcp_cwnd_event)
BTF_ID_FLAGS(func, cubictcp_acked)
-#endif
-#endif
-BTF_SET8_END(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index bb23bb5b387a..8a45a4aea933 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -58,7 +58,18 @@ struct dctcp {
};
static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
-module_param(dctcp_shift_g, uint, 0644);
+
+static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, 0, 10);
+}
+
+static const struct kernel_param_ops dctcp_shift_g_ops = {
+ .set = dctcp_shift_g_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644);
MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
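
The dctcp hunk above routes writes to dctcp_shift_g through a setter that rejects values outside [0, 10] (param_set_uint_minmax) instead of storing them unchecked. A small user-space sketch of that validated-setter pattern, with parse_uint_minmax() as a stand-in for the kernel helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint_minmax(const char *val, unsigned int *out,
			     unsigned int min, unsigned int max)
{
	char *end;
	unsigned long v = strtoul(val, &end, 0);

	if (end == val || *end != '\0' || v < min || v > max)
		return -EINVAL;	/* reject: leave the stored value alone */
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int shift_g = 4;	/* g = 1/2^4, as in the module default */

	if (parse_uint_minmax("11", &shift_g, 0, 10))
		printf("rejected out-of-range value, shift_g stays %u\n", shift_g);
	if (!parse_uint_minmax("6", &shift_g, 0, 10))
		printf("accepted, shift_g=%u\n", shift_g);
	return 0;
}
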
@@ -260,18 +271,14 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno",
};
-BTF_SET8_START(tcp_dctcp_check_kfunc_ids)
-#ifdef CONFIG_X86
-#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids)
BTF_ID_FLAGS(func, dctcp_init)
BTF_ID_FLAGS(func, dctcp_update_alpha)
BTF_ID_FLAGS(func, dctcp_cwnd_event)
BTF_ID_FLAGS(func, dctcp_ssthresh)
BTF_ID_FLAGS(func, dctcp_cwnd_undo)
BTF_ID_FLAGS(func, dctcp_state)
-#endif
-#endif
-BTF_SET8_END(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 4cbe4b44425a..f428ecf9120f 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -222,6 +222,7 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler tcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = tcp_diag_dump,
.dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8ed54e7334a9..32b28fc21b63 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -49,7 +49,7 @@ void tcp_fastopen_ctx_destroy(struct net *net)
{
struct tcp_fastopen_context *ctxt;
- ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
+ ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL));
if (ctxt)
call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
@@ -80,9 +80,10 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
if (sk) {
q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
- octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
+ octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
} else {
- octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
+ octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx,
+ RCU_INITIALIZER(ctx)));
}
if (octx)
@@ -177,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
if (!skb)
return;
- skb_dst_drop(skb);
+ tcp_cleanup_skb(skb);
/* segs_in has been initialized to 1 in tcp_create_openreq_child().
* Hence, reset segs_in to 0 before calling tcp_segs_in()
* to avoid double counting. Also, tcp_segs_in() expects
@@ -194,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ tcp_add_receive_queue(sk, skb);
tp->syn_data_acked = 1;
/* u64_stats_update_begin(&tp->syncp) not needed here,
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 52b1f2665dfa..81b96331b2bb 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -185,7 +185,7 @@ static inline void htcp_alpha_update(struct htcp *ca)
u32 scale = (HZ << 3) / (10 * minRTT);
/* clamping ratio to interval [0.5,10]<<3 */
- scale = min(max(scale, 1U << 2), 10U << 3);
+ scale = clamp(scale, 1U << 2, 10U << 3);
factor = (factor << 3) / scale;
if (!factor)
factor = 1;
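
The htcp change above replaces the min(max(...)) pair with clamp(), which keeps scale inside [1U << 2, 10U << 3]. A tiny user-space check of the equivalence (the real kernel clamp() also type-checks its arguments):

#include <assert.h>
#include <stdio.h>

#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
	unsigned int scale;

	for (scale = 0; scale <= 100; scale++) {
		unsigned int a = clamp(scale, 1U << 2, 10U << 3);
		unsigned int b = scale < (1U << 2) ? (1U << 2) :
				 scale > (10U << 3) ? (10U << 3) : scale;
		assert(a == b);	/* same result as min(max(scale, 4), 80) */
	}
	printf("clamp keeps scale within [4, 80]\n");
	return 0;
}
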
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index df7b13f0e5e0..0cbf81bf3d45 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -72,9 +72,10 @@
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/tcp.h>
+#include <net/proto_memory.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/errqueue.h>
#include <trace/events/tcp.h>
#include <linux/jump_label_ratelimit.h>
@@ -237,9 +238,20 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
*/
if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
+ u8 old_ratio = tcp_sk(sk)->scaling_ratio;
do_div(val, skb->truesize);
tcp_sk(sk)->scaling_ratio = val ? val : 1;
+
+ if (old_ratio != tcp_sk(sk)->scaling_ratio) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ val = tcp_win_from_space(sk, sk->sk_rcvbuf);
+ tcp_set_window_clamp(sk, val);
+
+ if (tp->window_clamp < tp->rcvq_space.space)
+ tp->rcvq_space.space = tp->window_clamp;
+ }
}
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
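
The hunk above recomputes scaling_ratio from skb->len and skb->truesize and, when the ratio changes, re-derives the window clamp from sk_rcvbuf. A back-of-the-envelope sketch of that arithmetic, assuming TCP_RMEM_TO_WIN_SCALE is 8 and using a simplified stand-in for tcp_win_from_space(); all values are made up:

#include <stdint.h>
#include <stdio.h>

#define TCP_RMEM_TO_WIN_SCALE 8	/* assumption for this sketch */

int main(void)
{
	uint64_t payload = 1448;	/* skb->len */
	uint64_t truesize = 2304;	/* skb->truesize */
	uint64_t rcvbuf = 131072;	/* sk->sk_rcvbuf */

	/* Fixed-point fraction of truesize that is real payload. */
	uint64_t ratio = (payload << TCP_RMEM_TO_WIN_SCALE) / truesize;
	if (!ratio)
		ratio = 1;

	/* Simplified tcp_win_from_space(): how much advertised window
	 * this rcvbuf is worth once skb overhead is accounted for. */
	uint64_t win = (rcvbuf * ratio) >> TCP_RMEM_TO_WIN_SCALE;

	printf("scaling_ratio=%llu window=%llu bytes\n",
	       (unsigned long long)ratio, (unsigned long long)win);
	return 0;
}
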
@@ -563,19 +575,20 @@ static void tcp_init_buffer_space(struct sock *sk)
maxwin = tcp_full_space(sk);
if (tp->window_clamp >= maxwin) {
- tp->window_clamp = maxwin;
+ WRITE_ONCE(tp->window_clamp, maxwin);
if (tcp_app_win && maxwin > 4 * tp->advmss)
- tp->window_clamp = max(maxwin -
- (maxwin >> tcp_app_win),
- 4 * tp->advmss);
+ WRITE_ONCE(tp->window_clamp,
+ max(maxwin - (maxwin >> tcp_app_win),
+ 4 * tp->advmss));
}
/* Force reservation of one segment. */
if (tcp_app_win &&
tp->window_clamp > 2 * tp->advmss &&
tp->window_clamp + tp->advmss > maxwin)
- tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
+ WRITE_ONCE(tp->window_clamp,
+ max(2 * tp->advmss, maxwin - tp->advmss));
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_jiffies32;
@@ -773,7 +786,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make the window clamp follow along. */
- tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
+ WRITE_ONCE(tp->window_clamp,
+ tcp_win_from_space(sk, rcvbuf));
}
}
tp->rcvq_space.space = copied;
@@ -911,7 +925,7 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
tp->rtt_seq = tp->snd_nxt;
tp->mdev_max_us = tcp_rto_min_us(sk);
- tcp_bpf_rtt(sk);
+ tcp_bpf_rtt(sk, mrtt_us, srtt);
}
} else {
/* no previous measure. */
@@ -921,7 +935,7 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
tp->mdev_max_us = tp->rttvar_us;
tp->rtt_seq = tp->snd_nxt;
- tcp_bpf_rtt(sk);
+ tcp_bpf_rtt(sk, mrtt_us, srtt);
}
tp->srtt_us = max(1U, srtt);
}
@@ -1164,7 +1178,7 @@ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
* L|R 1 - orig is lost, retransmit is in flight.
* S|R 1 - orig reached receiver, retrans is still in flight.
* (L|S|R is logically valid, it could occur when L|R is sacked,
- * but it is equivalent to plain S and code short-curcuits it to S.
+ * but it is equivalent to plain S and code short-circuits it to S.
* L|S is logically invalid, it would mean -1 packet in flight 8))
*
* These 6 states form finite state machine, controlled by the following events:
@@ -2126,8 +2140,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
static inline void tcp_init_undo(struct tcp_sock *tp)
{
tp->undo_marker = tp->snd_una;
+
/* Retransmission still in flight may cause DSACKs later. */
- tp->undo_retrans = tp->retrans_out ? : -1;
+ /* First, account for regular retransmits in flight: */
+ tp->undo_retrans = tp->retrans_out;
+ /* Next, account for TLP retransmits in flight: */
+ if (tp->tlp_high_seq && tp->tlp_retrans)
+ tp->undo_retrans++;
+ /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
+ if (!tp->undo_retrans)
+ tp->undo_retrans = -1;
}
static bool tcp_is_rack(const struct sock *sk)
@@ -2206,6 +2228,7 @@ void tcp_enter_loss(struct sock *sk)
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
+ tp->tlp_high_seq = 0;
tcp_ecn_queue_cwr(tp);
/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
@@ -2456,8 +2479,22 @@ static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
*/
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
- return tp->retrans_stamp &&
- tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
+ const struct sock *sk = (const struct sock *)tp;
+
+ if (tp->retrans_stamp &&
+ tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
+ return true; /* got echoed TS before first retransmission */
+
+ /* Check if nothing was retransmitted (retrans_stamp==0), which may
+ * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
+ * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
+ * retrans_stamp even if we had retransmitted the SYN.
+ */
+ if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */
+ sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */
+ return true; /* nothing was retransmitted */
+
+ return false;
}
/* Undo procedures. */
@@ -2491,6 +2528,16 @@ static bool tcp_any_retrans_done(const struct sock *sk)
return false;
}
+/* If loss recovery is finished and there are no retransmits out in the
+ * network, then we clear retrans_stamp so that upon the next loss recovery
+ * retransmits_timed_out() and timestamp-undo are using the correct value.
+ */
+static void tcp_retrans_stamp_cleanup(struct sock *sk)
+{
+ if (!tcp_any_retrans_done(sk))
+ tcp_sk(sk)->retrans_stamp = 0;
+}
+
static void DBGUNDO(struct sock *sk, const char *msg)
{
#if FASTRETRANS_DEBUG > 1
@@ -2779,13 +2826,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
}
+/* Sometimes we deduce that packets have been dropped due to reasons other than
+ * congestion, like path MTU reductions or failed client TFO attempts. In these
+ * cases we call this function to retransmit as many packets as cwnd allows,
+ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
+ * non-zero value (and may do so in a later calling context due to TSQ), we
+ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
+ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
+ * are using the correct retrans_stamp and don't declare ETIMEDOUT
+ * prematurely).
+ */
+static void tcp_non_congestion_loss_retransmit(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (icsk->icsk_ca_state != TCP_CA_Loss) {
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = 0;
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ }
+ tcp_xmit_retransmit_queue(sk);
+}
+
/* Do a simple retransmit without using the backoff mechanisms in
* tcp_timer. This is used for path mtu discovery.
* The socket is already locked here.
*/
void tcp_simple_retransmit(struct sock *sk)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int mss;
@@ -2825,14 +2896,7 @@ void tcp_simple_retransmit(struct sock *sk)
* in network, but units changed and effective
* cwnd/ssthresh really reduced now.
*/
- if (icsk->icsk_ca_state != TCP_CA_Loss) {
- tp->high_seq = tp->snd_nxt;
- tp->snd_ssthresh = tcp_current_ssthresh(sk);
- tp->prior_ssthresh = 0;
- tp->undo_marker = 0;
- tcp_set_ca_state(sk, TCP_CA_Loss);
- }
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);
@@ -2841,6 +2905,9 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
struct tcp_sock *tp = tcp_sk(sk);
int mib_idx;
+ /* Start the clock with our fast retransmit, for undo and ETIMEDOUT. */
+ tcp_retrans_stamp_cleanup(sk);
+
if (tcp_is_reno(tp))
mib_idx = LINUX_MIB_TCPRENORECOVERY;
else
@@ -3057,7 +3124,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
return;
if (tcp_try_undo_dsack(sk))
- tcp_try_keep_open(sk);
+ tcp_try_to_open(sk, flag);
tcp_identify_packet_loss(sk, ack_flag);
if (icsk->icsk_ca_state != TCP_CA_Recovery) {
@@ -3539,7 +3606,7 @@ static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ca_ops->cong_control) {
- icsk->icsk_ca_ops->cong_control(sk, rs);
+ icsk->icsk_ca_ops->cong_control(sk, ack, flag, rs);
return;
}
@@ -3575,8 +3642,10 @@ static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
- if (ao && ack < tp->snd_una)
+ if (ao && ack < tp->snd_una) {
ao->snd_sne++;
+ trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne);
+ }
#endif
}
@@ -3601,8 +3670,10 @@ static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
- if (ao && seq < tp->rcv_nxt)
+ if (ao && seq < tp->rcv_nxt) {
ao->rcv_sne++;
+ trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne);
+ }
#endif
}
@@ -4204,6 +4275,13 @@ void tcp_parse_options(const struct net *net,
*/
break;
#endif
+#ifdef CONFIG_TCP_AO
+ case TCPOPT_AO:
+ /* TCP AO has already been checked
+ * (see tcp_inbound_ao_hash()).
+ */
+ break;
+#endif
case TCPOPT_FASTOPEN:
tcp_parse_fastopen_option(
opsize - TCPOLEN_FASTOPEN_BASE,
@@ -4378,34 +4456,40 @@ static u32 tcp_tsval_replay(const struct sock *sk)
return inet_csk(sk)->icsk_rto * 1200 / HZ;
}
-static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
+static enum skb_drop_reason tcp_disordered_ack_check(const struct sock *sk,
+ const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct tcphdr *th = tcp_hdr(skb);
- u32 seq = TCP_SKB_CB(skb)->seq;
+ SKB_DR_INIT(reason, TCP_RFC7323_PAWS);
u32 ack = TCP_SKB_CB(skb)->ack_seq;
+ u32 seq = TCP_SKB_CB(skb)->seq;
- return /* 1. Pure ACK with correct sequence number. */
- (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
+ /* 1. Is this not a pure ACK ? */
+ if (!th->ack || seq != TCP_SKB_CB(skb)->end_seq)
+ return reason;
- /* 2. ... and duplicate ACK. */
- ack == tp->snd_una &&
+ /* 2. Is its sequence not the expected one ? */
+ if (seq != tp->rcv_nxt)
+ return before(seq, tp->rcv_nxt) ?
+ SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK :
+ reason;
- /* 3. ... and does not update window. */
- !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
+ /* 3. Is this not a duplicate ACK ? */
+ if (ack != tp->snd_una)
+ return reason;
- /* 4. ... and sits in replay window. */
- (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <=
- tcp_tsval_replay(sk);
-}
+ /* 4. Is this updating the window ? */
+ if (tcp_may_update_window(tp, ack, seq, ntohs(th->window) <<
+ tp->rx_opt.snd_wscale))
+ return reason;
-static inline bool tcp_paws_discard(const struct sock *sk,
- const struct sk_buff *skb)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
+ /* 5. Is this not in the replay window ? */
+ if ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) >
+ tcp_tsval_replay(sk))
+ return reason;
- return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
- !tcp_disordered_ack(sk, skb);
+ return 0;
}
/* Check segment sequence number for validity.
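
The rewrite above turns the old single boolean conjunction into early returns that name the first failed check, and gives old pure ACKs (seq below rcv_nxt) their own drop reason so the caller can count them without sending a dupack. A simplified illustration of that shape; the enum names are invented, wrap-safe before() comparisons and the window-update/replay-window checks are omitted:

#include <stdio.h>

enum paws_verdict {
	PAWS_WAIVED = 0,	/* benign disordered ACK: accept despite PAWS */
	PAWS_REJECT,		/* generic PAWS rejection stands */
	PAWS_OLD_ACK,		/* pure ACK below rcv_nxt: counted, no dupack */
};

struct seg {
	int pure_ack;			/* ACK set, no payload */
	unsigned int seq, rcv_nxt;
	unsigned int ack, snd_una;
};

static enum paws_verdict disordered_ack_check(const struct seg *s)
{
	if (!s->pure_ack)
		return PAWS_REJECT;
	if (s->seq != s->rcv_nxt)
		return s->seq < s->rcv_nxt ? PAWS_OLD_ACK : PAWS_REJECT;
	if (s->ack != s->snd_una)
		return PAWS_REJECT;
	return PAWS_WAIVED;
}

int main(void)
{
	struct seg old_ack = { .pure_ack = 1, .seq = 90, .rcv_nxt = 100,
			       .ack = 50, .snd_una = 50 };

	printf("verdict=%d\n", disordered_ack_check(&old_ack)); /* PAWS_OLD_ACK */
	return 0;
}
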
@@ -4433,9 +4517,26 @@ static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
return SKB_NOT_DROPPED_YET;
}
+
+void tcp_done_with_error(struct sock *sk, int err)
+{
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ WRITE_ONCE(sk->sk_err, err);
+ smp_wmb();
+
+ tcp_write_queue_purge(sk);
+ tcp_done(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk_error_report(sk);
+}
+EXPORT_SYMBOL(tcp_done_with_error);
+
/* When we get a reset we do this. */
void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
+ int err;
+
trace_tcp_receive_reset(sk);
/* mptcp can't tell us to ignore reset pkts,
@@ -4447,24 +4548,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
case TCP_SYN_SENT:
- WRITE_ONCE(sk->sk_err, ECONNREFUSED);
+ err = ECONNREFUSED;
break;
case TCP_CLOSE_WAIT:
- WRITE_ONCE(sk->sk_err, EPIPE);
+ err = EPIPE;
break;
case TCP_CLOSE:
return;
default:
- WRITE_ONCE(sk->sk_err, ECONNRESET);
+ err = ECONNRESET;
}
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
-
- tcp_write_queue_purge(sk);
- tcp_done(sk);
-
- if (!sock_flag(sk, SOCK_DEAD))
- sk_error_report(sk);
+ tcp_done_with_error(sk, err);
}
/*
@@ -4800,14 +4894,9 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
return false;
- if (!mptcp_skb_can_collapse(to, from))
+ if (!tcp_skb_can_collapse_rx(to, from))
return false;
-#ifdef CONFIG_TLS_DEVICE
- if (from->decrypted != to->decrypted)
- return false;
-#endif
-
if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false;
@@ -4844,11 +4933,11 @@ static bool tcp_ooo_try_coalesce(struct sock *sk,
return res;
}
-static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
- enum skb_drop_reason reason)
+noinline_for_tracing static void
+tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
sk_drops_add(sk, skb);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
}
/* This one checks to see if we can put data from the
@@ -4887,7 +4976,7 @@ static void tcp_ofo_queue(struct sock *sk)
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ tcp_add_receive_queue(sk, skb);
else
kfree_skb_partial(skb, fragstolen);
@@ -5079,7 +5168,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
skb, fragstolen)) ? 1 : 0;
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ tcp_add_receive_queue(sk, skb);
skb_set_owner_r(skb, sk);
}
return eaten;
@@ -5162,7 +5251,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
return;
}
- skb_dst_drop(skb);
+ tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
reason = SKB_DROP_REASON_NOT_SPECIFIED;
@@ -5174,6 +5263,16 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
*/
if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
if (tcp_receive_window(tp) == 0) {
+ /* Some stacks are known to send bare FIN packets
+ * in a loop even if we send RWIN 0 in our ACK.
+ * Accepting this FIN does not hurt memory pressure
+ * because the FIN flag will simply be merged to the
+ * receive queue tail skb in most cases.
+ */
+ if (!skb->len &&
+ (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
+ goto queue_and_out;
+
reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
goto out_of_window;
@@ -5188,7 +5287,7 @@ queue_and_out:
inet_csk_schedule_ack(sk);
sk->sk_data_ready(sk);
- if (skb_queue_len(&sk->sk_receive_queue)) {
+ if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
reason = SKB_DROP_REASON_PROTO_MEM;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
goto drop;
@@ -5331,6 +5430,9 @@ restart:
for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
n = tcp_skb_next(skb, list);
+ if (!skb_frags_readable(skb))
+ goto skip_this;
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list, root);
@@ -5351,17 +5453,20 @@ restart:
break;
}
- if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
+ if (n && n != tail && skb_frags_readable(n) &&
+ tcp_skb_can_collapse_rx(skb, n) &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
}
+skip_this:
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
if (end_of_skbs ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
return;
__skb_queue_head_init(&tmp);
@@ -5375,9 +5480,7 @@ restart:
break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-#ifdef CONFIG_TLS_DEVICE
- nskb->decrypted = skb->decrypted;
-#endif
+ skb_copy_decrypted(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
if (list)
__skb_queue_before(list, skb, nskb);
@@ -5404,13 +5507,10 @@ restart:
skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
- !mptcp_skb_can_collapse(nskb, skb) ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ !tcp_skb_can_collapse_rx(nskb, skb) ||
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
goto end;
-#ifdef CONFIG_TLS_DEVICE
- if (skb->decrypted != nskb->decrypted)
- goto end;
-#endif
}
}
}
@@ -5861,23 +5961,35 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
SKB_DR(reason);
/* RFC1323: H1. Apply PAWS check first. */
- if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
- tp->rx_opt.saw_tstamp &&
- tcp_paws_discard(sk, skb)) {
- if (!th->rst) {
- if (unlikely(th->syn))
- goto syn_challenge;
- NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
- if (!tcp_oow_rate_limited(sock_net(sk), skb,
- LINUX_MIB_TCPACKSKIPPEDPAWS,
- &tp->last_oow_ack_time))
- tcp_send_dupack(sk, skb);
- SKB_DR_SET(reason, TCP_RFC7323_PAWS);
- goto discard;
- }
- /* Reset is accepted even if it did not pass PAWS. */
+ if (!tcp_fast_parse_options(sock_net(sk), skb, th, tp) ||
+ !tp->rx_opt.saw_tstamp ||
+ tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW))
+ goto step1;
+
+ reason = tcp_disordered_ack_check(sk, skb);
+ if (!reason)
+ goto step1;
+ /* Reset is accepted even if it did not pass PAWS. */
+ if (th->rst)
+ goto step1;
+ if (unlikely(th->syn))
+ goto syn_challenge;
+
+ /* Old ACK are common, increment PAWS_OLD_ACK
+ * and do not send a dupack.
+ */
+ if (reason == SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWS_OLD_ACK);
+ goto discard;
}
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+ if (!tcp_oow_rate_limited(sock_net(sk), skb,
+ LINUX_MIB_TCPACKSKIPPEDPAWS,
+ &tp->last_oow_ack_time))
+ tcp_send_dupack(sk, skb);
+ goto discard;
+step1:
/* Step 1: check sequence number */
reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
if (reason) {
@@ -5949,6 +6061,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* RFC 5961 4.2 : Send a challenge ack
*/
if (th->syn) {
+ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
+ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
+ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
+ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
+ goto pass;
syn_challenge:
if (syn_inerr)
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5958,6 +6075,7 @@ syn_challenge:
goto discard;
}
+pass:
bpf_skops_parse_hdr(sk, skb);
return true;
@@ -6114,7 +6232,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
- skb_dst_drop(skb);
+ tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_header_len);
eaten = tcp_queue_rcv(sk, skb, &fragstolen);
@@ -6288,7 +6406,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
skb_rbtree_walk_from(data)
tcp_mark_skb_lost(sk, data);
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
return true;
@@ -6361,6 +6479,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
inet_csk_reset_xmit_timer(sk,
ICSK_TIME_RETRANS,
TCP_TIMEOUT_MIN, TCP_RTO_MAX);
+ SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
goto reset_and_undo;
}
@@ -6369,6 +6488,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_time_stamp_ts(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto reset_and_undo;
}
@@ -6424,7 +6544,8 @@ consume:
if (!tp->rx_opt.wscale_ok) {
tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
- tp->window_clamp = min(tp->window_clamp, 65535U);
+ WRITE_ONCE(tp->window_clamp,
+ min(tp->window_clamp, 65535U));
}
if (tp->rx_opt.saw_tstamp) {
@@ -6572,7 +6693,8 @@ discard_and_undo:
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
- return 1;
+ /* we can reuse/return @reason to its caller to handle the exception */
+ return reason;
}
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
@@ -6586,10 +6708,17 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
tcp_try_undo_recovery(sk);
- /* Reset rtx states to prevent spurious retransmits_timed_out() */
tcp_update_rto_time(tp);
- tp->retrans_stamp = 0;
inet_csk(sk)->icsk_retransmits = 0;
+ /* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
+ * retrans_stamp but don't enter CA_Loss, so in case that happened we
+ * need to zero retrans_stamp here to prevent spurious
+ * retransmits_timed_out(). However, if the ACK of our SYNACK caused us
+ * to enter CA_Recovery then we need to leave retrans_stamp as it was
+ * set entering CA_Recovery, for correct retransmits_timed_out() and
+ * undo behavior.
+ */
+ tcp_retrans_stamp_cleanup(sk);
/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
* we no longer need req so release it.
@@ -6616,14 +6745,14 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+enum skb_drop_reason
+tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcphdr *th = tcp_hdr(skb);
struct request_sock *req;
int queued = 0;
- bool acceptable;
SKB_DR(reason);
switch (sk->sk_state) {
@@ -6633,7 +6762,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LISTEN:
if (th->ack)
- return 1;
+ return SKB_DROP_REASON_TCP_FLAGS;
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6649,12 +6778,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
*/
rcu_read_lock();
local_bh_disable();
- acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+ icsk->icsk_af_ops->conn_request(sk, skb);
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
- return 1;
consume_skb(skb);
return 0;
}
@@ -6699,17 +6826,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
/* step 5: check the ACK field */
- acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT |
- FLAG_NO_CHALLENGE_ACK) > 0;
-
- if (!acceptable) {
- if (sk->sk_state == TCP_SYN_RECV)
- return 1; /* send one RST */
- tcp_send_challenge_ack(sk);
- SKB_DR_SET(reason, TCP_OLD_ACK);
- goto discard;
+ reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK);
+
+ if ((int)reason <= 0) {
+ if (sk->sk_state == TCP_SYN_RECV) {
+ /* send one RST */
+ if (!reason)
+ return SKB_DROP_REASON_TCP_OLD_ACK;
+ return -reason;
+ }
+ /* accept old ack during closing */
+ if ((int)reason < 0) {
+ tcp_send_challenge_ack(sk);
+ reason = -reason;
+ goto discard;
+ }
}
+ SKB_DR_SET(reason, NOT_SPECIFIED);
switch (sk->sk_state) {
case TCP_SYN_RECV:
tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
@@ -6752,6 +6887,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_initialize_rcv_mss(sk);
tcp_fast_path_on(tp);
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ tcp_shutdown(sk, SEND_SHUTDOWN);
break;
case TCP_FIN_WAIT1: {
@@ -6777,7 +6914,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
@@ -6786,7 +6923,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
tmo = tcp_fin_time(sk);
@@ -6851,7 +6988,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
}
fallthrough;
@@ -6962,35 +7099,10 @@ static void tcp_openreq_init(struct request_sock *req,
#endif
}
-struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
- struct sock *sk_listener,
- bool attach_listener)
-{
- struct request_sock *req = reqsk_alloc(ops, sk_listener,
- attach_listener);
-
- if (req) {
- struct inet_request_sock *ireq = inet_rsk(req);
-
- ireq->ireq_opt = NULL;
-#if IS_ENABLED(CONFIG_IPV6)
- ireq->pktopts = NULL;
-#endif
- atomic64_set(&ireq->ir_cookie, 0);
- ireq->ireq_state = TCP_NEW_SYN_RECV;
- write_pnet(&ireq->ireq_net, sock_net(sk_listener));
- ireq->ireq_family = sk_listener->sk_family;
- req->timeout = TCP_TIMEOUT_INIT;
- }
-
- return req;
-}
-EXPORT_SYMBOL(inet_reqsk_alloc);
-
/*
* Return true if a syncookie should be sent
*/
-static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
+static bool tcp_syn_flood_action(struct sock *sk, const char *proto)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
const char *msg = "Dropping request";
@@ -7091,7 +7203,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct sock *sk, struct sk_buff *skb)
{
struct tcp_fastopen_cookie foc = { .len = -1 };
- __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
struct tcp_options_received tmp_opt;
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
@@ -7101,21 +7212,28 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct dst_entry *dst;
struct flowi fl;
u8 syncookies;
+ u32 isn;
#ifdef CONFIG_TCP_AO
const struct tcp_ao_hdr *aoh;
#endif
- syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
+ isn = __this_cpu_read(tcp_tw_isn);
+ if (isn) {
+ /* TW buckets are converted to open requests without
+ * limitations, they conserve resources and peer is
+ * evidently real one.
+ */
+ __this_cpu_write(tcp_tw_isn, 0);
+ } else {
+ syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
- /* TW buckets are converted to open requests without
- * limitations, they conserve resources and peer is
- * evidently real one.
- */
- if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
- want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
- if (!want_cookie)
- goto drop;
+ if (syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) {
+ want_cookie = tcp_syn_flood_action(sk,
+ rsk_ops->slab_name);
+ if (!want_cookie)
+ goto drop;
+ }
}
if (sk_acceptq_is_full(sk)) {
@@ -7154,7 +7272,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
/* Note: tcp_v6_init_req() might override ir_iif for link locals */
inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
- dst = af_ops->route_req(sk, skb, &fl, req);
+ dst = af_ops->route_req(sk, skb, &fl, req, isn);
if (!dst)
goto drop_and_free;
@@ -7231,7 +7349,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->tfo_listener = false;
if (!want_cookie) {
req->timeout = tcp_timeout_init((struct sock *)req);
- inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ req->timeout))) {
+ reqsk_free(req);
+ dst_release(dst);
+ return 0;
+ }
+
}
af_ops->send_synack(sk, dst, &fl, req, &foc,
!want_cookie ? TCP_SYNACK_NORMAL :
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c50c5a32b84..2632844d2c35 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -70,6 +70,7 @@
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
+#include <net/rstreason.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
@@ -78,6 +79,7 @@
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>
+#include <linux/skbuff_ref.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
@@ -92,7 +94,11 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
+static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
+
+static DEFINE_MUTEX(tcp_exit_batch_mutex);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
@@ -113,6 +119,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
const struct inet_timewait_sock *tw = inet_twsk(sktw);
const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
struct tcp_sock *tp = tcp_sk(sk);
+ int ts_recent_stamp;
+ u32 reuse_thresh;
+
+ if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2)
+ reuse = 0;
if (reuse == 2) {
/* Still does not detect *everything* that goes through
@@ -151,9 +162,17 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
If TW bucket has been already destroyed we fall back to VJ's scheme
and use initial timestamp retrieved from peer table.
*/
- if (tcptw->tw_ts_recent_stamp &&
- (!twp || (reuse && time_after32(ktime_get_seconds(),
- tcptw->tw_ts_recent_stamp)))) {
+ ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
+ reuse_thresh = READ_ONCE(tw->tw_entry_stamp) +
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse_delay);
+ if (ts_recent_stamp &&
+ (!twp || (reuse && time_after32(tcp_clock_ms(), reuse_thresh)))) {
+ /* inet_twsk_hashdance_schedule() sets sk_refcnt after putting twsk
+ * and releasing the bucket lock.
+ */
+ if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt)))
+ return 0;
+
/* In case of repair and re-using TIME-WAIT sockets we still
* want to be sure that it is safe as above but honor the
* sequence numbers and time stamps set as part of the repair
@@ -171,10 +190,10 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
if (!seq)
seq = 1;
WRITE_ONCE(tp->write_seq, seq);
- tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
- tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ tp->rx_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
+ tp->rx_opt.ts_recent_stamp = ts_recent_stamp;
}
- sock_hold(sktw);
+
return 1;
}
@@ -604,15 +623,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
- if (!sock_owned_by_user(sk)) {
- WRITE_ONCE(sk->sk_err, err);
-
- sk_error_report(sk);
-
- tcp_done(sk);
- } else {
+ if (!sock_owned_by_user(sk))
+ tcp_done_with_error(sk, err);
+ else
WRITE_ONCE(sk->sk_err_soft, err);
- }
goto out;
}
@@ -723,7 +737,8 @@ out:
* Exception: precedence violation. We do not implement it in any case.
*/
-static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
+ enum sk_rst_reason reason)
{
const struct tcphdr *th = tcp_hdr(skb);
struct {
@@ -866,11 +881,10 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
* routing might fail in this case. No choice here, if we choose to force
* input interface, we will misroute in case of asymmetric route.
*/
- if (sk) {
+ if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
- if (sk_fullsock(sk))
- trace_tcp_send_reset(sk, skb);
- }
+
+ trace_tcp_send_reset(sk, skb, reason);
BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
offsetof(struct inet_timewait_sock, tw_bound_dev_if));
@@ -878,11 +892,13 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
- ctl_sk = this_cpu_read(ipv4_tcp_sk);
+ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
+
sock_net_set(ctl_sk, net);
if (sk) {
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_mark : sk->sk_mark;
+ inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
transmit_time = tcp_transmit_time(sk);
@@ -893,7 +909,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ctl_sk->sk_mark = 0;
ctl_sk->sk_priority = 0;
}
- ip_send_unicast_reply(ctl_sk,
+ ip_send_unicast_reply(ctl_sk, sk,
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len,
@@ -903,6 +919,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
sock_net_set(ctl_sk, &init_net);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
local_bh_enable();
#ifdef CONFIG_TCP_MD5SIG
@@ -998,14 +1015,15 @@ static void tcp_v4_send_ack(const struct sock *sk,
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
- ctl_sk = this_cpu_read(ipv4_tcp_sk);
+ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
sock_net_set(ctl_sk, net);
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
transmit_time = tcp_transmit_time(sk);
- ip_send_unicast_reply(ctl_sk,
+ ip_send_unicast_reply(ctl_sk, sk,
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len,
@@ -1013,6 +1031,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
sock_net_set(ctl_sk, &init_net);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
local_bh_enable();
}
@@ -1036,7 +1055,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
}
if (aoh)
- key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
+ key.ao_key = tcp_ao_established_key(sk, ao_info,
+ aoh->rnext_keyid, -1);
}
}
if (key.ao_key) {
@@ -1050,19 +1070,17 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
#else
if (0) {
#endif
-#ifdef CONFIG_TCP_MD5SIG
- } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ } else if (static_branch_tcp_md5()) {
key.md5_key = tcp_twsk_md5_key(tcptw);
if (key.md5_key)
key.type = TCP_KEY_MD5;
-#endif
}
tcp_v4_send_ack(sk, skb,
- tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcptw->tw_snd_nxt, READ_ONCE(tcptw->tw_rcv_nxt),
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
- tcptw->tw_ts_recent,
+ READ_ONCE(tcptw->tw_ts_recent),
tw->tw_bound_dev_if, &key,
tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
tw->tw_tos,
@@ -1124,8 +1142,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
#else
if (0) {
#endif
-#ifdef CONFIG_TCP_MD5SIG
- } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ } else if (static_branch_tcp_md5()) {
const union tcp_md5_addr *addr;
int l3index;
@@ -1134,17 +1151,11 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
if (key.md5_key)
key.type = TCP_KEY_MD5;
-#endif
}
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent),
0, &key,
@@ -1667,7 +1678,8 @@ static void tcp_v4_init_req(struct request_sock *req,
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct request_sock *req)
+ struct request_sock *req,
+ u32 tw_isn)
{
tcp_v4_init_req(req, sk, skb);
@@ -1907,7 +1919,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (tcp_checksum_complete(skb))
goto csum_err;
@@ -1915,9 +1926,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *nsk = tcp_v4_cookie_check(sk, skb);
if (!nsk)
- goto discard;
+ return 0;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason) {
rsk = nsk;
goto reset;
}
@@ -1926,16 +1938,17 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason) {
rsk = sk;
goto reset;
}
return 0;
reset:
- tcp_v4_send_reset(rsk, skb);
+ tcp_v4_send_reset(rsk, skb, sk_rst_convert_drop_reason(reason));
discard:
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
/* Be careful here. If this function gets more complicated and
* gcc suffers from register pressure on the x86, sk (in %ebx)
* might be destroyed here. This current version compiles correctly,
@@ -1994,7 +2007,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason)
{
- u32 limit, tail_gso_size, tail_gso_segs;
+ u32 tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -2003,6 +2016,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
bool fragstolen;
u32 gso_segs;
u32 gso_size;
+ u64 limit;
int delta;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -2013,7 +2027,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
*/
skb_condense(skb);
- skb_dst_drop(skb);
+ tcp_cleanup_skb(skb);
if (unlikely(tcp_checksum_complete(skb))) {
bh_unlock_sock(sk);
@@ -2044,10 +2058,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
-#ifdef CONFIG_TLS_DEVICE
- tail->decrypted != skb->decrypted ||
-#endif
- !mptcp_skb_can_collapse(tail, skb) ||
+ !tcp_skb_can_collapse_rx(tail, skb) ||
thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce;
@@ -2100,7 +2111,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
__skb_push(skb, hdrlen);
no_coalesce:
- limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
+ /* sk->sk_backlog.len is reset only at the end of __release_sock().
+ * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
+ * sk_rcvbuf in normal conditions.
+ */
+ limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
+
+ limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
/* Only socket owner can try to collapse/prune rx queues
* to reduce memory overhead, so add a little headroom here.
@@ -2108,6 +2125,8 @@ no_coalesce:
*/
limit += 64 * 1024;
+ limit = min_t(u64, limit, UINT_MAX);
+
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
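Because both the backlog and sk_rmem_alloc may now each grow to sk_rcvbuf, the new limit is 2 * rcvbuf + sndbuf / 2 + 64 KB, which no longer fits in 32 bits once rcvbuf approaches INT_MAX; hence the u64 intermediate clamped back to UINT_MAX. A minimal sketch of that arithmetic in plain C, with stand-in names:

#include <stdint.h>

static uint32_t backlog_limit(uint32_t rcvbuf, uint32_t sndbuf)
{
	/* backlog and receive queue may each reach rcvbuf */
	uint64_t limit = (uint64_t)rcvbuf << 1;

	limit += sndbuf >> 1;
	limit += 64 * 1024;	/* headroom for collapse/prune by the owner */

	return limit > UINT32_MAX ? UINT32_MAX : (uint32_t)limit;
}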
@@ -2147,7 +2166,6 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
TCP_SKB_CB(skb)->has_rxtstamp =
@@ -2166,9 +2184,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
int dif = inet_iif(skb);
const struct iphdr *iph;
const struct tcphdr *th;
+ struct sock *sk = NULL;
bool refcounted;
- struct sock *sk;
int ret;
+ u32 isn;
drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (skb->pkt_type != PACKET_HOST)
@@ -2206,7 +2225,6 @@ lookup:
if (!sk)
goto no_tcp_socket;
-process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
@@ -2275,15 +2293,21 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v4_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ enum sk_rst_reason rst_reason;
+
+ rst_reason = sk_rst_convert_drop_reason(drop_reason);
+ tcp_v4_send_reset(nsk, skb, rst_reason);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
}
+process:
if (static_branch_unlikely(&ip4_min_ttl)) {
/* min_ttl can be changed concurrently from do_ip_setsockopt() */
if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
@@ -2354,13 +2378,13 @@ csum_error:
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
- tcp_v4_send_reset(NULL, skb);
+ tcp_v4_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason));
}
discard_it:
SKB_DR_OR(drop_reason, NOT_SPECIFIED);
/* Discard frame. */
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
discard_and_relse:
@@ -2382,7 +2406,7 @@ do_time_wait:
inet_twsk_put(inet_twsk(sk));
goto csum_error;
}
- switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
+ switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(net,
net->ipv4.tcp_death_row.hashinfo,
@@ -2396,6 +2420,7 @@ do_time_wait:
sk = sk2;
tcp_v4_restore_cb(skb);
refcounted = false;
+ __this_cpu_write(tcp_tw_isn, isn);
goto process;
}
}
@@ -2405,7 +2430,7 @@ do_time_wait:
tcp_v4_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
- tcp_v4_send_reset(sk, skb);
+ tcp_v4_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
inet_twsk_deschedule_put(inet_twsk(sk));
goto discard_it;
case TCP_TW_SUCCESS:;
@@ -2415,7 +2440,6 @@ do_time_wait:
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
- .twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
};
@@ -2492,10 +2516,25 @@ static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
}
#endif
+static void tcp_release_user_frags(struct sock *sk)
+{
+#ifdef CONFIG_PAGE_POOL
+ unsigned long index;
+ void *netmem;
+
+ xa_for_each(&sk->sk_user_frags, index, netmem)
+ WARN_ON_ONCE(!napi_pp_put_page((__force netmem_ref)netmem));
+#endif
+}
+
void tcp_v4_destroy_sock(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ tcp_release_user_frags(sk);
+
+ xa_destroy(&sk->sk_user_frags);
+
trace_tcp_destroy_sock(sk);
tcp_clear_xmit_timers(sk);
@@ -2864,15 +2903,17 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
+ u8 icsk_pending;
int rx_queue;
int state;
- if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ icsk_pending = smp_load_acquire(&icsk->icsk_pending);
+ if (icsk_pending == ICSK_TIME_RETRANS ||
+ icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+ icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
- } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+ } else if (icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sk->sk_timer)) {
@@ -2928,7 +2969,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
- i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+ i, src, srcp, dest, destp, READ_ONCE(tw->tw_substate), 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
refcount_read(&tw->tw_refcnt), tw);
}
@@ -3418,6 +3459,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
net->ipv4.sysctl_tcp_tw_reuse = 2;
+ net->ipv4.sysctl_tcp_tw_reuse_delay = 1 * MSEC_PER_SEC;
net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
@@ -3490,6 +3532,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_shrink_window = 0;
net->ipv4.sysctl_tcp_pingpong_thresh = 1;
+ net->ipv4.sysctl_tcp_rto_min_us = jiffies_to_usecs(TCP_RTO_MIN);
return 0;
}
@@ -3498,13 +3541,25 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
struct net *net;
- tcp_twsk_purge(net_exit_list, AF_INET);
+ /* make sure concurrent calls to tcp_sk_exit_batch from net_cleanup_work
+ * and failed setup_net error unwinding path are serialized.
+ *
+ * tcp_twsk_purge() handles twsk in any dead netns, not just those in
+ * net_exit_list, the thread that dismantles a particular twsk must
+ * do so without other thread progressing to refcount_dec_and_test() of
+ * tcp_death_row.tw_refcount.
+ */
+ mutex_lock(&tcp_exit_batch_mutex);
+
+ tcp_twsk_purge(net_exit_list);
list_for_each_entry(net, net_exit_list, exit_list) {
inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
tcp_fastopen_ctx_destroy(net);
}
+
+ mutex_unlock(&tcp_exit_batch_mutex);
}
static struct pernet_operations __net_initdata tcp_sk_ops = {
@@ -3604,7 +3659,9 @@ void __init tcp_v4_init(void)
*/
inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
- per_cpu(ipv4_tcp_sk, cpu) = sk;
+ sk->sk_clockid = CLOCK_MONOTONIC;
+
+ per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
}
if (register_pernet_subsys(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index c2a925538542..95669935494e 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -617,8 +617,13 @@ static struct genl_family tcp_metrics_nl_family;
static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
[TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
- [TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
- .len = sizeof(struct in6_addr), },
+ [TCP_METRICS_ATTR_ADDR_IPV6] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+
+ [TCP_METRICS_ATTR_SADDR_IPV4] = { .type = NLA_U32, },
+ [TCP_METRICS_ATTR_SADDR_IPV6] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+
/* Following attributes are not received for GET/DEL,
* we keep them for reference
*/
@@ -766,6 +771,7 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb,
unsigned int max_rows = 1U << tcp_metrics_hash_log;
unsigned int row, s_row = cb->args[0];
int s_col = cb->args[1], col = s_col;
+ int res = 0;
for (row = s_row; row < max_rows; row++, s_col = 0) {
struct tcp_metrics_block *tm;
@@ -778,7 +784,8 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb,
continue;
if (col < s_col)
continue;
- if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
+ res = tcp_metrics_dump_info(skb, cb, tm);
+ if (res < 0) {
rcu_read_unlock();
goto done;
}
@@ -789,7 +796,7 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb,
done:
cb->args[0] = row;
cb->args[1] = col;
- return skb->len;
+ return res;
}
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
@@ -808,8 +815,6 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
if (a) {
struct in6_addr in6;
- if (nla_len(a) != sizeof(struct in6_addr))
- return -EINVAL;
in6 = nla_get_in6_addr(a);
inetpeer_set_addr_v6(addr, &in6);
if (hash)
@@ -986,6 +991,7 @@ static struct genl_family tcp_metrics_nl_family __ro_after_init = {
.maxattr = TCP_METRICS_ATTR_MAX,
.policy = tcp_metrics_nl_policy,
.netnsok = true,
+ .parallel_ops = true,
.module = THIS_MODULE,
.small_ops = tcp_metrics_nl_ops,
.n_small_ops = ARRAY_SIZE(tcp_metrics_nl_ops),
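Two hedged readings of the tcp_metrics changes above: NLA_POLICY_EXACT_LEN() lets the netlink core reject wrongly sized address attributes before the handlers run, which is why the open-coded nla_len() check in __parse_nl_addr() can go away; and the dump callback now returns the stored error (or 0) rather than skb->len, following the current netlink convention where a 0 return terminates the dump. A minimal policy-table sketch, with invented EXAMPLE_ATTR_* names:

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_ADDR_IPV4] = { .type = NLA_U32 },
	[EXAMPLE_ATTR_ADDR_IPV6] =
		NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
};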
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bddd..dfdb7a4608a8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -22,6 +22,7 @@
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
+#include <net/rstreason.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
@@ -51,16 +52,17 @@ tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
return TCP_TW_SUCCESS;
}
-static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
+static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
+ u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
ao = rcu_dereference(tcptw->ao_info);
- if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
+ if (unlikely(ao && seq < rcv_nxt))
WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
- tcptw->tw_rcv_nxt = seq;
+ WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}
/*
@@ -95,45 +97,48 @@ static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
*/
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
- const struct tcphdr *th)
+ const struct tcphdr *th, u32 *tw_isn)
{
- struct tcp_options_received tmp_opt;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+ u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
+ struct tcp_options_received tmp_opt;
bool paws_reject = false;
+ int ts_recent_stamp;
tmp_opt.saw_tstamp = 0;
- if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
+ ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
+ if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
if (tmp_opt.rcv_tsecr)
tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
- tmp_opt.ts_recent = tcptw->tw_ts_recent;
- tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
+ tmp_opt.ts_recent_stamp = ts_recent_stamp;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
- if (tw->tw_substate == TCP_FIN_WAIT2) {
+ if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcptw->tw_rcv_nxt,
- tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
+ rcv_nxt,
+ rcv_nxt + tcptw->tw_rcv_wnd))
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
if (th->rst)
goto kill;
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
+ if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
return TCP_TW_RST;
/* Dup ACK? */
if (!th->ack ||
- !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+ !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
@@ -143,16 +148,22 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
* reset.
*/
if (!th->fin ||
- TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
+ TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
return TCP_TW_RST;
/* FIN arrived, enter true time-wait state. */
- tw->tw_substate = TCP_TIME_WAIT;
- twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
+ WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
+ twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
+ rcv_nxt);
if (tmp_opt.saw_tstamp) {
- tcptw->tw_ts_recent_stamp = ktime_get_seconds();
- tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
+ u64 ts = tcp_clock_ms();
+
+ WRITE_ONCE(tw->tw_entry_stamp, ts);
+ WRITE_ONCE(tcptw->tw_ts_recent_stamp,
+ div_u64(ts, MSEC_PER_SEC));
+ WRITE_ONCE(tcptw->tw_ts_recent,
+ tmp_opt.rcv_tsval);
}
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
@@ -177,7 +188,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
*/
if (!paws_reject &&
- (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
+ (TCP_SKB_CB(skb)->seq == rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
@@ -196,8 +207,10 @@ kill:
}
if (tmp_opt.saw_tstamp) {
- tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
- tcptw->tw_ts_recent_stamp = ktime_get_seconds();
+ WRITE_ONCE(tcptw->tw_ts_recent,
+ tmp_opt.rcv_tsval);
+ WRITE_ONCE(tcptw->tw_ts_recent_stamp,
+ ktime_get_seconds());
}
inet_twsk_put(tw);
@@ -222,13 +235,13 @@ kill:
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
- (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
+ (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
(tmp_opt.saw_tstamp &&
- (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
+ (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
- TCP_SKB_CB(skb)->tcp_tw_isn = isn;
+ *tw_isn = isn;
return TCP_TW_SYN;
}
@@ -306,6 +319,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_mark = sk->sk_mark;
tw->tw_priority = READ_ONCE(sk->sk_priority);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
+ /* refreshed when we enter true TIME-WAIT state */
+ tw->tw_entry_stamp = tcp_time_stamp_ms(tp);
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
tcptw->tw_rcv_wnd = tcp_receive_window(tp);
@@ -316,6 +331,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcptw->tw_last_oow_ack_time = 0;
tcptw->tw_tx_delay = tp->tcp_tx_delay;
tw->tw_txhash = sk->sk_txhash;
+ tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
+#endif
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -338,17 +357,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
- inet_twsk_schedule(tw, timeo);
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
- inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
- local_bh_enable();
+ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
@@ -388,7 +400,7 @@ void tcp_twsk_destructor(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
-void tcp_twsk_purge(struct list_head *net_exit_list, int family)
+void tcp_twsk_purge(struct list_head *net_exit_list)
{
bool purged_once = false;
struct net *net;
@@ -396,18 +408,13 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
list_for_each_entry(net, net_exit_list, exit_list) {
if (net->ipv4.tcp_death_row.hashinfo->pernet) {
/* Even if tw_refcount == 1, we must clean up kernel reqsk */
- inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
+ inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
} else if (!purged_once) {
- /* The last refcount is decremented in tcp_sk_exit_batch() */
- if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
- continue;
-
- inet_twsk_purge(&tcp_hashinfo, family);
+ inet_twsk_purge(&tcp_hashinfo);
purged_once = true;
}
}
}
-EXPORT_SYMBOL_GPL(tcp_twsk_purge);
/* Warning : This function is called without sk_listener being locked.
* Be sure to read socket fields once, as their value could change under us.
@@ -519,9 +526,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
const struct tcp_sock *oldtp;
struct tcp_sock *newtp;
u32 seq;
-#ifdef CONFIG_TCP_AO
- struct tcp_ao_key *ao_key;
-#endif
if (!newsk)
return NULL;
@@ -612,10 +616,14 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
#endif
#ifdef CONFIG_TCP_AO
newtp->ao_info = NULL;
- ao_key = treq->af_specific->ao_lookup(sk, req,
- tcp_rsk(req)->ao_keyid, -1);
- if (ao_key)
- newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
+
+ if (tcp_rsk_used_ao(req)) {
+ struct tcp_ao_key *ao_key;
+
+ ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
+ if (ao_key)
+ newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
+ }
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
@@ -629,6 +637,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+ xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);
+
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
@@ -787,8 +797,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* RFC793: "first check sequence number". */
- if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
+ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(skb)->end_seq,
+ tcp_rsk(req)->rcv_nxt,
+ tcp_rsk(req)->rcv_nxt +
+ tcp_synack_window(req))) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST) &&
!tcp_oow_rate_limited(sock_net(sk), skb,
@@ -802,12 +815,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* In sequence, PAWS is OK. */
- /* TODO: We probably should defer ts_recent change once
- * we take ownership of @req.
- */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
- WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
-
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
@@ -856,6 +863,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
+ if (own_req && tmp_opt.saw_tstamp &&
+ !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+ tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
+
if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
@@ -883,7 +894,7 @@ embryonic_reset:
* avoid becoming vulnerable to outside attack aiming at
* resetting legit local connections.
*/
- req->rsk_ops->send_reset(sk, skb);
+ req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
} else if (fastopen) { /* received a valid RST pkt */
reqsk_fastopen_remove(sk, req, true);
tcp_reset(sk, skb);
@@ -911,11 +922,11 @@ EXPORT_SYMBOL(tcp_check_req);
* be created.
*/
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb)
__releases(&((child)->sk_lock.slock))
{
- int ret = 0;
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
int state = child->sk_state;
/* record sk_napi_id and sk_rx_queue_mapping of child. */
@@ -923,7 +934,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ reason = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
@@ -937,6 +948,6 @@ int tcp_child_process(struct sock *parent, struct sock *child,
bh_unlock_sock(child);
sock_put(child);
- return ret;
+ return reason;
}
EXPORT_SYMBOL(tcp_child_process);
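The tcp_minisocks.c hunks above mostly convert lockless readers and writers of the timewait fields to READ_ONCE()/WRITE_ONCE() pairs; the rule for accepting a new SYN against a TIME-WAIT socket is unchanged. Distilled into a stand-alone predicate (stand-in names, after() defined as in the kernel):

#include <stdbool.h>
#include <stdint.h>

#define after(a, b)	((int32_t)((b) - (a)) < 0)

/* a SYN may reopen the connection if it lies beyond everything the old
 * connection received, or carries a strictly newer timestamp */
static bool timewait_accepts_syn(uint32_t seq, uint32_t rcv_nxt,
				 bool saw_tstamp, uint32_t ts_recent,
				 uint32_t rcv_tsval)
{
	return after(seq, rcv_nxt) ||
	       (saw_tstamp && (int32_t)(ts_recent - rcv_tsval) < 0);
}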
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8311c38267b5..2dfac79dc78b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -13,12 +13,15 @@
#include <net/tcp.h>
#include <net/protocol.h>
-static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
+static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
unsigned int seq, unsigned int mss)
{
+ u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
+ u32 ts_seq = skb_shinfo(gso_skb)->tskey;
+
while (skb) {
if (before(ts_seq, seq + mss)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
+ skb_shinfo(skb)->tx_flags |= flags;
skb_shinfo(skb)->tskey = ts_seq;
return;
}
@@ -28,6 +31,70 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
}
}
+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 newip,
+ __be16 *oldport, __be16 newport)
+{
+ struct tcphdr *th;
+ struct iphdr *iph;
+
+ if (*oldip == newip && *oldport == newport)
+ return;
+
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
+ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ *oldport = newport;
+
+ csum_replace4(&iph->check, *oldip, newip);
+ *oldip = newip;
+}
+
+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct tcphdr *th;
+ const struct iphdr *iph;
+ struct sk_buff *seg;
+ struct tcphdr *th2;
+ struct iphdr *iph2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+ th2 = tcp_hdr(seg->next);
+ iph2 = ip_hdr(seg->next);
+
+ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+ iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->saddr, iph->saddr,
+ &th2->source, th->source);
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->daddr, iph->daddr,
+ &th2->dest, th->dest);
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv4_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -37,6 +104,15 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
+ return __tcp4_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
@@ -73,6 +149,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
if (thlen < sizeof(*th))
goto out;
+ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
+ goto out;
+
if (!pskb_may_pull(skb, thlen))
goto out;
@@ -117,8 +196,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
th = tcp_hdr(skb);
seq = ntohl(th->seq);
- if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
- tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
+ if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
+ tcp_gso_tstamp(segs, gso_skb, seq, mss);
newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
@@ -178,63 +257,76 @@ out:
return segs;
}
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
- struct sk_buff *pp = NULL;
+ struct tcphdr *th2;
struct sk_buff *p;
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+ if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ return p;
+ }
+
+ return NULL;
+}
+
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
+{
+ unsigned int thlen, hlen, off;
struct tcphdr *th;
- struct tcphdr *th2;
- unsigned int len;
- unsigned int thlen;
- __be32 flags;
- unsigned int mss = 1;
- unsigned int hlen;
- unsigned int off;
- int flush = 1;
- int i;
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
th = skb_gro_header(skb, hlen, off);
if (unlikely(!th))
- goto out;
+ return NULL;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
- goto out;
+ return NULL;
hlen = off + thlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
- goto out;
+ return NULL;
}
skb_gro_pull(skb, thlen);
- len = skb_gro_len(skb);
- flags = tcp_flag_word(th);
-
- list_for_each_entry(p, head, list) {
- if (!NAPI_GRO_CB(p)->same_flow)
- continue;
+ return th;
+}
- th2 = tcp_hdr(p);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ unsigned int thlen = th->doff * 4;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
+ struct tcphdr *th2;
+ unsigned int len;
+ __be32 flags;
+ unsigned int mss = 1;
+ int flush = 1;
+ int i;
- if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
+ len = skb_gro_len(skb);
+ flags = tcp_flag_word(th);
- goto found;
- }
- p = NULL;
- goto out_check_final;
+ p = tcp_gro_lookup(head, th);
+ if (!p)
+ goto out_check_final;
-found:
- /* Include the IP ID check below from the inner most IP hdr */
- flush = NAPI_GRO_CB(p)->flush;
- flush |= (__force int)(flags & TCP_FLAG_CWR);
+ th2 = tcp_hdr(p);
+ flush = (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
@@ -242,16 +334,7 @@ found:
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
- /* When we receive our second frame we can made a decision on if we
- * continue this flow as an atomic flow with a fixed ID or if we use
- * an incrementing ID.
- */
- if (NAPI_GRO_CB(p)->flush_id != 1 ||
- NAPI_GRO_CB(p)->count != 1 ||
- !NAPI_GRO_CB(p)->is_atomic)
- flush |= NAPI_GRO_CB(p)->flush_id;
- else
- NAPI_GRO_CB(p)->is_atomic = false;
+ flush |= gro_receive_network_flush(th, th2, p);
mss = skb_shinfo(p)->gso_size;
@@ -265,9 +348,19 @@ found:
flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
-#ifdef CONFIG_TLS_DEVICE
- flush |= p->decrypted ^ skb->decrypted;
-#endif
+ flush |= skb_cmp_decrypted(p, skb);
+
+ if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
+ flush |= (__force int)(flags ^ tcp_flag_word(th2));
+ flush |= skb->ip_summed != p->ip_summed;
+ flush |= skb->csum_level != p->csum_level;
+ flush |= NAPI_GRO_CB(p)->count >= 64;
+
+ if (flush || skb_gro_receive_list(p, skb))
+ mss = 1;
+
+ goto out_check_final;
+ }
if (flush || skb_gro_receive(p, skb)) {
mss = 1;
@@ -290,7 +383,6 @@ out_check_final:
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = p;
-out:
NAPI_GRO_CB(skb)->flush |= (flush != 0);
return pp;
@@ -299,60 +391,110 @@ out:
void tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
+ struct skb_shared_info *shinfo;
+
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- if (skb->encapsulation)
- skb->inner_transport_header = skb->transport_header;
+ shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ const struct iphdr *iph;
+ struct sk_buff *p;
+ struct sock *sk;
+ struct net *net;
+ int iif, sdif;
+
+ if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
+ return;
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet_get_iif_sdif(skb, &iif, &sdif);
+ iph = skb_gro_network_header(skb);
+ net = dev_net(skb->dev);
+ sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
+ struct tcphdr *th;
+
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
skb_gro_checksum_validate(skb, IPPROTO_TCP,
- inet_gro_compute_pseudo)) {
- NAPI_GRO_CB(skb)->flush = 1;
- return NULL;
- }
+ inet_gro_compute_pseudo))
+ goto flush;
+
+ th = tcp_gro_pull_header(skb);
+ if (!th)
+ goto flush;
- return tcp_gro_receive(head, skb);
+ tcp4_check_fraglist_gro(head, skb, th);
+
+ return tcp_gro_receive(head, skb, th);
+
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct tcphdr *th = tcp_hdr(skb);
+ if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (NAPI_GRO_CB(skb)->is_atomic)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
+ (NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);
tcp_gro_complete(skb);
return 0;
}
-static const struct net_offload tcpv4_offload = {
- .callbacks = {
- .gso_segment = tcp4_gso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- },
-};
-
int __init tcpv4_offload_init(void)
{
- return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
+ net_hotdata.tcpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp4_gso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
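For fraglist GSO, __tcpv4_gso_segment_csum() above patches each trailing segment's addresses and ports and adjusts the IP and TCP checksums incrementally instead of recomputing them. The helpers it uses (csum_replace4(), inet_proto_csum_replace2()) boil down to RFC 1624 arithmetic; below is a plain-C sketch for a single 16-bit field (a 32-bit field such as an IPv4 address is handled as its two 16-bit halves). Names here are invented for the sketch.

#include <stdint.h>

/* fold a wide one's-complement accumulator back down to 16 bits */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624: update a stored (inverted) checksum after one 16-bit field
 * changes from old_val to new_val, without touching the rest of the data */
static uint16_t csum_replace16(uint16_t check, uint16_t old_val,
			       uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~csum_fold32(sum);
}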
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e3167ad96567..bc95d2a5924f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -39,11 +39,13 @@
#include <net/tcp.h>
#include <net/mptcp.h>
+#include <net/proto_memory.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>
+#include <linux/skbuff_ref.h>
#include <trace/events/tcp.h>
@@ -203,16 +205,17 @@ static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
* This MUST be enforced by all callers.
*/
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
- __u32 *rcv_wnd, __u32 *window_clamp,
+ __u32 *rcv_wnd, __u32 *__window_clamp,
int wscale_ok, __u8 *rcv_wscale,
__u32 init_rcv_wnd)
{
unsigned int space = (__space < 0 ? 0 : __space);
+ u32 window_clamp = READ_ONCE(*__window_clamp);
/* If no clamp set the clamp to the max possible scaled window */
- if (*window_clamp == 0)
- (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
- space = min(*window_clamp, space);
+ if (window_clamp == 0)
+ window_clamp = (U16_MAX << TCP_MAX_WSCALE);
+ space = min(window_clamp, space);
/* Quantize space offering to a multiple of mss if possible. */
if (space > mss)
@@ -229,7 +232,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
else
- (*rcv_wnd) = min_t(u32, space, U16_MAX);
+ (*rcv_wnd) = space;
if (init_rcv_wnd)
*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
@@ -239,12 +242,13 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
/* Set window scaling on max possible window */
space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
- space = min_t(u32, space, *window_clamp);
+ space = min_t(u32, space, window_clamp);
*rcv_wscale = clamp_t(int, ilog2(space) - 15,
0, TCP_MAX_WSCALE);
}
/* Set the clamp no higher than max representable value */
- (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
+ WRITE_ONCE(*__window_clamp,
+ min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
}
EXPORT_SYMBOL(tcp_select_initial_window);
@@ -261,11 +265,14 @@ static u16 tcp_select_window(struct sock *sk)
u32 cur_win, new_win;
/* Make the window 0 if we failed to queue the data because we
- * are out of memory. The window is temporary, so we don't store
- * it on the socket.
+ * are out of memory.
*/
- if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
+ if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
+ tp->pred_flags = 0;
+ tp->rcv_wnd = 0;
+ tp->rcv_wup = tp->rcv_nxt;
return 0;
+ }
cur_win = tcp_receive_window(tp);
new_win = __tcp_select_window(sk);
@@ -879,8 +886,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
unsigned int size;
if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
- opts->options |= OPTION_MPTCP;
- remaining -= size;
+ if (remaining >= size) {
+ opts->options |= OPTION_MPTCP;
+ remaining -= size;
+ }
}
}
@@ -1297,7 +1306,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
tp = tcp_sk(sk);
prior_wstamp = tp->tcp_wstamp_ns;
tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
if (clone_it) {
oskb = skb;
@@ -1499,18 +1508,22 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
}
/* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
+ int tso_segs;
+
if (skb->len <= mss_now) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
- tcp_skb_pcount_set(skb, 1);
TCP_SKB_CB(skb)->tcp_gso_size = 0;
- } else {
- tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
- TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
+ tcp_skb_pcount_set(skb, 1);
+ return 1;
}
+ TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
+ tso_segs = DIV_ROUND_UP(skb->len, mss_now);
+ tcp_skb_pcount_set(skb, tso_segs);
+ return tso_segs;
}
/* Pcount in the middle of the write queue got changed, we need to do various
@@ -1647,7 +1660,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
skb_split(skb, buff, len);
- skb_set_delivery_time(buff, skb->tstamp, true);
+ skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
tcp_fragment_tstamp(skb, buff);
old_factor = tcp_skb_pcount(skb);
@@ -2070,16 +2083,10 @@ static unsigned int tcp_mss_split_point(const struct sock *sk,
/* Can at least one segment of SKB be sent right now, according to the
* congestion window rules? If so, return how many segments are allowed.
*/
-static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
- const struct sk_buff *skb)
+static u32 tcp_cwnd_test(const struct tcp_sock *tp)
{
u32 in_flight, cwnd, halfcwnd;
- /* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
- tcp_skb_pcount(skb) == 1)
- return 1;
-
in_flight = tcp_packets_in_flight(tp);
cwnd = tcp_snd_cwnd(tp);
if (in_flight >= cwnd)
@@ -2100,10 +2107,9 @@ static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
- if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
- tcp_set_skb_tso_segs(skb, mss_now);
- tso_segs = tcp_skb_pcount(skb);
- }
+ if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now))
+ return tcp_set_skb_tso_segs(skb, mss_now);
+
return tso_segs;
}
@@ -2341,9 +2347,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (len <= skb->len)
break;
- if (unlikely(TCP_SKB_CB(skb)->eor) ||
- tcp_has_tx_tstamp(skb) ||
- !skb_pure_zcopy_same(skb, next))
+ if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
return false;
len -= skb->len;
@@ -2403,6 +2407,21 @@ commit:
return 0;
}
+/* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if
+ * all its payload was moved to another one (dst).
+ * Make sure to transfer tcp_flags, eor, and tstamp.
+ */
+static void tcp_eat_one_skb(struct sock *sk,
+ struct sk_buff *dst,
+ struct sk_buff *src)
+{
+ TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags;
+ TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor;
+ tcp_skb_collapse_tstamp(dst, src);
+ tcp_unlink_write_queue(src, sk);
+ tcp_wmem_free_skb(sk, src);
+}
+
/* Create a new MTU probe if we are ready.
* MTU probe is regularly attempting to increase the path MTU by
* deliberately sending larger packets. This discovers routing
@@ -2508,16 +2527,7 @@ static int tcp_mtu_probe(struct sock *sk)
copy = min_t(int, skb->len, probe_size - len);
if (skb->len <= copy) {
- /* We've eaten all the data from this skb.
- * Throw it away. */
- TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
- /* If this is the last SKB we copy and eor is set
- * we need to propagate it to the new skb.
- */
- TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
- tcp_skb_collapse_tstamp(nskb, skb);
- tcp_unlink_write_queue(skb, sk);
- tcp_wmem_free_skb(sk, skb);
+ tcp_eat_one_skb(sk, nskb, skb);
} else {
TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
~(TCPHDR_FIN|TCPHDR_PSH);
@@ -2683,6 +2693,35 @@ void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
tcp_chrono_set(tp, TCP_CHRONO_BUSY);
}
+/* First skb in the write queue is smaller than ideal packet size.
+ * Check if we can move payload from the second skb in the queue.
+ */
+static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount)
+{
+ struct sk_buff *next_skb = skb->next;
+ unsigned int nlen;
+
+ if (tcp_skb_is_last(sk, skb))
+ return;
+
+ if (!tcp_skb_can_collapse(skb, next_skb))
+ return;
+
+ nlen = min_t(u32, amount, next_skb->len);
+ if (!nlen || !skb_shift(skb, next_skb, nlen))
+ return;
+
+ TCP_SKB_CB(skb)->end_seq += nlen;
+ TCP_SKB_CB(next_skb)->seq += nlen;
+
+ if (!next_skb->len) {
+ /* In case FIN is set, we need to update end_seq */
+ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
+
+ tcp_eat_one_skb(sk, skb, next_skb);
+ }
+}
+
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
@@ -2703,10 +2742,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int tso_segs, sent_pkts;
- int cwnd_quota;
+ u32 cwnd_quota, max_segs;
int result;
bool is_cwnd_limited = false, is_rwnd_limited = false;
- u32 max_segs;
sent_pkts = 0;
@@ -2724,11 +2762,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
max_segs = tcp_tso_segs(sk, mss_now);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+ int missing_bytes;
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
tp->tcp_wstamp_ns = tp->tcp_clock_cache;
- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
tcp_init_tso_segs(skb, mss_now);
goto repair; /* Skip network transmission */
@@ -2737,10 +2776,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (tcp_pacing_check(sk))
break;
- tso_segs = tcp_init_tso_segs(skb, mss_now);
- BUG_ON(!tso_segs);
-
- cwnd_quota = tcp_cwnd_test(tp, skb);
+ cwnd_quota = tcp_cwnd_test(tp);
if (!cwnd_quota) {
if (push_one == 2)
/* Force out a loss probe pkt. */
@@ -2748,6 +2784,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
else
break;
}
+ cwnd_quota = min(cwnd_quota, max_segs);
+ missing_bytes = cwnd_quota * mss_now - skb->len;
+ if (missing_bytes > 0)
+ tcp_grow_skb(sk, skb, missing_bytes);
+
+ tso_segs = tcp_set_skb_tso_segs(skb, mss_now);
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
is_rwnd_limited = true;
@@ -2769,9 +2811,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- min_t(unsigned int,
- cwnd_quota,
- max_segs),
+ cwnd_quota,
nonagle);
if (skb->len > limit &&
@@ -2919,10 +2959,8 @@ void tcp_send_loss_probe(struct sock *sk)
}
skb = skb_rb_last(&sk->tcp_rtx_queue);
if (unlikely(!skb)) {
- WARN_ONCE(tp->packets_out,
- "invalid inflight: %u state %u cwnd %u mss %d\n",
- tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
- inet_csk(sk)->icsk_pending = 0;
+ tcp_warn_once(sk, tp->packets_out, "invalid inflight: ");
+ smp_store_release(&inet_csk(sk)->icsk_pending, 0);
return;
}
@@ -2955,7 +2993,7 @@ probe_sent:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
/* Reset s.t. tcp_rearm_rto will restart timer from now */
- inet_csk(sk)->icsk_pending = 0;
+ smp_store_release(&inet_csk(sk)->icsk_pending, 0);
rearm_timer:
tcp_rearm_rto(sk);
}
@@ -3227,6 +3265,8 @@ static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
return false;
if (skb_cloned(skb))
return false;
+ if (!skb_frags_readable(skb))
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
return false;
@@ -3387,11 +3427,6 @@ start:
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
- /* To avoid taking spuriously low RTT samples based on a timestamp
- * for a transmit that never happened, always mark EVER_RETRANS
- */
- TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
-
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
TCP_SKB_CB(skb)->seq, segs, err);
@@ -3401,6 +3436,12 @@ start:
} else if (err != -EBUSY) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
}
+
+ /* To avoid taking spuriously low RTT samples based on a timestamp
+ * for a transmit that never happened, always mark EVER_RETRANS
+ */
+ TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+
return err;
}
@@ -3563,7 +3604,9 @@ void tcp_send_fin(struct sock *sk)
return;
}
} else {
- skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ skb = alloc_skb_fclone(MAX_TCP_HEADER,
+ sk_gfp_mask(sk, GFP_ATOMIC |
+ __GFP_NOWARN));
if (unlikely(!skb))
return;
@@ -3583,7 +3626,8 @@ void tcp_send_fin(struct sock *sk)
* was unread data in the receive queue. This behavior is recommended
* by RFC 2525, section 2.17. -DaveM
*/
-void tcp_send_active_reset(struct sock *sk, gfp_t priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority,
+ enum sk_rst_reason reason)
{
struct sk_buff *skb;
@@ -3608,7 +3652,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
/* skb of trace_tcp_send_reset() keeps the skb that caused RST,
* skb here is different to the troublesome skb, so use NULL
*/
- trace_tcp_send_reset(sk, NULL);
+ trace_tcp_send_reset(sk, NULL, reason);
}
/* Send a crossed SYN-ACK during socket establishment.
@@ -3687,7 +3731,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
switch (synack_type) {
case TCP_SYNACK_NORMAL:
- skb_set_owner_w(skb, req_to_sk(req));
+ skb_set_owner_edemux(skb, req_to_sk(req));
break;
case TCP_SYNACK_COOKIE:
/* Under synflood, we do not attach skb to a socket,
@@ -3711,11 +3755,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_SYN_COOKIES
if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
- true);
+ SKB_CLOCK_MONOTONIC);
else
#endif
{
- skb_set_delivery_time(skb, now, true);
+ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
}
@@ -3727,6 +3771,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_TCP_AO
struct tcp_ao_key *ao_key = NULL;
u8 keyid = tcp_rsk(req)->ao_keyid;
+ u8 rnext = tcp_rsk(req)->ao_rcv_next;
ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
keyid, -1);
@@ -3736,6 +3781,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
* ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
*/
if (unlikely(!ao_key)) {
+ trace_tcp_ao_synack_no_key(sk, keyid, rnext);
rcu_read_unlock();
kfree_skb(skb);
net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
@@ -3802,7 +3848,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
synack_type, &opts);
- skb_set_delivery_time(skb, now, true);
+ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
tcp_add_tx_delay(skb, tp);
return skb;
@@ -3855,7 +3901,7 @@ static void tcp_connect_init(struct sock *sk)
tcp_ca_dst_init(sk, dst);
if (!tp->window_clamp)
- tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
+ WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
tcp_initialize_rcv_mss(sk);
@@ -3863,7 +3909,7 @@ static void tcp_connect_init(struct sock *sk)
/* limit the window selection if the user enforce a smaller rx buffer */
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
(tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
- tp->window_clamp = tcp_full_space(sk);
+ WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));
rcv_wnd = tcp_rwnd_init_bpf(sk);
if (rcv_wnd == 0)
@@ -3986,7 +4032,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
- skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
+ skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
/* Now full SYN+DATA was cloned and sent (or not),
* remove the SYN from the original skb (syn_data)
@@ -4088,7 +4134,10 @@ int tcp_connect(struct sock *sk)
if (unlikely(!buff))
return -ENOBUFS;
- tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
+ /* SYN eats a sequence byte, write_seq updated by
+ * tcp_connect_queue_skb().
+ */
+ tcp_init_nondata_skb(buff, tp->write_seq, TCPHDR_SYN);
tcp_mstamp_refresh(tp);
tp->retrans_stamp = tcp_time_stamp_ts(tp);
tcp_connect_queue_skb(sk, buff);
@@ -4122,16 +4171,9 @@ EXPORT_SYMBOL(tcp_connect);
u32 tcp_delack_max(const struct sock *sk)
{
- const struct dst_entry *dst = __sk_dst_get(sk);
- u32 delack_max = inet_csk(sk)->icsk_delack_max;
-
- if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
- u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
- u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
+ u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1;
- delack_max = min_t(u32, delack_max, delack_from_rto_min);
- }
- return delack_max;
+ return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min);
}
/* Send out a delayed ack, the caller does the policy checking
@@ -4185,7 +4227,8 @@ void tcp_send_delayed_ack(struct sock *sk)
if (!time_before(timeout, icsk->icsk_ack.timeout))
timeout = icsk->icsk_ack.timeout;
}
- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ smp_store_release(&icsk->icsk_ack.pending,
+ icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
icsk->icsk_ack.timeout = timeout;
sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
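In the tcp_write_xmit() changes above, the congestion-window allowance is clamped to max_segs before sizing the head skb, and tcp_grow_skb() then tries to pull the shortfall from the next skb so a sub-MSS head segment can be topped up before TSO segments are recomputed. The sizing step as plain arithmetic, with stand-in names:

#include <stdint.h>

static int32_t tso_shortfall(uint32_t cwnd_quota, uint32_t max_segs,
			     uint32_t mss_now, uint32_t skb_len)
{
	uint32_t quota = cwnd_quota < max_segs ? cwnd_quota : max_segs;

	/* positive: this many payload bytes could still be appended to the
	 * head skb before it fills the allowed quota * mss */
	return (int32_t)(quota * mss_now - skb_len);
}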
diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c
index 8512cb09ebc0..d8a4f192873a 100644
--- a/net/ipv4/tcp_sigpool.c
+++ b/net/ipv4/tcp_sigpool.c
@@ -10,7 +10,14 @@
#include <net/tcp.h>
static size_t __scratch_size;
-static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
+struct sigpool_scratch {
+ local_lock_t bh_lock;
+ void __rcu *pad;
+};
+
+static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
struct sigpool_entry {
struct crypto_ahash *hash;
@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size)
break;
}
- old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+ old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
scratch, lockdep_is_held(&cpool_mutex));
if (!cpu_online(cpu) || !old_scratch) {
kfree(old_scratch);
@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
int cpu;
for_each_possible_cpu(cpu)
- kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+ kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
NULL, lockdep_is_held(&cpool_mutex)));
__scratch_size = 0;
}
@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC
/* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
* valid (allocated) until tcp_sigpool_end().
*/
- c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
+ local_lock_nested_bh(&sigpool_scratch.bh_lock);
+ c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
return 0;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_start);
@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{
struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
+ local_unlock_nested_bh(&sigpool_scratch.bh_lock);
rcu_read_unlock_bh();
ahash_request_free(c->req);
crypto_free_ahash(hash);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d1ad20ce1c8c..b412ed88ccd9 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,10 +22,11 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
+#include <net/rstreason.h>
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
u32 elapsed, user_timeout;
s32 remaining;
@@ -47,7 +48,7 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
u32 remaining, user_timeout;
s32 elapsed;
@@ -73,11 +74,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
static void tcp_write_err(struct sock *sk)
{
- WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
- sk_error_report(sk);
-
- tcp_write_queue_purge(sk);
- tcp_done(sk);
+ tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
@@ -127,7 +124,8 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
(!tp->snd_wnd && !tp->packets_out))
do_reset = true;
if (do_reset)
- tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY);
tcp_done(sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
@@ -284,6 +282,7 @@ static int tcp_write_timeout(struct sock *sk)
expired = retransmits_timed_out(sk, retry_until,
READ_ONCE(icsk->icsk_user_timeout));
tcp_fastopen_active_detect_blackhole(sk, expired);
+ mptcp_active_detect_blackhole(sk, expired);
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
@@ -362,6 +361,14 @@ static void tcp_delack_timer(struct timer_list *t)
from_timer(icsk, t, icsk_delack_timer);
struct sock *sk = &icsk->icsk_inet.sk;
+ /* Avoid taking socket spinlock if there is no ACK to send.
+ * The compressed_ack check is racy, but a separate hrtimer
+ * will take care of it eventually.
+ */
+ if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) &&
+ !READ_ONCE(tcp_sk(sk)->compressed_ack))
+ goto out;
+
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
tcp_delack_timer_handler(sk);
@@ -372,6 +379,7 @@ static void tcp_delack_timer(struct timer_list *t)
sock_hold(sk);
}
bh_unlock_sock(sk);
+out:
sock_put(sk);
}
@@ -481,11 +489,26 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
const struct sk_buff *skb,
u32 rtx_delta)
{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
const struct tcp_sock *tp = tcp_sk(sk);
- const int timeout = TCP_RTO_MAX * 2;
- u32 rcv_delta;
+ int timeout = TCP_RTO_MAX * 2;
+ s32 rcv_delta;
- rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+ if (user_timeout) {
+ /* If the user application specified a TCP_USER_TIMEOUT,
+ * it does not want win 0 packets to 'reset the timer'
+ * while retransmits are not making progress.
+ */
+ if (rtx_delta > user_timeout)
+ return true;
+ timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
+ }
+ /* Note: timer interrupt might have been delayed by at least one jiffy,
+ * and tp->rcv_tstamp might very well have been written recently.
+ * rcv_delta can thus be negative.
+ */
+ rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
@@ -530,8 +553,6 @@ void tcp_retransmit_timer(struct sock *sk)
if (WARN_ON_ONCE(!skb))
return;
- tp->tlp_high_seq = 0;
-
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
!((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
/* Receiver dastardly shrinks window. Our retransmits
@@ -689,11 +710,11 @@ void tcp_write_timer_handler(struct sock *sk)
tcp_send_loss_probe(sk);
break;
case ICSK_TIME_RETRANS:
- icsk->icsk_pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
tcp_retransmit_timer(sk);
break;
case ICSK_TIME_PROBE0:
- icsk->icsk_pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
tcp_probe_timer(sk);
break;
}
@@ -705,6 +726,10 @@ static void tcp_write_timer(struct timer_list *t)
from_timer(icsk, t, icsk_retransmit_timer);
struct sock *sk = &icsk->icsk_inet.sk;
+ /* Avoid locking the socket when there is no pending event. */
+ if (!smp_load_acquire(&icsk->icsk_pending))
+ goto out;
+
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
tcp_write_timer_handler(sk);
@@ -714,6 +739,7 @@ static void tcp_write_timer(struct timer_list *t)
sock_hold(sk);
}
bh_unlock_sock(sk);
+out:
sock_put(sk);
}
@@ -768,7 +794,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
goto out;
}
}
- tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
goto death;
}
@@ -795,7 +821,8 @@ static void tcp_keepalive_timer (struct timer_list *t)
icsk->icsk_probes_out > 0) ||
(user_timeout == 0 &&
icsk->icsk_probes_out >= keepalive_probes(tp))) {
- tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
tcp_write_err(sk);
goto out;
}
@@ -838,6 +865,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
* LINUX_MIB_TCPACKCOMPRESSED accurate.
*/
tp->compressed_ack--;
+ tcp_mstamp_refresh(tp);
tcp_send_ack(sk);
}
} else {
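The tcp_rtx_probe0_timed_out() change above means TCP_USER_TIMEOUT is now honoured even while the peer keeps advertising a zero window and only window probes are going out. For reference, this is how an application opts in from user space; a minimal sketch, with an illustrative loopback destination and a 30-second timeout:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18	/* from include/uapi/linux/tcp.h */
#endif

int main(void)
{
	unsigned int timeout_ms = 30000; /* give up after 30s without progress */
	struct sockaddr_in addr;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return 1;

	/* Bound the time transmitted data may stay unacknowledged; with the
	 * patch above this also bounds the zero-window probe phase.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0)
		perror("setsockopt(TCP_USER_TIMEOUT)");

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(80);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}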
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e474b201900f..a9bb9ce5438e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -100,6 +100,7 @@
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
+#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
@@ -326,6 +327,8 @@ found:
goto fail_unlock;
}
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
sk_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -342,7 +345,7 @@ found:
hslot2->count++;
spin_unlock(&hslot2->lock);
}
- sock_set_flag(sk, SOCK_RCU_FREE);
+
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
@@ -363,7 +366,7 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
-static int compute_score(struct sock *sk, struct net *net,
+static int compute_score(struct sock *sk, const struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
int dif, int sdif)
@@ -407,20 +410,61 @@ static int compute_score(struct sock *sk, struct net *net,
return score;
}
-INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
- static u32 udp_ehash_secret __read_mostly;
-
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
udp_ehash_secret + net_hash_mix(net));
}
+EXPORT_SYMBOL(udp_ehashfn);
+
+/**
+ * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
+ * @net: Network namespace
+ * @saddr: Source address, network order
+ * @sport: Source port, network order
+ * @daddr: Destination address, network order
+ * @hnum: Destination port, host order
+ * @dif: Destination interface index
+ * @sdif: Destination bridge port index, if relevant
+ * @udptable: Set of UDP hash tables
+ *
+ * Simplified lookup to be used as fallback if no sockets are found due to a
+ * potential race between (receive) address change, and lookup happening before
+ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
+ * result sockets, because if we have one, we don't need the fallback at all.
+ *
+ * Called under rcu_read_lock().
+ *
+ * Return: socket with highest matching score if any, NULL if none
+ */
+static struct sock *udp4_lib_lookup1(const struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum,
+ int dif, int sdif,
+ const struct udp_table *udptable)
+{
+ unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot = &udptable->hash[slot];
+ struct sock *sk, *result = NULL;
+ int score, badness = 0;
+
+ sk_for_each_rcu(sk, &hslot->head) {
+ score = compute_score(sk, net,
+ saddr, sport, daddr, hnum, dif, sdif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ }
+ }
+
+ return result;
+}
/* called with rcu_read_lock() */
-static struct sock *udp4_lib_lookup2(struct net *net,
+static struct sock *udp4_lib_lookup2(const struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum,
int dif, int sdif,
@@ -429,15 +473,21 @@ static struct sock *udp4_lib_lookup2(struct net *net,
{
struct sock *sk, *result;
int score, badness;
+ bool need_rescore;
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ need_rescore = false;
+rescore:
+ score = compute_score(need_rescore ? result : sk, net, saddr,
+ sport, daddr, hnum, dif, sdif);
if (score > badness) {
badness = score;
+ if (need_rescore)
+ continue;
+
if (sk->sk_state == TCP_ESTABLISHED) {
result = sk;
continue;
@@ -458,29 +508,191 @@ static struct sock *udp4_lib_lookup2(struct net *net,
if (IS_ERR(result))
continue;
- badness = compute_score(result, net, saddr, sport,
- daddr, hnum, dif, sdif);
-
+ /* compute_score is too long of a function to be
+ * inlined, and calling it again here yields
+ * measurable overhead for some
+ * workloads. Work around it by jumping
+ * backwards to rescore 'result'.
+ */
+ need_rescore = true;
+ goto rescore;
}
}
return result;
}
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static struct sock *udp4_lib_lookup4(const struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum,
+ int dif, int sdif,
+ struct udp_table *udptable)
+{
+ return NULL;
+}
+
+static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
+ u16 newhash4)
+{
+}
+
+static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+static struct sock *udp4_lib_lookup4(const struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum,
+ int dif, int sdif,
+ struct udp_table *udptable)
+{
+ const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
+ const struct hlist_nulls_node *node;
+ struct udp_hslot *hslot4;
+ unsigned int hash4, slot;
+ struct udp_sock *up;
+ struct sock *sk;
+
+ hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport);
+ slot = hash4 & udptable->mask;
+ hslot4 = &udptable->hash4[slot];
+ INET_ADDR_COOKIE(acookie, saddr, daddr);
+
+begin:
+ /* SLAB_TYPESAFE_BY_RCU not used, so we don't need to touch sk_refcnt */
+ udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
+ sk = (struct sock *)up;
+ if (inet_match(net, sk, acookie, ports, dif, sdif))
+ return sk;
+ }
+
+ /* if the nulls value we got at the end of this lookup is not the
+ * expected one, we must restart lookup. We probably met an item that
+ * was moved to another chain due to rehash.
+ */
+ if (get_nulls_value(node) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/* udp_rehash4() only updates hslot4; the caller is responsible for hash4_cnt. */
+static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
+ u16 newhash4)
+{
+ struct udp_hslot *hslot4, *nhslot4;
+
+ hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);
+ nhslot4 = udp_hashslot4(udptable, newhash4);
+ udp_sk(sk)->udp_lrpa_hash = newhash4;
+
+ if (hslot4 != nhslot4) {
+ spin_lock_bh(&hslot4->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
+ hslot4->count--;
+ spin_unlock_bh(&hslot4->lock);
+
+ spin_lock_bh(&nhslot4->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
+ &nhslot4->nulls_head);
+ nhslot4->count++;
+ spin_unlock_bh(&nhslot4->lock);
+ }
+}
+
+static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
+{
+ struct udp_hslot *hslot2, *hslot4;
+
+ if (udp_hashed4(sk)) {
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);
+
+ spin_lock(&hslot4->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
+ hslot4->count--;
+ spin_unlock(&hslot4->lock);
+
+ spin_lock(&hslot2->lock);
+ udp_hash4_dec(hslot2);
+ spin_unlock(&hslot2->lock);
+ }
+}
+
+void udp_lib_hash4(struct sock *sk, u16 hash)
+{
+ struct udp_hslot *hslot, *hslot2, *hslot4;
+ struct net *net = sock_net(sk);
+ struct udp_table *udptable;
+
+ /* A connected UDP socket can re-connect to another remote address, which
+ * will be handled by rehash. Thus no need to redo hash4 here.
+ */
+ if (udp_hashed4(sk))
+ return;
+
+ udptable = net->ipv4.udp_table;
+ hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ hslot4 = udp_hashslot4(udptable, hash);
+ udp_sk(sk)->udp_lrpa_hash = hash;
+
+ spin_lock_bh(&hslot->lock);
+ if (rcu_access_pointer(sk->sk_reuseport_cb))
+ reuseport_detach_sock(sk);
+
+ spin_lock(&hslot4->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
+ &hslot4->nulls_head);
+ hslot4->count++;
+ spin_unlock(&hslot4->lock);
+
+ spin_lock(&hslot2->lock);
+ udp_hash4_inc(hslot2);
+ spin_unlock(&hslot2->lock);
+
+ spin_unlock_bh(&hslot->lock);
+}
+EXPORT_SYMBOL(udp_lib_hash4);
+
+/* call with sock lock */
+void udp4_hash4(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ unsigned int hash;
+
+ if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY))
+ return;
+
+ hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num,
+ sk->sk_daddr, sk->sk_dport);
+
+ udp_lib_hash4(sk, hash);
+}
+EXPORT_SYMBOL(udp4_hash4);
+#endif /* CONFIG_BASE_SMALL */
+
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
-struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport, int dif,
int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
unsigned short hnum = ntohs(dport);
- unsigned int hash2, slot2;
struct udp_hslot *hslot2;
struct sock *result, *sk;
+ unsigned int hash2;
hash2 = ipv4_portaddr_hash(net, daddr, hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
+
+ if (udp_has_hash4(hslot2)) {
+ result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum,
+ dif, sdif, udptable);
+ if (result) /* udp4_lib_lookup4 returns sk or NULL */
+ return result;
+ }
/* Lookup connected or non-wildcard socket */
result = udp4_lib_lookup2(net, saddr, sport,
@@ -507,12 +719,24 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
/* Lookup wildcard sockets */
hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
result = udp4_lib_lookup2(net, saddr, sport,
htonl(INADDR_ANY), hnum, dif, sdif,
hslot2, skb);
+ if (!IS_ERR_OR_NULL(result))
+ goto done;
+
+ /* Primary hash (destination port) lookup as fallback for this race:
+ * 1. __ip4_datagram_connect() sets sk_rcv_saddr
+ * 2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
+ * 3. rehash operation updating _secondary and four-tuple_ hashes
+ * The primary hash doesn't need an update after 1., so, thanks to this
+ * further step, 1. and 3. don't need to be atomic against the lookup.
+ */
+ result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
+ udptable);
+
done:
if (IS_ERR(result))
return NULL;
@@ -534,7 +758,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
@@ -549,7 +774,7 @@ struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
* Does increment socket refcount.
*/
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
-struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
struct sock *sk;
@@ -584,6 +809,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+EXPORT_SYMBOL(udp_encap_needed_key);
+
+#if IS_ENABLED(CONFIG_IPV6)
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+EXPORT_SYMBOL(udpv6_encap_needed_key);
+#endif
+
void udp_encap_enable(void)
{
static_branch_inc(&udp_encap_needed_key);
@@ -909,9 +1141,9 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize) {
+ if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
kfree_skb(skb);
- return -EINVAL;
+ return -EMSGSIZE;
}
if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
@@ -921,8 +1153,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}
@@ -932,8 +1163,10 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
+
+ /* Don't checksum the payload, skb will get segmented */
+ goto csum_partial;
}
- goto csum_partial;
}
if (is_udplite) /* UDP-Lite */
@@ -1118,16 +1351,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip_cmsg_send(sk, msg, &ipc,
sk->sk_family == AF_INET6);
+ connected = 0;
+ }
if (unlikely(err < 0)) {
kfree(ipc.opt);
return err;
}
if (ipc.opt)
free = 1;
- connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
@@ -1200,7 +1434,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
if (connected)
- rt = (struct rtable *)sk_dst_check(sk, 0);
+ rt = dst_rtable(sk_dst_check(sk, 0));
if (!rt) {
struct net *net = sock_net(sk);
@@ -1494,13 +1728,14 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
struct sk_buff_head *list = &sk->sk_receive_queue;
int rmem, err = -ENOMEM;
spinlock_t *busy = NULL;
- int size;
+ int size, rcvbuf;
- /* try to avoid the costly atomic add/sub pair when the receive
- * queue is full; always allow at least a packet
+ /* Immediately drop when the receive queue is full.
+ * Always allow at least one packet.
*/
rmem = atomic_read(&sk->sk_rmem_alloc);
- if (rmem > sk->sk_rcvbuf)
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ if (rmem > rcvbuf)
goto drop;
/* Under mem pressure, it might be helpful to help udp_recvmsg()
@@ -1509,7 +1744,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
* - Less cache line misses at copyout() time
* - Less work at consume_skb() (less alien page frag freeing)
*/
- if (rmem > (sk->sk_rcvbuf >> 1)) {
+ if (rmem > (rcvbuf >> 1)) {
skb_condense(skb);
busy = busylock_acquire(sk);
@@ -1517,12 +1752,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
size = skb->truesize;
udp_set_dev_scratch(skb);
- /* we drop only if the receive buf is full and the receive
- * queue contains some other skb
- */
- rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
- if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
- goto uncharge_drop;
+ atomic_add(size, &sk->sk_rmem_alloc);
spin_lock(&list->lock);
err = udp_rmem_schedule(sk, size);
@@ -1589,7 +1819,8 @@ int udp_init_sock(struct sock *sk)
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
- sk_peek_offset_bwd(sk, len);
+ if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
+ sk_peek_offset_bwd(sk, len);
if (!skb_unref(skb))
return;
@@ -1908,6 +2139,18 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
EXPORT_SYMBOL(udp_pre_connect);
+static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip4_datagram_connect(sk, uaddr, addr_len);
+ if (!res)
+ udp4_hash4(sk);
+ release_sock(sk);
+ return res;
+}
+
int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
@@ -1967,6 +2210,8 @@ void udp_lib_unhash(struct sock *sk)
hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
+
+ udp_unhash4(udptable, sk);
}
spin_unlock_bh(&hslot->lock);
}
@@ -1976,20 +2221,20 @@ EXPORT_SYMBOL(udp_lib_unhash);
/*
* inet_rcv_saddr was changed, we must rehash secondary hash
*/
-void udp_lib_rehash(struct sock *sk, u16 newhash)
+void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = udp_get_table_prot(sk);
struct udp_hslot *hslot, *hslot2, *nhslot2;
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2 ||
rcu_access_pointer(sk->sk_reuseport_cb)) {
- hslot = udp_hashslot(udptable, sock_net(sk),
- udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
@@ -2010,6 +2255,29 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
spin_unlock_bh(&hslot->lock);
}
+
+ /* Now process hash4 if necessary:
+ * (1) update hslot4;
+ * (2) update hslot2->hash4_cnt.
+ * Note that hslot2/hslot4 should be checked separately, as
+ * either of them may change with the other unchanged.
+ */
+ if (udp_hashed4(sk)) {
+ spin_lock_bh(&hslot->lock);
+
+ udp_rehash4(udptable, sk, newhash4);
+ if (hslot2 != nhslot2) {
+ spin_lock(&hslot2->lock);
+ udp_hash4_dec(hslot2);
+ spin_unlock(&hslot2->lock);
+
+ spin_lock(&nhslot2->lock);
+ udp_hash4_inc(nhslot2);
+ spin_unlock(&nhslot2->lock);
+ }
+
+ spin_unlock_bh(&hslot->lock);
+ }
}
}
EXPORT_SYMBOL(udp_lib_rehash);
@@ -2019,7 +2287,11 @@ void udp_v4_rehash(struct sock *sk)
u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
- udp_lib_rehash(sk, new_hash);
+ u16 new_hash4 = udp_ehashfn(sock_net(sk),
+ sk->sk_rcv_saddr, sk->sk_num,
+ sk->sk_daddr, sk->sk_dport);
+
+ udp_lib_rehash(sk, new_hash, new_hash4);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -2050,8 +2322,8 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
drop_reason = SKB_DROP_REASON_PROTO_MEM;
}
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- kfree_skb_reason(skb, drop_reason);
- trace_udp_fail_queue_rcv_skb(rc, sk);
+ trace_udp_fail_queue_rcv_skb(rc, sk, skb);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -2173,7 +2445,7 @@ csum_error:
drop:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -2207,7 +2479,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old;
if (dst_hold_safe(dst)) {
- old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
+ old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
dst_release(old);
return old != dst;
}
@@ -2241,7 +2513,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
udptable->mask;
hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udptable->hash2[hash2];
+ hslot = &udptable->hash2[hash2].hslot;
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
@@ -2360,7 +2632,7 @@ static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
- struct sock *sk;
+ struct sock *sk = NULL;
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = skb_rtable(skb);
@@ -2437,7 +2709,7 @@ no_sk:
* Hmm. We got an UDP packet to a port to which we
* don't wanna listen. Ignore it.
*/
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
short_packet:
@@ -2462,7 +2734,7 @@ csum_error:
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
}
@@ -2512,14 +2784,13 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
struct udp_table *udptable = net->ipv4.udp_table;
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
unsigned short hnum = ntohs(loc_port);
- unsigned int hash2, slot2;
struct udp_hslot *hslot2;
+ unsigned int hash2;
__portpair ports;
struct sock *sk;
hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
ports = INET_COMBINED_PORTS(rmt_port, hnum);
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
@@ -2569,11 +2840,12 @@ int udp_v4_early_demux(struct sk_buff *skb)
uh->source, iph->saddr, dif, sdif);
}
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return 0;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -2593,7 +2865,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
if (!inet_sk(sk)->inet_daddr && in_dev)
return ip_mc_validate_source(skb, iph->daddr,
iph->saddr,
- iph->tos & IPTOS_RT_MASK,
+ ip4h_dscp(iph),
skb->dev, in_dev, &itag);
}
return 0;
@@ -2688,8 +2960,6 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_XFRM
case UDP_ENCAP_ESPINUDP:
set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk);
- fallthrough;
- case UDP_ENCAP_ESPINUDP_NON_IKE:
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
WRITE_ONCE(up->encap_rcv,
@@ -2792,11 +3062,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case UDP_CORK:
val = udp_test_bit(CORK, sk);
@@ -2914,7 +3184,7 @@ struct proto udp_prot = {
.owner = THIS_MODULE,
.close = udp_lib_close,
.pre_connect = udp_pre_connect,
- .connect = ip4_datagram_connect,
+ .connect = udp_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.init = udp_init_sock,
@@ -3161,7 +3431,7 @@ again:
batch_sks = 0;
for (; state->bucket <= udptable->mask; state->bucket++) {
- struct udp_hslot *hslot2 = &udptable->hash2[state->bucket];
+ struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot;
if (hlist_empty(&hslot2->head))
continue;
@@ -3402,10 +3672,12 @@ __setup("uhash_entries=", set_uhash_entries);
void __init udp_table_init(struct udp_table *table, const char *name)
{
- unsigned int i;
+ unsigned int i, slot_size;
+ slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
+ udp_hash4_slot_size();
table->hash = alloc_large_system_hash(name,
- 2 * sizeof(struct udp_hslot),
+ slot_size,
uhash_entries,
21, /* one slot per 2 MB */
0,
@@ -3414,17 +3686,18 @@ void __init udp_table_init(struct udp_table *table, const char *name)
UDP_HTABLE_SIZE_MIN,
UDP_HTABLE_SIZE_MAX);
- table->hash2 = table->hash + (table->mask + 1);
+ table->hash2 = (void *)(table->hash + (table->mask + 1));
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_HEAD(&table->hash[i].head);
table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
for (i = 0; i <= table->mask; i++) {
- INIT_HLIST_HEAD(&table->hash2[i].head);
- table->hash2[i].count = 0;
- spin_lock_init(&table->hash2[i].lock);
+ INIT_HLIST_HEAD(&table->hash2[i].hslot.head);
+ table->hash2[i].hslot.count = 0;
+ spin_lock_init(&table->hash2[i].hslot.lock);
}
+ udp_table_hash4_init(table);
}
u32 udp_flow_hashrnd(void)
@@ -3450,18 +3723,21 @@ static void __net_init udp_sysctl_init(struct net *net)
static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{
struct udp_table *udptable;
+ unsigned int slot_size;
int i;
udptable = kmalloc(sizeof(*udptable), GFP_KERNEL);
if (!udptable)
goto out;
- udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot),
+ slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
+ udp_hash4_slot_size();
+ udptable->hash = vmalloc_huge(hash_entries * slot_size,
GFP_KERNEL_ACCOUNT);
if (!udptable->hash)
goto free_table;
- udptable->hash2 = udptable->hash + hash_entries;
+ udptable->hash2 = (void *)(udptable->hash + hash_entries);
udptable->mask = hash_entries - 1;
udptable->log = ilog2(hash_entries);
@@ -3470,10 +3746,11 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent
udptable->hash[i].count = 0;
spin_lock_init(&udptable->hash[i].lock);
- INIT_HLIST_HEAD(&udptable->hash2[i].head);
- udptable->hash2[i].count = 0;
- spin_lock_init(&udptable->hash2[i].lock);
+ INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head);
+ udptable->hash2[i].hslot.count = 0;
+ spin_lock_init(&udptable->hash2[i].hslot.lock);
}
+ udp_table_hash4_init(udptable);
return udptable;
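The new hash4 path added above only covers connected UDP sockets: udp_connect() computes the 4-tuple hash via udp4_hash4() once __ip4_datagram_connect() succeeds, and __udp4_lib_lookup() then tries udp4_lib_lookup4() before falling back to the 2-tuple and port-only lookups. Nothing changes for applications; a connected UDP socket is simply one that called connect(), as in this minimal sketch (peer address and port are illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in peer;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(5353);
	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);

	/* connect() fixes the 4-tuple; datagrams from this peer can then be
	 * matched on the receive side without scanning the 2-tuple chain.
	 */
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");

	send(fd, "ping", 4, 0);
	close(fd);
	return 0;
}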
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index dc41a22ee80e..38cb3a28e4ed 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -237,6 +237,7 @@ static int udplite_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler udp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
@@ -260,6 +261,7 @@ static int udplite_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler udplite_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6c95d28d0c4a..ecfca59f31f1 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -278,6 +278,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
if (gso_skb->len <= sizeof(*uh) + mss)
return ERR_PTR(-EINVAL);
+ if (unlikely(skb_checksum_start(gso_skb) !=
+ skb_transport_header(gso_skb) &&
+ !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
+ return ERR_PTR(-EINVAL);
+
+ /* We don't know if the egress device can segment and checksum the packet
+ * when IPv6 extension headers are present. Fall back to software GSO.
+ */
+ if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
+ features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
+
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
@@ -285,20 +296,42 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
return NULL;
}
- if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
- return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+ if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
+ /* Detect modified geometry and pass those to skb_segment. */
+ if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
+ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+
+ /* Setup csum, as fraglist skips this in udp4_gro_receive. */
+ gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
+ gso_skb->csum_offset = offsetof(struct udphdr, check);
+ gso_skb->ip_summed = CHECKSUM_PARTIAL;
+
+ uh = udp_hdr(gso_skb);
+ if (is_ipv6)
+ uh->check = ~udp_v6_check(gso_skb->len,
+ &ipv6_hdr(gso_skb)->saddr,
+ &ipv6_hdr(gso_skb)->daddr, 0);
+ else
+ uh->check = ~udp_v4_check(gso_skb->len,
+ ip_hdr(gso_skb)->saddr,
+ ip_hdr(gso_skb)->daddr, 0);
+ }
skb_pull(gso_skb, sizeof(*uh));
/* clear destructor to avoid skb_segment assigning it to tail */
copy_dtor = gso_skb->destructor == sock_wfree;
- if (copy_dtor)
+ if (copy_dtor) {
gso_skb->destructor = NULL;
+ gso_skb->sk = NULL;
+ }
segs = skb_segment(gso_skb, features);
if (IS_ERR_OR_NULL(segs)) {
- if (copy_dtor)
+ if (copy_dtor) {
gso_skb->destructor = sock_wfree;
+ gso_skb->sk = sk;
+ }
return segs;
}
@@ -357,6 +390,14 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
else
uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+ /* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
+ * meaning. However, the check for bad offloads in the GSO stack expects the
+ * latter, if the checksum was calculated in software. To vouch for the
+ * segment skbs we actually need to set it on the gso_skb.
+ */
+ if (gso_skb->ip_summed == CHECKSUM_NONE)
+ gso_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
/* update refcount for the packet */
if (copy_dtor) {
int delta = sum_truesize - gso_skb->truesize;
@@ -433,32 +474,6 @@ out:
return segs;
}
-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk owenrship - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
@@ -470,6 +485,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *p;
unsigned int ulen;
int ret = 0;
+ int flush;
/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {
@@ -503,13 +519,15 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return p;
}
+ flush = gro_receive_network_flush(uh, uh2, p);
+
/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
* leading to excessive truesize values.
* On len mismatch merge the first packet shorter than gso_size,
* otherwise complete the GRO packet.
*/
- if (ulen > ntohs(uh2->len)) {
+ if (ulen > ntohs(uh2->len) || flush) {
pp = p;
} else {
if (NAPI_GRO_CB(skb)->is_flist) {
@@ -551,11 +569,19 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
unsigned int off = skb_gro_offset(skb);
int flush = 1;
- /* we can do L4 aggregation only if the packet can't land in a tunnel
- * otherwise we could corrupt the inner stream
+ /* We can do L4 aggregation only if the packet can't land in a tunnel,
+ * otherwise we could corrupt the inner stream. Detecting such packets
+ * cannot be foolproof and the aggregation might still happen in some
+ * cases. Such packets should be caught in udp_unexpected_gso later.
*/
NAPI_GRO_CB(skb)->is_flist = 0;
if (!sk || !udp_sk(sk)->gro_receive) {
+ /* If the packet was locally encapsulated in a UDP tunnel that
+ * wasn't detected above, do not GRO.
+ */
+ if (skb->encapsulation)
+ goto out;
+
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
@@ -709,7 +735,8 @@ EXPORT_SYMBOL(udp_gro_complete);
INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
@@ -719,13 +746,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
- skb->csum_level++;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = 0;
- }
+ __skb_incr_checksum_unnecessary(skb);
return 0;
}
@@ -737,15 +758,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
-static const struct net_offload udpv4_offload = {
- .callbacks = {
- .gso_segment = udp4_ufo_fragment,
- .gro_receive = udp4_gro_receive,
- .gro_complete = udp4_gro_complete,
- },
-};
-
int __init udpv4_offload_init(void)
{
- return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
+ net_hotdata.udpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp4_ufo_fragment,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
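The __udp_gso_segment() changes above are exercised when a sender requests UDP GSO, either per socket with the UDP_SEGMENT option or per call with a cmsg. A minimal per-socket sketch; the destination, payload size and gso_size are illustrative:

#include <netinet/in.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif

int main(void)
{
	char payload[1400 * 4];		/* cut into 1400-byte datagrams */
	int gso_size = 1400;
	struct sockaddr_in dst;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* Ask the stack to segment large writes into gso_size datagrams. */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		perror("setsockopt(UDP_SEGMENT)");

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(9);
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	memset(payload, 'x', sizeof(payload));
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}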
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 860aff5f8599..619a53eb672d 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -6,6 +6,7 @@
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
+#include <net/inet_dscp.h>
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
@@ -183,7 +184,8 @@ void udp_tunnel_sock_release(struct socket *sock)
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
- __be16 flags, __be64 tunnel_id, int md_size)
+ const unsigned long *flags,
+ __be64 tunnel_id, int md_size)
{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@@ -199,7 +201,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
info->key.tp_src = udp_hdr(skb)->source;
info->key.tp_dst = udp_hdr(skb)->dest;
if (udp_hdr(skb)->check)
- info->key.tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
@@ -231,7 +233,7 @@ struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
fl4.saddr = key->u.ipv4.src;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
- fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_tos = tos & INET_DSCP_MASK;
fl4.flowi4_flags = key->flow_flags;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c54676998eb6..b5b06323cfd9 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -33,7 +33,7 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
const struct iphdr *iph = ip_hdr(skb);
if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev))
+ ip4h_dscp(iph), skb->dev))
goto drop;
}
@@ -58,12 +58,16 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
return -iph->protocol;
#endif
- __skb_push(skb, skb->data - skb_network_header(skb));
+ __skb_push(skb, -skb_network_offset(skb));
iph->tot_len = htons(skb->len);
ip_send_check(iph);
if (xo && (xo->flags & XFRM_GRO)) {
- skb_mac_header_rebuild(skb);
+ /* The full l2 header needs to be preserved so that re-injecting the packet at l2
+ * works correctly in the presence of vlan tags.
+ */
+ skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
+ skb_reset_network_header(skb);
skb_reset_transport_header(skb);
return 0;
}
@@ -113,19 +117,6 @@ static int __xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull
/* Must be an IKE packet.. pass it through */
return 1;
break;
- case UDP_ENCAP_ESPINUDP_NON_IKE:
- /* Check if this is a keepalive packet. If so, eat it. */
- if (len == 1 && udpdata[0] == 0xff) {
- return -EINVAL;
- } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
- udpdata32[0] == 0 && udpdata32[1] == 0) {
-
- /* ESP Packet with Non-IKE marker */
- len = sizeof(struct udphdr) + 2 * sizeof(u32);
- } else
- /* Must be an IKE packet.. pass it through */
- return 1;
- break;
}
/* At this point we are sure that this is an ESPinUDP packet,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index c33bca2c3841..7fb6205619e7 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -14,50 +14,47 @@
#include <linux/inetdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
+#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/l3mdev.h>
-static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
- int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- u32 mark)
+static struct dst_entry *__xfrm4_dst_lookup(struct flowi4 *fl4,
+ const struct xfrm_dst_lookup_params *params)
{
struct rtable *rt;
memset(fl4, 0, sizeof(*fl4));
- fl4->daddr = daddr->a4;
- fl4->flowi4_tos = tos;
- fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
- fl4->flowi4_mark = mark;
- if (saddr)
- fl4->saddr = saddr->a4;
-
- rt = __ip_route_output_key(net, fl4);
+ fl4->daddr = params->daddr->a4;
+ fl4->flowi4_tos = inet_dscp_to_dsfield(params->dscp);
+ fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(params->net,
+ params->oif);
+ fl4->flowi4_mark = params->mark;
+ if (params->saddr)
+ fl4->saddr = params->saddr->a4;
+ fl4->flowi4_proto = params->ipproto;
+ fl4->uli = params->uli;
+
+ rt = __ip_route_output_key(params->net, fl4);
if (!IS_ERR(rt))
return &rt->dst;
return ERR_CAST(rt);
}
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- u32 mark)
+static struct dst_entry *xfrm4_dst_lookup(const struct xfrm_dst_lookup_params *params)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
+ return __xfrm4_dst_lookup(&fl4, params);
}
-static int xfrm4_get_saddr(struct net *net, int oif,
- xfrm_address_t *saddr, xfrm_address_t *daddr,
- u32 mark)
+static int xfrm4_get_saddr(xfrm_address_t *saddr,
+ const struct xfrm_dst_lookup_params *params)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
+ dst = __xfrm4_dst_lookup(&fl4, params);
if (IS_ERR(dst))
return -EHOSTUNREACH;
@@ -69,7 +66,7 @@ static int xfrm4_get_saddr(struct net *net, int oif,
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
- struct rtable *rt = (struct rtable *)xdst->route;
+ struct rtable *rt = dst_rtable(xdst->route);
const struct flowi4 *fl4 = &fl->u.ip4;
xdst->u.rt.rt_iif = fl4->flowi4_iif;
@@ -152,7 +149,6 @@ static struct ctl_table xfrm4_policy_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
static __net_init int xfrm4_net_sysctl_init(struct net *net)
@@ -186,7 +182,7 @@ err_alloc:
static __net_exit void xfrm4_net_sysctl_exit(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
if (!net->ipv4.xfrm4_hdr)
return;
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index b146ce88c5d0..4ee624d8e66f 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -76,7 +76,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
const struct iphdr *iph = ip_hdr(skb);
if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev))
+ ip4h_dscp(iph), skb->dev))
goto drop;
}
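Both xfrm input hunks above now feed ip4h_dscp(iph) rather than the raw ToS byte into ip_route_input_noref(), i.e. only the upper six DSCP bits are used and the two ECN bits are ignored for routing. A tiny stand-alone illustration of that masking (0xfc matching INET_DSCP_MASK in include/net/inet_dscp.h):

#include <stdio.h>

int main(void)
{
	unsigned char tos = 0xb9;		/* example ToS: DSCP 46 (EF), ECN 01 */
	unsigned char dscp_field = tos & 0xfc;	/* what ip4h_dscp() keeps           */

	printf("tos=0x%02x dscp field=0x%02x (DSCP %u)\n",
	       tos, dscp_field, dscp_field >> 2);
	return 0;
}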
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 08d4b7132d4c..1c9c686d9522 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL
bool "IPv6: RPL Source Routing Header support"
depends on IPV6
select LWTUNNEL
+ select DST_CACHE
help
Support for RFC6554 RPL Source Routing Header using the lightweight
tunnels mechanism.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 055230b669cf..ac8cc1076536 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -63,6 +63,7 @@
#include <linux/string.h>
#include <linux/hash.h>
+#include <net/ip_tunnels.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -84,15 +85,13 @@
#include <linux/netconf.h>
#include <linux/random.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/ioam6.h>
-#define INFINITY_LIFE_TIME 0xFFFFFFFF
-
#define IPV6_MAX_STRLEN \
sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
@@ -195,6 +194,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -237,6 +237,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
+ .ra_honor_pio_pflag = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -299,6 +301,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
+ .ra_honor_pio_pflag = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -549,7 +552,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto out;
if ((all || type == NETCONFA_FORWARDING) &&
- nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
+ nla_put_s32(skb, NETCONFA_FORWARDING,
+ READ_ONCE(devconf->forwarding)) < 0)
goto nla_put_failure;
#ifdef CONFIG_IPV6_MROUTE
if ((all || type == NETCONFA_MC_FORWARDING) &&
@@ -558,12 +562,13 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto nla_put_failure;
#endif
if ((all || type == NETCONFA_PROXY_NEIGH) &&
- nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
+ READ_ONCE(devconf->proxy_ndp)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- devconf->ignore_routes_with_linkdown) < 0)
+ READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
goto nla_put_failure;
out:
@@ -713,7 +718,7 @@ errout:
static u32 inet6_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -723,17 +728,18 @@ static u32 inet6_base_seq(const struct net *net)
return res;
}
-
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -750,64 +756,46 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet6_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (inet6_netconf_fill_devconf(skb, dev->ifindex,
- &idev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_netconf_fill_devconf(skb, dev->ifindex,
+ &idev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF,
+ NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv6.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv6.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -864,27 +852,27 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
struct inet6_dev *idev;
for_each_netdev(net, dev) {
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
- idev->cnf.forwarding = newf;
+
+ WRITE_ONCE(idev->cnf.forwarding, newf);
if (changed)
dev_forward_change(idev);
}
}
}
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
+static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
- if (!rtnl_trylock())
+ if (!rtnl_net_trylock(net))
return restart_syscall();
- net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->forwarding) {
if ((!newf) ^ (!old))
@@ -892,14 +880,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
NETCONFA_FORWARDING,
NETCONFA_IFINDEX_DEFAULT,
net->ipv6.devconf_dflt);
- rtnl_unlock();
+ rtnl_net_unlock(net);
return 0;
}
if (p == &net->ipv6.devconf_all->forwarding) {
int old_dflt = net->ipv6.devconf_dflt->forwarding;
- net->ipv6.devconf_dflt->forwarding = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
if ((!newf) ^ (!old_dflt))
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_FORWARDING,
@@ -914,7 +902,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
net->ipv6.devconf_all);
} else if ((!newf) ^ (!old))
dev_forward_change((struct inet6_dev *)table->extra1);
- rtnl_unlock();
+ rtnl_net_unlock(net);
if (newf)
rt6_purge_dflt_routers(net);
@@ -927,11 +915,11 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
struct inet6_dev *idev;
for_each_netdev(net, dev) {
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (idev) {
int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
- idev->cnf.ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
if (changed)
inet6_netconf_notify_devconf(dev_net(dev),
RTM_NEWNETCONF,
@@ -942,17 +930,16 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
}
}
-static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
- if (!rtnl_trylock())
+ if (!rtnl_net_trylock(net))
return restart_syscall();
- net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
if ((!newf) ^ (!old))
@@ -961,12 +948,12 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
NETCONFA_IFINDEX_DEFAULT,
net->ipv6.devconf_dflt);
- rtnl_unlock();
+ rtnl_net_unlock(net);
return 0;
}
if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
- net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
addrconf_linkdown_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net,
@@ -975,7 +962,8 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all);
}
- rtnl_unlock();
+
+ rtnl_net_unlock(net);
return 1;
}
@@ -1027,7 +1015,7 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
{
- u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+ u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net));
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
}
@@ -1270,6 +1258,7 @@ static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
bool del_rt, bool del_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
@@ -1279,8 +1268,15 @@ cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
if (del_rt)
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
else {
- if (!(f6i->fib6_flags & RTF_EXPIRES))
+ if (!(f6i->fib6_flags & RTF_EXPIRES)) {
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
fib6_info_release(f6i);
}
}
@@ -1346,12 +1342,21 @@ out:
in6_ifa_put(ifp);
}
+static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
+{
+ return READ_ONCE(idev->cnf.regen_min_advance) +
+ READ_ONCE(idev->cnf.regen_max_retry) *
+ READ_ONCE(idev->cnf.dad_transmits) *
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+}
+
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
{
struct inet6_dev *idev = ifp->idev;
unsigned long tmp_tstamp, age;
unsigned long regen_advance;
unsigned long now = jiffies;
+ u32 if_public_preferred_lft;
s32 cnf_temp_preferred_lft;
struct inet6_ifaddr *ift;
struct ifa6_config cfg;
@@ -1363,7 +1368,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
retry:
in6_dev_hold(idev);
- if (idev->cnf.use_tempaddr <= 0) {
+ if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
write_unlock_bh(&idev->lock);
pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
@@ -1371,8 +1376,8 @@ retry:
goto out;
}
spin_lock_bh(&ifp->lock);
- if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
- idev->cnf.use_tempaddr = -1; /*XXX*/
+ if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
+ WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock_bh(&idev->lock);
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
@@ -1387,16 +1392,14 @@ retry:
age = (now - ifp->tstamp) / HZ;
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ regen_advance = ipv6_get_regen_advance(idev);
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
max_desync_factor = min_t(long,
- idev->cnf.max_desync_factor,
+ READ_ONCE(idev->cnf.max_desync_factor),
cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
@@ -1409,11 +1412,13 @@ retry:
}
}
+ if_public_preferred_lft = ifp->prefered_lft;
+
memset(&cfg, 0, sizeof(cfg));
cfg.valid_lft = min_t(__u32, ifp->valid_lft,
- idev->cnf.temp_valid_lft + age);
+ READ_ONCE(idev->cnf.temp_valid_lft) + age);
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
- cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
+ cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
cfg.plen = ifp->prefix_len;
@@ -1422,19 +1427,41 @@ retry:
write_unlock_bh(&idev->lock);
- /* A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In particular,
- * an implementation must not create a temporary address with a zero
- * Preferred Lifetime.
+ /* From RFC 4941:
+ *
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In
+ * particular, an implementation must not create a temporary address
+ * with a zero Preferred Lifetime.
+ *
+ * ...
+ *
+ * When creating a temporary address, the lifetime values MUST be
+ * derived from the corresponding prefix as follows:
+ *
+ * ...
+ *
+ * * Its Preferred Lifetime is the lower of the Preferred Lifetime
+ * of the public address or TEMP_PREFERRED_LIFETIME -
+ * DESYNC_FACTOR.
+ *
+ * To comply with the RFC's requirements, clamp the preferred lifetime
+ * to a minimum of regen_advance, unless that would exceed valid_lft or
+ * ifp->prefered_lft.
+ *
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (cfg.preferred_lft <= regen_advance + age) {
- in6_ifa_put(ifp);
- in6_dev_put(idev);
- ret = -1;
- goto out;
+ cfg.preferred_lft = regen_advance + age + 1;
+ if (cfg.preferred_lft > cfg.valid_lft ||
+ cfg.preferred_lft > if_public_preferred_lft) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+ goto out;
+ }
}
cfg.ifa_flags = IFA_F_TEMPORARY;
@@ -1513,15 +1540,17 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
-static bool ipv6_use_optimistic_addr(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
- if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+ if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
+ !READ_ONCE(idev->cnf.use_optimistic))
return false;
return true;
@@ -1530,13 +1559,14 @@ static bool ipv6_use_optimistic_addr(struct net *net,
#endif
}
-static bool ipv6_allow_optimistic_dad(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_allow_optimistic_dad(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
return true;
@@ -1642,7 +1672,7 @@ static int ipv6_get_saddr_eval(struct net *net,
*/
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
- score->ifa->idev->cnf.use_tempaddr >= 2;
+ READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
break;
}
@@ -1818,7 +1848,7 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
idev = __in6_dev_get(dst_dev);
if ((dst_type & IPV6_ADDR_MULTICAST) ||
dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
- (idev && idev->cnf.use_oif_addrs_only)) {
+ (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
use_oif_addr = true;
}
}
@@ -1842,7 +1872,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
master, &dst,
scores, hiscore_idx);
- if (scores[hiscore_idx].ifa)
+ if (scores[hiscore_idx].ifa &&
+ scores[hiscore_idx].scopedist >= 0)
goto out;
}
@@ -2061,9 +2092,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (!dev || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
- result = ifp;
- in6_ifa_hold(ifp);
- break;
+ if (in6_ifa_hold_safe(ifp)) {
+ result = ifp;
+ break;
+ }
}
}
}
@@ -2125,6 +2157,7 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net *net = dev_net(idev->dev);
+ int max_addresses;
if (addrconf_dad_end(ifp)) {
in6_ifa_put(ifp);
@@ -2162,9 +2195,9 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
spin_unlock_bh(&ifp->lock);
- if (idev->cnf.max_addresses &&
- ipv6_count_addresses(idev) >=
- idev->cnf.max_addresses)
+ max_addresses = READ_ONCE(idev->cnf.max_addresses);
+ if (max_addresses &&
+ ipv6_count_addresses(idev) >= max_addresses)
goto lock_errdad;
net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
@@ -2536,6 +2569,24 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
return idev;
}
+static void delete_tempaddrs(struct inet6_dev *idev,
+ struct inet6_ifaddr *ifp)
+{
+ struct inet6_ifaddr *ift, *tmp;
+
+ write_lock_bh(&idev->lock);
+ list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) {
+ if (ift->ifpub != ifp)
+ continue;
+
+ in6_ifa_hold(ift);
+ write_unlock_bh(&idev->lock);
+ ipv6_del_addr(ift);
+ write_lock_bh(&idev->lock);
+ }
+ write_unlock_bh(&idev->lock);
+}
+
static void manage_tempaddrs(struct inet6_dev *idev,
struct inet6_ifaddr *ifp,
__u32 valid_lft, __u32 prefered_lft,
@@ -2561,11 +2612,11 @@ static void manage_tempaddrs(struct inet6_dev *idev,
* (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
*/
age = (now - ift->cstamp) / HZ;
- max_valid = idev->cnf.temp_valid_lft - age;
+ max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
if (max_valid < 0)
max_valid = 0;
- max_prefered = idev->cnf.temp_prefered_lft -
+ max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -2598,7 +2649,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
create = true;
- if (create && idev->cnf.use_tempaddr > 0) {
+ if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
*/
@@ -2626,7 +2677,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int create = 0, update_lft = 0;
if (!ifp && valid_lft) {
- int max_addresses = in6_dev->cnf.max_addresses;
+ int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
struct ifa6_config cfg = {
.pfx = addr,
.plen = pinfo->prefix_len,
@@ -2638,8 +2689,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
};
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((net->ipv6.devconf_all->optimistic_dad ||
- in6_dev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
!net->ipv6.devconf_all->forwarding && sllao)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -2688,7 +2739,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
*/
update_lft = !create && stored_lft;
- if (update_lft && !in6_dev->cnf.ra_honor_pio_life) {
+ if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
@@ -2697,7 +2748,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
if (update_lft) {
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
- ifp->tstamp = now;
+ WRITE_ONCE(ifp->tstamp, now);
flags = ifp->flags;
ifp->flags &= ~IFA_F_DEPRECATED;
spin_unlock_bh(&ifp->lock);
@@ -2721,12 +2772,14 @@ EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
+ struct fib6_table *table;
__u32 valid_lft;
__u32 prefered_lft;
int addr_type, err;
u32 addr_flags = 0;
struct inet6_dev *in6_dev;
struct net *net = dev_net(dev);
+ bool ignore_autoconf = false;
pinfo = (struct prefix_info *) opt;
@@ -2797,11 +2850,20 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
if (valid_lft == 0) {
ip6_del_rt(net, rt, false);
rt = NULL;
- } else if (addrconf_finite_timeout(rt_expires)) {
- /* not infinity */
- fib6_set_expires(rt, jiffies + rt_expires);
} else {
- fib6_clean_expires(rt);
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (addrconf_finite_timeout(rt_expires)) {
+ /* not infinity */
+ fib6_set_expires(rt, jiffies + rt_expires);
+ fib6_add_gc_list(rt);
+ } else {
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -2820,7 +2882,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
/* Try to figure out our local address for this prefix */
- if (pinfo->autoconf && in6_dev->cnf.autoconf) {
+ ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
+ if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
struct in6_addr addr;
bool tokenized = false, dev_addr_generated = false;
@@ -2876,7 +2939,7 @@ put:
static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
struct in6_ifreq *ireq)
{
- struct ip_tunnel_parm p = { };
+ struct ip_tunnel_parm_kern p = { };
int err;
if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
@@ -2916,11 +2979,11 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
return -EFAULT;
- rtnl_lock();
+ rtnl_net_lock(net);
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
if (dev && dev->type == ARPHRD_SIT)
err = addrconf_set_sit_dstaddr(net, dev, &ireq);
- rtnl_unlock();
+ rtnl_net_unlock(net);
return err;
}
@@ -2944,39 +3007,25 @@ static int ipv6_mc_config(struct sock *sk, bool join,
/*
* Manual configuration of address on an interface
*/
-static int inet6_addr_add(struct net *net, int ifindex,
- struct ifa6_config *cfg,
+static int inet6_addr_add(struct net *net, struct net_device *dev,
+ struct ifa6_config *cfg, clock_t expires, u32 flags,
struct netlink_ext_ack *extack)
{
struct inet6_ifaddr *ifp;
struct inet6_dev *idev;
- struct net_device *dev;
- unsigned long timeout;
- clock_t expires;
- u32 flags;
- ASSERT_RTNL();
+ ASSERT_RTNL_NET(net);
if (cfg->plen > 128) {
NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
return -EINVAL;
}
- /* check the lifetime */
- if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft) {
- NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid");
- return -EINVAL;
- }
-
if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) {
NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64");
return -EINVAL;
}
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
- return -ENODEV;
-
idev = addrconf_add_dev(dev);
if (IS_ERR(idev)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
@@ -2985,7 +3034,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
- true, cfg->pfx, ifindex);
+ true, cfg->pfx, dev->ifindex);
if (ret < 0) {
NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed");
@@ -2995,24 +3044,6 @@ static int inet6_addr_add(struct net *net, int ifindex,
cfg->scope = ipv6_addr_scope(cfg->pfx);
- timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
- if (addrconf_finite_timeout(timeout)) {
- expires = jiffies_to_clock_t(timeout * HZ);
- cfg->valid_lft = timeout;
- flags = RTF_EXPIRES;
- } else {
- expires = 0;
- flags = 0;
- cfg->ifa_flags |= IFA_F_PERMANENT;
- }
-
- timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
- if (addrconf_finite_timeout(timeout)) {
- if (timeout == 0)
- cfg->ifa_flags |= IFA_F_DEPRECATED;
- cfg->preferred_lft = timeout;
- }
-
ifp = ipv6_add_addr(idev, cfg, true, extack);
if (!IS_ERR(ifp)) {
if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
@@ -3040,7 +3071,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
return 0;
} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
- cfg->pfx, ifindex);
+ cfg->pfx, dev->ifindex);
}
return PTR_ERR(ifp);
@@ -3065,7 +3096,7 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
return -ENODEV;
}
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (!idev) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
return -ENXIO;
@@ -3078,11 +3109,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
in6_ifa_hold(ifp);
read_unlock_bh(&idev->lock);
- if (!(ifp->flags & IFA_F_TEMPORARY) &&
- (ifa_flags & IFA_F_MANAGETEMPADDR))
- manage_tempaddrs(idev, ifp, 0, 0, false,
- jiffies);
ipv6_del_addr(ifp);
+
+ if (!(ifp->flags & IFA_F_TEMPORARY) &&
+ (ifp->flags & IFA_F_MANAGETEMPADDR))
+ delete_tempaddrs(idev, ifp);
+
addrconf_verify_rtnl(net);
if (ipv6_addr_is_multicast(pfx)) {
ipv6_mc_config(net->ipv6.mc_autojoin_sk,
@@ -3105,6 +3137,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
.preferred_lft = INFINITY_LIFE_TIME,
.valid_lft = INFINITY_LIFE_TIME,
};
+ struct net_device *dev;
struct in6_ifreq ireq;
int err;
@@ -3117,9 +3150,13 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
cfg.pfx = &ireq.ifr6_addr;
cfg.plen = ireq.ifr6_prefixlen;
- rtnl_lock();
- err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
- rtnl_unlock();
+ rtnl_net_lock(net);
+ dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
+ if (dev)
+ err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
+ else
+ err = -ENODEV;
+ rtnl_net_unlock(net);
return err;
}
@@ -3134,10 +3171,10 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
return -EFAULT;
- rtnl_lock();
+ rtnl_net_lock(net);
err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
ireq.ifr6_prefixlen, NULL);
- rtnl_unlock();
+ rtnl_net_unlock(net);
return err;
}
@@ -3262,8 +3299,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
struct inet6_ifaddr *ifp;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
- idev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(idev->cnf.optimistic_dad)) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3442,7 +3479,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_RANDOM);
addrconf_addr_gen(idev, false);
}
@@ -3620,7 +3658,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
break;
}
@@ -3712,9 +3750,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev->cnf.mtu6 != dev->mtu &&
dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
/*
@@ -3834,10 +3872,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
*/
if (!unregister && !idev->cnf.disable_ipv6) {
/* aggregate the system setting and interface setting */
- int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
+ int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
if (!_keep_addr)
- _keep_addr = idev->cnf.keep_addr_on_down;
+ _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
keep_addr = (_keep_addr > 0);
}
@@ -3956,7 +3994,7 @@ restart:
ipv6_mc_down(idev);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
idev->ra_mtu = 0;
/* Last: Shot the device (if unregistered) */
@@ -3974,6 +4012,7 @@ static void addrconf_rs_timer(struct timer_list *t)
struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
+ int rtr_solicits;
write_lock(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY))
@@ -3986,7 +4025,9 @@ static void addrconf_rs_timer(struct timer_list *t)
if (idev->if_flags & IF_RA_RCVD)
goto out;
- if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
+ rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
+
+ if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
write_unlock(&idev->lock);
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
ndisc_send_rs(dev, &lladdr,
@@ -3996,11 +4037,12 @@ static void addrconf_rs_timer(struct timer_list *t)
write_lock(&idev->lock);
idev->rs_interval = rfc3315_s14_backoff_update(
- idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
+ idev->rs_interval,
+ READ_ONCE(idev->cnf.rtr_solicit_max_interval));
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
- idev->cnf.rtr_solicits) ?
- idev->cnf.rtr_solicit_delay :
+ READ_ONCE(idev->cnf.rtr_solicits)) ?
+ READ_ONCE(idev->cnf.rtr_solicit_delay) :
idev->rs_interval);
} else {
/*
@@ -4021,24 +4063,25 @@ put:
*/
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
{
- unsigned long rand_num;
struct inet6_dev *idev = ifp->idev;
+ unsigned long rand_num;
u64 nonce;
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
+ rand_num = get_random_u32_below(
+ READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
nonce = 0;
- if (idev->cnf.enhanced_dad ||
- dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
+ if (READ_ONCE(idev->cnf.enhanced_dad) ||
+ READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
do
get_random_bytes(&nonce, 6);
while (nonce == 0);
}
ifp->dad_nonce = nonce;
- ifp->dad_probes = idev->cnf.dad_transmits;
+ ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
addrconf_mod_dad_work(ifp, rand_num);
}
@@ -4058,8 +4101,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
net = dev_net(dev);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- (net->ipv6.devconf_all->accept_dad < 1 &&
- idev->cnf.accept_dad < 1) ||
+ (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
+ READ_ONCE(idev->cnf.accept_dad) < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bool send_na = false;
@@ -4134,6 +4177,7 @@ static void addrconf_dad_work(struct work_struct *w)
struct inet6_dev *idev = ifp->idev;
bool bump_id, disable_ipv6 = false;
struct in6_addr mcaddr;
+ struct net *net;
enum {
DAD_PROCESS,
@@ -4141,7 +4185,9 @@ static void addrconf_dad_work(struct work_struct *w)
DAD_ABORT,
} action = DAD_PROCESS;
- rtnl_lock();
+ net = dev_net(idev->dev);
+
+ rtnl_net_lock(net);
spin_lock_bh(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
@@ -4151,8 +4197,8 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
- if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
- idev->cnf.accept_dad > 1) &&
+ if ((READ_ONCE(net->ipv6.devconf_all->accept_dad) > 1 ||
+ READ_ONCE(idev->cnf.accept_dad) > 1) &&
!idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
@@ -4163,7 +4209,7 @@ static void addrconf_dad_work(struct work_struct *w)
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC */
- idev->cnf.disable_ipv6 = 1;
+ WRITE_ONCE(idev->cnf.disable_ipv6, 1);
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
@@ -4233,7 +4279,7 @@ static void addrconf_dad_work(struct work_struct *w)
ifp->dad_nonce);
out:
in6_ifa_put(ifp);
- rtnl_unlock();
+ rtnl_net_unlock(net);
}
/* ifp->idev must be at least read locked */
@@ -4277,7 +4323,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits != 0 &&
+ READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
(dev->flags & IFF_LOOPBACK) == 0 &&
(dev->type != ARPHRD_TUNNEL) &&
!netif_is_team_port(dev);
@@ -4291,8 +4337,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
/* send unsolicited NA if enabled */
if (send_na &&
- (ifp->idev->cnf.ndisc_notify ||
- dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
+ READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
/*router=*/ !!ifp->idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
@@ -4312,7 +4358,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
ifp->idev->rs_interval = rfc3315_s14_backoff_init(
- ifp->idev->cnf.rtr_solicit_interval);
+ READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
ifp->idev->rs_probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
@@ -4592,9 +4638,7 @@ restart:
!ifp->regen_count && ifp->ifpub) {
/* This is a non-regenerated temporary addr. */
- unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
if (age + regen_advance >= ifp->prefered_lft) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
@@ -4683,9 +4727,9 @@ static void addrconf_verify_work(struct work_struct *w)
struct net *net = container_of(to_delayed_work(w), struct net,
ipv6.addr_chk_work);
- rtnl_lock();
+ rtnl_net_lock(net);
addrconf_verify_rtnl(net);
- rtnl_unlock();
+ rtnl_net_unlock(net);
}
static void addrconf_verify(struct net *net)
@@ -4743,19 +4787,24 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!pfx)
return -EINVAL;
- ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
+ ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
/* We ignore other flags so far. */
ifa_flags &= IFA_F_MANAGETEMPADDR;
- return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
- ifm->ifa_prefixlen, extack);
+ rtnl_net_lock(net);
+ err = inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
+ ifm->ifa_prefixlen, extack);
+ rtnl_net_unlock(net);
+
+ return err;
}
-static int modify_prefix_route(struct inet6_ifaddr *ifp,
+static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags,
bool modify_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
u32 prio;
@@ -4775,32 +4824,36 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
- } else {
- if (!expires)
+ return 0;
+ }
+ if (f6i != net->ipv6.fib6_null_entry) {
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!(flags & RTF_EXPIRES)) {
fib6_clean_expires(f6i);
- else
+ fib6_remove_gc_list(f6i);
+ } else {
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+ }
- fib6_info_release(f6i);
+ spin_unlock_bh(&table->tb6_lock);
}
+ fib6_info_release(f6i);
return 0;
}
static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
- struct ifa6_config *cfg)
+ struct ifa6_config *cfg, clock_t expires,
+ u32 flags)
{
- u32 flags;
- clock_t expires;
- unsigned long timeout;
bool was_managetempaddr;
- bool had_prefixroute;
bool new_peer = false;
+ bool had_prefixroute;
- ASSERT_RTNL();
-
- if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
- return -EINVAL;
+ ASSERT_RTNL_NET(net);
if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
(ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
@@ -4809,24 +4862,6 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
- timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
- if (addrconf_finite_timeout(timeout)) {
- expires = jiffies_to_clock_t(timeout * HZ);
- cfg->valid_lft = timeout;
- flags = RTF_EXPIRES;
- } else {
- expires = 0;
- flags = 0;
- cfg->ifa_flags |= IFA_F_PERMANENT;
- }
-
- timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
- if (addrconf_finite_timeout(timeout)) {
- if (timeout == 0)
- cfg->ifa_flags |= IFA_F_DEPRECATED;
- cfg->preferred_lft = timeout;
- }
-
if (cfg->peer_pfx &&
memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
if (!ipv6_addr_any(&ifp->peer_addr))
@@ -4842,13 +4877,13 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
IFA_F_NOPREFIXROUTE);
ifp->flags |= cfg->ifa_flags;
- ifp->tstamp = jiffies;
- ifp->valid_lft = cfg->valid_lft;
- ifp->prefered_lft = cfg->preferred_lft;
- ifp->ifa_proto = cfg->ifa_proto;
+ WRITE_ONCE(ifp->tstamp, jiffies);
+ WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
+ WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
+ WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
- ifp->rt_priority = cfg->rt_priority;
+ WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
if (new_peer)
ifp->peer_addr = *cfg->peer_pfx;
@@ -4861,7 +4896,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
int rc = -ENOENT;
if (had_prefixroute)
- rc = modify_prefix_route(ifp, expires, flags, false);
+ rc = modify_prefix_route(net, ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */
if (rc == -ENOENT) {
@@ -4871,7 +4906,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
}
if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
- rc = modify_prefix_route(ifp, expires, flags, true);
+ rc = modify_prefix_route(net, ifp, expires, flags, true);
if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
@@ -4893,14 +4928,12 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
}
if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
- if (was_managetempaddr &&
- !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
- cfg->valid_lft = 0;
- cfg->preferred_lft = 0;
- }
- manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
- cfg->preferred_lft, !was_managetempaddr,
- jiffies);
+ if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
+ delete_tempaddrs(ifp->idev, ifp);
+ else
+ manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
+ cfg->preferred_lft, !was_managetempaddr,
+ jiffies);
}
addrconf_verify_rtnl(net);
@@ -4913,13 +4946,16 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
- struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
struct in6_addr *peer_pfx;
struct inet6_ifaddr *ifa;
struct net_device *dev;
struct inet6_dev *idev;
struct ifa6_config cfg;
+ struct ifaddrmsg *ifm;
+ unsigned long timeout;
+ clock_t expires;
+ u32 flags;
int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
@@ -4942,8 +4978,18 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[IFA_PROTO])
cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
+ cfg.ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
+
+ /* We ignore other flags so far. */
+ cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
+ IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
+ IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
+
+ cfg.ifa_flags |= IFA_F_PERMANENT;
cfg.valid_lft = INFINITY_LIFE_TIME;
cfg.preferred_lft = INFINITY_LIFE_TIME;
+ expires = 0;
+ flags = 0;
if (tb[IFA_CACHEINFO]) {
struct ifa_cacheinfo *ci;
@@ -4951,27 +4997,43 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
ci = nla_data(tb[IFA_CACHEINFO]);
cfg.valid_lft = ci->ifa_valid;
cfg.preferred_lft = ci->ifa_prefered;
+
+ if (!cfg.valid_lft || cfg.preferred_lft > cfg.valid_lft) {
+ NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid");
+ return -EINVAL;
+ }
+
+ timeout = addrconf_timeout_fixup(cfg.valid_lft, HZ);
+ if (addrconf_finite_timeout(timeout)) {
+ cfg.ifa_flags &= ~IFA_F_PERMANENT;
+ cfg.valid_lft = timeout;
+ expires = jiffies_to_clock_t(timeout * HZ);
+ flags = RTF_EXPIRES;
+ }
+
+ timeout = addrconf_timeout_fixup(cfg.preferred_lft, HZ);
+ if (addrconf_finite_timeout(timeout)) {
+ if (timeout == 0)
+ cfg.ifa_flags |= IFA_F_DEPRECATED;
+
+ cfg.preferred_lft = timeout;
+ }
}
+ rtnl_net_lock(net);
+
dev = __dev_get_by_index(net, ifm->ifa_index);
if (!dev) {
NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
- return -ENODEV;
+ err = -ENODEV;
+ goto unlock;
}
- if (tb[IFA_FLAGS])
- cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
- else
- cfg.ifa_flags = ifm->ifa_flags;
-
- /* We ignore other flags so far. */
- cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
- IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
- IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
-
idev = ipv6_find_idev(dev);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
+ if (IS_ERR(idev)) {
+ err = PTR_ERR(idev);
+ goto unlock;
+ }
if (!ipv6_allow_optimistic_dad(net, idev))
cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
@@ -4979,7 +5041,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
if (cfg.ifa_flags & IFA_F_NODAD &&
cfg.ifa_flags & IFA_F_OPTIMISTIC) {
NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
@@ -4988,7 +5051,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
- return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
+ err = inet6_addr_add(net, dev, &cfg, expires, flags, extack);
+ goto unlock;
}
if (nlh->nlmsg_flags & NLM_F_EXCL ||
@@ -4996,10 +5060,12 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
NL_SET_ERR_MSG_MOD(extack, "address already assigned");
err = -EEXIST;
} else {
- err = inet6_addr_modify(net, ifa, &cfg);
+ err = inet6_addr_modify(net, ifa, &cfg, expires, flags);
}
in6_ifa_put(ifa);
+unlock:
+ rtnl_net_unlock(net);
return err;
}
@@ -5053,33 +5119,21 @@ static inline int inet6_ifaddr_msgsize(void)
+ nla_total_size(4) /* IFA_RT_PRIORITY */;
}
-enum addr_type_t {
- UNICAST_ADDR,
- MULTICAST_ADDR,
- ANYCAST_ADDR,
-};
-
-struct inet6_fill_args {
- u32 portid;
- u32 seq;
- int event;
- unsigned int flags;
- int netnsid;
- int ifindex;
- enum addr_type_t type;
-};
-
-static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
+static int inet6_fill_ifaddr(struct sk_buff *skb,
+ const struct inet6_ifaddr *ifa,
struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
+ struct nlmsghdr *nlh;
u32 preferred, valid;
+ u32 flags, priority;
+ u8 proto;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ flags = READ_ONCE(ifa->flags);
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
@@ -5087,13 +5141,14 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto error;
- spin_lock_bh(&ifa->lock);
- if (!((ifa->flags&IFA_F_PERMANENT) &&
- (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
- preferred = ifa->prefered_lft;
- valid = ifa->valid_lft;
+ preferred = READ_ONCE(ifa->prefered_lft);
+ valid = READ_ONCE(ifa->valid_lft);
+
+ if (!((flags & IFA_F_PERMANENT) &&
+ (preferred == INFINITY_LIFE_TIME))) {
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->tstamp)/HZ;
+ long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
+
if (preferred > tval)
preferred -= tval;
else
@@ -5109,28 +5164,29 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
- spin_unlock_bh(&ifa->lock);
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
goto error;
- } else
+ } else {
if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
goto error;
+ }
- if (ifa->rt_priority &&
- nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
+ priority = READ_ONCE(ifa->rt_priority);
+ if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
goto error;
- if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
+ if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
+ preferred, valid) < 0)
goto error;
- if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
+ if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
goto error;
- if (ifa->ifa_proto &&
- nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
+ proto = READ_ONCE(ifa->ifa_proto);
+ if (proto && nla_put_u8(skb, IFA_PROTO, proto))
goto error;
nlmsg_end(skb, nlh);
@@ -5141,14 +5197,16 @@ error:
return -EMSGSIZE;
}
-static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
- struct inet6_fill_args *args)
+int inet6_fill_ifmcaddr(struct sk_buff *skb,
+ const struct ifmcaddr6 *ifmca,
+ struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
- u8 scope = RT_SCOPE_UNIVERSE;
int ifindex = ifmca->idev->dev->ifindex;
+ u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
- if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
+ if (!args->force_rt_scope_universe &&
+ ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
@@ -5164,7 +5222,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
- put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
+ put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5174,13 +5232,14 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
return 0;
}
-static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
- struct inet6_fill_args *args)
+int inet6_fill_ifacaddr(struct sk_buff *skb,
+ const struct ifacaddr6 *ifaca,
+ struct inet6_fill_args *args)
{
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
int ifindex = dev ? dev->ifindex : 1;
- struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
@@ -5198,7 +5257,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
- put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
+ put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5209,24 +5268,23 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
}
/* called with rcu_read_lock() */
-static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet6_fill_args *fillargs)
{
- struct ifmcaddr6 *ifmca;
- struct ifacaddr6 *ifaca;
+ const struct ifmcaddr6 *ifmca;
+ const struct ifacaddr6 *ifaca;
int ip_idx = 0;
- int err = 1;
+ int err = 0;
- read_lock_bh(&idev->lock);
switch (fillargs->type) {
case UNICAST_ADDR: {
- struct inet6_ifaddr *ifa;
+ const struct inet6_ifaddr *ifa;
fillargs->event = RTM_NEWADDR;
/* unicast address incl. temp addr */
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (ip_idx < s_ip_idx)
+ list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
+ if (ip_idx < *s_ip_idx)
goto next;
err = inet6_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
@@ -5238,27 +5296,25 @@ next:
break;
}
case MULTICAST_ADDR:
- read_unlock_bh(&idev->lock);
fillargs->event = RTM_GETMULTICAST;
/* multicast address */
- for (ifmca = rtnl_dereference(idev->mc_list);
+ for (ifmca = rcu_dereference(idev->mc_list);
ifmca;
- ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
- if (ip_idx < s_ip_idx)
+ ifmca = rcu_dereference(ifmca->next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
if (err < 0)
break;
}
- read_lock_bh(&idev->lock);
break;
case ANYCAST_ADDR:
fillargs->event = RTM_GETANYCAST;
/* anycast address */
- for (ifaca = idev->ac_list; ifaca;
- ifaca = ifaca->aca_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ for (ifaca = rcu_dereference(idev->ac_list); ifaca;
+ ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
if (err < 0)
@@ -5268,8 +5324,7 @@ next:
default:
break;
}
- read_unlock_bh(&idev->lock);
- cb->args[2] = ip_idx;
+ *s_ip_idx = err ? ip_idx : 0;
return err;
}
@@ -5332,6 +5387,7 @@ static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
+ struct net *tgt_net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
struct inet6_fill_args fillargs = {
.portid = NETLINK_CB(cb->skb).portid,
@@ -5339,73 +5395,55 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
.flags = NLM_F_MULTI,
.netnsid = -1,
.type = type,
+ .force_rt_scope_universe = false,
};
- struct net *tgt_net = sock_net(skb->sk);
- int idx, s_idx, s_ip_idx;
- int h, s_h;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
if (!dev) {
err = -ENODEV;
- goto put_tgt_net;
+ goto done;
}
idev = __in6_dev_get(dev);
- if (idev) {
- err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
+ if (idev)
+ err = in6_dump_addrs(idev, skb, cb,
+ &ctx->ip_idx,
&fillargs);
- if (err > 0)
- err = 0;
- }
- goto put_tgt_net;
+ goto done;
}
}
- rcu_read_lock();
cb->seq = inet6_base_seq(tgt_net);
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
- &fillargs) < 0)
- goto done;
-cont:
- idx++;
- }
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
}
done:
rcu_read_unlock();
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return skb->len ? : err;
+ return err;
}
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -5486,6 +5524,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
.event = RTM_NEWADDR,
.flags = 0,
.netnsid = -1,
+ .force_rt_scope_universe = false,
};
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
@@ -5557,6 +5596,7 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
.event = event,
.flags = 0,
.netnsid = -1,
+ .force_rt_scope_universe = false,
};
int err = -ENOBUFS;
@@ -5574,91 +5614,100 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
-static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
- __s32 *array, int bytes)
+static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
+ __s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
- array[DEVCONF_FORWARDING] = cnf->forwarding;
- array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
- array[DEVCONF_MTU6] = cnf->mtu6;
- array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
- array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
- array[DEVCONF_AUTOCONF] = cnf->autoconf;
- array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
- array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
+ array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
+ array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
+ array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
+ array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
+ array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
+ array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
+ array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
+ array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_max_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
array[DEVCONF_RTR_SOLICIT_DELAY] =
- jiffies_to_msecs(cnf->rtr_solicit_delay);
- array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
+ array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
- array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
- array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
- array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
- array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
- array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
- array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
- array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
- array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
- array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
- array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
+ array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
+ array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
+ array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
+ array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
+ array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
+ array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
+ array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
+ array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
+ READ_ONCE(cnf->accept_ra_min_hop_limit);
+ array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
#ifdef CONFIG_IPV6_ROUTER_PREF
- array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+ array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
array[DEVCONF_RTR_PROBE_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_probe_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
#ifdef CONFIG_IPV6_ROUTE_INFO
- array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
- array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_min_plen);
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_max_plen);
#endif
#endif
- array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
- array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+ array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
+ array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
+ READ_ONCE(cnf->accept_source_route);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
- array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
+ array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
+ array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
#endif
- array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
- array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
- array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
- array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
- array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
- array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
- array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
- array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
+ array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
+ array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
+ array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
+ array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] =
+ READ_ONCE(cnf->suppress_frag_ndisc);
+ array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
+ READ_ONCE(cnf->accept_ra_from_local);
+ array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
+ array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
+ READ_ONCE(cnf->ignore_routes_with_linkdown);
/* we omit DEVCONF_STABLE_SECRET for now */
- array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
- array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
- array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
- array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
- array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
+ array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
+ READ_ONCE(cnf->drop_unicast_in_l2_multicast);
+ array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
+ array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
+ array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
#ifdef CONFIG_IPV6_SEG6_HMAC
- array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
+ array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
#endif
- array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
- array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
- array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
- array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
- array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
- array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
- array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
- array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
- array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
- array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
- array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
+ array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
+ array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
+ array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
+ array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
+ array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
+ array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
+ array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
+ array[DEVCONF_NDISC_EVICT_NOCARRIER] =
+ READ_ONCE(cnf->ndisc_evict_nocarrier);
+ array[DEVCONF_ACCEPT_UNTRACKED_NA] =
+ READ_ONCE(cnf->accept_untracked_na);
+ array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
}
static inline size_t inet6_ifla6_size(void)
@@ -5738,13 +5787,14 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
u32 ext_filter_mask)
{
- struct nlattr *nla;
struct ifla_cacheinfo ci;
+ struct nlattr *nla;
+ u32 ra_mtu;
- if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+ if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
goto nla_put_failure;
ci.max_reasm_len = IPV6_MAXPLEN;
- ci.tstamp = cstamp_delta(idev->tstamp);
+ ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
@@ -5776,11 +5826,12 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
read_unlock_bh(&idev->lock);
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
+ READ_ONCE(idev->cnf.addr_gen_mode)))
goto nla_put_failure;
- if (idev->ra_mtu &&
- nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
+ ra_mtu = READ_ONCE(idev->ra_mtu);
+ if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
goto nla_put_failure;
return 0;
@@ -5842,7 +5893,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
return -EINVAL;
}
- if (idev->cnf.rtr_solicits == 0) {
+ if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
NL_SET_ERR_MSG(extack,
"Router solicitation is disabled on device");
return -EINVAL;
@@ -5875,7 +5926,7 @@ update_lft:
if (update_rs) {
idev->if_flags |= IF_RS_SENT;
idev->rs_interval = rfc3315_s14_backoff_init(
- idev->cnf.rtr_solicit_interval);
+ READ_ONCE(idev->cnf.rtr_solicit_interval));
idev->rs_probes = 1;
addrconf_mod_rs_timer(idev, idev->rs_interval);
}
@@ -5981,7 +6032,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- idev->cnf.addr_gen_mode = mode;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
}
return 0;
@@ -5993,6 +6044,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
struct net_device *dev = idev->dev;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
+ int ifindex, iflink;
void *protoinfo;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
@@ -6003,18 +6055,20 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_family = AF_INET6;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
- hdr->ifi_index = dev->ifindex;
+ ifindex = READ_ONCE(dev->ifindex);
+ hdr->ifi_index = ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
+ iflink = dev_get_iflink(dev);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- (dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
+ (ifindex != iflink &&
+ nla_put_u32(skb, IFLA_LINK, iflink)) ||
nla_put_u8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
+ netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
goto nla_put_failure;
protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
if (!protoinfo)
@@ -6060,50 +6114,39 @@ static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err;
/* only requests using strict checking can pass data to
* influence the dump
*/
if (cb->strict_check) {
- int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+ err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
if (err < 0)
return err;
}
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
+ err = 0;
rcu_read_lock();
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
- if (inet6_fill_ifinfo(skb, idev,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWLINK, NLM_F_MULTI) < 0)
- goto out;
-cont:
- idx++;
- }
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_fill_ifinfo(skb, idev,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWLINK, NLM_F_MULTI);
+ if (err < 0)
+ break;
}
-out:
rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
- return skb->len;
+ return err;
}
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
@@ -6126,8 +6169,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
}
static inline size_t inet6_prefix_nlmsg_size(void)
@@ -6194,8 +6236,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
}
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -6262,7 +6303,7 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
#ifdef CONFIG_SYSCTL
-static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6287,7 +6328,7 @@ static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
return ret;
}
-static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct inet6_dev *idev = ctl->extra1;
@@ -6321,44 +6362,45 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
struct inet6_dev *idev;
for_each_netdev(net, dev) {
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
- idev->cnf.disable_ipv6 = newf;
+
+ WRITE_ONCE(idev->cnf.disable_ipv6, newf);
if (changed)
dev_disable_change(idev);
}
}
}
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
- if (!rtnl_trylock())
- return restart_syscall();
-
- net = (struct net *)table->extra2;
- old = *p;
- *p = newf;
-
if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
- rtnl_unlock();
+ WRITE_ONCE(*p, newf);
return 0;
}
+ if (!rtnl_net_trylock(net))
+ return restart_syscall();
+
+ old = *p;
+ WRITE_ONCE(*p, newf);
+
if (p == &net->ipv6.devconf_all->disable_ipv6) {
- net->ipv6.devconf_dflt->disable_ipv6 = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
addrconf_disable_change(net, newf);
- } else if ((!newf) ^ (!old))
+ } else if ((!newf) ^ (!old)) {
dev_disable_change((struct inet6_dev *)table->extra1);
+ }
- rtnl_unlock();
+ rtnl_net_unlock(net);
return 0;
}
-static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6383,7 +6425,7 @@ static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
return ret;
}
-static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6397,20 +6439,20 @@ static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
if (write && old != new) {
struct net *net = ctl->extra2;
- if (!rtnl_trylock())
+ if (!rtnl_net_trylock(net))
return restart_syscall();
- if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
+ if (valp == &net->ipv6.devconf_dflt->proxy_ndp) {
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_PROXY_NEIGH,
NETCONFA_IFINDEX_DEFAULT,
net->ipv6.devconf_dflt);
- else if (valp == &net->ipv6.devconf_all->proxy_ndp)
+ } else if (valp == &net->ipv6.devconf_all->proxy_ndp) {
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_PROXY_NEIGH,
NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all);
- else {
+ } else {
struct inet6_dev *idev = ctl->extra1;
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
@@ -6418,13 +6460,13 @@ static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
idev->dev->ifindex,
&idev->cnf);
}
- rtnl_unlock();
+ rtnl_net_unlock(net);
}
return ret;
}
-static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -6438,7 +6480,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
.mode = ctl->mode,
};
- if (!rtnl_trylock())
+ if (!rtnl_net_trylock(net))
return restart_syscall();
new_val = *((u32 *)ctl->data);
@@ -6460,33 +6502,34 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
}
if (idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
addrconf_init_auto_addrs(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
- net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+ WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
for_each_netdev(net, dev) {
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (idev &&
idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ new_val);
addrconf_init_auto_addrs(idev->dev);
}
}
}
- *((u32 *)ctl->data) = new_val;
+ WRITE_ONCE(*((u32 *)ctl->data), new_val);
}
out:
- rtnl_unlock();
+ rtnl_net_unlock(net);
return ret;
}
-static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -6503,7 +6546,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
lctl.maxlen = IPV6_MAX_STRLEN;
lctl.data = str;
- if (!rtnl_trylock())
+ if (!rtnl_net_trylock(net))
return restart_syscall();
if (!write && !secret->initialized) {
@@ -6533,27 +6576,28 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct net_device *dev;
for_each_netdev(net, dev) {
- struct inet6_dev *idev = __in6_dev_get(dev);
+ struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev);
if (idev) {
- idev->cnf.addr_gen_mode =
- IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
out:
- rtnl_unlock();
+ rtnl_net_unlock(net);
return err;
}
static
-int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl,
int write, void *buffer,
size_t *lenp,
loff_t *ppos)
@@ -6621,27 +6665,26 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
}
static
-int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
+int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
{
+ struct net *net = (struct net *)ctl->extra2;
struct inet6_dev *idev;
- struct net *net;
- if (!rtnl_trylock())
- return restart_syscall();
-
- *valp = val;
-
- net = (struct net *)ctl->extra2;
if (valp == &net->ipv6.devconf_dflt->disable_policy) {
- rtnl_unlock();
+ WRITE_ONCE(*valp, val);
return 0;
}
+ if (!rtnl_net_trylock(net))
+ return restart_syscall();
+
+ WRITE_ONCE(*valp, val);
+
if (valp == &net->ipv6.devconf_all->disable_policy) {
struct net_device *dev;
for_each_netdev(net, dev) {
- idev = __in6_dev_get(dev);
+ idev = __in6_dev_get_rtnl_net(dev);
if (idev)
addrconf_disable_policy_idev(idev, val);
}
@@ -6650,11 +6693,11 @@ int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
addrconf_disable_policy_idev(idev, val);
}
- rtnl_unlock();
+ rtnl_net_unlock(net);
return 0;
}
-static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
+static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -6806,6 +6849,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "regen_min_advance",
+ .data = &ipv6_devconf.regen_min_advance,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "regen_max_retry",
.data = &ipv6_devconf.regen_max_retry,
.maxlen = sizeof(int),
@@ -6871,6 +6921,15 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ {
+ .procname = "ra_honor_pio_pflag",
+ .data = &ipv6_devconf.ra_honor_pio_pflag,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
#ifdef CONFIG_IPV6_ROUTER_PREF
{
.procname = "accept_ra_rtr_pref",
@@ -7130,14 +7189,12 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
- {
- /* sentinel */
- }
};
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
struct inet6_dev *idev, struct ipv6_devconf *p)
{
+ size_t table_size = ARRAY_SIZE(addrconf_sysctl);
int i, ifindex;
struct ctl_table *table;
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
@@ -7146,7 +7203,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
if (!table)
goto out;
- for (i = 0; table[i].data; i++) {
+ for (i = 0; i < table_size; i++) {
table[i].data += (char *)p - (char *)&ipv6_devconf;
/* If one of these is already set, then it is not safe to
* overwrite either of them: this makes proc_dointvec_minmax
@@ -7161,7 +7218,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
p->sysctl_header = register_net_sysctl_sz(net, path, table,
- ARRAY_SIZE(addrconf_sysctl));
+ table_size);
if (!p->sysctl_header)
goto free;
@@ -7184,7 +7241,7 @@ out:
static void __addrconf_sysctl_unregister(struct net *net,
struct ipv6_devconf *p, int ifindex)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
if (!p->sysctl_header)
return;
@@ -7345,6 +7402,27 @@ static struct rtnl_af_ops inet6_ops __read_mostly = {
.set_link_af = inet6_set_link_af,
};
+static const struct rtnl_msg_handler addrconf_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETLINK,
+ .dumpit = inet6_dump_ifinfo, .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDR,
+ .doit = inet6_rtm_newaddr, .flags = RTNL_FLAG_DOIT_PERNET},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDR,
+ .doit = inet6_rtm_deladdr, .flags = RTNL_FLAG_DOIT_PERNET},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDR,
+ .doit = inet6_rtm_getaddr, .dumpit = inet6_dump_ifaddr,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETMULTICAST,
+ .dumpit = inet6_dump_ifmcaddr,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETANYCAST,
+ .dumpit = inet6_dump_ifacaddr,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETNETCONF,
+ .doit = inet6_netconf_get_devconf, .dumpit = inet6_netconf_dump_devconf,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
+};
+
/*
* Init / cleanup code
*/
@@ -7365,15 +7443,16 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
- addrconf_wq = create_workqueue("ipv6_addrconf");
+ /* All work items queued on addrconf_wq must take the RTNL lock. */
+ addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
if (!addrconf_wq) {
err = -ENOMEM;
goto out_nowq;
}
- rtnl_lock();
+ rtnl_net_lock(&init_net);
idev = ipv6_add_dev(blackhole_netdev);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
if (IS_ERR(idev)) {
err = PTR_ERR(idev);
goto errlo;
@@ -7385,40 +7464,14 @@ int __init addrconf_init(void)
addrconf_verify(&init_net);
- rtnl_af_register(&inet6_ops);
+ err = rtnl_af_register(&inet6_ops);
+ if (err)
+ goto erraf;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
- NULL, inet6_dump_ifinfo, 0);
- if (err < 0)
+ err = rtnl_register_many(addrconf_rtnl_msg_handlers);
+ if (err)
goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
- inet6_rtm_newaddr, NULL, 0);
- if (err < 0)
- goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
- inet6_rtm_deladdr, NULL, 0);
- if (err < 0)
- goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
- inet6_rtm_getaddr, inet6_dump_ifaddr,
- RTNL_FLAG_DOIT_UNLOCKED);
- if (err < 0)
- goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
- NULL, inet6_dump_ifmcaddr, 0);
- if (err < 0)
- goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
- NULL, inet6_dump_ifacaddr, 0);
- if (err < 0)
- goto errout;
- err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
- inet6_netconf_get_devconf,
- inet6_netconf_dump_devconf,
- RTNL_FLAG_DOIT_UNLOCKED);
- if (err < 0)
- goto errout;
err = ipv6_addr_label_rtnl_register();
if (err < 0)
goto errout;
@@ -7427,6 +7480,7 @@ int __init addrconf_init(void)
errout:
rtnl_unregister_all(PF_INET6);
rtnl_af_unregister(&inet6_ops);
+erraf:
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
destroy_workqueue(addrconf_wq);
@@ -7448,17 +7502,17 @@ void addrconf_cleanup(void)
rtnl_af_unregister(&inet6_ops);
- rtnl_lock();
+ rtnl_net_lock(&init_net);
/* clean dev list */
for_each_netdev(&init_net, dev) {
- if (__in6_dev_get(dev) == NULL)
+ if (!__in6_dev_get_rtnl_net(dev))
continue;
addrconf_ifdown(dev, true);
}
addrconf_ifdown(init_net.loopback_dev, true);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
destroy_workqueue(addrconf_wq);
}
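
The addrconf hunks above move the sysctl write paths from the global RTNL to per-netns RTNL locking and annotate the stores for lockless readers. A minimal sketch of that handler shape, assuming rtnl_net_trylock()/rtnl_net_unlock() and restart_syscall() behave as used above; example_sysctl and its value are hypothetical names:

	static int example_sysctl(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
	{
		struct net *net = ctl->extra2;	/* per-netns context, as in the hunks above */
		int *valp = ctl->data;
		struct ctl_table tmp = *ctl;
		int new = *valp;
		int ret;

		tmp.data = &new;
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (!write || ret || new == *valp)
			return ret;

		/* Never block on RTNL from a sysctl write; let userspace retry. */
		if (!rtnl_net_trylock(net))
			return restart_syscall();

		/* Paired with READ_ONCE() in the lockless fast paths. */
		WRITE_ONCE(*valp, new);
		/* ... propagate the change to per-device state under the netns RTNL ... */

		rtnl_net_unlock(net);
		return ret;
	}

Taking only the target namespace's RTNL keeps unrelated namespaces from serializing behind one another's sysctl writes.
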
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 17ac45aa7194..ab054f329e12 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -234,7 +234,8 @@ static int __ip6addrlbl_add(struct net *net, struct ip6addrlbl_entry *newp,
hlist_add_head_rcu(&newp->list, &net->ipv6.ip6addrlbl_table.head);
out:
if (!ret)
- net->ipv6.ip6addrlbl_table.seq++;
+ WRITE_ONCE(net->ipv6.ip6addrlbl_table.seq,
+ net->ipv6.ip6addrlbl_table.seq + 1);
return ret;
}
@@ -445,7 +446,7 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
};
static int ip6addrlbl_fill(struct sk_buff *skb,
- struct ip6addrlbl_entry *p,
+ const struct ip6addrlbl_entry *p,
u32 lseq,
u32 portid, u32 seq, int event,
unsigned int flags)
@@ -498,7 +499,8 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
struct ip6addrlbl_entry *p;
int idx = 0, s_idx = cb->args[0];
- int err;
+ int err = 0;
+ u32 lseq;
if (cb->strict_check) {
err = ip6addrlbl_valid_dump_req(nlh, cb->extack);
@@ -507,10 +509,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
rcu_read_lock();
+ lseq = READ_ONCE(net->ipv6.ip6addrlbl_table.seq);
hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
if (idx >= s_idx) {
err = ip6addrlbl_fill(skb, p,
- net->ipv6.ip6addrlbl_table.seq,
+ lseq,
NETLINK_CB(cb->skb).portid,
nlh->nlmsg_seq,
RTM_NEWADDRLABEL,
@@ -522,7 +525,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
rcu_read_unlock();
cb->args[0] = idx;
- return skb->len;
+ return err;
}
static inline int ip6addrlbl_msgsize(void)
@@ -614,7 +617,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
rcu_read_lock();
p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
- lseq = net->ipv6.ip6addrlbl_table.seq;
+ lseq = READ_ONCE(net->ipv6.ip6addrlbl_table.seq);
if (p)
err = ip6addrlbl_fill(skb, p, lseq,
NETLINK_CB(in_skb).portid,
@@ -631,22 +634,17 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
return err;
}
+static const struct rtnl_msg_handler ipv6_addr_label_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDRLABEL,
+ .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDRLABEL,
+ .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDRLABEL,
+ .doit = ip6addrlbl_get, .dumpit = ip6addrlbl_dump,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
+};
+
int __init ipv6_addr_label_rtnl_register(void)
{
- int ret;
-
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDRLABEL,
- ip6addrlbl_newdel,
- NULL, RTNL_FLAG_DOIT_UNLOCKED);
- if (ret < 0)
- return ret;
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDRLABEL,
- ip6addrlbl_newdel,
- NULL, RTNL_FLAG_DOIT_UNLOCKED);
- if (ret < 0)
- return ret;
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDRLABEL,
- ip6addrlbl_get,
- ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
- return ret;
+ return rtnl_register_many(ipv6_addr_label_rtnl_msg_handlers);
}
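
Both the addrconf and addrlabel hunks above replace chains of rtnl_register_module() calls with a single handler table passed to rtnl_register_many(). A sketch of the same pattern for a hypothetical "foo" object (RTM_NEWFOO/RTM_GETFOO and the foo_* handlers are made-up names), assuming rtnl_register_many() unwinds partial registrations on failure as the converted code relies on:

	static const struct rtnl_msg_handler foo_rtnl_msg_handlers[] __initconst_or_module = {
		{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWFOO,
		 .doit = foo_new},
		{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETFOO,
		 .doit = foo_get, .dumpit = foo_dump,
		 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
	};

	static int __init foo_rtnl_register(void)
	{
		/* One call registers the whole table instead of one call per msgtype. */
		return rtnl_register_many(foo_rtnl_msg_handlers);
	}

The table form also keeps the per-message flags (UNLOCKED, PERNET) next to the handlers they apply to, which is what the addrconf conversion above depends on.
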
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 959bfd9f6344..f60ec8b0f8ea 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,7 @@
#include <net/xfrm.h>
#include <net/ioam6.h>
#include <net/rawv6.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -251,31 +252,29 @@ lookup_protocol:
*/
inet->inet_sport = htons(inet->inet_num);
err = sk->sk_prot->hash(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
if (!kern) {
err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
- if (err) {
- sk_common_release(sk);
- goto out;
- }
+ if (err)
+ goto out_sk_release;
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
+out_sk_release:
+ sk_common_release(sk);
+ sock->sk = NULL;
+ goto out;
}
static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
@@ -508,7 +507,7 @@ void inet6_cleanup_sock(struct sock *sk)
/* Free tx options */
- opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+ opt = unrcu_pointer(xchg(&np->opt, NULL));
if (opt) {
atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
txopt_put(opt);
@@ -707,6 +706,7 @@ const struct proto_ops inet6_stream_ops = {
.splice_eof = inet_splice_eof,
.sendmsg_locked = tcp_sendmsg_locked,
.splice_read = tcp_splice_read,
+ .set_peek_off = sk_set_peek_off,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.peek_len = tcp_peek_len,
@@ -736,7 +736,7 @@ const struct proto_ops inet6_dgram_ops = {
.recvmsg = inet6_recvmsg, /* retpoline's sake */
.read_skb = udp_read_skb,
.mmap = sock_no_mmap,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
@@ -1059,6 +1059,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.nd_tbl = &nd_tbl,
.ipv6_fragment = ip6_fragment,
.ipv6_dev_find = ipv6_dev_find,
+ .ip6_xmit = ip6_xmit,
};
static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index bb17f484ee2c..21e01695b48c 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -49,9 +49,10 @@ static DEFINE_SPINLOCK(acaddr_hash_lock);
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
-static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+static u32 inet6_acaddr_hash(const struct net *net,
+ const struct in6_addr *addr)
{
- u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+ u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net));
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
}
@@ -252,9 +253,8 @@ static void aca_free_rcu(struct rcu_head *h)
static void aca_put(struct ifacaddr6 *ac)
{
- if (refcount_dec_and_test(&ac->aca_refcnt)) {
- call_rcu(&ac->rcu, aca_free_rcu);
- }
+ if (refcount_dec_and_test(&ac->aca_refcnt))
+ call_rcu_hurry(&ac->rcu, aca_free_rcu);
}
static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
@@ -278,6 +278,37 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
return aca;
}
+static void inet6_ifacaddr_notify(struct net_device *dev,
+ const struct ifacaddr6 *ifaca, int event)
+{
+ struct inet6_fill_args fillargs = {
+ .event = event,
+ .netnsid = -1,
+ };
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOMEM;
+
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ nla_total_size(sizeof(struct in6_addr)) +
+ nla_total_size(sizeof(struct ifa_cacheinfo)),
+ GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ err = inet6_fill_ifacaddr(skb, ifaca, &fillargs);
+ if (err < 0) {
+ pr_err("Failed to fill in anycast addresses (err %d)\n", err);
+ nlmsg_free(skb);
+ goto error;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ACADDR, NULL, GFP_KERNEL);
+ return;
+error:
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_ACADDR, err);
+}
+
/*
* device anycast group inc (add if not found)
*/
@@ -296,7 +327,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
aca->aca_users++;
err = 0;
@@ -317,13 +349,13 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- aca->aca_next = idev->ac_list;
- idev->ac_list = aca;
-
/* Hold this for addrconf_join_solict() below before we unlock,
* it is already exposed via idev->ac_list.
*/
aca_get(aca);
+ aca->aca_next = idev->ac_list;
+ rcu_assign_pointer(idev->ac_list, aca);
+
write_unlock_bh(&idev->lock);
ipv6_add_acaddr_hash(net, aca);
@@ -332,6 +364,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
addrconf_join_solict(idev->dev, &aca->aca_addr);
+ inet6_ifacaddr_notify(idev->dev, aca, RTM_NEWANYCAST);
+
aca_put(aca);
return 0;
out:
@@ -350,7 +384,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_lock_bh(&idev->lock);
prev_aca = NULL;
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
prev_aca = aca;
@@ -364,15 +399,17 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
if (prev_aca)
- prev_aca->aca_next = aca->aca_next;
+ rcu_assign_pointer(prev_aca->aca_next, aca->aca_next);
else
- idev->ac_list = aca->aca_next;
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt, false);
+ inet6_ifacaddr_notify(idev->dev, aca, RTM_DELANYCAST);
+
aca_put(aca);
return 0;
}
@@ -392,8 +429,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
- while ((aca = idev->ac_list) != NULL) {
- idev->ac_list = aca->aca_next;
+ while ((aca = rtnl_dereference(idev->ac_list)) != NULL) {
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
@@ -420,11 +457,10 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
idev = __in6_dev_get(dev);
if (idev) {
- read_lock_bh(&idev->lock);
- for (aca = idev->ac_list; aca; aca = aca->aca_next)
+ for (aca = rcu_dereference(idev->ac_list); aca;
+ aca = rcu_dereference(aca->aca_next))
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
- read_unlock_bh(&idev->lock);
return aca != NULL;
}
return false;
@@ -477,30 +513,25 @@ bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
struct ac6_iter_state {
struct seq_net_private p;
struct net_device *dev;
- struct inet6_dev *idev;
};
#define ac6_seq_private(seq) ((struct ac6_iter_state *)(seq)->private)
static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
{
- struct ifacaddr6 *im = NULL;
struct ac6_iter_state *state = ac6_seq_private(seq);
struct net *net = seq_file_net(seq);
+ struct ifacaddr6 *im = NULL;
- state->idev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
+
idev = __in6_dev_get(state->dev);
if (!idev)
continue;
- read_lock_bh(&idev->lock);
- im = idev->ac_list;
- if (im) {
- state->idev = idev;
+ im = rcu_dereference(idev->ac_list);
+ if (im)
break;
- }
- read_unlock_bh(&idev->lock);
}
return im;
}
@@ -508,22 +539,17 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
+ struct inet6_dev *idev;
- im = im->aca_next;
+ im = rcu_dereference(im->aca_next);
while (!im) {
- if (likely(state->idev != NULL))
- read_unlock_bh(&state->idev->lock);
-
state->dev = next_net_device_rcu(state->dev);
- if (!state->dev) {
- state->idev = NULL;
+ if (!state->dev)
break;
- }
- state->idev = __in6_dev_get(state->dev);
- if (!state->idev)
+ idev = __in6_dev_get(state->dev);
+ if (!idev)
continue;
- read_lock_bh(&state->idev->lock);
- im = state->idev->ac_list;
+ im = rcu_dereference(idev->ac_list);
}
return im;
}
@@ -555,12 +581,6 @@ static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ac6_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
- struct ac6_iter_state *state = ac6_seq_private(seq);
-
- if (likely(state->idev != NULL)) {
- read_unlock_bh(&state->idev->lock);
- state->idev = NULL;
- }
rcu_read_unlock();
}
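
The anycast changes above convert idev->ac_list to an RCU-protected singly linked list: writers still take the idev lock but publish with rcu_assign_pointer(), while the /proc and lookup paths walk the chain under rcu_read_lock() alone. A reduced sketch of that shape, with hypothetical node/head names:

	struct node {
		struct node __rcu *next;
		int key;
		struct rcu_head rcu;
	};

	static void insert_head(struct node __rcu **head, struct node *n)
	{
		/* Writer side, assumed to run under the existing write lock. */
		n->next = *head;		/* old head; n is not yet visible */
		rcu_assign_pointer(*head, n);	/* publish only after n is initialized */
	}

	static bool lookup(struct node __rcu **head, int key)
	{
		struct node *n;

		/* Reader side, under rcu_read_lock(); no per-idev lock needed. */
		for (n = rcu_dereference(*head); n; n = rcu_dereference(n->next))
			if (n->key == key)
				return true;
		return false;
	}

Unlinked nodes are then freed through call_rcu(), which is why aca_put() above defers to aca_free_rcu() instead of freeing in place.
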
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1578ed9e97d8..dbcea9fee626 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -29,7 +29,7 @@
#include <net/calipso.h>
#include <linux/atomic.h>
#include <linux/bug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/crc-ccitt.h>
/* Maximum size of the calipso option including
@@ -657,11 +657,8 @@ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def,
net_clen_bits,
spot + 1,
1);
- if (spot < 0) {
- if (spot == -2)
- return -EFAULT;
+ if (spot < 0)
return 0;
- }
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
spot,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6e6efe026cdc..9e73944e3b53 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -36,6 +36,7 @@
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>
+#include <linux/skbuff_ref.h>
#include <linux/highmem.h>
@@ -112,7 +113,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
int extralen = 0;
@@ -131,7 +132,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- put_page(sg_page(sg));
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET6_ESPINTCP
@@ -255,8 +257,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
- kfree_skb(skb);
-
+ WARN_ON(1);
return -EOPNOTSUPP;
}
#endif
@@ -294,7 +295,7 @@ static void esp_output_done(void *data, int err)
}
tmp = ESP_SKB_CB(skb)->tmp;
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
kfree(tmp);
esp_output_encap_csum(skb);
@@ -314,7 +315,7 @@ static void esp_output_done(void *data, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb->sk, skb, err);
+ xfrm_output_resume(skb_to_full_sk(skb), skb, err);
}
}
@@ -383,7 +384,6 @@ static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
__be16 dport)
{
struct udphdr *uh;
- __be32 *udpdata32;
unsigned int len;
len = skb->len + esp->tailen - skb_transport_offset(skb);
@@ -398,12 +398,6 @@ static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
*skb_mac_header(skb) = IPPROTO_UDP;
- if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
- udpdata32 = (__be32 *)(uh + 1);
- udpdata32[0] = udpdata32[1] = 0;
- return (struct ip_esp_hdr *)(udpdata32 + 2);
- }
-
return (struct ip_esp_hdr *)(uh + 1);
}
@@ -459,7 +453,6 @@ static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
switch (encap_type) {
default:
case UDP_ENCAP_ESPINUDP:
- case UDP_ENCAP_ESPINUDP_NON_IKE:
esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
break;
case TCP_ENCAP_ESPINTCP:
@@ -677,7 +670,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
}
if (sg != dsg)
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
err = esp_output_tail_tcp(x, skb);
@@ -822,7 +815,6 @@ int esp6_input_done2(struct sk_buff *skb, int err)
source = th->source;
break;
case UDP_ENCAP_ESPINUDP:
- case UDP_ENCAP_ESPINUDP_NON_IKE:
source = uh->source;
break;
default:
@@ -867,7 +859,8 @@ int esp6_input_done2(struct sk_buff *skb, int err)
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
skb_pull_rcsum(skb, hlen);
- if (x->props.mode == XFRM_MODE_TUNNEL)
+ if (x->props.mode == XFRM_MODE_TUNNEL ||
+ x->props.mode == XFRM_MODE_IPTFS)
skb_reset_transport_header(skb);
else
skb_set_transport_header(skb, -hdr_len);
@@ -1232,9 +1225,6 @@ static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
case UDP_ENCAP_ESPINUDP:
x->props.header_len += sizeof(struct udphdr);
break;
- case UDP_ENCAP_ESPINUDP_NON_IKE:
- x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
- break;
#ifdef CONFIG_INET6_ESPINTCP
case TCP_ENCAP_ESPINTCP:
/* only the length field, TCP encap is done by
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 527b7caddbc6..7b41fb4f00b5 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -80,9 +80,16 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
if (sp->len == XFRM_MAX_DEPTH)
goto out_reset;
- x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
- (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
- spi, IPPROTO_ESP, AF_INET6);
+ x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
+ (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+ spi, IPPROTO_ESP, AF_INET6);
+
+ if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ /* non-offload path will record the error and audit log */
+ xfrm_state_put(x);
+ x = NULL;
+ }
+
if (!x)
goto out_reset;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 02e9ffb63af1..6789623b2b0d 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -50,6 +50,7 @@
#endif
#include <net/rpl.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <net/ioam6.h>
#include <net/dst_metadata.h>
@@ -378,9 +379,8 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
- accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
- if (accept_seg6 > idev->cnf.seg6_enabled)
- accept_seg6 = idev->cnf.seg6_enabled;
+ accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
+ READ_ONCE(idev->cnf.seg6_enabled));
if (!accept_seg6) {
kfree_skb(skb);
@@ -654,10 +654,13 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
struct net *net = dev_net(skb->dev);
- int accept_source_route = net->ipv6.devconf_all->accept_source_route;
+ int accept_source_route;
- if (idev && accept_source_route > idev->cnf.accept_source_route)
- accept_source_route = idev->cnf.accept_source_route;
+ accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);
+
+ if (idev)
+ accept_source_route = min(accept_source_route,
+ READ_ONCE(idev->cnf.accept_source_route));
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -801,7 +804,7 @@ looped_back:
ip6_route_input(skb);
if (skb_dst(skb)->error) {
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
}
@@ -818,7 +821,7 @@ looped_back:
goto looped_back;
}
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
@@ -880,14 +883,6 @@ void ipv6_exthdrs_exit(void)
Hop-by-hop options.
**********************************/
-/*
- * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
- */
-static inline struct net *ipv6_skb_net(struct sk_buff *skb)
-{
- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
-}
-
/* Router Alert as of RFC 2711 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
@@ -918,7 +913,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if IOAM is not enabled on ingress */
- if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled)
+ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
goto ignore;
/* Truncated Option header */
@@ -938,7 +933,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if the IOAM namespace is unknown */
- ns = ioam6_namespace(ipv6_skb_net(skb), trace->namespace_id);
+ ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id);
if (!ns)
goto ignore;
@@ -954,6 +949,9 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ optoff + sizeof(*hdr));
ioam6_fill_trace_data(skb, ns, trace, true);
+
+ ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev),
+ GFP_ATOMIC, (void *)trace, hdr->opt_len - 2);
break;
default:
break;
diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c
index f87ae33e1d01..949b72610df7 100644
--- a/net/ipv6/fib6_notifier.c
+++ b/net/ipv6/fib6_notifier.c
@@ -22,7 +22,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
return call_fib_notifiers(net, event_type, info);
}
-static unsigned int fib6_seq_read(struct net *net)
+static unsigned int fib6_seq_read(const struct net *net)
{
return fib6_tables_seq_read(net) + fib6_rules_seq_read(net);
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 7523c4baef35..67d39114d9a6 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -26,14 +26,17 @@ struct fib6_rule {
struct fib_rule common;
struct rt6key src;
struct rt6key dst;
+ __be32 flowlabel;
+ __be32 flowlabel_mask;
dscp_t dscp;
+ u8 dscp_full:1; /* DSCP or TOS selector */
};
static bool fib6_rule_matchall(const struct fib_rule *rule)
{
struct fib6_rule *r = container_of(rule, struct fib6_rule, common);
- if (r->dst.plen || r->src.plen || r->dscp)
+ if (r->dst.plen || r->src.plen || r->dscp || r->flowlabel_mask)
return false;
return fib_rule_matchall(rule);
}
@@ -55,7 +58,7 @@ int fib6_rules_dump(struct net *net, struct notifier_block *nb,
return fib_rules_dump(net, nb, AF_INET6, extack);
}
-unsigned int fib6_rules_seq_read(struct net *net)
+unsigned int fib6_rules_seq_read(const struct net *net)
{
return fib_rules_seq_read(net, AF_INET6);
}
@@ -233,8 +236,12 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
rt = pol_lookup_func(lookup,
net, table, flp6, arg->lookup_data, flags);
if (rt != net->ipv6.ip6_null_entry) {
+ struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
+
+ if (!idev)
+ goto again;
err = fib6_rule_saddr(net, rule, flags, flp6,
- ip6_dst_idev(&rt->dst)->dev);
+ idev->dev);
if (err == -EAGAIN)
goto again;
@@ -327,6 +334,9 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule,
if (r->dscp && r->dscp != ip6_dscp(fl6->flowlabel))
return 0;
+ if ((r->flowlabel ^ flowi6_get_flowlabel(fl6)) & r->flowlabel_mask)
+ return 0;
+
if (rule->ip_proto && (rule->ip_proto != fl6->flowi6_proto))
return 0;
@@ -341,6 +351,49 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule,
return 1;
}
+static int fib6_nl2rule_dscp(const struct nlattr *nla, struct fib6_rule *rule6,
+ struct netlink_ext_ack *extack)
+{
+ if (rule6->dscp) {
+ NL_SET_ERR_MSG(extack, "Cannot specify both TOS and DSCP");
+ return -EINVAL;
+ }
+
+ rule6->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2);
+ rule6->dscp_full = true;
+
+ return 0;
+}
+
+static int fib6_nl2rule_flowlabel(struct nlattr **tb, struct fib6_rule *rule6,
+ struct netlink_ext_ack *extack)
+{
+ __be32 flowlabel, flowlabel_mask;
+
+ if (NL_REQ_ATTR_CHECK(extack, NULL, tb, FRA_FLOWLABEL) ||
+ NL_REQ_ATTR_CHECK(extack, NULL, tb, FRA_FLOWLABEL_MASK))
+ return -EINVAL;
+
+ flowlabel = nla_get_be32(tb[FRA_FLOWLABEL]);
+ flowlabel_mask = nla_get_be32(tb[FRA_FLOWLABEL_MASK]);
+
+ if (flowlabel_mask & ~IPV6_FLOWLABEL_MASK) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[FRA_FLOWLABEL_MASK],
+ "Invalid flow label mask");
+ return -EINVAL;
+ }
+
+ if (flowlabel & ~flowlabel_mask) {
+ NL_SET_ERR_MSG(extack, "Flow label and mask do not match");
+ return -EINVAL;
+ }
+
+ rule6->flowlabel = flowlabel;
+ rule6->flowlabel_mask = flowlabel_mask;
+
+ return 0;
+}
+
static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -357,6 +410,13 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
rule6->dscp = inet_dsfield_to_dscp(frh->tos);
+ if (tb[FRA_DSCP] && fib6_nl2rule_dscp(tb[FRA_DSCP], rule6, extack) < 0)
+ goto errout;
+
+ if ((tb[FRA_FLOWLABEL] || tb[FRA_FLOWLABEL_MASK]) &&
+ fib6_nl2rule_flowlabel(tb, rule6, extack) < 0)
+ goto errout;
+
if (rule->action == FR_ACT_TO_TBL && !rule->l3mdev) {
if (rule->table == RT6_TABLE_UNSPEC) {
NL_SET_ERR_MSG(extack, "Invalid table");
@@ -409,7 +469,25 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->dst_len && (rule6->dst.plen != frh->dst_len))
return 0;
- if (frh->tos && inet_dscp_to_dsfield(rule6->dscp) != frh->tos)
+ if (frh->tos &&
+ (rule6->dscp_full ||
+ inet_dscp_to_dsfield(rule6->dscp) != frh->tos))
+ return 0;
+
+ if (tb[FRA_DSCP]) {
+ dscp_t dscp;
+
+ dscp = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP]) << 2);
+ if (!rule6->dscp_full || rule6->dscp != dscp)
+ return 0;
+ }
+
+ if (tb[FRA_FLOWLABEL] &&
+ nla_get_be32(tb[FRA_FLOWLABEL]) != rule6->flowlabel)
+ return 0;
+
+ if (tb[FRA_FLOWLABEL_MASK] &&
+ nla_get_be32(tb[FRA_FLOWLABEL_MASK]) != rule6->flowlabel_mask)
return 0;
if (frh->src_len &&
@@ -430,7 +508,20 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->dst_len = rule6->dst.plen;
frh->src_len = rule6->src.plen;
- frh->tos = inet_dscp_to_dsfield(rule6->dscp);
+
+ if (rule6->dscp_full) {
+ frh->tos = 0;
+ if (nla_put_u8(skb, FRA_DSCP,
+ inet_dscp_to_dsfield(rule6->dscp) >> 2))
+ goto nla_put_failure;
+ } else {
+ frh->tos = inet_dscp_to_dsfield(rule6->dscp);
+ }
+
+ if (rule6->flowlabel_mask &&
+ (nla_put_be32(skb, FRA_FLOWLABEL, rule6->flowlabel) ||
+ nla_put_be32(skb, FRA_FLOWLABEL_MASK, rule6->flowlabel_mask)))
+ goto nla_put_failure;
if ((rule6->dst.plen &&
nla_put_in6_addr(skb, FRA_DST, &rule6->dst.addr)) ||
@@ -446,7 +537,15 @@ nla_put_failure:
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
- + nla_total_size(16); /* src */
+ + nla_total_size(16) /* src */
+ + nla_total_size(1) /* dscp */
+ + nla_total_size(4) /* flowlabel */
+ + nla_total_size(4); /* flowlabel mask */
+}
+
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+ rt_genid_bump_ipv6(ops->fro_net);
}
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
@@ -461,6 +560,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
+ .flush_cache = fib6_rule_flush_cache,
.nlgroup = RTNLGRP_IPV6_RULE,
.owner = THIS_MODULE,
.fro_net = &init_net,
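
fib6_rule_match() above accepts a packet when its flow label agrees with the rule on every bit covered by the mask, via (rule ^ packet) & mask. A worked example (values shown as 20-bit labels in host order for clarity; the actual fields are __be32):

	rule flowlabel       = 0x12345
	rule flowlabel_mask  = 0xfff00   (only the upper 12 of 20 bits are checked)
	packet flow label    = 0x123ab   (differs only in the unmasked low bits)

	(0x12345 ^ 0x123ab) & 0xfff00 = 0x000ee & 0xfff00 = 0   ->  rule matches

Any disagreement inside the masked bits leaves a non-zero result and the rule is skipped; fib6_nl2rule_flowlabel() above rejects rules where flowlabel & ~flowlabel_mask != 0, so a rule can never require bits its own mask does not test.
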
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1635da07285f..4d14ab7f7e99 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -76,7 +76,7 @@ static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
if (type == ICMPV6_PKT_TOOBIG)
ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
@@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type)
return false;
}
-static bool icmpv6_global_allow(struct net *net, int type)
+static bool icmpv6_global_allow(struct net *net, int type,
+ bool *apply_ratelimit)
{
if (icmpv6_mask_allow(net, type))
return true;
- if (icmp_global_allow())
+ if (icmp_global_allow(net)) {
+ *apply_ratelimit = true;
return true;
-
+ }
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type)
* Check the ICMP output rate limit
*/
static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
- struct flowi6 *fl6)
+ struct flowi6 *fl6, bool apply_ratelimit)
{
struct net *net = sock_net(sk);
struct dst_entry *dst;
bool res = false;
- if (icmpv6_mask_allow(net, type))
+ if (!apply_ratelimit)
return true;
/*
@@ -212,7 +214,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
res = true;
} else {
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
int tmo = net->ipv6.sysctl.icmpv6_time;
struct inet_peer *peer;
@@ -220,14 +222,16 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
+ rcu_read_lock();
+ peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr);
res = inet_peer_xrlim_allow(peer, tmo);
- if (peer)
- inet_putpeer(peer);
+ rcu_read_unlock();
}
if (!res)
__ICMP6_INC_STATS(net, ip6_dst_idev(dst),
ICMP6_MIB_RATELIMITHOST);
+ else
+ icmp_global_consume(net);
dst_release(dst);
return res;
}
@@ -241,7 +245,7 @@ static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
dst = ip6_route_output(net, sk, fl6);
if (!dst->error) {
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct in6_addr prefsrc;
rt6_get_prefsrc(rt, &prefsrc);
@@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
struct net *net;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
+ bool apply_ratelimit = false;
struct dst_entry *dst;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
@@ -468,7 +473,10 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (!skb->dev)
return;
- net = dev_net(skb->dev);
+
+ rcu_read_lock();
+
+ net = dev_net_rcu(skb->dev);
mark = IP6_REPLY_MARK(net, skb->mark);
/*
* Make sure we respect the rules
@@ -491,7 +499,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
!(type == ICMPV6_PARAMPROB &&
code == ICMPV6_UNK_OPTION &&
(opt_unrec(skb, info))))
- return;
+ goto out;
saddr = NULL;
}
@@ -521,7 +529,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
- return;
+ goto out;
}
/*
@@ -530,14 +538,15 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (is_ineligible(skb)) {
net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
- return;
+ goto out;
}
- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
+ /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */
- if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
+ if (!(skb->dev->flags & IFF_LOOPBACK) &&
+ !icmpv6_global_allow(net, type, &apply_ratelimit))
goto out_bh_enable;
mip6_addr_swap(skb, parm);
@@ -575,8 +584,8 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
np = inet6_sk(sk);
- if (!icmpv6_xrlim_allow(sk, type, &fl6))
- goto out;
+ if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
+ goto out_unlock;
tmp_hdr.icmp6_type = type;
tmp_hdr.icmp6_code = code;
@@ -594,7 +603,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
- goto out;
+ goto out_unlock;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
@@ -610,13 +619,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
goto out_dst_release;
}
- rcu_read_lock();
idev = __in6_dev_get(skb->dev);
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
- &ipc6, &fl6, (struct rt6_info *)dst,
+ &ipc6, &fl6, dst_rt6_info(dst),
MSG_DONTWAIT)) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
@@ -624,13 +632,15 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
len + sizeof(struct icmp6hdr));
}
- rcu_read_unlock();
+
out_dst_release:
dst_release(dst);
-out:
+out_unlock:
icmpv6_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
+out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL(icmp6_send);
@@ -673,8 +683,8 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
skb_pull(skb2, nhs);
skb_reset_network_header(skb2);
- rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
- skb, 0);
+ rt = rt6_lookup(dev_net_rcu(skb->dev), &ipv6_hdr(skb2)->saddr,
+ NULL, 0, skb, 0);
if (rt && rt->dst.dev)
skb2->dev = rt->dst.dev;
@@ -711,12 +721,13 @@ EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
{
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
struct sock *sk;
struct inet6_dev *idev;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct icmp6hdr *icmph = icmp6_hdr(skb);
+ bool apply_ratelimit = false;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
@@ -781,8 +792,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
goto out;
/* Check the ratelimit */
- if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) ||
- !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6))
+ if ((!(skb->dev->flags & IFF_LOOPBACK) &&
+ !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) ||
+ !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit))
goto out_dst_release;
idev = __in6_dev_get(skb->dev);
@@ -803,7 +815,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), &ipc6, &fl6,
- (struct rt6_info *)dst, MSG_DONTWAIT)) {
+ dst_rt6_info(dst), MSG_DONTWAIT)) {
__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
@@ -824,7 +836,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
u8 code, __be32 info)
{
struct inet6_skb_parm *opt = IP6CB(skb);
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
const struct inet6_protocol *ipprot;
enum skb_drop_reason reason;
int inner_offset;
@@ -881,7 +893,7 @@ out:
static int icmpv6_rcv(struct sk_buff *skb)
{
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
struct net_device *dev = icmp6_dev(skb);
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
@@ -913,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
skb_set_network_header(skb, nh);
}
- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INMSGS);
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
@@ -931,7 +943,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
type = hdr->icmp6_type;
- ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
+ ICMP6MSGIN_INC_STATS(dev_net_rcu(dev), idev, type);
switch (type) {
case ICMPV6_ECHO_REQUEST:
@@ -1026,9 +1038,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
csum_error:
reason = SKB_DROP_REASON_ICMP_CSUM;
- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
kfree_skb_reason(skb, reason);
return 0;
@@ -1206,7 +1218,6 @@ static struct ctl_table ipv6_icmp_table_template[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { },
};
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
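
The icmp.c changes above split rate limiting into two stages: icmpv6_global_allow() now only records, via apply_ratelimit, whether the global limit is in force, and icmpv6_xrlim_allow() consumes a global token (icmp_global_consume()) only after the per-host check also passes. The resulting caller flow, reduced to its core (names as in the hunks above; a simplified sketch, not the full function):

	bool apply_ratelimit = false;

	/* Stage 1: global budget; exempt types and loopback skip it. */
	if (!(skb->dev->flags & IFF_LOOPBACK) &&
	    !icmpv6_global_allow(net, type, &apply_ratelimit))
		goto out_bh_enable;

	/* Stage 2: per-destination limit; consumes the global token on success. */
	if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
		goto out_unlock;

Deferring the consumption means a reply dropped by the per-host limit no longer burns global budget that replies toward other destinations could have used.
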
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index ad5f6f6ba333..85b92917849b 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -108,6 +108,7 @@ int ila_lwt_init(void);
void ila_lwt_fini(void);
int ila_xlat_init_net(struct net *net);
+void ila_xlat_pre_exit_net(struct net *net);
void ila_xlat_exit_net(struct net *net);
int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 8c1ce78956ba..7d574f5132e2 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -38,7 +38,7 @@ static inline struct ila_params *ila_params_lwtunnel(
static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
- struct rt6_info *rt = (struct rt6_info *)orig_dst;
+ struct rt6_info *rt = dst_rt6_info(orig_dst);
struct ila_lwt *ilwt = ila_lwt_lwtunnel(orig_dst->lwtstate);
struct dst_entry *dst;
int err = -EINVAL;
@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return orig_dst->lwtstate->orig_output(net, sk, skb);
}
+ local_bh_disable();
dst = dst_cache_get(&ilwt->dst_cache);
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -70,7 +72,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = orig_dst->dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
- fl6.daddr = *rt6_nexthop((struct rt6_info *)orig_dst,
+ fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
&ip6h->daddr);
dst = ip6_route_output(net, NULL, &fl6);
@@ -86,10 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (ilwt->connected)
+ /* cache only if we don't create a dst reference loop */
+ if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) {
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
}
+ skb_dst_drop(skb);
skb_dst_set(skb, dst);
return dst_output(net, sk, skb);
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
index 69caed07315f..976c78efbae1 100644
--- a/net/ipv6/ila/ila_main.c
+++ b/net/ipv6/ila/ila_main.c
@@ -71,6 +71,11 @@ ila_xlat_init_fail:
return err;
}
+static __net_exit void ila_pre_exit_net(struct net *net)
+{
+ ila_xlat_pre_exit_net(net);
+}
+
static __net_exit void ila_exit_net(struct net *net)
{
ila_xlat_exit_net(net);
@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
static struct pernet_operations ila_net_ops = {
.init = ila_init_net,
+ .pre_exit = ila_pre_exit_net,
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 67e8c9440977..1d41b2ab4884 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -105,16 +105,11 @@ static int parse_nl_config(struct genl_info *info,
xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
info->attrs[ILA_ATTR_LOCATOR_MATCH]);
- if (info->attrs[ILA_ATTR_CSUM_MODE])
- xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
- else
- xp->ip.csum_mode = ILA_CSUM_NO_ACTION;
+ xp->ip.csum_mode = nla_get_u8_default(info->attrs[ILA_ATTR_CSUM_MODE],
+ ILA_CSUM_NO_ACTION);
- if (info->attrs[ILA_ATTR_IDENT_TYPE])
- xp->ip.ident_type = nla_get_u8(
- info->attrs[ILA_ATTR_IDENT_TYPE]);
- else
- xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;
+ xp->ip.ident_type = nla_get_u8_default(info->attrs[ILA_ATTR_IDENT_TYPE],
+ ILA_ATYPE_USE_FORMAT);
if (info->attrs[ILA_ATTR_IFINDEX])
xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
@@ -200,6 +195,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
},
};
+static DEFINE_MUTEX(ila_mutex);
+
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -207,16 +204,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
int err = 0, order;
- if (!ilan->xlat.hooks_registered) {
+ if (!READ_ONCE(ilan->xlat.hooks_registered)) {
/* We defer registering net hooks in the namespace until the
* first mapping is added.
*/
- err = nf_register_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
+ mutex_lock(&ila_mutex);
+ if (!ilan->xlat.hooks_registered) {
+ err = nf_register_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+ if (!err)
+ WRITE_ONCE(ilan->xlat.hooks_registered, true);
+ }
+ mutex_unlock(&ila_mutex);
if (err)
return err;
-
- ilan->xlat.hooks_registered = true;
}
ila = kzalloc(sizeof(*ila), GFP_KERNEL);
@@ -619,6 +620,15 @@ int ila_xlat_init_net(struct net *net)
return 0;
}
+void ila_xlat_pre_exit_net(struct net *net)
+{
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+
+ if (ilan->xlat.hooks_registered)
+ nf_unregister_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+}
+
void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -626,10 +636,6 @@ void ila_xlat_exit_net(struct net *net)
rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
free_bucket_spinlocks(ilan->xlat.locks);
-
- if (ilan->xlat.hooks_registered)
- nf_unregister_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
}
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
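
ila_add_mapping() above now registers the per-namespace netfilter hooks with a classic double-checked pattern: a lockless READ_ONCE() fast path, a recheck under a mutex, and a WRITE_ONCE() publish once registration has succeeded, with the unregister moved to a pernet .pre_exit hook. A generic sketch of that idiom, with hypothetical names (example_state, do_expensive_registration):

	struct example_state {
		bool registered;
	};

	static DEFINE_MUTEX(example_mutex);

	static int example_register_once(struct example_state *st)
	{
		int err = 0;

		if (READ_ONCE(st->registered))	/* common case: nothing to do */
			return 0;

		mutex_lock(&example_mutex);
		if (!st->registered) {		/* recheck now that we hold the lock */
			err = do_expensive_registration();
			if (!err)
				WRITE_ONCE(st->registered, true);
		}
		mutex_unlock(&example_mutex);

		return err;
	}

The mutex serializes concurrent first additions so the hooks cannot be registered twice, while the annotated flag keeps the fast path free of locking.
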
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b0e8d278e8a9..9ec05e354baa 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -14,27 +14,26 @@
#include <linux/random.h>
#include <net/addrconf.h>
+#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>
+#include <net/tcp.h>
u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
- static u32 inet6_ehash_secret __read_mostly;
- static u32 ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
- net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
- fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+ fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
inet6_ehash_secret + net_hash_mix(net));
@@ -47,7 +46,7 @@ EXPORT_SYMBOL_GPL(inet6_ehashfn);
*
* The sockhash lock must be held as a reader here.
*/
-struct sock *__inet6_lookup_established(struct net *net,
+struct sock *__inet6_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const struct in6_addr *saddr,
const __be16 sport,
@@ -90,7 +89,7 @@ found:
}
EXPORT_SYMBOL(__inet6_lookup_established);
-static inline int compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, const struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
const int dif, const int sdif)
@@ -127,7 +126,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
* Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
* the selected sock or an error.
*/
-struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
__be16 sport,
@@ -148,7 +147,7 @@ struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
EXPORT_SYMBOL_GPL(inet6_lookup_reuseport);
/* called with rcu_read_lock() */
-static struct sock *inet6_lhash2_lookup(struct net *net,
+static struct sock *inet6_lhash2_lookup(const struct net *net,
struct inet_listen_hashbucket *ilb2,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -175,7 +174,7 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
return result;
}
-struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -200,7 +199,7 @@ struct sock *inet6_lookup_run_sk_lookup(struct net *net,
}
EXPORT_SYMBOL_GPL(inet6_lookup_run_sk_lookup);
-struct sock *inet6_lookup_listener(struct net *net,
+struct sock *inet6_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -244,7 +243,8 @@ done:
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
-struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+struct sock *inet6_lookup(const struct net *net,
+ struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
@@ -291,7 +291,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
- if (twsk_unique(sk, sk2, twp))
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ tcp_twsk_unique(sk, sk2, twp))
break;
}
goto not_unique;
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 571f0e4d9cf3..a84d332f952f 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -135,15 +135,11 @@ static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
ns->id = id;
- if (!info->attrs[IOAM6_ATTR_NS_DATA])
- data32 = IOAM6_U32_UNAVAILABLE;
- else
- data32 = nla_get_u32(info->attrs[IOAM6_ATTR_NS_DATA]);
+ data32 = nla_get_u32_default(info->attrs[IOAM6_ATTR_NS_DATA],
+ IOAM6_U32_UNAVAILABLE);
- if (!info->attrs[IOAM6_ATTR_NS_DATA_WIDE])
- data64 = IOAM6_U64_UNAVAILABLE;
- else
- data64 = nla_get_u64(info->attrs[IOAM6_ATTR_NS_DATA_WIDE]);
+ data64 = nla_get_u64_default(info->attrs[IOAM6_ATTR_NS_DATA_WIDE],
+ IOAM6_U64_UNAVAILABLE);
ns->data = cpu_to_be32(data32);
ns->data_wide = cpu_to_be64(data64);
@@ -612,6 +608,68 @@ static const struct genl_ops ioam6_genl_ops[] = {
},
};
+#define IOAM6_GENL_EV_GRP_OFFSET 0
+
+static const struct genl_multicast_group ioam6_mcgrps[] = {
+ [IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
+ .flags = GENL_MCAST_CAP_NET_ADMIN },
+};
+
+static int ioam6_event_put_trace(struct sk_buff *skb,
+ struct ioam6_trace_hdr *trace,
+ unsigned int len)
+{
+ if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
+ be16_to_cpu(trace->namespace_id)) ||
+ nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
+ nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
+ be32_to_cpu(trace->type_be32)) ||
+ nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
+ len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
+ trace->data + trace->remlen * 4))
+ return 1;
+
+ return 0;
+}
+
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+
+ if (!genl_has_listeners(&ioam6_genl_family, net,
+ IOAM6_GENL_EV_GRP_OFFSET))
+ return;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!skb)
+ return;
+
+ nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
+ if (!nlh)
+ goto nla_put_failure;
+
+ switch (type) {
+ case IOAM6_EVENT_UNSPEC:
+ WARN_ON_ONCE(1);
+ break;
+ case IOAM6_EVENT_TRACE:
+ if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
+ opt_len))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(skb, nlh);
+ genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
+ IOAM6_GENL_EV_GRP_OFFSET, gfp);
+ return;
+
+nla_put_failure:
+ nlmsg_free(skb);
+}
+
static struct genl_family ioam6_genl_family __ro_after_init = {
.name = IOAM6_GENL_NAME,
.version = IOAM6_GENL_VERSION,
@@ -620,6 +678,8 @@ static struct genl_family ioam6_genl_family __ro_after_init = {
.ops = ioam6_genl_ops,
.n_ops = ARRAY_SIZE(ioam6_genl_ops),
.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
+ .mcgrps = ioam6_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
.module = THIS_MODULE,
};
@@ -663,7 +723,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -671,7 +731,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -758,7 +818,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
@@ -766,7 +826,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
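
Besides the nla_get_u32_default()/nla_get_u64_default() cleanups and the READ_ONCE()
annotations, the ioam6.c hunks add a generic netlink multicast event. A minimal sketch of
that notify pattern follows; the family pointer, group offset, command and attribute
numbers are placeholders, not the real IOAM6 definitions.

static void example_notify(struct genl_family *family, struct net *net,
			   u32 value)
{
	struct sk_buff *skb;
	void *hdr;

	/* cheap early exit: no allocation when nobody joined the group */
	if (!genl_has_listeners(family, net, 0 /* group offset */))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;

	hdr = genlmsg_put(skb, 0, 0, family, 0, 1 /* cmd */);
	if (!hdr || nla_put_u32(skb, 1 /* attr */, value)) {
		nlmsg_free(skb);
		return;
	}

	genlmsg_end(skb, hdr);
	genlmsg_multicast_netns(family, net, skb, 0, 0 /* group offset */,
				GFP_KERNEL);
}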
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 7563f8c6aa87..09065187378e 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -42,8 +42,10 @@ struct ioam6_lwt {
struct ioam6_lwt_freq freq;
atomic_t pkt_cnt;
u8 mode;
+ bool has_tunsrc;
+ struct in6_addr tunsrc;
struct in6_addr tundst;
- struct ioam6_lwt_encap tuninfo;
+ struct ioam6_lwt_encap tuninfo;
};
static const struct netlink_range_validation freq_range = {
@@ -72,8 +74,10 @@ static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
[IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8,
IOAM6_IPTUNNEL_MODE_MIN,
IOAM6_IPTUNNEL_MODE_MAX),
+ [IOAM6_IPTUNNEL_SRC] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
- [IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
+ [IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(
+ sizeof(struct ioam6_trace_hdr)),
};
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
@@ -85,7 +89,7 @@ static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
- trace->type.bit21)
+ trace->type.bit21 | trace->type.bit23)
return false;
trace->nodelen = 0;
@@ -138,10 +142,13 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
}
}
- if (!tb[IOAM6_IPTUNNEL_MODE])
- mode = IOAM6_IPTUNNEL_MODE_INLINE;
- else
- mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]);
+ mode = nla_get_u8_default(tb[IOAM6_IPTUNNEL_MODE],
+ IOAM6_IPTUNNEL_MODE_INLINE);
+
+ if (tb[IOAM6_IPTUNNEL_SRC] && mode == IOAM6_IPTUNNEL_MODE_INLINE) {
+ NL_SET_ERR_MSG(extack, "no tunnel src expected with this mode");
+ return -EINVAL;
+ }
if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) {
NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination");
@@ -167,19 +174,40 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
ilwt = ioam6_lwt_state(lwt);
err = dst_cache_init(&ilwt->cache, GFP_ATOMIC);
- if (err) {
- kfree(lwt);
- return err;
- }
+ if (err)
+ goto free_lwt;
atomic_set(&ilwt->pkt_cnt, 0);
ilwt->freq.k = freq_k;
ilwt->freq.n = freq_n;
ilwt->mode = mode;
- if (tb[IOAM6_IPTUNNEL_DST])
+
+ if (!tb[IOAM6_IPTUNNEL_SRC]) {
+ ilwt->has_tunsrc = false;
+ } else {
+ ilwt->has_tunsrc = true;
+ ilwt->tunsrc = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_SRC]);
+
+ if (ipv6_addr_any(&ilwt->tunsrc)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_SRC],
+ "invalid tunnel source address");
+ err = -EINVAL;
+ goto free_cache;
+ }
+ }
+
+ if (tb[IOAM6_IPTUNNEL_DST]) {
ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]);
+ if (ipv6_addr_any(&ilwt->tundst)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_DST],
+ "invalid tunnel dest address");
+ err = -EINVAL;
+ goto free_cache;
+ }
+ }
+
tuninfo = ioam6_lwt_info(lwt);
tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1;
tuninfo->pad[0] = IPV6_TLV_PADN;
@@ -201,6 +229,11 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
*ts = lwt;
return 0;
+free_cache:
+ dst_cache_destroy(&ilwt->cache);
+free_lwt:
+ kfree(lwt);
+ return err;
}
static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
@@ -220,14 +253,15 @@ static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
}
static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
- struct ioam6_lwt_encap *tuninfo)
+ struct ioam6_lwt_encap *tuninfo,
+ struct dst_entry *cache_dst)
{
struct ipv6hdr *oldhdr, *hdr;
int hdrlen, err;
hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
- err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;
@@ -256,7 +290,10 @@ static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
struct ioam6_lwt_encap *tuninfo,
- struct in6_addr *tundst)
+ bool has_tunsrc,
+ struct in6_addr *tunsrc,
+ struct in6_addr *tundst,
+ struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr, *inner_hdr;
@@ -265,7 +302,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
len = sizeof(*hdr) + hdrlen;
- err = skb_cow_head(skb, len + skb->mac_len);
+ err = skb_cow_head(skb, len + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;
@@ -285,8 +322,12 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
hdr->nexthdr = NEXTHDR_HOP;
hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
hdr->daddr = *tundst;
- ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
- IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
+
+ if (has_tunsrc)
+ memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
+ else
+ ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
+ IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
skb_postpush_rcsum(skb, hdr, len);
@@ -295,8 +336,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
- struct in6_addr orig_daddr;
+ struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
struct ioam6_lwt *ilwt;
int err = -EINVAL;
u32 pkt_cnt;
@@ -311,7 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k)
goto out;
- orig_daddr = ipv6_hdr(skb)->daddr;
+ local_bh_disable();
+ cache_dst = dst_cache_get(&ilwt->cache);
+ local_bh_enable();
switch (ilwt->mode) {
case IOAM6_IPTUNNEL_MODE_INLINE:
@@ -320,7 +362,7 @@ do_inline:
if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
goto out;
- err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
+ err = ioam6_do_inline(net, skb, &ilwt->tuninfo, cache_dst);
if (unlikely(err))
goto drop;
@@ -328,7 +370,9 @@ do_inline:
case IOAM6_IPTUNNEL_MODE_ENCAP:
do_encap:
/* Encapsulation (ip6ip6) */
- err = ioam6_do_encap(net, skb, &ilwt->tuninfo, &ilwt->tundst);
+ err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
+ ilwt->has_tunsrc, &ilwt->tunsrc,
+ &ilwt->tundst, cache_dst);
if (unlikely(err))
goto drop;
@@ -346,46 +390,48 @@ do_encap:
goto drop;
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;
+ if (unlikely(!cache_dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct flowi6 fl6;
- if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
- preempt_disable();
- dst = dst_cache_get(&ilwt->cache);
- preempt_enable();
-
- if (unlikely(!dst)) {
- struct ipv6hdr *hdr = ipv6_hdr(skb);
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.daddr = hdr->daddr;
- fl6.saddr = hdr->saddr;
- fl6.flowlabel = ip6_flowinfo(hdr);
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = hdr->nexthdr;
-
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- err = dst->error;
- dst_release(dst);
- goto drop;
- }
-
- preempt_disable();
- dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = hdr->daddr;
+ fl6.saddr = hdr->saddr;
+ fl6.flowlabel = ip6_flowinfo(hdr);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = hdr->nexthdr;
+
+ cache_dst = ip6_route_output(net, NULL, &fl6);
+ if (cache_dst->error) {
+ err = cache_dst->error;
+ goto drop;
}
- skb_dst_drop(skb);
- skb_dst_set(skb, dst);
+ /* cache only if we don't create a dst reference loop */
+ if (dst->lwtstate != cache_dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
+ local_bh_enable();
+ }
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev));
+ if (unlikely(err))
+ goto drop;
+ }
+ /* avoid lwtunnel_output() reentry loop when destination is the same
+ * after transformation (e.g., with the inline mode)
+ */
+ if (dst->lwtstate != cache_dst->lwtstate) {
+ skb_dst_drop(skb);
+ skb_dst_set(skb, cache_dst);
return dst_output(net, sk, skb);
}
out:
+ dst_release(cache_dst);
return dst->lwtstate->orig_output(net, sk, skb);
drop:
+ dst_release(cache_dst);
kfree_skb(skb);
return err;
}
@@ -414,6 +460,13 @@ static int ioam6_fill_encap_info(struct sk_buff *skb,
goto ret;
if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ if (ilwt->has_tunsrc) {
+ err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_SRC,
+ &ilwt->tunsrc);
+ if (err)
+ goto ret;
+ }
+
err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst);
if (err)
goto ret;
@@ -435,8 +488,12 @@ static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
nla_total_size(sizeof(ilwt->mode)) +
nla_total_size(sizeof(ilwt->tuninfo.traceh));
- if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE)
+ if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ if (ilwt->has_tunsrc)
+ nlsize += nla_total_size(sizeof(ilwt->tunsrc));
+
nlsize += nla_total_size(sizeof(ilwt->tundst));
+ }
return nlsize;
}
@@ -451,17 +508,21 @@ static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
return (ilwt_a->freq.k != ilwt_b->freq.k ||
ilwt_a->freq.n != ilwt_b->freq.n ||
ilwt_a->mode != ilwt_b->mode ||
+ ilwt_a->has_tunsrc != ilwt_b->has_tunsrc ||
(ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
!ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) ||
+ (ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
+ ilwt_a->has_tunsrc &&
+ !ipv6_addr_equal(&ilwt_a->tunsrc, &ilwt_b->tunsrc)) ||
trace_a->namespace_id != trace_b->namespace_id);
}
static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
.build_state = ioam6_build_state,
.destroy_state = ioam6_destroy_state,
- .output = ioam6_output,
+ .output = ioam6_output,
.fill_encap = ioam6_fill_encap_info,
- .get_encap_size = ioam6_encap_nlsize,
+ .get_encap_size = ioam6_encap_nlsize,
.cmp_encap = ioam6_encap_cmp,
.owner = THIS_MODULE,
};
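
The ioam6_iptunnel.c changes route the packet once, keep the result in the per-tunnel
dst_cache, and reuse it for later packets. A rough sketch of that lookup-or-populate
pattern; the helper name and the minimal error handling are illustrative, not the patch's
code:

static struct dst_entry *example_get_route(struct net *net,
					   struct dst_cache *cache,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst;

	local_bh_disable();
	dst = dst_cache_get(cache);	/* takes a reference on a hit */
	local_bh_enable();
	if (dst)
		return dst;

	dst = ip6_route_output(net, NULL, fl6);
	if (dst->error)
		return dst;		/* caller checks dst->error and releases */

	local_bh_disable();
	dst_cache_set_ip6(cache, dst, &fl6->saddr);
	local_bh_enable();

	return dst;
}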
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4fc2cae0d116..c134ba202c4c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,6 +160,8 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
+ INIT_HLIST_NODE(&f6i->gc_link);
+
return f6i;
}
@@ -196,16 +198,9 @@ static void node_free_immediate(struct net *net, struct fib6_node *fn)
net->ipv6.rt6_stats->fib_nodes--;
}
-static void node_free_rcu(struct rcu_head *head)
-{
- struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
-
- kmem_cache_free(fib6_node_kmem, fn);
-}
-
static void node_free(struct net *net, struct fib6_node *fn)
{
- call_rcu(&fn->rcu, node_free_rcu);
+ kfree_rcu(fn, rcu);
net->ipv6.rt6_stats->fib_nodes--;
}
@@ -246,6 +241,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
+ INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -342,17 +338,17 @@ static void __net_init fib6_tables_init(struct net *net)
#endif
-unsigned int fib6_tables_seq_read(struct net *net)
+unsigned int fib6_tables_seq_read(const struct net *net)
{
unsigned int h, fib_seq = 0;
rcu_read_lock();
for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
- struct hlist_head *head = &net->ipv6.fib_table_hash[h];
- struct fib6_table *tb;
+ const struct hlist_head *head = &net->ipv6.fib_table_hash[h];
+ const struct fib6_table *tb;
hlist_for_each_entry_rcu(tb, head, tb6_hlist)
- fib_seq += tb->fib_seq;
+ fib_seq += READ_ONCE(tb->fib_seq);
}
rcu_read_unlock();
@@ -397,7 +393,7 @@ int call_fib6_entry_notifiers(struct net *net,
.rt = rt,
};
- rt->fib6_table->fib_seq++;
+ WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1);
return call_fib6_notifiers(net, event_type, &info.info);
}
@@ -413,7 +409,7 @@ int call_fib6_multipath_entry_notifiers(struct net *net,
.nsiblings = nsiblings,
};
- rt->fib6_table->fib_seq++;
+ WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1);
return call_fib6_notifiers(net, event_type, &info.info);
}
@@ -424,7 +420,7 @@ int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt)
.nsiblings = rt->fib6_nsiblings,
};
- rt->fib6_table->fib_seq++;
+ WRITE_ONCE(rt->fib6_table->fib_seq, rt->fib6_table->fib_seq + 1);
return call_fib6_notifiers(net, FIB_EVENT_ENTRY_REPLACE, &info.info);
}
@@ -617,23 +613,25 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true,
- .filter.dump_routes = true };
+ struct rt6_rtnl_dump_arg arg = {
+ .filter.dump_exceptions = true,
+ .filter.dump_routes = true,
+ .filter.rtnl_held = false,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- unsigned int h, s_h;
unsigned int e = 0, s_e;
+ struct hlist_head *head;
struct fib6_walker *w;
struct fib6_table *tb;
- struct hlist_head *head;
- int res = 0;
+ unsigned int h, s_h;
+ int err = 0;
+ rcu_read_lock();
if (cb->strict_check) {
- int err;
-
err = ip_valid_fib_dump_req(net, nlh, &arg.filter, cb);
if (err < 0)
- return err;
+ goto unlock;
} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(nlh);
@@ -645,19 +643,21 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (!w) {
/* New dump:
*
- * 1. hook callback destructor.
- */
- cb->args[3] = (long)cb->done;
- cb->done = fib6_dump_done;
-
- /*
- * 2. allocate and initialize walker.
+ * 1. allocate and initialize walker.
*/
w = kzalloc(sizeof(*w), GFP_ATOMIC);
- if (!w)
- return -ENOMEM;
+ if (!w) {
+ err = -ENOMEM;
+ goto unlock;
+ }
w->func = fib6_dump_node;
cb->args[2] = (long)w;
+
+ /* 2. hook callback destructor.
+ */
+ cb->args[3] = (long)cb->done;
+ cb->done = fib6_dump_done;
+
}
arg.skb = skb;
@@ -669,46 +669,46 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
tb = fib6_get_table(net, arg.filter.table_id);
if (!tb) {
if (rtnl_msg_family(cb->nlh) != PF_INET6)
- goto out;
+ goto unlock;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
- return -ENOENT;
+ err = -ENOENT;
+ goto unlock;
}
if (!cb->args[0]) {
- res = fib6_dump_table(tb, skb, cb);
- if (!res)
+ err = fib6_dump_table(tb, skb, cb);
+ if (!err)
cb->args[0] = 1;
}
- goto out;
+ goto unlock;
}
s_h = cb->args[0];
s_e = cb->args[1];
- rcu_read_lock();
for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
if (e < s_e)
goto next;
- res = fib6_dump_table(tb, skb, cb);
- if (res != 0)
- goto out_unlock;
+ err = fib6_dump_table(tb, skb, cb);
+ if (err != 0)
+ goto out;
next:
e++;
}
}
-out_unlock:
- rcu_read_unlock();
+out:
cb->args[1] = e;
cb->args[0] = h;
-out:
- res = res < 0 ? res : skb->len;
- if (res <= 0)
+
+unlock:
+ rcu_read_unlock();
+ if (err <= 0)
fib6_dump_end(cb);
- return res;
+ return err;
}
void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val)
@@ -751,8 +751,6 @@ static struct fib6_node *fib6_add_1(struct net *net,
int bit;
__be32 dir = 0;
- RT6_TRACE("fib6_add_1\n");
-
/* insert node in tree */
fn = root;
@@ -961,6 +959,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (!fib6_nh->rt6i_pcpu)
return;
+ rcu_read_lock();
/* release the reference to this fib entry from
* all of its cached pcpu routes
*/
@@ -969,7 +968,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
struct rt6_info *pcpu_rt;
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
- pcpu_rt = *ppcpu_rt;
+
+ /* Paired with xchg() in rt6_get_pcpu_route() */
+ pcpu_rt = READ_ONCE(*ppcpu_rt);
/* only dropping the 'from' reference if the cached route
* is using 'match'. The cached pcpu_rt->from only changes
@@ -979,10 +980,11 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) {
struct fib6_info *from;
- from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+ from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
fib6_info_release(from);
}
}
+ rcu_read_unlock();
}
struct fib6_nh_pcpu_arg {
@@ -1057,6 +1059,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
+
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
}
/*
@@ -1117,10 +1122,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
- if (!(rt->fib6_flags & RTF_EXPIRES))
+ if (!(rt->fib6_flags & RTF_EXPIRES)) {
fib6_clean_expires(iter);
- else
+ fib6_remove_gc_list(iter);
+ } else {
fib6_set_expires(iter, rt->expires);
+ fib6_add_gc_list(iter);
+ }
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1175,8 +1183,8 @@ next_iter:
while (sibling) {
if (sibling->fib6_metric == rt->fib6_metric &&
rt6_qualify_for_ecmp(sibling)) {
- list_add_tail(&rt->fib6_siblings,
- &sibling->fib6_siblings);
+ list_add_tail_rcu(&rt->fib6_siblings,
+ &sibling->fib6_siblings);
break;
}
sibling = rcu_dereference_protected(sibling->fib6_next,
@@ -1237,7 +1245,7 @@ add:
fib6_siblings)
sibling->fib6_nsiblings--;
rt->fib6_nsiblings = 0;
- list_del_init(&rt->fib6_siblings);
+ list_del_rcu(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
return err;
}
@@ -1375,7 +1383,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack)
{
struct fib6_table *table = rt->fib6_table;
- struct fib6_node *fn, *pn = NULL;
+ struct fib6_node *fn;
+#ifdef CONFIG_IPV6_SUBTREES
+ struct fib6_node *pn = NULL;
+#endif
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
@@ -1399,9 +1410,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
goto out;
}
+#ifdef CONFIG_IPV6_SUBTREES
pn = fn;
-#ifdef CONFIG_IPV6_SUBTREES
if (rt->fib6_src.plen) {
struct fib6_node *sn;
@@ -1479,6 +1490,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+
+ if (rt->fib6_flags & RTF_EXPIRES)
+ fib6_add_gc_list(rt);
+
fib6_start_gc(info->nl_net, rt);
}
@@ -1803,7 +1818,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
lockdep_is_held(&table->tb6_lock));
struct fib6_info *new_fn_leaf;
- RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
+ pr_debug("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
iter++;
WARN_ON(fn->fn_flags & RTN_RTINFO);
@@ -1866,7 +1881,8 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
FOR_WALKERS(net, w) {
if (!child) {
if (w->node == fn) {
- RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
+ pr_debug("W %p adjusted by delnode 1, s=%d/%d\n",
+ w, w->state, nstate);
w->node = pn;
w->state = nstate;
}
@@ -1874,10 +1890,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
if (w->node == fn) {
w->node = child;
if (children&2) {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
} else {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
}
}
@@ -1905,8 +1923,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
struct net *net = info->nl_net;
bool notify_del = false;
- RT6_TRACE("fib6_del_route\n");
-
/* If the deleted route is the first in the node and it is not part of
* a multipath route, then we need to replace it with the next route
* in the node, if exists.
@@ -1947,7 +1963,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
&rt->fib6_siblings, fib6_siblings)
sibling->fib6_nsiblings--;
rt->fib6_nsiblings = 0;
- list_del_init(&rt->fib6_siblings);
+ list_del_rcu(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
}
@@ -1955,7 +1971,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
read_lock(&net->ipv6.fib6_walker_lock);
FOR_WALKERS(net, w) {
if (w->state == FWS_C && w->leaf == rt) {
- RT6_TRACE("walker %p adjusted by delroute\n", w);
+ pr_debug("walker %p adjusted by delroute\n", w);
w->leaf = rcu_dereference_protected(rt->fib6_next,
lockdep_is_held(&table->tb6_lock));
if (!w->leaf)
@@ -2281,9 +2297,8 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, void *arg)
+static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
{
- struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2293,7 +2308,7 @@ static int fib6_age(struct fib6_info *rt, void *arg)
if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
if (time_after(now, rt->expires)) {
- RT6_TRACE("expiring %p\n", rt);
+ pr_debug("expiring %p\n", rt);
return -1;
}
gc_args->more++;
@@ -2308,6 +2323,42 @@ static int fib6_age(struct fib6_info *rt, void *arg)
return 0;
}
+static void fib6_gc_table(struct net *net,
+ struct fib6_table *tb6,
+ struct fib6_gc_args *gc_args)
+{
+ struct fib6_info *rt;
+ struct hlist_node *n;
+ struct nl_info info = {
+ .nl_net = net,
+ .skip_notify = false,
+ };
+
+ hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+ if (fib6_age(rt, gc_args) == -1)
+ fib6_del(rt, &info);
+}
+
+static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ spin_lock_bh(&table->tb6_lock);
+
+ fib6_gc_table(net, table, gc_args);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
+
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2323,7 +2374,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_clean_all(net, fib6_age, &gc_args);
+ fib6_gc_all(net, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
@@ -2383,6 +2434,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -2395,6 +2447,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_local_tbl->tb6_gc_hlist);
#endif
fib6_tables_init(net);
@@ -2440,14 +2493,18 @@ static struct pernet_operations fib6_net_ops = {
.exit = fib6_net_exit,
};
+static const struct rtnl_msg_handler fib6_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
+ .dumpit = inet6_dump_fib,
+ .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
+};
+
int __init fib6_init(void)
{
int ret = -ENOMEM;
- fib6_node_kmem = kmem_cache_create("fib6_nodes",
- sizeof(struct fib6_node), 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
- NULL);
+ fib6_node_kmem = KMEM_CACHE(fib6_node,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
if (!fib6_node_kmem)
goto out;
@@ -2455,8 +2512,7 @@ int __init fib6_init(void)
if (ret)
goto out_kmem_cache_create;
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
- inet6_dump_fib, 0);
+ ret = rtnl_register_many(fib6_rtnl_msg_handlers);
if (ret)
goto out_unregister_subsys;
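
Among the ip6_fib.c cleanups, the dedicated node_free_rcu() callback is dropped in favour
of kfree_rcu(). A stand-in example of that conversion; the struct below is hypothetical,
not a kernel type:

struct example_node {
	int value;
	struct rcu_head rcu;
};

static void example_free(struct example_node *n)
{
	/* before: call_rcu(&n->rcu, small_callback_that_just_kfrees) */
	kfree_rcu(n, rcu);	/* RCU core frees it after the grace period */
}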
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 070d87abf7c0..235808cfec70 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -496,11 +496,11 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
tpi->proto);
if (tunnel) {
if (tunnel->parms.collect_md) {
+ IP_TUNNEL_DECLARE_FLAGS(flags);
struct metadata_dst *tun_dst;
__be64 tun_id;
- __be16 flags;
- flags = tpi->flags;
+ ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
@@ -528,6 +528,9 @@ static int ip6erspan_rcv(struct sk_buff *skb,
struct ip6_tnl *tunnel;
u8 ver;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+ return PACKET_REJECT;
+
ipv6h = ipv6_hdr(skb);
ershdr = (struct erspan_base_hdr *)skb->data;
ver = ershdr->ver;
@@ -548,14 +551,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
if (tunnel->parms.collect_md) {
struct erspan_metadata *pkt_md, *md;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
unsigned char *gh;
__be64 tun_id;
- __be16 flags;
- tpi->flags |= TUNNEL_KEY;
- flags = tpi->flags;
+ __set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
+ ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
@@ -577,7 +580,8 @@ static int ip6erspan_rcv(struct sk_buff *skb,
md2 = &md->u.md2;
memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
ERSPAN_V2_MDSIZE);
- info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ info->key.tun_flags);
info->options_len = sizeof(*md);
ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
@@ -745,8 +749,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
__u32 *pmtu, __be16 proto)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
+ IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 protocol;
- __be16 flags;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -778,8 +782,11 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
- flags = key->tun_flags &
- (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
+ ip_tunnel_flags_zero(flags);
+ __set_bit(IP_TUNNEL_CSUM_BIT, flags);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ __set_bit(IP_TUNNEL_SEQ_BIT, flags);
+ ip_tunnel_flags_and(flags, flags, key->tun_flags);
tun_hlen = gre_calc_hlen(flags);
if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
@@ -788,19 +795,21 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
gre_build_header(skb, tun_hlen,
flags, protocol,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
- : 0);
+ test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
+ htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
+ 0);
} else {
if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
return -ENOMEM;
- flags = tunnel->parms.o_flags;
+ ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
gre_build_header(skb, tunnel->tun_hlen, flags,
protocol, tunnel->parms.o_key,
- (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
- : 0);
+ test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
+ htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
+ 0);
}
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@@ -822,7 +831,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
&dsfield, &encap_limit);
- err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+ err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ t->parms.o_flags));
if (err)
return -1;
@@ -856,7 +866,8 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
return -1;
- if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
+ if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ t->parms.o_flags)))
return -1;
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
@@ -883,7 +894,8 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit))
return -1;
- err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+ err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
+ t->parms.o_flags));
if (err)
return err;
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol);
@@ -936,6 +948,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
struct ip_tunnel_info *tun_info = NULL;
struct ip6_tnl *t = netdev_priv(dev);
struct dst_entry *dst = skb_dst(skb);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
bool truncate = false;
int encap_limit = -1;
__u8 dsfield = false;
@@ -979,7 +992,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
goto tx_err;
- t->parms.o_flags &= ~TUNNEL_KEY;
+ __clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
IPCB(skb)->flags = 0;
/* For collect_md mode, derive fl6 from the tunnel key,
@@ -1004,7 +1017,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
- if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ tun_info->key.tun_flags))
goto tx_err;
if (tun_info->options_len < sizeof(*md))
goto tx_err;
@@ -1065,7 +1079,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
}
/* Push GRE header. */
- gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
+ __set_bit(IP_TUNNEL_SEQ_BIT, flags);
+ gre_build_header(skb, 8, flags, proto, 0,
+ htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1208,8 +1224,8 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
t->parms.proto = p->proto;
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
- t->parms.i_flags = p->i_flags;
- t->parms.o_flags = p->o_flags;
+ ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
+ ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
t->parms.fwmark = p->fwmark;
t->parms.erspan_ver = p->erspan_ver;
t->parms.index = p->index;
@@ -1238,8 +1254,8 @@ static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
p->link = u->link;
p->i_key = u->i_key;
p->o_key = u->o_key;
- p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
- p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
+ gre_flags_to_tnl_flags(p->i_flags, u->i_flags);
+ gre_flags_to_tnl_flags(p->o_flags, u->o_flags);
memcpy(p->name, u->name, sizeof(u->name));
}
@@ -1391,7 +1407,7 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
ipv6h->daddr = t->parms.raddr;
p = (__be16 *)(ipv6h + 1);
- p[0] = t->parms.o_flags;
+ p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags);
p[1] = htons(type);
/*
@@ -1418,7 +1434,6 @@ static const struct net_device_ops ip6gre_netdev_ops = {
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1428,7 +1443,6 @@ static void ip6gre_dev_free(struct net_device *dev)
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
- free_percpu(dev->tstats);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
@@ -1437,6 +1451,7 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->type = ARPHRD_IP6GRE;
dev->flags |= IFF_NOARP;
@@ -1455,23 +1470,23 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static void ip6gre_tnl_init_features(struct net_device *dev)
{
struct ip6_tnl *nt = netdev_priv(dev);
- __be16 flags;
- dev->features |= GRE6_FEATURES | NETIF_F_LLTX;
+ dev->features |= GRE6_FEATURES;
dev->hw_features |= GRE6_FEATURES;
- flags = nt->parms.o_flags;
-
/* TCP offload with GRE SEQ is not supported, nor can we support 2
* levels of outer headers requiring an update.
*/
- if (flags & TUNNEL_SEQ)
+ if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags))
return;
- if (flags & TUNNEL_CSUM && nt->encap.type != TUNNEL_ENCAP_NONE)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) &&
+ nt->encap.type != TUNNEL_ENCAP_NONE)
return;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->lltx = true;
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
@@ -1486,13 +1501,9 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (ret)
- goto cleanup_alloc_pcpu_stats;
+ return ret;
ret = gro_cells_init(&tunnel->gro_cells, dev);
if (ret)
@@ -1511,13 +1522,11 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
ip6gre_tnl_init_features(dev);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
dst_cache_destroy(&tunnel->dst_cache);
-cleanup_alloc_pcpu_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
@@ -1612,8 +1621,7 @@ static int __net_init ip6gre_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
-
+ ign->fb_tunnel_dev->netns_local = true;
ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1632,21 +1640,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6gre_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6gre_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
- .exit_batch = ip6gre_exit_batch_net,
+ .exit_batch_rtnl = ip6gre_exit_batch_rtnl,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
@@ -1793,12 +1799,12 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
- parms->i_flags = gre_flags_to_tnl_flags(
- nla_get_be16(data[IFLA_GRE_IFLAGS]));
+ gre_flags_to_tnl_flags(parms->i_flags,
+ nla_get_be16(data[IFLA_GRE_IFLAGS]));
if (data[IFLA_GRE_OFLAGS])
- parms->o_flags = gre_flags_to_tnl_flags(
- nla_get_be16(data[IFLA_GRE_OFLAGS]));
+ gre_flags_to_tnl_flags(parms->o_flags,
+ nla_get_be16(data[IFLA_GRE_OFLAGS]));
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@@ -1851,7 +1857,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1880,13 +1885,9 @@ static int ip6erspan_tap_init(struct net_device *dev)
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (ret)
- goto cleanup_alloc_pcpu_stats;
+ return ret;
ret = gro_cells_init(&tunnel->gro_cells, dev);
if (ret)
@@ -1903,13 +1904,11 @@ static int ip6erspan_tap_init(struct net_device *dev)
ip6erspan_tnl_link_config(tunnel, 1);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
dst_cache_destroy(&tunnel->dst_cache);
-cleanup_alloc_pcpu_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
@@ -1920,7 +1919,6 @@ static const struct net_device_ops ip6erspan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1934,6 +1932,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
@@ -2144,11 +2143,13 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm *p = &t->parms;
- __be16 o_flags = p->o_flags;
+ IP_TUNNEL_DECLARE_FLAGS(o_flags);
+
+ ip_tunnel_flags_copy(o_flags, p->o_flags);
if (p->erspan_ver == 1 || p->erspan_ver == 2) {
if (!p->collect_md)
- o_flags |= TUNNEL_KEY;
+ __set_bit(IP_TUNNEL_KEY_BIT, o_flags);
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
goto nla_put_failure;
@@ -2234,6 +2235,7 @@ static void ip6erspan_tap_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
@@ -2405,7 +2407,7 @@ static void __exit ip6gre_fini(void)
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
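
Most of the ip6_gre.c churn comes from the tunnel-flag representation changing from a
__be16 word to a bitmap. A small sketch of the before/after shape; the function name and
parameter are placeholders:

static bool example_seq_enabled(const unsigned long *o_flags)
{
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };

	/* old form: flags = o_flags & (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); */
	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
	__set_bit(IP_TUNNEL_KEY_BIT, flags);
	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	ip_tunnel_flags_and(flags, flags, o_flags);

	/* old form: !!(flags & TUNNEL_SEQ) */
	return test_bit(IP_TUNNEL_SEQ_BIT, flags);
}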
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b8378814532c..39da6a7ce5f1 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -111,9 +111,8 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
{
struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct dst_entry *dst;
@@ -168,9 +167,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
SKB_DR_SET(reason, NOT_SPECIFIED);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
- !idev || unlikely(idev->cnf.disable_ipv6)) {
+ !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- if (idev && unlikely(idev->cnf.disable_ipv6))
+ if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
SKB_DR_SET(reason, IPV6DISABLED);
goto drop;
}
@@ -236,7 +235,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
- idev->cnf.drop_unicast_in_l2_multicast) {
+ READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
goto err;
}
@@ -327,9 +326,8 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *curr_dev = NULL;
struct net *curr_net = NULL;
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
@@ -479,9 +477,7 @@ discard:
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb_clear_delivery_time(skb);
- rcu_read_lock();
ip6_protocol_deliver_rcu(net, skb, 0, false);
- rcu_read_unlock();
return 0;
}
@@ -489,9 +485,15 @@ static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *sk
int ip6_input(struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
- dev_net(skb->dev), NULL, skb, skb->dev, NULL,
- ip6_input_finish);
+ int res;
+
+ rcu_read_lock();
+ res = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
+ dev_net_rcu(skb->dev), NULL, skb, skb->dev, NULL,
+ ip6_input_finish);
+ rcu_read_unlock();
+
+ return res;
}
EXPORT_SYMBOL_GPL(ip6_input);
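
In ip6_input.c, the on-stack lists switch to the LIST_HEAD() declarator and the RCU read
side moves up into ip6_input() so the dev_net_rcu() dereference sits under the lock. The
list change is purely declarative; a stand-in example:

static void example_collect(void)
{
	LIST_HEAD(sublist);	/* declared and initialised in one step */

	/* before: struct list_head sublist; INIT_LIST_HEAD(&sublist); */
	WARN_ON(!list_empty(&sublist));
}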
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cca64c7809be..9822163428b0 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -67,7 +67,7 @@ static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto)
off += len;
}
- skb_gro_pull(skb, off - skb_network_offset(skb));
+ skb_gro_pull(skb, off - skb_gro_receive_network_offset(skb));
return proto;
}
@@ -236,7 +236,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
if (unlikely(!iph))
goto out;
- skb_set_network_header(skb, off);
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
flush += ntohs(iph->payload_len) != skb->len - hlen;
@@ -259,7 +259,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
NAPI_GRO_CB(skb)->proto = proto;
flush--;
- nlen = skb_network_header_len(skb);
+ nlen = skb_gro_offset(skb) - off;
list_for_each_entry(p, head, list) {
const struct ipv6hdr *iph2;
@@ -290,19 +290,8 @@ not_same_flow:
nlen - sizeof(struct ipv6hdr)))
goto not_same_flow;
}
- /* flush if Traffic Class fields are different */
- NAPI_GRO_CB(p)->flush |= !!((first_word & htonl(0x0FF00000)) |
- (__force __be32)(iph->hop_limit ^ iph2->hop_limit));
- NAPI_GRO_CB(p)->flush |= flush;
-
- /* If the previous IP ID value was based on an atomic
- * datagram we can overwrite the value and ignore it.
- */
- if (NAPI_GRO_CB(skb)->is_atomic)
- NAPI_GRO_CB(p)->flush_id = 0;
}
- NAPI_GRO_CB(skb)->is_atomic = true;
NAPI_GRO_CB(skb)->flush |= flush;
skb_gro_postpull_rcsum(skb, iph, nlen);
@@ -419,14 +408,6 @@ static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
return inet_gro_complete(skb, nhoff);
}
-static struct packet_offload ipv6_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IPV6),
- .callbacks = {
- .gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
- .gro_complete = ipv6_gro_complete,
- },
-};
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -486,7 +467,15 @@ static int __init ipv6_offload_init(void)
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
- dev_add_offload(&ipv6_packet_offload);
+ net_hotdata.ipv6_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IPV6),
+ .callbacks = {
+ .gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ipv6_packet_offload);
inet_add_offload(&sit_offload, IPPROTO_IPV6);
inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 31b86fe661aa..d577bf2f3053 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -70,11 +70,15 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
/* Be paranoid, rather than too clever. */
if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
+ /* Make sure idev stays alive */
+ rcu_read_lock();
skb = skb_expand_head(skb, hh_len);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
return -ENOMEM;
}
+ rcu_read_unlock();
}
hdr = ipv6_hdr(skb);
@@ -120,10 +124,10 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
rcu_read_lock();
- nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
+ nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
- if (unlikely(IS_ERR_OR_NULL(neigh))) {
+ if (IS_ERR_OR_NULL(neigh)) {
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
if (IS_ERR(neigh)) {
@@ -234,7 +238,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
- if (unlikely(idev->cnf.disable_ipv6)) {
+ if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
@@ -283,11 +287,15 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(head_room > skb_headroom(skb))) {
+ /* Make sure idev stays alive */
+ rcu_read_lock();
skb = skb_expand_head(skb, head_room);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
return -ENOBUFS;
}
+ rcu_read_unlock();
}
if (opt) {
@@ -501,7 +509,7 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
@@ -513,8 +521,8 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!net->ipv6.devconf_all->disable_policy &&
- (!idev || !idev->cnf.disable_policy) &&
+ if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
+ (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
@@ -552,7 +560,7 @@ int ip6_forward(struct sk_buff *skb)
}
/* XXX: idev->cnf.proxy_ndp? */
- if (net->ipv6.devconf_all->proxy_ndp &&
+ if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
@@ -599,21 +607,21 @@ int ip6_forward(struct sk_buff *skb)
* send a redirect.
*/
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_GATEWAY)
target = &rt->rt6i_gateway;
else
target = &hdr->daddr;
- peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
+ rcu_read_lock();
+ peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
- if (peer)
- inet_putpeer(peer);
+ rcu_read_unlock();
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -856,10 +864,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
struct sk_buff *frag;
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
struct ip6_frag_state state;
unsigned int mtu, hlen, nexthdr_offset;
ktime_t tstamp = skb->tstamp;
@@ -955,7 +963,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -1016,7 +1024,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
- skb_set_delivery_time(frag, tstamp, mono_delivery_time);
+ skb_set_delivery_time(frag, tstamp, tstamp_type);
err = output(net, sk, frag);
if (err)
goto fail;
@@ -1063,7 +1071,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
return NULL;
}
- rt = (struct rt6_info *)dst;
+ rt = dst_rt6_info(dst);
/* Yes, checking route validity in not connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
@@ -1118,12 +1126,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
struct rt6_info *rt;
*dst = ip6_route_output(net, sk, fl6);
- rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
+ rt = (*dst)->error ? NULL : dst_rt6_info(*dst);
rcu_read_lock();
from = rt ? rcu_dereference(rt->from) : NULL;
err = ip6_route_get_saddr(net, from, &fl6->daddr,
sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0,
+ fl6->flowi6_l3mdev,
&fl6->saddr);
rcu_read_unlock();
@@ -1159,7 +1168,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
* dst entry and replace it instead with the
* dst entry of the nexthop router
*/
- rt = (struct rt6_info *) *dst;
+ rt = dst_rt6_info(*dst);
rcu_read_lock();
n = __ipv6_neigh_lookup_noref(rt->dst.dev,
rt6_nexthop(rt, &fl6->daddr));
@@ -1392,8 +1401,12 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
cork->base.gso_size = ipc6->gso_size;
cork->base.tx_flags = 0;
cork->base.mark = ipc6->sockc.mark;
- sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
-
+ cork->base.priority = ipc6->sockc.priority;
+ sock_tx_timestamp(sk, &ipc6->sockc, &cork->base.tx_flags);
+ if (ipc6->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) {
+ cork->base.flags |= IPCORK_TS_OPT_ID;
+ cork->base.ts_opt_id = ipc6->sockc.ts_opt_id;
+ }
cork->base.length = 0;
cork->base.transmit_time = ipc6->sockc.transmit_time;
@@ -1423,8 +1436,8 @@ static int __ip6_append_data(struct sock *sk,
int offset = 0;
bool zc = false;
u32 tskey = 0;
- struct rt6_info *rt = (struct rt6_info *)cork->dst;
- bool paged, hold_tskey, extra_uref = false;
+ struct rt6_info *rt = dst_rt6_info(cork->dst);
+ bool paged, hold_tskey = false, extra_uref = false;
struct ipv6_txoptions *opt = v6_cork->opt;
int csummode = CHECKSUM_NONE;
unsigned int maxnonfragsize, headersize;
@@ -1534,10 +1547,15 @@ emsgsize:
flags &= ~MSG_SPLICE_PAGES;
}
- hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
- READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
- if (hold_tskey)
- tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
+ if (cork->flags & IPCORK_TS_OPT_ID) {
+ tskey = cork->ts_opt_id;
+ } else {
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ hold_tskey = true;
+ }
+ }
/*
* Let's try using as much space as possible.
@@ -1680,8 +1698,9 @@ alloc_new_skb:
pskb_trim_unique(skb_prev, maxfraglen);
}
if (copy > 0 &&
- getfrag(from, data + transhdrlen, offset,
- copy, fraggap, skb) < 0) {
+ INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
@@ -1725,8 +1744,9 @@ alloc_new_skb:
unsigned int off;
off = skb->len;
- if (getfrag(from, skb_put(skb, copy),
- offset, copy, off, skb) < 0) {
+ if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from, skb_put(skb, copy),
+ offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
@@ -1764,7 +1784,8 @@ alloc_new_skb:
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- if (getfrag(from,
+ if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
+ from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
@@ -1877,7 +1898,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
struct net *net = sock_net(sk);
struct ipv6hdr *hdr;
struct ipv6_txoptions *opt = v6_cork->opt;
- struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
+ struct rt6_info *rt = dst_rt6_info(cork->base.dst);
struct flowi6 *fl6 = &cork->fl.u.ip6;
unsigned char proto = fl6->flowi6_proto;
@@ -1922,9 +1943,12 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
hdr->saddr = fl6->saddr;
hdr->daddr = *final_dst;
- skb->priority = READ_ONCE(sk->sk_priority);
+ skb->priority = cork->base.priority;
skb->mark = cork->base.mark;
- skb->tstamp = cork->base.transmit_time;
+ if (sk_is_tcp(sk))
+ skb_set_delivery_time(skb, cork->base.transmit_time, SKB_CLOCK_MONOTONIC);
+ else
+ skb_set_delivery_type_by_clockid(skb, cork->base.transmit_time, sk->sk_clockid);
ip6_cork_steal_dst(skb, cork);
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
@@ -1933,7 +1957,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
u8 icmp6_type;
if (sk->sk_socket->type == SOCK_RAW &&
- !inet_test_bit(HDRINCL, sk))
+ !(fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH))
icmp6_type = fl6->fl6_icmp_type;
else
icmp6_type = icmp6_hdr(skb)->icmp6_type;
@@ -1949,9 +1973,10 @@ out:
int ip6_send_skb(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
int err;
+ rcu_read_lock();
err = ip6_local_out(net, skb->sk, skb);
if (err) {
if (err > 0)
@@ -1961,6 +1986,7 @@ int ip6_send_skb(struct sk_buff *skb)
IPSTATS_MIB_OUTDISCARDS);
}
+ rcu_read_unlock();
return err;
}
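
Several hunks here and in ip6_tunnel.c below annotate lockless fast-path reads with
READ_ONCE() and pair them with WRITE_ONCE() on the update side (dev->mtu, the
idev->cnf.* knobs). The generic shape, with a stand-in struct rather than a kernel type:

struct example_cfg {
	int limit;
};

static void example_writer(struct example_cfg *cfg, int new_limit)
{
	WRITE_ONCE(cfg->limit, new_limit);	/* paired with the lockless reader */
}

static bool example_reader(const struct example_cfg *cfg, int value)
{
	return value <= READ_ONCE(cfg->limit);	/* no lock held here */
}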
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9bbabf750a21..48fd53b98972 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -53,6 +53,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>
+#include <net/inet_dscp.h>
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -247,7 +248,6 @@ static void ip6_dev_free(struct net_device *dev)
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
- free_percpu(dev->tstats);
}
static int ip6_tnl_create2(struct net_device *dev)
@@ -609,7 +609,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Try to guess incoming interface */
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
- 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+ 0, 0, 0, IPPROTO_IPIP,
+ eiph->tos & INET_DSCP_MASK, 0);
if (IS_ERR(rt))
goto out;
@@ -620,7 +621,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rt->rt_flags & RTCF_LOCAL) {
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
eiph->daddr, eiph->saddr, 0, 0,
- IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+ IPPROTO_IPIP,
+ eiph->tos & INET_DSCP_MASK, 0);
if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
if (!IS_ERR(rt))
ip_rt_put(rt);
@@ -628,8 +630,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
skb_dst_set(skb2, &rt->dst);
} else {
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
- skb2->dev) ||
+ if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
+ ip4h_dscp(eiph), skb2->dev) ||
skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
goto out;
}
@@ -799,17 +801,15 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
const struct ipv6hdr *ipv6h;
int nh, err;
- if ((!(tpi->flags & TUNNEL_CSUM) &&
- (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
- ((tpi->flags & TUNNEL_CSUM) &&
- !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
+ test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
DEV_STATS_INC(tunnel->dev, rx_crc_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
- if (tunnel->parms.i_flags & TUNNEL_SEQ) {
- if (!(tpi->flags & TUNNEL_SEQ) ||
+ if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
+ if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
(tunnel->i_seqno &&
(s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
@@ -947,7 +947,9 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
if (t->parms.collect_md) {
- tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+
+ tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0);
if (!tun_dst)
goto drop;
}
@@ -1508,7 +1510,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
tdev = __dev_get_by_index(t->net, p->link);
if (tdev) {
- dev->hard_header_len = tdev->hard_header_len + t_hlen;
+ dev->needed_headroom = tdev->hard_header_len +
+ tdev->needed_headroom + t_hlen;
mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
mtu = mtu - t_hlen;
@@ -1732,7 +1735,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip6_tnl *tnl = netdev_priv(dev);
+ int t_hlen;
+ t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
if (tnl->parms.proto == IPPROTO_IPV6) {
if (new_mtu < IPV6_MIN_MTU)
return -EINVAL;
@@ -1741,13 +1746,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
- if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
+ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
return -EINVAL;
} else {
- if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
+ if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);
@@ -1756,7 +1761,7 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- return t->parms.link;
+ return READ_ONCE(t->parms.link);
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
@@ -1847,7 +1852,8 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->type = ARPHRD_TUNNEL6;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
dev->features |= IPXIPX_FEATURES;
@@ -1873,13 +1879,10 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
if (ret)
- goto free_stats;
+ return ret;
ret = gro_cells_init(&t->gro_cells, dev);
if (ret)
@@ -1890,21 +1893,18 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t_hlen = t->hlen + sizeof(struct ipv6hdr);
dev->type = ARPHRD_TUNNEL6;
- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
destroy_dst:
dst_cache_destroy(&t->dst_cache);
-free_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
@@ -2151,7 +2151,7 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
- return tunnel->net;
+ return READ_ONCE(tunnel->net);
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);
@@ -2261,7 +2261,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
+ ip6n->fb_tnl_dev->netns_local = true;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
@@ -2282,21 +2282,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6_tnl_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6_tnl_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
- .exit_batch = ip6_tnl_exit_batch_net,
+ .exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};
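A hedged sketch of the exit_batch_rtnl conversion seen in the ip6_tunnel.c hunk above: the pernet core now takes rtnl_lock() once, each namespace queues its doomed devices on dev_to_kill, and the core unregisters them in a single batch. Identifiers other than the pernet_operations fields are illustrative.

static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();		/* RTNL is already held by the caller */
	list_for_each_entry(net, net_list, exit_list)
		example_destroy_tunnels(net, dev_to_kill);
	/* no per-driver unregister_netdevice_many()/rtnl_unlock() needed */
}

static struct pernet_operations example_net_ops = {
	.init		 = example_init_net,
	.exit_batch_rtnl = example_exit_batch_rtnl,
};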
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index e550240c85e1..590737c27537 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -174,11 +174,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
}
}
-static void vti6_dev_free(struct net_device *dev)
-{
- free_percpu(dev->tstats);
-}
-
static int vti6_tnl_create2(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
@@ -671,7 +666,8 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
dev->flags &= ~IFF_POINTOPOINT;
if (keep_mtu && dev->mtu) {
- dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu);
+ WRITE_ONCE(dev->mtu,
+ clamp(dev->mtu, dev->min_mtu, dev->max_mtu));
return;
}
@@ -892,7 +888,6 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_uninit = vti6_dev_uninit,
.ndo_start_xmit = vti6_tnl_xmit,
.ndo_siocdevprivate = vti6_siocdevprivate,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -908,8 +903,8 @@ static void vti6_dev_setup(struct net_device *dev)
dev->netdev_ops = &vti6_netdev_ops;
dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = vti6_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->type = ARPHRD_TUNNEL6;
dev->min_mtu = IPV4_MIN_MTU;
dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
@@ -931,10 +926,8 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1174,24 +1167,22 @@ err_alloc_dev:
return err;
}
-static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
+static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct vti6_net *ip6n;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
ip6n = net_generic(net, vti6_net_id);
- vti6_destroy_tunnels(ip6n, &list);
+ vti6_destroy_tunnels(ip6n, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations vti6_net_ops = {
.init = vti6_init_net,
- .exit_batch = vti6_exit_batch_net,
+ .exit_batch_rtnl = vti6_exit_batch_rtnl,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
};
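The ip6_vti.c hunks above drop the hand-rolled tstats allocation and destructor. A hedged sketch of the resulting setup; names other than the two netdev fields are illustrative:

static void example_tunnel_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;
	/* core allocates and frees the per-cpu tstats counters */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	/* no netdev_alloc_pcpu_stats(), no free_percpu() destructor,
	 * and no .ndo_get_stats64 = dev_get_tstats64 needed any more
	 */
}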
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9782c180fee6..535e9f72514c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -108,6 +108,11 @@ static void ipmr_expire_process(struct timer_list *t);
lockdep_rtnl_is_held() || \
list_empty(&net->ipv6.mr6_tables))
+static bool ip6mr_can_free_table(struct net *net)
+{
+ return !check_net(net) || !net_initialized(net);
+}
+
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -125,7 +130,7 @@ static struct mr_table *ip6mr_mr_table_iter(struct net *net,
return ret;
}
-static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
+static struct mr_table *__ip6mr_get_table(struct net *net, u32 id)
{
struct mr_table *mrt;
@@ -136,6 +141,16 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
return NULL;
}
+static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+
+ rcu_read_lock();
+ mrt = __ip6mr_get_table(net, id);
+ rcu_read_unlock();
+ return mrt;
+}
+
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr_table **mrt)
{
@@ -177,7 +192,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
arg->table = fib_rule_get_table(rule, arg);
- mrt = ip6mr_get_table(rule->fr_net, arg->table);
+ mrt = __ip6mr_get_table(rule->fr_net, arg->table);
if (!mrt)
return -EAGAIN;
res->mrt = mrt;
@@ -276,7 +291,7 @@ static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}
-static unsigned int ip6mr_rules_seq_read(struct net *net)
+static unsigned int ip6mr_rules_seq_read(const struct net *net)
{
return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}
@@ -291,6 +306,11 @@ EXPORT_SYMBOL(ip6mr_rule_default);
#define ip6mr_for_each_table(mrt, net) \
for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
+static bool ip6mr_can_free_table(struct net *net)
+{
+ return !check_net(net);
+}
+
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
struct mr_table *mrt)
{
@@ -304,6 +324,8 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
return net->ipv6.mrt6;
}
+#define __ip6mr_get_table ip6mr_get_table
+
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr_table **mrt)
{
@@ -335,7 +357,7 @@ static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
return 0;
}
-static unsigned int ip6mr_rules_seq_read(struct net *net)
+static unsigned int ip6mr_rules_seq_read(const struct net *net)
{
return 0;
}
@@ -382,7 +404,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
struct mr_table *mrt;
- mrt = ip6mr_get_table(net, id);
+ mrt = __ip6mr_get_table(net, id);
if (mrt)
return mrt;
@@ -392,6 +414,10 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr_table *mrt)
{
+ struct net *net = read_pnet(&mrt->net);
+
+ WARN_ON_ONCE(!ip6mr_can_free_table(net));
+
timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
@@ -411,13 +437,15 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
struct net *net = seq_file_net(seq);
struct mr_table *mrt;
- mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
- if (!mrt)
+ rcu_read_lock();
+ mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (!mrt) {
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
+ }
iter->mrt = mrt;
- rcu_read_lock();
return mr_vif_seq_start(seq, pos);
}
@@ -492,9 +520,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
if (it->cache != &mrt->mfc_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
- mfc->_c.mfc_un.res.pkt,
- mfc->_c.mfc_un.res.bytes,
- mfc->_c.mfc_un.res.wrong_if);
+ atomic_long_read(&mfc->_c.mfc_un.res.pkt),
+ atomic_long_read(&mfc->_c.mfc_un.res.bytes),
+ atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
for (n = mfc->_c.mfc_un.res.minvif;
n < mfc->_c.mfc_un.res.maxvif; n++) {
if (VIF_EXISTS(mrt, n) &&
@@ -640,7 +668,7 @@ static void reg_vif_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
dev->needs_free_netdev = true;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
@@ -856,7 +884,7 @@ static void ip6mr_update_thresholds(struct mr_table *mrt,
cache->mfc_un.res.maxvif = vifi + 1;
}
}
- cache->mfc_un.res.lastuse = jiffies;
+ WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
}
static int mif6_add(struct net *net, struct mr_table *mrt,
@@ -1260,11 +1288,9 @@ static int ip6mr_device_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static unsigned int ip6mr_seq_read(struct net *net)
+static unsigned int ip6mr_seq_read(const struct net *net)
{
- ASSERT_RTNL();
-
- return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
+ return READ_ONCE(net->ipv6.ipmr_seq) + ip6mr_rules_seq_read(net);
}
static int ip6mr_dump(struct net *net, struct notifier_block *nb,
@@ -1369,14 +1395,17 @@ static struct pernet_operations ip6mr_net_ops = {
.exit_batch = ip6mr_net_exit_batch,
};
+static const struct rtnl_msg_handler ip6mr_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = RTNL_FAMILY_IP6MR,
+ .msgtype = RTM_GETROUTE,
+ .doit = ip6mr_rtm_getroute, .dumpit = ip6mr_rtm_dumproute},
+};
+
int __init ip6_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip6_mrt_cache",
- sizeof(struct mfc6_cache),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc6_cache, SLAB_HWCACHE_ALIGN);
if (!mrt_cachep)
return -ENOMEM;
@@ -1394,9 +1423,8 @@ int __init ip6_mr_init(void)
goto add_proto_fail;
}
#endif
- err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
- ip6mr_rtm_getroute, ip6mr_rtm_dumproute, 0);
- if (err == 0)
+ err = rtnl_register_many(ip6mr_rtnl_msg_handlers);
+ if (!err)
return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
@@ -1411,9 +1439,9 @@ reg_pernet_fail:
return err;
}
-void ip6_mr_cleanup(void)
+void __init ip6_mr_cleanup(void)
{
- rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
+ rtnl_unregister_many(ip6mr_rtnl_msg_handlers);
#ifdef CONFIG_IPV6_PIMSM_V2
inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
@@ -1917,9 +1945,9 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
c = ip6mr_cache_find(mrt, &sr->src.sin6_addr,
&sr->grp.sin6_addr);
if (c) {
- sr->pktcnt = c->_c.mfc_un.res.pkt;
- sr->bytecnt = c->_c.mfc_un.res.bytes;
- sr->wrong_if = c->_c.mfc_un.res.wrong_if;
+ sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
+ sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
+ sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
rcu_read_unlock();
return 0;
}
@@ -1989,9 +2017,9 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
rcu_read_lock();
c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
if (c) {
- sr.pktcnt = c->_c.mfc_un.res.pkt;
- sr.bytecnt = c->_c.mfc_un.res.bytes;
- sr.wrong_if = c->_c.mfc_un.res.wrong_if;
+ sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
+ sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
+ sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
@@ -2114,9 +2142,9 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
int true_vifi = ip6mr_find_vif(mrt, dev);
vif = c->_c.mfc_parent;
- c->_c.mfc_un.res.pkt++;
- c->_c.mfc_un.res.bytes += skb->len;
- c->_c.mfc_un.res.lastuse = jiffies;
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
struct mfc6_cache *cache_proxy;
@@ -2134,7 +2162,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
- c->_c.mfc_un.res.wrong_if++;
+ atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
@@ -2276,13 +2304,15 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
int err;
struct mr_table *mrt;
struct mfc6_cache *cache;
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
- mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
- if (!mrt)
+ rcu_read_lock();
+ mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (!mrt) {
+ rcu_read_unlock();
return -ENOENT;
+ }
- rcu_read_lock();
cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
if (!cache && skb->dev) {
int vif = ip6mr_find_vif(mrt, skb->dev);
@@ -2434,8 +2464,7 @@ static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
errout:
kfree_skb(skb);
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
@@ -2561,9 +2590,9 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
src = nla_get_in6_addr(tb[RTA_SRC]);
if (tb[RTA_DST])
grp = nla_get_in6_addr(tb[RTA_DST]);
- tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
+ tableid = nla_get_u32_default(tb[RTA_TABLE], 0);
- mrt = ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
+ mrt = __ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
if (!mrt) {
NL_SET_ERR_MSG_MOD(extack, "MR table does not exist");
return -ENOENT;
@@ -2595,7 +2624,9 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
@@ -2608,7 +2639,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (filter.table_id) {
struct mr_table *mrt;
- mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
+ mrt = __ip6mr_get_table(sock_net(skb->sk), filter.table_id);
if (!mrt) {
if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
return skb->len;
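A hedged sketch of the counter conversion running through the ip6mr.c hunks above: the per-flow packet/byte/wrong-interface counters become atomic_long_t so the RCU forwarding path can bump them without a lock, while /proc and ioctl readers use atomic_long_read(). The struct and helper below are illustrative stand-ins, not the patch's own names.

struct example_mfc_stats {
	atomic_long_t	pkt;
	atomic_long_t	bytes;
	atomic_long_t	wrong_if;
};

static void example_mfc_account(struct example_mfc_stats *st,
				unsigned int len, bool wrong_if)
{
	atomic_long_inc(&st->pkt);
	atomic_long_add(len, &st->bytes);
	if (wrong_if)
		atomic_long_inc(&st->wrong_if);
}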
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56c3c467f9de..1e225e6489ea 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,8 +111,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
- opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
- opt);
+ opt = unrcu_pointer(xchg(&inet6_sk(sk)->opt, RCU_INITIALIZER(opt)));
sk_dst_reset(sk);
return opt;
@@ -948,6 +947,8 @@ done:
if (optlen < sizeof(int))
goto e_inval;
retv = ip6_ra_control(sk, val);
+ if (retv == 0)
+ inet6_assign_bit(RTALERT, sk, valbool);
break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
@@ -984,7 +985,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+ return ip_setsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
@@ -1346,7 +1347,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (val < 0)
- val = sock_net(sk)->ipv6.devconf_all->hop_limit;
+ val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit);
break;
}
@@ -1445,6 +1446,10 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.recvfragsize;
break;
+ case IPV6_ROUTER_ALERT:
+ val = inet6_test_bit(RTALERT, sk);
+ break;
+
case IPV6_ROUTER_ALERT_ISOLATE:
val = inet6_test_bit(RTALERT_ISOLATE, sk);
break;
@@ -1470,7 +1475,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+ return ip_getsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
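The ipv6_sockglue.c hunk above replaces the open-coded __force cast with unrcu_pointer()/RCU_INITIALIZER(), which keeps sparse quiet while still handing the previous options block back to the caller. A hedged sketch of the idiom on an illustrative __rcu field:

struct example_holder {
	struct ipv6_txoptions __rcu *opt;
};

static struct ipv6_txoptions *
example_swap_opt(struct example_holder *h, struct ipv6_txoptions *new)
{
	/* atomically publish @new and return the old pointer without
	 * __force casts on the __rcu-annotated field
	 */
	return unrcu_pointer(xchg(&h->opt, RCU_INITIALIZER(new)));
}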
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bc6e0a0bad3c..65831b4fee1f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -33,8 +33,10 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
+#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/route.h>
+#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -47,6 +49,7 @@
#include <linux/netfilter_ipv6.h>
#include <net/net_namespace.h>
+#include <net/netlink.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -159,9 +162,9 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
int iv;
if (mld_in_v1_mode(idev))
- iv = idev->cnf.mldv1_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
else
- iv = idev->cnf.mldv2_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
return iv > 0 ? iv : 1;
}
@@ -586,7 +589,8 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
const struct in6_addr *group;
struct ipv6_mc_socklist *pmc;
struct ip6_sf_socklist *psl;
- int i, count, copycount;
+ unsigned int count;
+ int i, copycount;
group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
@@ -610,7 +614,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
psl = sock_dereference(pmc->sflist, sk);
count = psl ? psl->sl_count : 0;
- copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
+ copycount = min(count, gsf->gf_numsrc);
gsf->gf_numsrc = count;
for (i = 0; i < copycount; i++) {
struct sockaddr_in6 *psin6;
@@ -900,6 +904,41 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
return mc;
}
+static void inet6_ifmcaddr_notify(struct net_device *dev,
+ const struct ifmcaddr6 *ifmca, int event)
+{
+ struct inet6_fill_args fillargs = {
+ .portid = 0,
+ .seq = 0,
+ .event = event,
+ .flags = 0,
+ .netnsid = -1,
+ .force_rt_scope_universe = true,
+ };
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOMEM;
+
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ nla_total_size(sizeof(struct in6_addr)) +
+ nla_total_size(sizeof(struct ifa_cacheinfo)),
+ GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
+ if (err < 0) {
+ WARN_ON_ONCE(err == -EMSGSIZE);
+ nlmsg_free(skb);
+ goto error;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MCADDR, NULL, GFP_KERNEL);
+ return;
+error:
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_MCADDR, err);
+}
+
/*
* device multicast group inc (add if not found)
*/
@@ -947,6 +986,7 @@ static int __ipv6_dev_mc_inc(struct net_device *dev,
mld_del_delrec(idev, mc);
igmp6_group_added(mc);
+ inet6_ifmcaddr_notify(dev, mc, RTM_NEWMULTICAST);
mutex_unlock(&idev->mc_lock);
ma_put(mc);
return 0;
@@ -976,6 +1016,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
*map = ma->next;
igmp6_group_dropped(ma);
+ inet6_ifmcaddr_notify(idev->dev, ma,
+ RTM_DELMULTICAST);
ip6_mc_clear_src(ma);
mutex_unlock(&idev->mc_lock);
@@ -1020,29 +1062,31 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
rcu_read_lock();
idev = __in6_dev_get(dev);
- if (idev) {
- for_each_mc_rcu(idev, mc) {
- if (ipv6_addr_equal(&mc->mca_addr, group))
- break;
- }
- if (mc) {
- if (src_addr && !ipv6_addr_any(src_addr)) {
- struct ip6_sf_list *psf;
+ if (!idev)
+ goto unlock;
+ for_each_mc_rcu(idev, mc) {
+ if (ipv6_addr_equal(&mc->mca_addr, group))
+ break;
+ }
+ if (!mc)
+ goto unlock;
+ if (src_addr && !ipv6_addr_any(src_addr)) {
+ struct ip6_sf_list *psf;
- for_each_psf_rcu(mc, psf) {
- if (ipv6_addr_equal(&psf->sf_addr, src_addr))
- break;
- }
- if (psf)
- rv = psf->sf_count[MCAST_INCLUDE] ||
- psf->sf_count[MCAST_EXCLUDE] !=
- mc->mca_sfcount[MCAST_EXCLUDE];
- else
- rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
- } else
- rv = true; /* don't filter unspecified source */
+ for_each_psf_rcu(mc, psf) {
+ if (ipv6_addr_equal(&psf->sf_addr, src_addr))
+ break;
}
+ if (psf)
+ rv = READ_ONCE(psf->sf_count[MCAST_INCLUDE]) ||
+ READ_ONCE(psf->sf_count[MCAST_EXCLUDE]) !=
+ READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]);
+ else
+ rv = READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]) != 0;
+ } else {
+ rv = true; /* don't filter unspecified source */
}
+unlock:
rcu_read_unlock();
return rv;
}
@@ -1202,15 +1246,15 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
static int mld_force_mld_version(const struct inet6_dev *idev)
{
+ const struct net *net = dev_net(idev->dev);
+ int all_force;
+
+ all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
/* Normally, both are 0 here. If enforcement to a particular is
* being used, individual device enforcement will have a lower
* precedence over 'all' device (.../conf/all/force_mld_version).
*/
-
- if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
- return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
- else
- return idev->cnf.force_mld_version;
+ return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
@@ -1729,21 +1773,19 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
struct net_device *dev = idev->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- struct net *net = dev_net(dev);
const struct in6_addr *saddr;
struct in6_addr addr_buf;
struct mld2_report *pmr;
struct sk_buff *skb;
unsigned int size;
struct sock *sk;
- int err;
+ struct net *net;
- sk = net->ipv6.igmp_sk;
/* we assume size > sizeof(ra) here
* Also try to not allocate high-order pages for big MTU
*/
size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
- skb = sock_alloc_send_skb(sk, size, 1, &err);
+ skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return NULL;
@@ -1751,6 +1793,12 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
skb_reserve(skb, hlen);
skb_tailroom_reserve(skb, mtu, tlen);
+ rcu_read_lock();
+
+ net = dev_net_rcu(dev);
+ sk = net->ipv6.igmp_sk;
+ skb_set_owner_w(skb, sk);
+
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
* use unspecified address as the source address
@@ -1762,6 +1810,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
+ rcu_read_unlock();
+
skb_put_data(skb, ra, sizeof(ra));
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
@@ -2121,21 +2171,21 @@ static void mld_send_cr(struct inet6_dev *idev)
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
- struct net *net = dev_net(dev);
- struct sock *sk = net->ipv6.igmp_sk;
+ const struct in6_addr *snd_addr, *saddr;
+ int err, len, payload_len, full_len;
+ struct in6_addr addr_buf;
struct inet6_dev *idev;
struct sk_buff *skb;
struct mld_msg *hdr;
- const struct in6_addr *snd_addr, *saddr;
- struct in6_addr addr_buf;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
- struct flowi6 fl6;
struct dst_entry *dst;
+ struct flowi6 fl6;
+ struct net *net;
+ struct sock *sk;
if (type == ICMPV6_MGM_REDUCTION)
snd_addr = &in6addr_linklocal_allrouters;
@@ -2146,19 +2196,21 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
payload_len = len + sizeof(ra);
full_len = sizeof(struct ipv6hdr) + payload_len;
- rcu_read_lock();
- IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
- rcu_read_unlock();
+ skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
- skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
+ rcu_read_lock();
+ net = dev_net_rcu(dev);
+ idev = __in6_dev_get(dev);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
if (!skb) {
- rcu_read_lock();
- IP6_INC_STATS(net, __in6_dev_get(dev),
- IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
rcu_read_unlock();
return;
}
+ sk = net->ipv6.igmp_sk;
+ skb_set_owner_w(skb, sk);
+
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
@@ -2183,9 +2235,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
IPPROTO_ICMPV6,
csum_partial(hdr, len, 0));
- rcu_read_lock();
- idev = __in6_dev_get(skb->dev);
-
icmpv6_flow_init(sk, &fl6, type,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb->dev->ifindex);
@@ -2284,7 +2333,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
/* source filter not found, or count wrong => bug */
return -ESRCH;
}
- psf->sf_count[sfmode]--;
+ WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] - 1);
if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
struct inet6_dev *idev = pmc->idev;
@@ -2390,7 +2439,7 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
rcu_assign_pointer(pmc->mca_sources, psf);
}
}
- psf->sf_count[sfmode]++;
+ WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] + 1);
return 0;
}
@@ -2502,7 +2551,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
sf_markstate(pmc);
isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
if (!delta)
- pmc->mca_sfcount[sfmode]++;
+ WRITE_ONCE(pmc->mca_sfcount[sfmode],
+ pmc->mca_sfcount[sfmode] + 1);
err = 0;
for (i = 0; i < sfcount; i++) {
err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
@@ -2513,7 +2563,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
int j;
if (!delta)
- pmc->mca_sfcount[sfmode]--;
+ WRITE_ONCE(pmc->mca_sfcount[sfmode],
+ pmc->mca_sfcount[sfmode] - 1);
for (j = 0; j < i; j++)
ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
@@ -2558,7 +2609,8 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
RCU_INIT_POINTER(pmc->mca_sources, NULL);
pmc->mca_sfmode = MCAST_EXCLUDE;
pmc->mca_sfcount[MCAST_INCLUDE] = 0;
- pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
+ /* Paired with the READ_ONCE() from ipv6_chk_mcast_addr() */
+ WRITE_ONCE(pmc->mca_sfcount[MCAST_EXCLUDE], 1);
}
/* called with mc_lock */
@@ -2719,7 +2771,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Should stop work after group drop. or we will
* start work again in mld_ifc_event()
*/
- synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
@@ -3074,8 +3125,8 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
state->dev->ifindex, state->dev->name,
&state->im->mca_addr,
&psf->sf_addr,
- psf->sf_count[MCAST_INCLUDE],
- psf->sf_count[MCAST_EXCLUDE]);
+ READ_ONCE(psf->sf_count[MCAST_INCLUDE]),
+ READ_ONCE(psf->sf_count[MCAST_EXCLUDE]));
}
return 0;
}
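A hedged sketch of the allocation pattern mld_newpack()/igmp6_send() switch to above: allocate the skb with GFP_KERNEL outside any RCU section, then take the RCU read lock only long enough to resolve dev_net_rcu() and attach the per-netns socket as owner. The wrapper name below is illustrative.

static struct sk_buff *example_mld_alloc(struct net_device *dev,
					 unsigned int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(size, GFP_KERNEL);	/* may sleep, no RCU held */
	if (!skb)
		return NULL;

	rcu_read_lock();
	skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.igmp_sk);
	rcu_read_unlock();

	return skb;
}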
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index a19999b30bc0..8699d1a188dc 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -200,9 +200,9 @@ static inline int ndisc_is_useropt(const struct net_device *dev,
return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
+ opt->nd_opt_type == ND_OPT_6CO ||
opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
- opt->nd_opt_type == ND_OPT_PREF64 ||
- ndisc_ops_is_useropt(dev, opt->nd_opt_type);
+ opt->nd_opt_type == ND_OPT_PREF64;
}
static struct nd_opt_hdr *ndisc_next_useropt(const struct net_device *dev,
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
return NULL;
memset(ndopts, 0, sizeof(*ndopts));
while (opt_len) {
+ bool unknown = false;
int l;
if (opt_len < sizeof(struct nd_opt_hdr))
return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
break;
#endif
default:
- if (ndisc_is_useropt(dev, nd_opt)) {
- ndopts->nd_useropts_end = nd_opt;
- if (!ndopts->nd_useropts)
- ndopts->nd_useropts = nd_opt;
- } else {
- /*
- * Unknown options must be silently ignored,
- * to accommodate future extension to the
- * protocol.
- */
- ND_PRINTK(2, notice,
- "%s: ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type,
- nd_opt->nd_opt_len);
- }
+ unknown = true;
+ }
+ if (ndisc_is_useropt(dev, nd_opt)) {
+ ndopts->nd_useropts_end = nd_opt;
+ if (!ndopts->nd_useropts)
+ ndopts->nd_useropts = nd_opt;
+ } else if (unknown) {
+ /*
+ * Unknown options must be silently ignored,
+ * to accommodate future extension to the
+ * protocol.
+ */
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
next_opt:
opt_len -= l;
@@ -416,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
{
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
struct sk_buff *skb;
skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
- if (!skb) {
- ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
- __func__);
+ if (!skb)
return NULL;
- }
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -435,7 +433,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
/* Manually assign socket ownership as we avoid calling
* sock_alloc_send_pskb() to bypass wmem buffer limits
*/
- skb_set_owner_w(skb, sk);
+ rcu_read_lock();
+ skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
+ rcu_read_unlock();
return skb;
}
@@ -451,7 +451,7 @@ static void ip6_nd_hdr(struct sk_buff *skb,
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
- tclass = idev ? idev->cnf.ndisc_tclass : 0;
+ tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0;
rcu_read_unlock();
skb_push(skb, sizeof(*hdr));
@@ -471,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb,
void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
+ struct icmp6hdr *icmp6h = icmp6_hdr(skb);
struct dst_entry *dst = skb_dst(skb);
- struct net *net = dev_net(skb->dev);
- struct sock *sk = net->ipv6.ndisc_sk;
struct inet6_dev *idev;
+ struct net *net;
+ struct sock *sk;
int err;
- struct icmp6hdr *icmp6h = icmp6_hdr(skb);
u8 type;
type = icmp6h->icmp6_type;
+ rcu_read_lock();
+
+ net = dev_net_rcu(skb->dev);
+ sk = net->ipv6.ndisc_sk;
if (!dst) {
struct flowi6 fl6;
int oif = skb->dev->ifindex;
@@ -488,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
dst = icmp6_dst_alloc(skb->dev, &fl6);
if (IS_ERR(dst)) {
+ rcu_read_unlock();
kfree_skb(skb);
return;
}
@@ -502,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
- rcu_read_lock();
idev = __in6_dev_get(dst->dev);
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
@@ -535,7 +539,7 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = false;
- inc_opt |= ifp->idev->cnf.force_tllao;
+ inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao);
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -903,8 +907,9 @@ have_ifp:
}
if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
- (idev->cnf.forwarding &&
- (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) &&
+ (READ_ONCE(idev->cnf.forwarding) &&
+ (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) ||
+ READ_ONCE(idev->cnf.proxy_ndp)) &&
(is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
@@ -929,7 +934,7 @@ have_ifp:
}
if (is_router < 0)
- is_router = idev->cnf.forwarding;
+ is_router = READ_ONCE(idev->cnf.forwarding);
if (dad) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,
@@ -973,7 +978,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
- switch (idev->cnf.accept_untracked_na) {
+ switch (READ_ONCE(idev->cnf.accept_untracked_na)) {
case 0: /* Don't accept untracked na (absent in neighbor cache) */
return 0;
case 1: /* Create new entries from na if currently untracked */
@@ -1024,7 +1029,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* drop_unsolicited_na takes precedence over accept_untracked_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
- idev->cnf.drop_unsolicited_na)
+ READ_ONCE(idev->cnf.drop_unsolicited_na))
return reason;
if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
@@ -1080,7 +1085,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
- if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+ if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) {
if (accept_untracked_na(dev, saddr)) {
neigh = neigh_create(&nd_tbl, &msg->target, dev);
new_state = NUD_STALE;
@@ -1100,7 +1105,8 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* has already sent a NA to us.
*/
if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
- net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
+ READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+ READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
/* XXX: idev->cnf.proxy_ndp */
goto out;
@@ -1148,7 +1154,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
}
/* Don't accept RS if we're not in router mode */
- if (!idev->cnf.forwarding)
+ if (!READ_ONCE(idev->cnf.forwarding))
goto out;
/*
@@ -1237,6 +1243,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
struct ndisc_options ndopts;
struct fib6_info *rt = NULL;
struct inet6_dev *in6_dev;
+ struct fib6_table *table;
u32 defrtr_usr_metric;
unsigned int pref = 0;
__u32 old_if_flags;
@@ -1317,7 +1324,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
if (old_if_flags != in6_dev->if_flags)
send_ifinfo_notify = true;
- if (!in6_dev->cnf.accept_ra_defrtr) {
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
__func__, skb->dev->name);
@@ -1325,7 +1332,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
- if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
+ if (lifetime != 0 &&
+ lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
@@ -1336,7 +1344,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
* accept_ra_from_local is set to true.
*/
net = dev_net(in6_dev->dev);
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
@@ -1348,7 +1356,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
if (pref == ICMPV6_ROUTER_PREF_INVALID ||
- !in6_dev->cnf.accept_ra_rtr_pref)
+ !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref))
pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif
/* routes added from RAs do not use nexthop objects */
@@ -1382,7 +1390,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
neigh_release(neigh);
rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
- skb->dev, pref, defrtr_usr_metric);
+ skb->dev, pref, defrtr_usr_metric,
+ lifetime);
if (!rt) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
@@ -1409,12 +1418,21 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
inet6_rt_notify(RTM_NEWROUTE, rt, &nlinfo, NLM_F_REPLACE);
}
- if (rt)
+ if (rt) {
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(rt, jiffies + (HZ * lifetime));
- if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ fib6_add_gc_list(rt);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 &&
ra_msg->icmph.icmp6_hop_limit) {
- if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <=
+ ra_msg->icmph.icmp6_hop_limit) {
+ WRITE_ONCE(in6_dev->cnf.hop_limit,
+ ra_msg->icmph.icmp6_hop_limit);
fib6_metric_set(rt, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
} else {
@@ -1496,7 +1514,7 @@ skip_linkparms:
}
#ifdef CONFIG_IPV6_ROUTE_INFO
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
@@ -1505,7 +1523,7 @@ skip_linkparms:
goto skip_routeinfo;
}
- if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
p;
@@ -1517,14 +1535,14 @@ skip_linkparms:
continue;
#endif
if (ri->prefix_len == 0 &&
- !in6_dev->cnf.accept_ra_defrtr)
+ !READ_ONCE(in6_dev->cnf.accept_ra_defrtr))
continue;
if (ri->lifetime != 0 &&
- ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
+ ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft))
continue;
- if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen))
continue;
- if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+ if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen))
continue;
rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
@@ -1544,7 +1562,7 @@ skip_routeinfo:
}
#endif
- if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_pi;
p;
@@ -1555,7 +1573,7 @@ skip_routeinfo:
}
}
- if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
+ if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) {
__be32 n;
u32 mtu;
@@ -1569,8 +1587,8 @@ skip_routeinfo:
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
- } else if (in6_dev->cnf.mtu6 != mtu) {
- in6_dev->cnf.mtu6 = mtu;
+ } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
+ WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
fib6_metric_set(rt, RTAX_MTU, mtu);
rt6_mtu_change(skb->dev, mtu);
}
@@ -1678,7 +1696,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
bool ret;
if (netif_is_l3_master(skb->dev)) {
- dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif);
if (!dev)
return;
}
@@ -1708,17 +1726,19 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
if (IS_ERR(dst))
return;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_GATEWAY) {
ND_PRINTK(2, warn,
"Redirect: destination is not a neighbour\n");
goto release;
}
- peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
+
+ rcu_read_lock();
+ peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr);
ret = inet_peer_xrlim_allow(peer, 1*HZ);
- if (peer)
- inet_putpeer(peer);
+ rcu_read_unlock();
+
if (!ret)
goto release;
@@ -1804,7 +1824,7 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
if (!idev)
return true;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
- idev->cnf.suppress_frag_ndisc) {
+ READ_ONCE(idev->cnf.suppress_frag_ndisc)) {
net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
return true;
}
@@ -1881,8 +1901,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
idev = in6_dev_get(dev);
if (!idev)
break;
- if (idev->cnf.ndisc_notify ||
- net->ipv6.devconf_all->ndisc_notify)
+ if (READ_ONCE(idev->cnf.ndisc_notify) ||
+ READ_ONCE(net->ipv6.devconf_all->ndisc_notify))
ndisc_send_unsol_na(dev);
in6_dev_put(idev);
break;
@@ -1891,8 +1911,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
if (!idev)
evict_nocarrier = true;
else {
- evict_nocarrier = idev->cnf.ndisc_evict_nocarrier &&
- net->ipv6.devconf_all->ndisc_evict_nocarrier;
+ evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) &&
+ READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier);
in6_dev_put(idev);
}
@@ -1922,13 +1942,13 @@ static struct notifier_block ndisc_netdev_notifier = {
};
#ifdef CONFIG_SYSCTL
-static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
+static void ndisc_warn_deprecated_sysctl(const struct ctl_table *ctl,
const char *func, const char *dev_name)
{
static char warncomm[TASK_COMM_LEN];
static int warned;
if (strcmp(warncomm, current->comm) && warned < 5) {
- strcpy(warncomm, current->comm);
+ strscpy(warncomm, current->comm);
pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
warncomm, func,
dev_name, ctl->procname,
@@ -1937,7 +1957,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
}
}
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
+int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net_device *dev = ctl->extra1;
@@ -1966,7 +1986,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME))
idev->nd_parms->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME));
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
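Most of the ndisc.c churn above annotates lockless reads of inet6_dev/devconf sysctls with READ_ONCE(), paired with WRITE_ONCE() on the update side. A hedged sketch of the reader-side idiom; the helper and its exact policy are illustrative, not lifted from the patch:

static bool example_ra_lifetime_ok(const struct inet6_dev *idev, u32 lifetime)
{
	if (!READ_ONCE(idev->cnf.accept_ra_defrtr))
		return false;
	/* a lifetime of 0 withdraws the default route and is always taken */
	return lifetime == 0 ||
	       lifetime >= READ_ONCE(idev->cnf.accept_ra_min_lft);
}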
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 53d255838e6a..581ce055bf52 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -36,6 +36,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
.saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
};
int err;
@@ -126,7 +127,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
ktime_t tstamp = skb->tstamp;
struct ip6_frag_state state;
u8 *prevhdr, nexthdr = 0;
@@ -192,7 +193,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -225,7 +226,7 @@ slow_path:
goto blackhole;
}
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 0ba62f4868f9..e087a8e97ba7 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -6,6 +6,17 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+# old sockopt interface and eval loop
+config IP6_NF_IPTABLES_LEGACY
+ tristate "Legacy IP6 tables support"
+ depends on INET && IPV6
+ select NETFILTER_XTABLES
+ default n
+ help
+ ip6tables is a legacy packet classifier.
+ This is not needed if you are using iptables over nftables
+ (iptables-nft).
+
config NF_SOCKET_IPV6
tristate "IPv6 socket lookup support"
help
@@ -147,7 +158,7 @@ config IP6_NF_MATCH_MH
config IP6_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP6_NF_MANGLE || IP6_NF_RAW
+ depends on IP6_NF_MANGLE || IP6_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -186,6 +197,8 @@ config IP6_NF_TARGET_HL
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
+ tristate
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -195,7 +208,7 @@ config IP6_NF_FILTER
config IP6_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP6_NF_FILTER
+ depends on IP6_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV6
default m if NETFILTER_ADVANCED=n
help
@@ -221,6 +234,7 @@ config IP6_NF_TARGET_SYNPROXY
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -230,6 +244,7 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -243,6 +258,7 @@ config IP6_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -254,6 +270,7 @@ config IP6_NF_NAT
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
+ select IP6_NF_IPTABLES_LEGACY
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
@@ -262,25 +279,23 @@ config IP6_NF_NAT
To compile it as a module, choose M here. If unsure, say N.
-if IP6_NF_NAT
-
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
select NETFILTER_XT_TARGET_MASQUERADE
+ depends on IP6_NF_NAT
help
This is a backwards-compat option for the user's convenience
(e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
config IP6_NF_TARGET_NPT
tristate "NPT (Network Prefix translation) target support"
+ depends on IP6_NF_NAT || NFT_COMPAT
help
This option adds the `SNPT' and `DNPT' target, which perform
stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
To compile it as a module, choose M here. If unsure, say N.
-endif # IP6_NF_NAT
-
endif # IP6_NF_IPTABLES
endmenu
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index b8d6dc9aeeb6..66ce6fa5b2f5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,7 +4,7 @@
#
# Link order matters here.
-obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
+obj-$(CONFIG_IP6_NF_IPTABLES_LEGACY) += ip6_tables.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index fd9f049d6d41..7d5602950ae7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1125,6 +1125,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ip6t_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1133,6 +1135,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1501,6 +1505,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ip6t_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1509,6 +1515,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1765,7 +1773,7 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
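A hedged, user-space rendering of the two length checks added to do_replace()/compat_do_replace() above: reject a buffer that cannot even hold the header, and widen the arithmetic so tmp.size + sizeof(tmp) cannot wrap on 32-bit. The struct below is a stand-in for ip6t_replace, not the real layout.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_replace_hdr {	/* stand-in for struct ip6t_replace */
	uint32_t size;		/* declared size of the ruleset blob */
};

static bool example_replace_len_ok(size_t len,
				   const struct example_replace_hdr *tmp)
{
	if (len < sizeof(*tmp))
		return false;	/* cannot even hold the header */
	if ((uint64_t)len < (uint64_t)tmp->size + sizeof(*tmp))
		return false;	/* blob would run past the user buffer */
	return true;
}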
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index df785ebda0ca..e8992693e14a 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -43,7 +43,7 @@ static int ip6table_filter_table_init(struct net *net)
return -ENOMEM;
/* Entry 1 is the FORWARD hook */
((struct ip6t_standard *)repl->entries)[1].target.verdict =
- forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
+ forward ? -NF_ACCEPT - 1 : NF_DROP - 1;
err = ip6t_register_table(net, &packet_filter, repl, filter_ops);
kfree(repl);
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 52cf104e3478..e119d4f090cc 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
static int __init ip6table_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv6_table,
- ip6table_nat_table_init);
+ int ret;
+ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
+ * before calling ip6t_nat_register_lookups().
+ */
+ ret = register_pernet_subsys(&ip6table_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ ret = xt_register_template(&nf_nat_ipv6_table,
+ ip6table_nat_table_init);
if (ret)
- xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
return ret;
}
static void __exit ip6table_nat_exit(void)
{
- unregister_pernet_subsys(&ip6table_nat_net_ops);
xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
}
module_init(ip6table_nat_init);
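A hedged sketch of the init/exit ordering fix in the ip6table_nat.c hunk above: the per-netns storage must exist before the xt template can instantiate the table, so registration runs pernet-first and teardown in the reverse order. Names other than the four register/unregister calls are illustrative.

static int __init example_nat_init(void)
{
	int ret;

	ret = register_pernet_subsys(&example_nat_net_ops);
	if (ret < 0)
		return ret;

	ret = xt_register_template(&example_nat_table,
				   example_nat_table_init);
	if (ret)
		unregister_pernet_subsys(&example_nat_net_ops);
	return ret;
}

static void __exit example_nat_exit(void)
{
	xt_unregister_template(&example_nat_table);
	unregister_pernet_subsys(&example_nat_net_ops);
}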
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b2dd48911c8d..4120e67a8ce6 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -62,7 +62,6 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
- { }
};
static int nf_ct_frag6_sysctl_register(struct net *net)
@@ -105,7 +104,7 @@ err_alloc:
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
- struct ctl_table *table;
+ const struct ctl_table *table;
table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
@@ -155,6 +154,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
};
struct inet_frag_queue *q;
+ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)))
+ key.iif = 0;
+
q = inet_frag_find(nf_frag->fqdir, &key);
if (!q)
return NULL;
@@ -264,7 +267,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
fq->q.meat += skb->len;
fq->ecn |= ecn;
if (payload_len > fq->q.max_size)
@@ -294,6 +297,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
}
skb_dst_drop(skb);
+ skb_orphan(skb);
return -EINPROGRESS;
insert_error:
@@ -327,9 +331,9 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto err;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
@@ -469,7 +473,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
if (fq == NULL) {
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index a0a2de30be3e..0c39c77fe8a8 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -47,11 +47,12 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
const struct in6_addr *gw, int oif)
{
+ local_bh_disable();
if (this_cpu_read(nf_skb_duplicated))
- return;
+ goto out;
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
- return;
+ goto out;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_reset_ct(skb);
@@ -69,6 +70,8 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
} else {
kfree_skb(skb);
}
+out:
+ local_bh_enable();
}
EXPORT_SYMBOL_GPL(nf_dup_ipv6);
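A hedged sketch of the recursion guard restructured in nf_dup_ipv6() above: this_cpu_read() of the nf_skb_duplicated flag is only stable while softirqs are off, so the clone-and-reinject path now sits entirely inside local_bh_disable()/local_bh_enable(). The helper names below are illustrative.

static void example_dup_packet(struct net *net, struct sk_buff *skb)
{
	local_bh_disable();
	if (this_cpu_read(nf_skb_duplicated))
		goto out;		/* already duplicating on this CPU */

	skb = pskb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	example_reinject(net, skb);	/* flips nf_skb_duplicated around xmit */
out:
	local_bh_enable();
}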
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 196dd4ecb5e2..9ae2b2725bf9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -83,7 +83,7 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
@@ -124,7 +124,7 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
@@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct tcphdr *oth, unsigned int otcplen)
{
struct tcphdr *tcph;
- int needs_ack;
skb_reset_transport_header(nskb);
- tcph = skb_put(nskb, sizeof(struct tcphdr));
+ tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;
tcph->source = oth->dest;
tcph->dest = oth->source;
if (oth->ack) {
- needs_ack = 0;
tcph->seq = oth->ack_seq;
- tcph->ack_seq = 0;
} else {
- needs_ack = 1;
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
otcplen - (oth->doff<<2));
- tcph->seq = 0;
+ tcph->ack = 1;
}
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
/* Adjust TCP checksum */
tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
@@ -278,13 +268,12 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct sk_buff *nskb;
- struct tcphdr _otcph;
- const struct tcphdr *otcph;
- unsigned int otcplen, hh_len;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
+ const struct tcphdr *otcph;
+ struct sk_buff *nskb;
+ struct tcphdr _otcph;
+ unsigned int otcplen;
struct flowi6 fl6;
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
@@ -323,9 +312,8 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
if (IS_ERR(dst))
return;
- hh_len = (dst->dev->hard_header_len + 15)&~15;
- nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
- + sizeof(struct tcphdr) + dst->trailer_len,
+ nskb = alloc_skb(LL_MAX_HEADER + sizeof(struct ipv6hdr) +
+ sizeof(struct tcphdr) + dst->trailer_len,
GFP_ATOMIC);
if (!nskb) {
@@ -338,9 +326,8 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = fl6.flowi6_mark;
- skb_reserve(nskb, hh_len + dst->header_len);
- ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- ip6_dst_hoplimit(dst));
+ skb_reserve(nskb, LL_MAX_HEADER);
+ nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst));
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
@@ -355,6 +342,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct ipv6hdr *ip6h = ipv6_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index c82f3fdd4a65..492a811828a7 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -38,13 +38,13 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
sizeof(struct in6_addr));
if (err < 0)
return err;
if (tb[NFTA_DUP_SREG_DEV])
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV],
&priv->sreg_dev, sizeof(int));
return err;
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 36dc14b34388..c9f1634b3838 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -41,8 +41,6 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) {
lookup_flags |= RT6_LOOKUP_F_IFACE;
fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
- } else if (priv->flags & NFTA_FIB_F_IIF) {
- fl6->flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
}
if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST)
@@ -75,6 +73,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
else if (priv->flags & NFTA_FIB_F_OIF)
dev = nft_out(pkt);
+ fl6.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
+
nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
@@ -165,6 +165,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
.flowi6_iif = LOOPBACK_IFINDEX,
.flowi6_proto = pkt->tprot,
.flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
+ .flowi6_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
};
struct rt6_info *rt;
int lookup_flags;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index b5205311f372..806d4b5dd1e6 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -111,9 +111,9 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
- hoplimit = idev->cnf.hop_limit;
+ hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
rcu_read_unlock();
}
return hoplimit;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index ef2059c88955..46b8adf6e7f8 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -119,6 +119,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EINVAL;
ipcm6_init_sk(&ipc6, sk);
+ ipc6.sockc.priority = READ_ONCE(sk->sk_priority);
ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
@@ -154,7 +155,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
if (IS_ERR(dst))
return PTR_ERR(dst);
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = READ_ONCE(np->mcast_oif);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 6d1d9221649d..752327b10dde 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -27,7 +27,7 @@
#include <net/ipv6.h>
#define MAX4(a, b, c, d) \
- max_t(u32, max_t(u32, a, b), max_t(u32, c, d))
+ MAX_T(u32, MAX_T(u32, a, b), MAX_T(u32, c, d))
#define SNMP_MIB_MAX MAX4(UDP_MIB_MAX, TCP_MIB_MAX, \
IPSTATS_MIB_MAX, ICMP_MIB_MAX)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 03dbb874c363..a45aba090aa4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -160,6 +160,13 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
inet6_iif(skb), inet6_sdif(skb)))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
@@ -288,8 +295,7 @@ out:
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
- struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
bool recverr = inet6_test_bit(RECVERR6, sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -344,7 +350,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
inet6_iif(skb), inet6_iif(skb)))
continue;
- rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
+ rawv6_err(sk, skb, type, code, inner_offset, info);
}
rcu_read_unlock();
}
@@ -356,14 +362,14 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NET_RX_DROP;
}
@@ -384,7 +390,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
nf_reset_ct(skb);
@@ -409,7 +415,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (inet_test_bit(HDRINCL, sk)) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
}
@@ -592,7 +598,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
- struct rt6_info *rt = (struct rt6_info *)*dstp;
+ struct rt6_info *rt = dst_rt6_info(*dstp);
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
@@ -613,9 +619,9 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb_reserve(skb, hlen);
skb->protocol = htons(ETH_P_IPV6);
- skb->priority = READ_ONCE(sk->sk_priority);
+ skb->priority = sockc->priority;
skb->mark = sockc->mark;
- skb->tstamp = sockc->transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);
skb_put(skb, length);
skb_reset_network_header(skb);
@@ -623,7 +629,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->ip_summed = CHECKSUM_NONE;
- skb_setup_tx_timestamp(skb, sockc->tsflags);
+ skb_setup_tx_timestamp(skb, sockc);
if (flags & MSG_CONFIRM)
skb_set_dst_pending_confirm(skb, 1);
@@ -774,6 +780,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipcm6_init(&ipc6);
ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = fl6.flowi6_mark;
+ ipc6.sockc.priority = READ_ONCE(sk->sk_priority);
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
@@ -911,7 +918,7 @@ back_from_confirm:
ipc6.opt = opt;
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
- len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
+ len, 0, &ipc6, &fl6, dst_rt6_info(dst),
msg->msg_flags);
if (err)
@@ -935,7 +942,7 @@ do_confirm:
goto done;
}
-static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_seticmpfilter(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{
switch (optname) {
@@ -952,7 +959,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
return 0;
}
-static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_geticmpfilter(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
int len;
@@ -1038,7 +1045,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_seticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
@@ -1099,7 +1106,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_geticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5ebc47da1000..a48be617a8ab 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
fq->q.meat += skb->len;
fq->ecn |= ecn;
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
@@ -272,9 +272,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto out_oom;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
@@ -369,7 +369,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
* the source of the fragment, with the Pointer field set to zero.
*/
nexthdr = hdr->nexthdr;
- if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
+ if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
@@ -436,7 +436,6 @@ static struct ctl_table ip6_frags_ns_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
/* secret interval has been deprecated */
@@ -449,7 +448,6 @@ static struct ctl_table ip6_frags_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
@@ -487,7 +485,7 @@ err_alloc:
static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ef815ba583a8..15ce21afc8c6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev);
@@ -130,7 +131,6 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
struct uncached_list {
spinlock_t lock;
struct list_head head;
- struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
@@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
struct net_device *rt_dev = rt->dst.dev;
bool handled = false;
- if (rt_idev->dev == dev) {
+ if (rt_idev && rt_idev->dev == dev) {
rt->rt6i_idev = in6_dev_get(blackhole_netdev);
in6_dev_put(rt_idev);
handled = true;
@@ -188,8 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
handled = true;
}
if (handled)
- list_move(&rt->dst.rt_uncached,
- &ul->quarantine);
+ list_del_init(&rt->dst.rt_uncached);
}
spin_unlock_bh(&ul->lock);
}
@@ -226,7 +225,7 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
- const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
+ const struct rt6_info *rt = dst_rt6_info(dst);
return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
dst->dev, skb, daddr);
@@ -234,8 +233,8 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
+ const struct rt6_info *rt = dst_rt6_info(dst);
struct net_device *dev = dst->dev;
- struct rt6_info *rt = (struct rt6_info *)dst;
daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
if (!daddr)
@@ -354,7 +353,7 @@ EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct fib6_info *from;
struct inet6_dev *idev;
@@ -367,14 +366,15 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
- from = xchg((__force struct fib6_info **)&rt->from, NULL);
+ from = unrcu_pointer(xchg(&rt->from, NULL));
fib6_info_release(from);
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct inet6_dev *idev = rt->rt6i_idev;
+ struct fib6_info *from;
if (idev && idev->dev != blackhole_netdev) {
struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
@@ -384,6 +384,8 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
in6_dev_put(idev);
}
}
+ from = unrcu_pointer(xchg(&rt->from, NULL));
+ fib6_info_release(from);
}
static bool __rt6_check_expired(const struct rt6_info *rt)
@@ -414,8 +416,8 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
struct flowi6 *fl6, int oif, bool have_oif_match,
const struct sk_buff *skb, int strict)
{
- struct fib6_info *sibling, *next_sibling;
struct fib6_info *match = res->f6i;
+ struct fib6_info *sibling;
if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
goto out;
@@ -441,8 +443,8 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
goto out;
- list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
- fib6_siblings) {
+ list_for_each_entry_rcu(sibling, &match->fib6_siblings,
+ fib6_siblings) {
const struct fib6_nh *nh = sibling->fib6_nh;
int nh_upper_bound;
@@ -637,6 +639,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
rcu_read_lock();
last_probe = READ_ONCE(fib6_nh->last_probe);
idev = __in6_dev_get(dev);
+ if (!idev)
+ goto out;
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
if (READ_ONCE(neigh->nud_state) & NUD_VALID)
@@ -645,14 +649,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
write_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
- neigh->updated + idev->cnf.rtr_probe_interval)) {
+ neigh->updated +
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work)
__neigh_set_probe_once(neigh);
}
write_unlock_bh(&neigh->lock);
} else if (time_after(jiffies, last_probe +
- idev->cnf.rtr_probe_interval)) {
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
@@ -931,6 +936,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
+ struct fib6_table *table;
unsigned int pref;
unsigned long lifetime;
struct fib6_info *rt;
@@ -989,10 +995,18 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
- if (!addrconf_finite_timeout(lifetime))
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!addrconf_finite_timeout(lifetime)) {
fib6_clean_expires(rt);
- else
+ fib6_remove_gc_list(rt);
+ } else {
fib6_set_expires(rt, jiffies + HZ * lifetime);
+ fib6_add_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(rt);
}
@@ -1278,7 +1292,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
if (dst->error == 0)
- return (struct rt6_info *) dst;
+ return dst_rt6_info(dst);
dst_release(dst);
@@ -1398,6 +1412,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
struct rt6_info *prev, **p;
p = this_cpu_ptr(res->nh->rt6i_pcpu);
+ /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
@@ -1426,7 +1441,7 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
if (res->f6i->fib6_destroying) {
struct fib6_info *from;
- from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+ from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
fib6_info_release(from);
}
@@ -1443,7 +1458,6 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex)
{
- struct fib6_info *from;
struct net *net;
if (!bucket || !rt6_ex)
@@ -1455,8 +1469,6 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
/* purge completely the exception to allow releasing the held resources:
* some [sk] cache may keep the dst around for unlimited time
*/
- from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
- fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);
hlist_del_rcu(&rt6_ex->hlist);
@@ -1587,7 +1599,7 @@ static unsigned int fib6_mtu(const struct fib6_result *res)
rcu_read_lock();
idev = __in6_dev_get(dev);
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
}
@@ -2085,12 +2097,12 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
*/
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
- RT6_TRACE("aging clone %p\n", rt);
+ pr_debug("aging clone %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
} else if (time_after(jiffies, rt->dst.expires)) {
- RT6_TRACE("purging expired route %p\n", rt);
+ pr_debug("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2101,8 +2113,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (!(neigh && (neigh->flags & NTF_ROUTER))) {
- RT6_TRACE("purging route %p via non-router but gateway\n",
- rt);
+ pr_debug("purging route %p via non-router but gateway\n",
+ rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2211,7 +2223,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
strict |= flags & RT6_LOOKUP_F_IFACE;
strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
rcu_read_lock();
@@ -2362,7 +2374,7 @@ static u32 rt6_multipath_custom_hash_outer(const struct net *net,
hash_keys.ports.dst = keys.ports.dst;
*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 rt6_multipath_custom_hash_inner(const struct net *net,
@@ -2411,7 +2423,7 @@ static u32 rt6_multipath_custom_hash_inner(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 rt6_multipath_custom_hash_skb(const struct net *net,
@@ -2450,7 +2462,7 @@ static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = fl6->fl6_dport;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
/* if skb is set it will be used and fl6 can be NULL */
@@ -2472,7 +2484,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 1:
if (skb) {
@@ -2504,7 +2516,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.ports.dst = fl6->fl6_dport;
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -2541,7 +2553,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 3:
if (skb)
@@ -2637,7 +2649,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net,
rcu_read_lock();
dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
- rt6 = (struct rt6_info *)dst;
+ rt6 = dst_rt6_info(dst);
/* For dst cached in uncached_list, refcnt is already taken. */
if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
dst = &net->ipv6.ip6_null_entry->dst;
@@ -2651,7 +2663,7 @@ EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
+ struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
struct net_device *loopback_dev = net->loopback_dev;
struct dst_entry *new = NULL;
@@ -2734,7 +2746,7 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
struct fib6_info *from;
struct rt6_info *rt;
- rt = container_of(dst, struct rt6_info, dst);
+ rt = dst_rt6_info(dst);
if (rt->sernum)
return rt6_is_valid(rt) ? dst : NULL;
@@ -2760,24 +2772,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
- if (rt) {
- if (rt->rt6i_flags & RTF_CACHE) {
- rcu_read_lock();
- if (rt6_check_expired(rt)) {
- rt6_remove_exception_rt(rt);
- dst = NULL;
- }
- rcu_read_unlock();
- } else {
- dst_release(dst);
- dst = NULL;
+ if (rt->rt6i_flags & RTF_CACHE) {
+ rcu_read_lock();
+ if (rt6_check_expired(rt)) {
+ /* rt/dst can not be destroyed yet,
+ * because of rcu_read_lock()
+ */
+ sk_dst_reset(sk);
+ rt6_remove_exception_rt(rt);
}
+ rcu_read_unlock();
+ return;
}
- return dst;
+ sk_dst_reset(sk);
}
static void ip6_link_failure(struct sk_buff *skb)
@@ -2786,7 +2798,7 @@ static void ip6_link_failure(struct sk_buff *skb)
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
@@ -2842,7 +2854,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
- struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6_info *rt6 = dst_rt6_info(dst);
/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
* IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
@@ -3184,13 +3196,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
unsigned int mtu = dst_mtu(dst);
- struct net *net = dev_net(dev);
+ struct net *net;
mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ rcu_read_lock();
+
+ net = dev_net_rcu(dev);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
+ rcu_read_unlock();
+
/*
* Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
* corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
@@ -3240,8 +3257,8 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
mtu = IPV6_MIN_MTU;
idev = __in6_dev_get(dev);
- if (idev && idev->cnf.mtu6 > mtu)
- mtu = idev->cnf.mtu6;
+ if (idev)
+ mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
}
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
@@ -3591,7 +3608,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
if (!dev)
goto out;
- if (idev->cnf.disable_ipv6) {
+ if (!idev || idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
err = -EACCES;
goto out;
@@ -3627,7 +3644,8 @@ out:
in6_dev_put(idev);
if (err) {
- lwtstate_put(fib6_nh->fib_nh_lws);
+ fib_nh_common_release(&fib6_nh->nh_common);
+ fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
fib6_nh->fib_nh_lws = NULL;
netdev_put(dev, dev_tracker);
}
@@ -3750,7 +3768,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (!rt)
goto out;
- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+ rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);
@@ -3765,8 +3783,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
- else
- fib6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -3787,10 +3803,12 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (nh) {
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
+ err = -EINVAL;
goto out_free;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+ err = -ENOENT;
goto out_free;
}
rt->nh = nh;
@@ -4142,7 +4160,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ if (READ_ONCE(in6_dev->cnf.forwarding) ||
+ !READ_ONCE(in6_dev->cnf.accept_redirects))
return;
/* RFC2461 8.1:
@@ -4165,7 +4184,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
}
}
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_REJECT) {
net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
return;
@@ -4355,7 +4374,8 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref,
- u32 defrtr_usr_metric)
+ u32 defrtr_usr_metric,
+ int lifetime)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
@@ -4368,6 +4388,7 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
+ .fc_expires = jiffies_to_clock_t(lifetime * HZ),
};
cfg.fc_gateway = *gwaddr;
@@ -4434,7 +4455,7 @@ static void rtmsg_to_fib6_config(struct net *net,
.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
: RT6_TABLE_MAIN,
.fc_ifindex = rtmsg->rtmsg_ifindex,
- .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
+ .fc_metric = rtmsg->rtmsg_metric,
.fc_expires = rtmsg->rtmsg_info,
.fc_dst_len = rtmsg->rtmsg_dst_len,
.fc_src_len = rtmsg->rtmsg_src_len,
@@ -4464,6 +4485,9 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
rtnl_lock();
switch (cmd) {
case SIOCADDRT:
+ /* Only do the default setting of fc_metric in route adding */
+ if (cfg.fc_metric == 0)
+ cfg.fc_metric = IP6_RT_PRIO_USER;
err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
break;
case SIOCDELRT:
@@ -4574,8 +4598,8 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
f6i->dst_nocount = true;
if (!anycast &&
- (net->ipv6.devconf_all->disable_policy ||
- idev->cnf.disable_policy))
+ (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
+ READ_ONCE(idev->cnf.disable_policy)))
f6i->dst_nopolicy = true;
}
@@ -4989,6 +5013,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_SPORT] = { .type = NLA_U16 },
[RTA_DPORT] = { .type = NLA_U16 },
[RTA_NH_ID] = { .type = NLA_U32 },
+ [RTA_FLOWLABEL] = { .type = NLA_BE32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -5014,6 +5039,12 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
}
+ if (tb[RTA_FLOWLABEL]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
+ "Flow label cannot be specified for this operation");
+ goto errout;
+ }
+
*cfg = (struct fib6_config){
.fc_table = rtm->rtm_table,
.fc_dst_len = rtm->rtm_dst_len,
@@ -5179,14 +5210,18 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
* nexthop. Since sibling routes are always added at the end of
* the list, find the first sibling of the last route appended
*/
+ rcu_read_lock();
+
if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
- rt = list_first_entry(&rt_last->fib6_siblings,
- struct fib6_info,
- fib6_siblings);
+ rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
+ struct fib6_info,
+ fib6_siblings);
}
if (rt)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+
+ rcu_read_unlock();
}
static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
@@ -5531,17 +5566,21 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
&nexthop_len);
} else {
- struct fib6_info *sibling, *next_sibling;
struct fib6_nh *nh = f6i->fib6_nh;
+ struct fib6_info *sibling;
nexthop_len = 0;
if (f6i->fib6_nsiblings) {
rt6_nh_nlmsg_size(nh, &nexthop_len);
- list_for_each_entry_safe(sibling, next_sibling,
- &f6i->fib6_siblings, fib6_siblings) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
+ fib6_siblings) {
rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
}
+
+ rcu_read_unlock();
}
nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
}
@@ -5597,7 +5636,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
int iif, int type, u32 portid, u32 seq,
unsigned int flags)
{
- struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6_info *rt6 = dst_rt6_info(dst);
struct rt6key *rt6_dst, *rt6_src;
u32 *pmetrics, table, rt6_flags;
unsigned char nh_flags = 0;
@@ -5671,7 +5710,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
} else if (dest) {
struct in6_addr saddr_buf;
- if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
+ if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}
@@ -5705,7 +5744,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
} else if (rt->fib6_nsiblings) {
- struct fib6_info *sibling, *next_sibling;
+ struct fib6_info *sibling;
struct nlattr *mp;
mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
@@ -5717,14 +5756,21 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
0) < 0)
goto nla_put_failure;
- list_for_each_entry_safe(sibling, next_sibling,
- &rt->fib6_siblings, fib6_siblings) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
+ fib6_siblings) {
if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
sibling->fib6_nh->fib_nh_weight,
- AF_INET6, 0) < 0)
+ AF_INET6, 0) < 0) {
+ rcu_read_unlock();
+
goto nla_put_failure;
+ }
}
+ rcu_read_unlock();
+
nla_nest_end(skb, mp);
} else if (rt->nh) {
if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
@@ -5982,6 +6028,13 @@ static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
return -EINVAL;
}
+ if (tb[RTA_FLOWLABEL] &&
+ (nla_get_be32(tb[RTA_FLOWLABEL]) & ~IPV6_FLOWLABEL_MASK)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
+ "Invalid flow label");
+ return -EINVAL;
+ }
+
for (i = 0; i <= RTA_MAX; i++) {
if (!tb[i])
continue;
@@ -5996,6 +6049,7 @@ static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
case RTA_SPORT:
case RTA_DPORT:
case RTA_IP_PROTO:
+ case RTA_FLOWLABEL:
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
@@ -6018,6 +6072,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6 = {};
+ __be32 flowlabel;
bool fibmatch;
err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
@@ -6026,7 +6081,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
err = -EINVAL;
rtm = nlmsg_data(nlh);
- fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
if (tb[RTA_SRC]) {
@@ -6072,6 +6126,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout;
}
+ flowlabel = nla_get_be32_default(tb[RTA_FLOWLABEL], 0);
+ fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, flowlabel);
+
if (iif) {
struct net_device *dev;
int flags = 0;
@@ -6100,7 +6157,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
- rt = container_of(dst, struct rt6_info, dst);
+ rt = dst_rt6_info(dst);
if (rt->dst.error) {
err = rt->dst.error;
ip6_rt_put(rt);
@@ -6161,7 +6218,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
err = -ENOBUFS;
seq = info->nlh ? info->nlh->nlmsg_seq : 0;
- skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
+ skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC);
if (!skb)
goto errout;
@@ -6174,11 +6231,10 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
goto errout;
}
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
- info->nlh, gfp_any());
+ info->nlh, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_rt_update(struct net *net, struct fib6_info *rt,
@@ -6204,8 +6260,7 @@ void fib6_rt_update(struct net *net, struct fib6_info *rt,
info->nlh, gfp_any());
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
@@ -6318,7 +6373,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v)
#ifdef CONFIG_SYSCTL
-static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -6327,12 +6382,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
if (!write)
return -EINVAL;
- net = (struct net *)ctl->extra1;
- delay = net->ipv6.sysctl.flush_delay;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;
+ net = (struct net *)ctl->extra1;
+ delay = net->ipv6.sysctl.flush_delay;
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
@@ -6417,7 +6472,6 @@ static struct ctl_table ipv6_route_table_template[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
@@ -6441,10 +6495,6 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[1].procname = NULL;
}
return table;
@@ -6671,6 +6721,15 @@ static void bpf_iter_unregister(void)
#endif
#endif
+static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE,
+ .doit = inet6_rtm_newroute},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE,
+ .doit = inet6_rtm_delroute},
+ {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
+ .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+};
+
int __init ip6_route_init(void)
{
int ret;
@@ -6713,19 +6772,7 @@ int __init ip6_route_init(void)
if (ret)
goto fib6_rules_init;
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
- inet6_rtm_newroute, NULL, 0);
- if (ret < 0)
- goto out_register_late_subsys;
-
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
- inet6_rtm_delroute, NULL, 0);
- if (ret < 0)
- goto out_register_late_subsys;
-
- ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
- inet6_rtm_getroute, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
+ ret = rtnl_register_many(ip6_route_rtnl_msg_handlers);
if (ret < 0)
goto out_register_late_subsys;
@@ -6745,7 +6792,6 @@ int __init ip6_route_init(void)
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
- INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index a013b92cbb86..7c05ac846646 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -125,7 +125,8 @@ static void rpl_destroy_state(struct lwtunnel_state *lwt)
}
static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
- const struct ipv6_rpl_sr_hdr *srh)
+ const struct ipv6_rpl_sr_hdr *srh,
+ struct dst_entry *cache_dst)
{
struct ipv6_rpl_sr_hdr *isrh, *csrh;
const struct ipv6hdr *oldhdr;
@@ -153,7 +154,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
hdrlen = ((csrh->hdrlen + 1) << 3);
- err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
if (unlikely(err)) {
kfree(buf);
return err;
@@ -186,7 +187,8 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
return 0;
}
-static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
+static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct rpl_iptunnel_encap *tinfo;
@@ -196,7 +198,7 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
tinfo = rpl_encap_lwtunnel(dst->lwtstate);
- return rpl_do_srh_inline(skb, rlwt, tinfo->srh);
+ return rpl_do_srh_inline(skb, rlwt, tinfo->srh, cache_dst);
}
static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -208,14 +210,14 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
- err = rpl_do_srh(skb, rlwt);
+ local_bh_disable();
+ dst = dst_cache_get(&rlwt->cache);
+ local_bh_enable();
+
+ err = rpl_do_srh(skb, rlwt, dst);
if (unlikely(err))
goto drop;
- preempt_disable();
- dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
-
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -230,25 +232,28 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
err = dst->error;
- dst_release(dst);
goto drop;
}
- preempt_disable();
- dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ /* cache only if we don't create a dst reference loop */
+ if (orig_dst->lwtstate != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
}
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;
-
return dst_output(net, sk, skb);
drop:
+ dst_release(dst);
kfree_skb(skb);
return err;
}
@@ -257,40 +262,54 @@ static int rpl_input(struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct rpl_lwt *rlwt;
int err;
- rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
- err = rpl_do_srh(skb, rlwt);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ rlwt = rpl_lwt_lwtunnel(lwtst);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
+ local_bh_enable();
+
+ err = rpl_do_srh(skb, rlwt, dst);
+ if (unlikely(err)) {
+ dst_release(dst);
+ goto drop;
+ }
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
- preempt_disable();
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
+ local_bh_enable();
}
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- return err;
-
return dst_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return err;
}
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 35508abd76f4..180da19c148c 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -21,9 +21,7 @@
#include <net/genetlink.h>
#include <linux/seg6.h>
#include <linux/seg6_genl.h>
-#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
-#endif
bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
{
@@ -437,13 +435,11 @@ static int __net_init seg6_net_init(struct net *net)
net->ipv6.seg6_data = sdata;
-#ifdef CONFIG_IPV6_SEG6_HMAC
if (seg6_hmac_net_init(net)) {
kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
return -ENOMEM;
}
-#endif
return 0;
}
@@ -452,9 +448,7 @@ static void __net_exit seg6_net_exit(struct net *net)
{
struct seg6_pernet_data *sdata = seg6_pernet(net);
-#ifdef CONFIG_IPV6_SEG6_HMAC
seg6_hmac_net_exit(net);
-#endif
kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
@@ -520,39 +514,28 @@ int __init seg6_init(void)
if (err)
goto out_unregister_pernet;
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
err = seg6_iptunnel_init();
if (err)
goto out_unregister_genl;
err = seg6_local_init();
- if (err) {
- seg6_iptunnel_exit();
- goto out_unregister_genl;
- }
-#endif
+ if (err)
+ goto out_unregister_iptun;
-#ifdef CONFIG_IPV6_SEG6_HMAC
err = seg6_hmac_init();
if (err)
- goto out_unregister_iptun;
-#endif
+ goto out_unregister_seg6;
pr_info("Segment Routing with IPv6\n");
out:
return err;
-#ifdef CONFIG_IPV6_SEG6_HMAC
-out_unregister_iptun:
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+out_unregister_seg6:
seg6_local_exit();
+out_unregister_iptun:
seg6_iptunnel_exit();
-#endif
-#endif
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
out_unregister_genl:
genl_unregister_family(&seg6_genl_family);
-#endif
out_unregister_pernet:
unregister_pernet_subsys(&ip6_segments_ops);
goto out;
@@ -560,12 +543,9 @@ out_unregister_pernet:
void seg6_exit(void)
{
-#ifdef CONFIG_IPV6_SEG6_HMAC
seg6_hmac_exit();
-#endif
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ seg6_local_exit();
seg6_iptunnel_exit();
-#endif
- unregister_pernet_subsys(&ip6_segments_ops);
genl_unregister_family(&seg6_genl_family);
+ unregister_pernet_subsys(&ip6_segments_ops);
}
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index d43c50a7310d..bbf5b84a70fc 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -241,6 +241,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
struct sr6_tlv_hmac *tlv;
struct ipv6_sr_hdr *srh;
struct inet6_dev *idev;
+ int require_hmac;
idev = __in6_dev_get(skb->dev);
@@ -248,16 +249,17 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
tlv = seg6_get_tlv_hmac(srh);
+ require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac);
/* mandatory check but no tlv */
- if (idev->cnf.seg6_require_hmac > 0 && !tlv)
+ if (require_hmac > 0 && !tlv)
return false;
/* no check */
- if (idev->cnf.seg6_require_hmac < 0)
+ if (require_hmac < 0)
return true;
/* check only if present */
- if (idev->cnf.seg6_require_hmac == 0 && !tlv)
+ if (require_hmac == 0 && !tlv)
return true;
/* now, seg6_require_hmac >= 0 && tlv */
@@ -354,6 +356,7 @@ static int seg6_hmac_init_algo(void)
struct crypto_shash *tfm;
struct shash_desc *shash;
int i, alg_count, cpu;
+ int ret = -ENOMEM;
alg_count = ARRAY_SIZE(hmac_algos);
@@ -364,12 +367,14 @@ static int seg6_hmac_init_algo(void)
algo = &hmac_algos[i];
algo->tfms = alloc_percpu(struct crypto_shash *);
if (!algo->tfms)
- return -ENOMEM;
+ goto error_out;
for_each_possible_cpu(cpu) {
tfm = crypto_alloc_shash(algo->name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto error_out;
+ }
p_tfm = per_cpu_ptr(algo->tfms, cpu);
*p_tfm = tfm;
}
@@ -381,18 +386,22 @@ static int seg6_hmac_init_algo(void)
algo->shashs = alloc_percpu(struct shash_desc *);
if (!algo->shashs)
- return -ENOMEM;
+ goto error_out;
for_each_possible_cpu(cpu) {
shash = kzalloc_node(shsize, GFP_KERNEL,
cpu_to_node(cpu));
if (!shash)
- return -ENOMEM;
+ goto error_out;
*per_cpu_ptr(algo->shashs, cpu) = shash;
}
}
return 0;
+
+error_out:
+ seg6_hmac_exit();
+ return ret;
}
int __init seg6_hmac_init(void)
@@ -410,22 +419,29 @@ int __net_init seg6_hmac_net_init(struct net *net)
void seg6_hmac_exit(void)
{
struct seg6_hmac_algo *algo = NULL;
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
int i, alg_count, cpu;
alg_count = ARRAY_SIZE(hmac_algos);
for (i = 0; i < alg_count; i++) {
algo = &hmac_algos[i];
- for_each_possible_cpu(cpu) {
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- shash = *per_cpu_ptr(algo->shashs, cpu);
- kfree(shash);
- tfm = *per_cpu_ptr(algo->tfms, cpu);
- crypto_free_shash(tfm);
+ if (algo->shashs) {
+ for_each_possible_cpu(cpu) {
+ shash = *per_cpu_ptr(algo->shashs, cpu);
+ kfree(shash);
+ }
+ free_percpu(algo->shashs);
+ }
+
+ if (algo->tfms) {
+ for_each_possible_cpu(cpu) {
+ tfm = *per_cpu_ptr(algo->tfms, cpu);
+ crypto_free_shash(tfm);
+ }
+ free_percpu(algo->tfms);
}
- free_percpu(algo->tfms);
- free_percpu(algo->shashs);
}
}
EXPORT_SYMBOL(seg6_hmac_exit);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 03b877ff4558..51583461ae29 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -124,8 +124,8 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
return flowlabel;
}
-/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
-int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ int proto, struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct net *net = dev_net(dst->dev);
@@ -137,7 +137,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdrlen = (osrh->hdrlen + 1) << 3;
tot_len = hdrlen + sizeof(*hdr);
- err = skb_cow_head(skb, tot_len + skb->mac_len);
+ err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;
@@ -197,11 +197,18 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
return 0;
}
+
+/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
+int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+{
+ return __seg6_do_srh_encap(skb, osrh, proto, NULL);
+}
EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
/* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
static int seg6_do_srh_encap_red(struct sk_buff *skb,
- struct ipv6_sr_hdr *osrh, int proto)
+ struct ipv6_sr_hdr *osrh, int proto,
+ struct dst_entry *cache_dst)
{
__u8 first_seg = osrh->first_segment;
struct dst_entry *dst = skb_dst(skb);
@@ -230,7 +237,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
tot_len = red_hdrlen + sizeof(struct ipv6hdr);
- err = skb_cow_head(skb, tot_len + skb->mac_len);
+ err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;
@@ -317,8 +324,8 @@ out:
return 0;
}
-/* insert an SRH within an IPv6 packet, just after the IPv6 header */
-int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ struct dst_entry *cache_dst)
{
struct ipv6hdr *hdr, *oldhdr;
struct ipv6_sr_hdr *isrh;
@@ -326,7 +333,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
hdrlen = (osrh->hdrlen + 1) << 3;
- err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;
@@ -369,9 +376,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
return 0;
}
-EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
-static int seg6_do_srh(struct sk_buff *skb)
+static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct seg6_iptunnel_encap *tinfo;
@@ -384,7 +390,7 @@ static int seg6_do_srh(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;
- err = seg6_do_srh_inline(skb, tinfo->srh);
+ err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst);
if (err)
return err;
break;
@@ -402,9 +408,11 @@ static int seg6_do_srh(struct sk_buff *skb)
return -EINVAL;
if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
- err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+ err = __seg6_do_srh_encap(skb, tinfo->srh,
+ proto, cache_dst);
else
- err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
+ err = seg6_do_srh_encap_red(skb, tinfo->srh,
+ proto, cache_dst);
if (err)
return err;
@@ -425,11 +433,13 @@ static int seg6_do_srh(struct sk_buff *skb)
skb_push(skb, skb->mac_len);
if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
- err = seg6_do_srh_encap(skb, tinfo->srh,
- IPPROTO_ETHERNET);
+ err = __seg6_do_srh_encap(skb, tinfo->srh,
+ IPPROTO_ETHERNET,
+ cache_dst);
else
err = seg6_do_srh_encap_red(skb, tinfo->srh,
- IPPROTO_ETHERNET);
+ IPPROTO_ETHERNET,
+ cache_dst);
if (err)
return err;
@@ -444,6 +454,13 @@ static int seg6_do_srh(struct sk_buff *skb)
return 0;
}
+/* insert an SRH within an IPv6 packet, just after the IPv6 header */
+int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+{
+ return __seg6_do_srh_inline(skb, osrh, NULL);
+}
+EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
+
static int seg6_input_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
@@ -455,45 +472,58 @@ static int seg6_input_core(struct net *net, struct sock *sk,
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct seg6_lwt *slwt;
int err;
- err = seg6_do_srh(skb);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
- slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ slwt = seg6_lwt_lwtunnel(lwtst);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
+ local_bh_enable();
+
+ err = seg6_do_srh(skb, dst);
+ if (unlikely(err)) {
+ dst_release(dst);
+ goto drop;
+ }
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
- preempt_disable();
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
+ local_bh_enable();
}
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- return err;
-
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
dev_net(skb->dev), NULL, skb, NULL,
skb_dst(skb)->dev, seg6_input_finish);
return seg6_input_finish(dev_net(skb->dev), NULL, skb);
+drop:
+ kfree_skb(skb);
+ return err;
}
static int seg6_input_nf(struct sk_buff *skb)
@@ -529,15 +559,15 @@ static int seg6_output_core(struct net *net, struct sock *sk,
struct seg6_lwt *slwt;
int err;
- err = seg6_do_srh(skb);
- if (unlikely(err))
- goto drop;
-
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
+ local_bh_enable();
+
+ err = seg6_do_srh(skb, dst);
+ if (unlikely(err))
+ goto drop;
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -553,28 +583,31 @@ static int seg6_output_core(struct net *net, struct sock *sk,
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
err = dst->error;
- dst_release(dst);
goto drop;
}
- preempt_disable();
- dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ /* cache only if we don't create a dst reference loop */
+ if (orig_dst->lwtstate != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
}
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;
-
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, skb_dst(skb)->dev, dst_output);
return dst_output(net, sk, skb);
drop:
+ dst_release(dst);
kfree_skb(skb);
return err;
}
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 24e2b4b494cb..ac1dbd492c22 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx6_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx6_finish);
return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -954,10 +954,10 @@ static int input_action_end_dx4_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
+ enum skb_drop_reason reason;
struct seg6_local_lwt *slwt;
struct iphdr *iph;
__be32 nhaddr;
- int err;
slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
@@ -967,9 +967,9 @@ static int input_action_end_dx4_finish(struct net *net, struct sock *sk,
skb_dst_drop(skb);
- err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
- if (err) {
- kfree_skb(skb);
+ reason = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
+ if (reason) {
+ kfree_skb_reason(skb, reason);
return -EINVAL;
}
@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx4_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx4_finish);
return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -1174,8 +1174,8 @@ drop:
static int input_action_end_dt4(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
+ enum skb_drop_reason reason;
struct iphdr *iph;
- int err;
if (!decap_and_validate(skb, IPPROTO_IPIP))
goto drop;
@@ -1193,8 +1193,8 @@ static int input_action_end_dt4(struct sk_buff *skb,
iph = ip_hdr(skb);
- err = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev);
- if (unlikely(err))
+ reason = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev);
+ if (unlikely(reason))
goto drop;
return dst_input(skb);
@@ -1380,7 +1380,9 @@ drop:
return err;
}
-DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
+DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
{
@@ -1388,6 +1390,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
this_cpu_ptr(&seg6_bpf_srh_states);
struct ipv6_sr_hdr *srh = srh_state->srh;
+ lockdep_assert_held(&srh_state->bh_lock);
if (unlikely(srh == NULL))
return false;
@@ -1408,8 +1411,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
static int input_action_end_bpf(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
- struct seg6_bpf_srh_state *srh_state =
- this_cpu_ptr(&seg6_bpf_srh_states);
+ struct seg6_bpf_srh_state *srh_state;
struct ipv6_sr_hdr *srh;
int ret;
@@ -1420,10 +1422,14 @@ static int input_action_end_bpf(struct sk_buff *skb,
}
advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
- /* preempt_disable is needed to protect the per-CPU buffer srh_state,
- * which is also accessed by the bpf_lwt_seg6_* helpers
+ /* The access to the per-CPU buffer srh_state is protected by running
+ * always in softirq context (with disabled BH). On PREEMPT_RT the
+ * required locking is provided by the following local_lock_nested_bh()
+ * statement. It is also accessed by the bpf_lwt_seg6_* helpers via
+ * bpf_prog_run_save_cb().
*/
- preempt_disable();
+ local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+ srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
srh_state->srh = srh;
srh_state->hdrlen = srh->hdrlen << 3;
srh_state->valid = true;
@@ -1446,15 +1452,15 @@ static int input_action_end_bpf(struct sk_buff *skb,
if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
goto drop;
+ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
- preempt_enable();
if (ret != BPF_REDIRECT)
seg6_lookup_nexthop(skb, NULL, 0);
return dst_input(skb);
drop:
- preempt_enable();
+ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
kfree_skb(skb);
return -EINVAL;
}
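
The input_action_end_bpf() hunks above stop relying on preempt_disable() to protect the per-CPU seg6_bpf_srh_states and instead take a nested-BH local lock: on !PREEMPT_RT the lock only documents and lockdep-checks the softirq context, on PREEMPT_RT it provides the real per-CPU exclusion. A minimal sketch of that pattern with a hypothetical per-CPU structure:

    /* Sketch of the local_lock_nested_bh() pattern; assumes the caller
     * runs in softirq (BH-disabled) context.
     */
    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct demo_pcpu_state {
        int          counter;
        local_lock_t bh_lock;
    };

    static DEFINE_PER_CPU(struct demo_pcpu_state, demo_pcpu_state) = {
        .bh_lock = INIT_LOCAL_LOCK(bh_lock),
    };

    static void demo_update(void)
    {
        struct demo_pcpu_state *st;

        /* no-op lock on !PREEMPT_RT (plus lockdep tracking),
         * a real per-CPU lock on PREEMPT_RT
         */
        local_lock_nested_bh(&demo_pcpu_state.bh_lock);
        st = this_cpu_ptr(&demo_pcpu_state);
        st->counter++;
        local_unlock_nested_bh(&demo_pcpu_state.bh_lock);
    }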
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5e9f625b76e3..39bd8951bfca 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -51,6 +51,7 @@
#include <net/dsfield.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/inet_dscp.h>
/*
This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
@@ -132,8 +133,8 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
return NULL;
}
-static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
- struct ip_tunnel_parm *parms)
+static struct ip_tunnel __rcu **
+__ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm_kern *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
@@ -207,7 +208,7 @@ static int ipip6_tunnel_create(struct net_device *dev)
__dev_addr_set(dev, &t->parms.iph.saddr, 4);
memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
- if ((__force u16)t->parms.i_flags & SIT_ISATAP)
+ if (test_bit(IP_TUNNEL_SIT_ISATAP_BIT, t->parms.i_flags))
dev->priv_flags |= IFF_ISATAP;
dev->rtnl_link_ops = &sit_link_ops;
@@ -226,7 +227,8 @@ out:
}
static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
- struct ip_tunnel_parm *parms, int create)
+ struct ip_tunnel_parm_kern *parms,
+ int create)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
@@ -934,8 +936,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
flowi4_init_output(&fl4, tunnel->parms.link, tunnel->fwmark,
- RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPV6,
- 0, dst, tiph->saddr, 0, 0,
+ tos & INET_DSCP_MASK, RT_SCOPE_UNIVERSE,
+ IPPROTO_IPV6, 0, dst, tiph->saddr, 0, 0,
sock_net_uid(tunnel->net, NULL));
rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
@@ -1110,7 +1112,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
iph->daddr, iph->saddr,
0, 0,
IPPROTO_IPV6,
- RT_TOS(iph->tos),
+ iph->tos & INET_DSCP_MASK,
tunnel->parms.link);
if (!IS_ERR(rt)) {
@@ -1135,7 +1137,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
dev->needed_headroom = t_hlen + hlen;
}
-static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
+static void ipip6_tunnel_update(struct ip_tunnel *t,
+ struct ip_tunnel_parm_kern *p,
__u32 fwmark)
{
struct net *net = t->net;
@@ -1196,11 +1199,11 @@ static int
ipip6_tunnel_get6rd(struct net_device *dev, struct ip_tunnel_parm __user *data)
{
struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm_kern p;
struct ip_tunnel_6rd ip6rd;
- struct ip_tunnel_parm p;
if (dev == dev_to_sit_net(dev)->fb_tunnel_dev) {
- if (copy_from_user(&p, data, sizeof(p)))
+ if (!ip_tunnel_parm_from_user(&p, data))
return -EFAULT;
t = ipip6_tunnel_locate(t->net, &p, 0);
}
@@ -1251,7 +1254,7 @@ static bool ipip6_valid_ip_proto(u8 ipproto)
}
static int
-__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p)
+__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm_kern *p)
{
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -1268,7 +1271,7 @@ __ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p)
}
static int
-ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p)
+ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
@@ -1281,7 +1284,7 @@ ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
-ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p)
+ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
int err;
@@ -1297,7 +1300,7 @@ ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
-ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p)
+ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
int err;
@@ -1328,7 +1331,7 @@ ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
-ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p)
+ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
@@ -1348,7 +1351,8 @@ ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
-ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
+ int cmd)
{
switch (cmd) {
case SIOCGETTUNNEL:
@@ -1398,7 +1402,6 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_siocdevprivate = ipip6_tunnel_siocdevprivate,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip6_tunnel_ctl,
};
@@ -1408,7 +1411,6 @@ static void ipip6_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
#define SIT_FEATURES (NETIF_F_SG | \
@@ -1434,9 +1436,11 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
netif_keep_dst(dev);
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
}
static int ipip6_tunnel_init(struct net_device *dev)
@@ -1449,17 +1453,13 @@ static int ipip6_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
- dev->tstats = NULL;
+ if (err)
return err;
- }
+
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1494,7 +1494,7 @@ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void ipip6_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms,
+ struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@@ -1603,8 +1603,8 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ struct ip_tunnel_parm_kern p;
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
@@ -1691,7 +1691,7 @@ static size_t ipip6_get_size(const struct net_device *dev)
static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct ip_tunnel_parm *parm = &tunnel->parms;
+ struct ip_tunnel_parm_kern *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
@@ -1701,7 +1701,8 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
!!(parm->iph.frag_off & htons(IP_DF))) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
- nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) ||
+ nla_put_be16(skb, IFLA_IPTUN_FLAGS,
+ ip_tunnel_flags_to_be16(parm->i_flags)) ||
nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
goto nla_put_failure;
@@ -1855,7 +1856,7 @@ static int __net_init sit_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ sitn->fb_tunnel_dev->netns_local = true;
err = register_netdev(sitn->fb_tunnel_dev);
if (err)
@@ -1875,22 +1876,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit sit_exit_batch_net(struct list_head *net_list)
+static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- LIST_HEAD(list);
struct net *net;
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- sit_destroy_tunnels(net, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ sit_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
- .exit_batch = sit_exit_batch_net,
+ .exit_batch_rtnl = sit_exit_batch_rtnl,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};
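
The sit pernet teardown above moves from an .exit_batch handler that took rtnl_lock() itself and called unregister_netdevice_many() to .exit_batch_rtnl, where the core holds RTNL once around the whole batch and later unregisters every device the callback queues on dev_to_kill. A sketch of that ops shape; the demo_* helpers are hypothetical stand-ins for the sit ones.

    /* Sketch: .exit_batch_rtnl callbacks only queue devices; they must
     * not take RTNL themselves.
     */
    #include <linux/rtnetlink.h>
    #include <net/net_namespace.h>

    static int __net_init demo_init_net(struct net *net);
    static void demo_destroy_tunnels(struct net *net, struct list_head *dev_to_kill);

    static void __net_exit demo_exit_batch_rtnl(struct list_head *net_list,
                                                struct list_head *dev_to_kill)
    {
        struct net *net;

        ASSERT_RTNL();
        list_for_each_entry(net, net_list, exit_list)
            demo_destroy_tunnels(net, dev_to_kill);
    }

    static struct pernet_operations demo_net_ops = {
        .init            = demo_init_net,
        .exit_batch_rtnl = demo_exit_batch_rtnl,
    };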
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c8d2ca27220c..9d83eadd308b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -177,24 +177,33 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct sock *ret = sk;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -231,11 +240,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
- if (IS_ERR(dst))
+ if (IS_ERR(dst)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
}
- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :dst_metric(dst, RTAX_WINDOW);
/* limit the window selection if the user enforce a smaller rx buffer */
full_space = tcp_full_space(sk);
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
@@ -247,14 +258,23 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ /* req->syncookie is set true only if ACK is validated
+ * by BPF kfunc, then, rcv_wscale is already configured.
+ */
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ sk_skb_reason_drop(sk, skb, reason);
return NULL;
}
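
The cookie_v6_check() hunks above attach a drop reason to every failure path: SKB_DR() declares a reason variable preset to NOT_SPECIFIED, SKB_DR_SET() narrows it at each exit, and sk_skb_reason_drop() frees the skb while attributing the drop to the socket. A condensed sketch of that flow; demo_parse() is a hypothetical stand-in for the real validation steps.

    #include <linux/skbuff.h>
    #include <net/dropreason.h>

    static bool demo_parse(const struct sk_buff *skb);

    static struct sock *demo_check(struct sock *sk, struct sk_buff *skb)
    {
        SKB_DR(reason);                 /* = SKB_DROP_REASON_NOT_SPECIFIED */

        if (!demo_parse(skb)) {
            SKB_DR_SET(reason, NO_SOCKET);
            goto drop;
        }
        return sk;

    drop:
        sk_skb_reason_drop(sk, skb, reason);    /* frees skb, records reason */
        return NULL;
    }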
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 888676163e90..d2cd33e2698d 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -30,7 +30,7 @@ static u32 rt6_multipath_hash_fields_all_mask =
static u32 ioam6_id_max = IOAM6_DEFAULT_ID;
static u64 ioam6_id_wide_max = IOAM6_DEFAULT_ID_WIDE;
-static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
+static int proc_rt6_multipath_hash_policy(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -46,7 +46,7 @@ static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
}
static int
-proc_rt6_multipath_hash_fields(struct ctl_table *table, int write, void *buffer,
+proc_rt6_multipath_hash_fields(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct net *net;
@@ -213,7 +213,6 @@ static struct ctl_table ipv6_table_template[] = {
.proc_handler = proc_doulongvec_minmax,
.extra2 = &ioam6_id_wide_max,
},
- { }
};
static struct ctl_table ipv6_rotable[] = {
@@ -248,11 +247,11 @@ static struct ctl_table ipv6_rotable[] = {
.proc_handler = proc_dointvec,
},
#endif /* CONFIG_NETLABEL */
- { }
};
static int __net_init ipv6_sysctl_net_init(struct net *net)
{
+ size_t table_size = ARRAY_SIZE(ipv6_table_template);
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
@@ -264,7 +263,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
if (!ipv6_table)
goto out;
/* Update the variables to point into the current struct net */
- for (i = 0; i < ARRAY_SIZE(ipv6_table_template) - 1; i++)
+ for (i = 0; i < table_size; i++)
ipv6_table[i].data += (void *)net - (void *)&init_net;
ipv6_route_table = ipv6_route_sysctl_init(net);
@@ -276,8 +275,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
goto out_ipv6_route_table;
net->ipv6.sysctl.hdr = register_net_sysctl_sz(net, "net/ipv6",
- ipv6_table,
- ARRAY_SIZE(ipv6_table_template));
+ ipv6_table, table_size);
if (!net->ipv6.sysctl.hdr)
goto out_ipv6_icmp_table;
@@ -313,9 +311,9 @@ out_ipv6_table:
static void __net_exit ipv6_sysctl_net_exit(struct net *net)
{
- struct ctl_table *ipv6_table;
- struct ctl_table *ipv6_route_table;
- struct ctl_table *ipv6_icmp_table;
+ const struct ctl_table *ipv6_table;
+ const struct ctl_table *ipv6_route_table;
+ const struct ctl_table *ipv6_icmp_table;
ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
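
The sysctl hunks above drop the empty terminating entry from the tables; the entry count is now carried explicitly via ARRAY_SIZE() into both the per-netns fixup loop and register_net_sysctl_sz(). A sketch of the sentinel-free registration with a hypothetical one-entry table (the "net/demo" path and demo_* names are made up):

    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    static int demo_value;

    static struct ctl_table demo_table[] = {
        {
            .procname     = "demo_value",
            .data         = &demo_value,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
        },
        /* no { } sentinel: the size is passed explicitly below */
    };

    static struct ctl_table_header *demo_hdr;

    static int __net_init demo_sysctl_init(struct net *net)
    {
        demo_hdr = register_net_sysctl_sz(net, "net/demo", demo_table,
                                          ARRAY_SIZE(demo_table));
        return demo_hdr ? 0 : -ENOMEM;
    }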
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b25b1fc9d9..2debdf085a3b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -58,7 +58,9 @@
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/busy_poll.h>
+#include <net/rstreason.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -68,7 +70,8 @@
#include <trace/events/tcp.h>
-static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
+ enum sk_rst_reason reason);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
@@ -94,11 +97,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
- const struct rt6_info *rt = (const struct rt6_info *)dst;
-
rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
- sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
+ sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
}
@@ -489,14 +490,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
- if (!sock_owned_by_user(sk)) {
- WRITE_ONCE(sk->sk_err, err);
- sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
-
- tcp_done(sk);
- } else {
+ if (!sock_owned_by_user(sk))
+ tcp_done_with_error(sk, err);
+ else
WRITE_ONCE(sk->sk_err_soft, err);
- }
goto out;
case TCP_LISTEN:
break;
@@ -792,7 +789,8 @@ clear_hash_nostart:
static void tcp_v6_init_req(struct request_sock *req,
const struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 tw_isn)
{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
struct inet_request_sock *ireq = inet_rsk(req);
@@ -806,7 +804,7 @@ static void tcp_v6_init_req(struct request_sock *req,
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = tcp_v6_iif(skb);
- if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
+ if (!tw_isn &&
(ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
@@ -819,9 +817,10 @@ static void tcp_v6_init_req(struct request_sock *req,
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct request_sock *req)
+ struct request_sock *req,
+ u32 tw_isn)
{
- tcp_v6_init_req(req, sk, skb);
+ tcp_v6_init_req(req, sk, skb, tw_isn);
if (security_inet_conn_request(sk, skb, req))
return NULL;
@@ -968,11 +967,14 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
}
if (sk) {
+ /* unconstify the socket only to attach it to buff with care. */
+ skb_set_owner_edemux(buff, (struct sock *)sk);
+
if (sk->sk_state == TCP_TIME_WAIT)
mark = inet_twsk(sk)->tw_mark;
else
mark = READ_ONCE(sk->sk_mark);
- skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
+ skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
}
if (txhash) {
/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
@@ -1005,7 +1007,8 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
kfree_skb(buff);
}
-static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
+ enum sk_rst_reason reason)
{
const struct tcphdr *th = tcp_hdr(skb);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
@@ -1112,7 +1115,6 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
if (sk) {
oif = sk->sk_bound_dev_if;
if (sk_fullsock(sk)) {
- trace_tcp_send_reset(sk, skb);
if (inet6_test_bit(REPFLOW, sk))
label = ip6_flowlabel(ipv6h);
priority = READ_ONCE(sk->sk_priority);
@@ -1128,6 +1130,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
label = ip6_flowlabel(ipv6h);
}
+ trace_tcp_send_reset(sk, skb, reason);
+
tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
ipv6_get_dsfield(ipv6h), label, priority, txhash,
&key);
@@ -1168,8 +1172,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
goto out;
if (aoh)
- key.ao_key = tcp_ao_established_key(ao_info,
- aoh->rnext_keyid, -1);
+ key.ao_key = tcp_ao_established_key(sk, ao_info,
+ aoh->rnext_keyid, -1);
}
}
if (key.ao_key) {
@@ -1192,12 +1196,13 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
#endif
}
- tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt,
+ READ_ONCE(tcptw->tw_rcv_nxt),
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
- tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key,
- tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
- tw->tw_txhash);
+ READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
+ &key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel),
+ tw->tw_priority, tw->tw_txhash);
#ifdef CONFIG_TCP_AO
out:
@@ -1267,15 +1272,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
@@ -1439,7 +1439,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
@@ -1450,6 +1449,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ ip6_dst_store(newsk, dst, NULL, NULL);
+
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
@@ -1620,10 +1621,9 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
by tcp. Feel free to propose better solution.
--ANK (980728)
*/
- if (np->rxopt.all)
+ if (np->rxopt.all && sk->sk_state != TCP_LISTEN)
opt_skb = skb_clone_and_charge_r(skb, sk);
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst;
@@ -1653,31 +1653,30 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_cookie_check(sk, skb);
- if (!nsk)
- goto discard;
-
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
- goto reset;
- if (opt_skb)
- __kfree_skb(opt_skb);
+ if (nsk) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason)
+ goto reset;
+ }
return 0;
}
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason)
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
return 0;
reset:
- tcp_v6_send_reset(sk, skb);
+ tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason));
discard:
if (opt_skb)
__kfree_skb(opt_skb);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
csum_err:
reason = SKB_DROP_REASON_TCP_CSUM;
@@ -1737,7 +1736,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
TCP_SKB_CB(skb)->has_rxtstamp =
@@ -1751,9 +1749,10 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
int dif = inet6_iif(skb);
const struct tcphdr *th;
const struct ipv6hdr *hdr;
+ struct sock *sk = NULL;
bool refcounted;
- struct sock *sk;
int ret;
+ u32 isn;
struct net *net = dev_net(skb->dev);
drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
@@ -1790,7 +1789,6 @@ lookup:
if (!sk)
goto no_tcp_socket;
-process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
@@ -1856,15 +1854,21 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v6_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ enum sk_rst_reason rst_reason;
+
+ rst_reason = sk_rst_convert_drop_reason(drop_reason);
+ tcp_v6_send_reset(nsk, skb, rst_reason);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
}
+process:
if (static_branch_unlikely(&ip6_min_hopcount)) {
/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
@@ -1933,12 +1937,12 @@ csum_error:
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
- tcp_v6_send_reset(NULL, skb);
+ tcp_v6_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason));
}
discard_it:
SKB_DR_OR(drop_reason, NOT_SPECIFIED);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
discard_and_relse:
@@ -1961,7 +1965,7 @@ do_time_wait:
goto csum_error;
}
- switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
+ switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
case TCP_TW_SYN:
{
struct sock *sk2;
@@ -1979,6 +1983,7 @@ do_time_wait:
sk = sk2;
tcp_v6_restore_cb(skb);
refcounted = false;
+ __this_cpu_write(tcp_tw_isn, isn);
goto process;
}
}
@@ -1988,7 +1993,7 @@ do_time_wait:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
- tcp_v6_send_reset(sk, skb);
+ tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
inet_twsk_deschedule_put(inet_twsk(sk));
goto discard_it;
case TCP_TW_SUCCESS:
@@ -2038,7 +2043,6 @@ void tcp_v6_early_demux(struct sk_buff *skb)
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
- .twsk_unique = tcp_twsk_unique,
.twsk_destructor = tcp_twsk_destructor,
};
@@ -2174,6 +2178,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
+ u8 icsk_pending;
int rx_queue;
int state;
@@ -2182,12 +2187,13 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ icsk_pending = smp_load_acquire(&icsk->icsk_pending);
+ if (icsk_pending == ICSK_TIME_RETRANS ||
+ icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+ icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
- } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+ } else if (icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
@@ -2256,7 +2262,7 @@ static void get_timewait6_sock(struct seq_file *seq,
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- tw->tw_substate, 0, 0,
+ READ_ONCE(tw->tw_substate), 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
refcount_read(&tw->tw_refcnt), tw);
}
@@ -2365,11 +2371,6 @@ struct proto tcpv6_prot = {
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
-static const struct inet6_protocol tcpv6_protocol = {
- .handler = tcp_v6_rcv,
- .err_handler = tcp_v6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
static struct inet_protosw tcpv6_protosw = {
.type = SOCK_STREAM,
@@ -2382,8 +2383,14 @@ static struct inet_protosw tcpv6_protosw = {
static int __net_init tcpv6_net_init(struct net *net)
{
- return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
- SOCK_RAW, IPPROTO_TCP, net);
+ int res;
+
+ res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
+ SOCK_RAW, IPPROTO_TCP, net);
+ if (!res)
+ net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC;
+
+ return res;
}
static void __net_exit tcpv6_net_exit(struct net *net)
@@ -2391,22 +2398,21 @@ static void __net_exit tcpv6_net_exit(struct net *net)
inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
-static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
-{
- tcp_twsk_purge(net_exit_list, AF_INET6);
-}
-
static struct pernet_operations tcpv6_net_ops = {
.init = tcpv6_net_init,
.exit = tcpv6_net_exit,
- .exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
if (ret)
goto out;
@@ -2431,7 +2437,7 @@ out_tcpv6_pernet_subsys:
out_tcpv6_protosw:
inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
goto out;
}
@@ -2439,5 +2445,5 @@ void tcpv6_exit(void)
{
unregister_pernet_subsys(&tcpv6_net_ops);
inet6_unregister_protosw(&tcpv6_protosw);
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}
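
tcpv6_init() above no longer keeps a file-local static inet6_protocol: the handler table is written into net_hotdata at init time, keeping receive-path hot data together, and is registered and removed from there; the UDP protocol and the TCP/UDP offload tables below follow the same pattern. A sketch of the shape of that change, with hypothetical handlers standing in for tcp_v6_rcv/tcp_v6_err:

    #include <net/hotdata.h>
    #include <net/protocol.h>

    static int demo_v6_rcv(struct sk_buff *skb);
    static int demo_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                           u8 type, u8 code, int offset, __be32 info);

    static int __init demo_v6_init(void)
    {
        net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
            .handler     = demo_v6_rcv,
            .err_handler = demo_v6_err,
            .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
        };
        return inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    }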
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index bf0c957e4b5e..ae2da28f9dfb 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,31 +7,84 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *hdr;
+ struct sk_buff *p;
+ struct sock *sk;
+ struct net *net;
+ int iif, sdif;
+
+ if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
+ return;
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet6_get_iif_sdif(skb, &iif, &sdif);
+ hdr = skb_gro_network_header(skb);
+ net = dev_net(skb->dev);
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
+ struct tcphdr *th;
+
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
skb_gro_checksum_validate(skb, IPPROTO_TCP,
- ip6_gro_compute_pseudo)) {
- NAPI_GRO_CB(skb)->flush = 1;
- return NULL;
- }
+ ip6_gro_compute_pseudo))
+ goto flush;
+
+ th = tcp_gro_pull_header(skb);
+ if (!th)
+ goto flush;
- return tcp_gro_receive(head, skb);
+ tcp6_check_fraglist_gro(head, skb, th);
+
+ return tcp_gro_receive(head, skb, th);
+
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
struct tcphdr *th = tcp_hdr(skb);
+ if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
@@ -40,6 +93,70 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
return 0;
}
+static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+ struct in6_addr *oldip,
+ const struct in6_addr *newip,
+ __be16 *oldport, __be16 newport)
+{
+ struct tcphdr *th = tcp_hdr(seg);
+
+ if (!ipv6_addr_equal(oldip, newip)) {
+ inet_proto_csum_replace16(&th->check, seg,
+ oldip->s6_addr32,
+ newip->s6_addr32,
+ true);
+ *oldip = *newip;
+ }
+
+ if (*oldport == newport)
+ return;
+
+ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ *oldport = newport;
+}
+
+static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct tcphdr *th;
+ const struct ipv6hdr *iph;
+ struct sk_buff *seg;
+ struct tcphdr *th2;
+ struct ipv6hdr *iph2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ipv6_hdr(seg);
+ th2 = tcp_hdr(seg->next);
+ iph2 = ipv6_hdr(seg->next);
+
+ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ipv6_hdr(seg);
+
+ __tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+ &th2->source, th->source);
+ __tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+ &th2->dest, th->dest);
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv6_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -51,6 +168,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
+ return __tcp6_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
@@ -66,15 +192,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
return tcp_gso_segment(skb, features);
}
-static const struct net_offload tcpv6_offload = {
- .callbacks = {
- .gso_segment = tcp6_gso_segment,
- .gro_receive = tcp6_gro_receive,
- .gro_complete = tcp6_gro_complete,
- },
-};
int __init tcpv6_offload_init(void)
{
- return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
+ net_hotdata.tcpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp6_gso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}
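
When a fraglist GSO skb is split back into segments, __tcpv6_gso_segment_list_csum() above patches each segment's addresses and ports with incremental checksum updates instead of recomputing the whole checksum. A small sketch of that primitive for a destination rewrite only; the helper name is hypothetical.

    #include <linux/tcp.h>
    #include <net/checksum.h>
    #include <net/ipv6.h>

    static void demo_rewrite_dst(struct sk_buff *skb, struct ipv6hdr *ip6,
                                 struct tcphdr *th,
                                 const struct in6_addr *newip, __be16 newport)
    {
        if (!ipv6_addr_equal(&ip6->daddr, newip)) {
            /* address is part of the pseudo-header: pseudohdr = true */
            inet_proto_csum_replace16(&th->check, skb,
                                      ip6->daddr.s6_addr32,
                                      newip->s6_addr32, true);
            ip6->daddr = *newip;
        }

        if (th->dest != newport) {
            /* port only lives in the TCP header: pseudohdr = false */
            inet_proto_csum_replace2(&th->check, skb, th->dest,
                                     newport, false);
            th->dest = newport;
        }
    }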
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f2249b4cd5f..c6ea438b5c75 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>
+#include <trace/events/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
@@ -45,7 +46,6 @@
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
-#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -79,9 +79,6 @@ u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *faddr,
const __be16 fport)
{
- static u32 udp6_ehash_secret __read_mostly;
- static u32 udp_ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
@@ -113,11 +110,22 @@ void udp_v6_rehash(struct sock *sk)
u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
+ u16 new_hash4;
- udp_lib_rehash(sk, new_hash);
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
+ new_hash4 = udp_ehashfn(sock_net(sk),
+ sk->sk_rcv_saddr, sk->sk_num,
+ sk->sk_daddr, sk->sk_dport);
+ } else {
+ new_hash4 = udp6_ehashfn(sock_net(sk),
+ &sk->sk_v6_rcv_saddr, sk->sk_num,
+ &sk->sk_v6_daddr, sk->sk_dport);
+ }
+
+ udp_lib_rehash(sk, new_hash, new_hash4);
}
-static int compute_score(struct sock *sk, struct net *net,
+static int compute_score(struct sock *sk, const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
int dif, int sdif)
@@ -162,8 +170,51 @@ static int compute_score(struct sock *sk, struct net *net,
return score;
}
+/**
+ * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
+ * @net: Network namespace
+ * @saddr: Source address, network order
+ * @sport: Source port, network order
+ * @daddr: Destination address, network order
+ * @hnum: Destination port, host order
+ * @dif: Destination interface index
+ * @sdif: Destination bridge port index, if relevant
+ * @udptable: Set of UDP hash tables
+ *
+ * Simplified lookup to be used as fallback if no sockets are found due to a
+ * potential race between (receive) address change, and lookup happening before
+ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
+ * result sockets, because if we have one, we don't need the fallback at all.
+ *
+ * Called under rcu_read_lock().
+ *
+ * Return: socket with highest matching score if any, NULL if none
+ */
+static struct sock *udp6_lib_lookup1(const struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned int hnum, int dif, int sdif,
+ const struct udp_table *udptable)
+{
+ unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot = &udptable->hash[slot];
+ struct sock *sk, *result = NULL;
+ int score, badness = 0;
+
+ sk_for_each_rcu(sk, &hslot->head) {
+ score = compute_score(sk, net,
+ saddr, sport, daddr, hnum, dif, sdif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ }
+ }
+
+ return result;
+}
+
/* called with rcu_read_lock() */
-static struct sock *udp6_lib_lookup2(struct net *net,
+static struct sock *udp6_lib_lookup2(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum,
int dif, int sdif, struct udp_hslot *hslot2,
@@ -171,15 +222,21 @@ static struct sock *udp6_lib_lookup2(struct net *net,
{
struct sock *sk, *result;
int score, badness;
+ bool need_rescore;
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ need_rescore = false;
+rescore:
+ score = compute_score(need_rescore ? result : sk, net, saddr,
+ sport, daddr, hnum, dif, sdif);
if (score > badness) {
badness = score;
+ if (need_rescore)
+ continue;
+
if (sk->sk_state == TCP_ESTABLISHED) {
result = sk;
continue;
@@ -200,28 +257,108 @@ static struct sock *udp6_lib_lookup2(struct net *net,
if (IS_ERR(result))
continue;
- badness = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ /* compute_score is too long of a function to be
+ * inlined, and calling it again here yields
+ * measureable overhead for some
+ * workloads. Work around it by jumping
+ * backwards to rescore 'result'.
+ */
+ need_rescore = true;
+ goto rescore;
}
}
return result;
}
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static struct sock *udp6_lib_lookup4(const struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned int hnum, int dif, int sdif,
+ struct udp_table *udptable)
+{
+ return NULL;
+}
+
+static void udp6_hash4(struct sock *sk)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+static struct sock *udp6_lib_lookup4(const struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned int hnum, int dif, int sdif,
+ struct udp_table *udptable)
+{
+ const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
+ const struct hlist_nulls_node *node;
+ struct udp_hslot *hslot4;
+ unsigned int hash4, slot;
+ struct udp_sock *up;
+ struct sock *sk;
+
+ hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
+ slot = hash4 & udptable->mask;
+ hslot4 = &udptable->hash4[slot];
+
+begin:
+ udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
+ sk = (struct sock *)up;
+ if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
+ return sk;
+ }
+
+ /* if the nulls value we got at the end of this lookup is not the
+ * expected one, we must restart lookup. We probably met an item that
+ * was moved to another chain due to rehash.
+ */
+ if (get_nulls_value(node) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+static void udp6_hash4(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ unsigned int hash;
+
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
+ udp4_hash4(sk);
+ return;
+ }
+
+ if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ return;
+
+ hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
+ &sk->sk_v6_daddr, sk->sk_dport);
+
+ udp_lib_hash4(sk, hash);
+}
+#endif /* CONFIG_BASE_SMALL */
+
/* rcu_read_lock() must be held */
-struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *udptable,
struct sk_buff *skb)
{
unsigned short hnum = ntohs(dport);
- unsigned int hash2, slot2;
struct udp_hslot *hslot2;
struct sock *result, *sk;
+ unsigned int hash2;
hash2 = ipv6_portaddr_hash(net, daddr, hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
+
+ if (udp_has_hash4(hslot2)) {
+ result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
+ dif, sdif, udptable);
+ if (result) /* udp6_lib_lookup4 returns sk or NULL */
+ return result;
+ }
/* Lookup connected or non-wildcard sockets */
result = udp6_lib_lookup2(net, saddr, sport,
@@ -248,12 +385,18 @@ struct sock *__udp6_lib_lookup(struct net *net,
/* Lookup wildcard sockets */
hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif, sdif,
hslot2, skb);
+ if (!IS_ERR_OR_NULL(result))
+ goto done;
+
+ /* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
+ result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
+ udptable);
+
done:
if (IS_ERR(result))
return NULL;
@@ -275,7 +418,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
@@ -290,7 +434,7 @@ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
* Does increment socket refcount.
*/
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
-struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
struct sock *sk;
@@ -450,7 +594,7 @@ csum_copy_err:
goto try_again;
}
-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
static_branch_inc(&udpv6_encap_needed_key);
@@ -661,8 +805,8 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
drop_reason = SKB_DROP_REASON_PROTO_MEM;
}
UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- kfree_skb_reason(skb, drop_reason);
- trace_udp_fail_queue_rcv_skb(rc, sk);
+ trace_udp_fail_queue_rcv_skb(rc, sk, skb);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -765,7 +909,7 @@ csum_error:
drop:
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -849,7 +993,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
udptable->mask;
hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udptable->hash2[hash2];
+ hslot = &udptable->hash2[hash2].hslot;
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
@@ -900,11 +1044,8 @@ start_lookup:
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
- if (udp_sk_rx_dst_set(sk, dst)) {
- const struct rt6_info *rt = (const struct rt6_info *)dst;
-
- sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
- }
+ if (udp_sk_rx_dst_set(sk, dst))
+ sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
@@ -932,8 +1073,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
const struct in6_addr *saddr, *daddr;
struct net *net = dev_net(skb->dev);
+ struct sock *sk = NULL;
struct udphdr *uh;
- struct sock *sk;
bool refcounted;
u32 ulen = 0;
@@ -1025,7 +1166,7 @@ no_sk:
__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
short_packet:
@@ -1046,7 +1187,7 @@ csum_error:
__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
}
@@ -1058,14 +1199,13 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
{
struct udp_table *udptable = net->ipv4.udp_table;
unsigned short hnum = ntohs(loc_port);
- unsigned int hash2, slot2;
struct udp_hslot *hslot2;
+ unsigned int hash2;
__portpair ports;
struct sock *sk;
hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
- slot2 = hash2 & udptable->mask;
- hslot2 = &udptable->hash2[slot2];
+ hslot2 = udp_hashslot2(udptable, hash2);
ports = INET_COMBINED_PORTS(rmt_port, hnum);
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
@@ -1101,11 +1241,12 @@ void udp_v6_early_demux(struct sk_buff *skb)
else
return;
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -1161,6 +1302,18 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}
+static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip6_datagram_connect(sk, uaddr, addr_len);
+ if (!res)
+ udp6_hash4(sk);
+ release_sock(sk);
+ return res;
+}
+
/**
* udp6_hwcsum_outgoing - handle outgoing HW checksumming
* @sk: socket we are sending on
@@ -1236,9 +1389,9 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize) {
+ if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
kfree_skb(skb);
- return -EINVAL;
+ return -EMSGSIZE;
}
if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
@@ -1248,8 +1401,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}
@@ -1259,8 +1411,10 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
+
+ /* Don't checksum the payload, skb will get segmented */
+ goto csum_partial;
}
- goto csum_partial;
}
if (is_udplite)
@@ -1344,6 +1498,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.gso_size = READ_ONCE(up->gso_size);
ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ ipc6.sockc.priority = READ_ONCE(sk->sk_priority);
/* destination address check */
if (sin6) {
@@ -1476,9 +1631,11 @@ do_udp_sendmsg:
ipc6.opt = opt;
err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
&ipc6);
+ connected = false;
+ }
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1490,7 +1647,6 @@ do_udp_sendmsg:
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
- connected = false;
}
if (!opt) {
opt = txopt_get(np);
@@ -1573,7 +1729,7 @@ back_from_confirm:
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc6,
- (struct rt6_info *)dst,
+ dst_rt6_info(dst),
msg->msg_flags, &cork);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
@@ -1600,7 +1756,7 @@ do_append_data:
ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
- &ipc6, fl6, (struct rt6_info *)dst,
+ &ipc6, fl6, dst_rt6_info(dst),
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_v6_flush_pending_frames(sk);
@@ -1702,11 +1858,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
-static const struct inet6_protocol udpv6_protocol = {
- .handler = udpv6_rcv,
- .err_handler = udpv6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@@ -1759,7 +1910,7 @@ struct proto udpv6_prot = {
.owner = THIS_MODULE,
.close = udp_lib_close,
.pre_connect = udpv6_pre_connect,
- .connect = ip6_datagram_connect,
+ .connect = udpv6_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.init = udpv6_init_sock,
@@ -1803,7 +1954,12 @@ int __init udpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
+ net_hotdata.udpv6_protocol = (struct inet6_protocol) {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
@@ -1814,12 +1970,12 @@ out:
return ret;
out_udpv6_protocol:
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}
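
udp6_lib_lookup4() above walks an hlist_nulls chain under RCU and restarts the walk if the nulls value it ends on does not match the slot it started from, which catches entries rehashed to another chain mid-walk. The idiom in isolation, with a hypothetical entry type; it assumes each chain was initialised with INIT_HLIST_NULLS_HEAD(head, slot) and that the caller holds rcu_read_lock().

    #include <linux/rculist_nulls.h>

    struct demo_entry {
        struct hlist_nulls_node node;
        u32 key;
    };

    static struct demo_entry *demo_lookup(struct hlist_nulls_head *head,
                                          unsigned int slot, u32 key)
    {
        const struct hlist_nulls_node *node;
        struct demo_entry *e;

    begin:
        hlist_nulls_for_each_entry_rcu(e, node, head, node) {
            if (e->key == key)
                return e;
        }
        /* If the nulls value at the end of the chain is not the slot we
         * started from, an entry may have moved to another chain due to
         * a concurrent rehash: restart the lookup.
         */
        if (get_nulls_value(node) != slot)
            goto begin;
        return NULL;
    }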
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b95ba241ebe..b41152dd4246 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -164,7 +164,8 @@ flush:
INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
@@ -174,13 +175,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
- skb->csum_level++;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = 0;
- }
+ __skb_incr_checksum_unnecessary(skb);
return 0;
}
@@ -192,20 +187,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
-static const struct net_offload udpv6_offload = {
- .callbacks = {
- .gso_segment = udp6_ufo_fragment,
- .gro_receive = udp6_gro_receive,
- .gro_complete = udp6_gro_complete,
- },
-};
-
-int udpv6_offload_init(void)
+int __init udpv6_offload_init(void)
{
- return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
+ net_hotdata.udpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp6_ufo_fragment,
+ .gro_receive = udp6_gro_receive,
+ .gro_complete = udp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
- return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+ return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 6e36e5047fba..4abc5e9d6322 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,7 +43,7 @@ static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- int nhlen = skb->data - skb_network_header(skb);
+ int nhlen = -skb_network_offset(skb);
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
@@ -58,7 +58,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);
if (xo && (xo->flags & XFRM_GRO)) {
- skb_mac_header_rebuild(skb);
+ /* The full l2 header needs to be preserved so that re-injecting the packet at l2
+ * works correctly in the presence of vlan tags.
+ */
+ skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
+ skb_reset_network_header(skb);
skb_reset_transport_header(skb);
return 0;
}
@@ -109,19 +113,6 @@ static int __xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull
/* Must be an IKE packet.. pass it through */
return 1;
break;
- case UDP_ENCAP_ESPINUDP_NON_IKE:
- /* Check if this is a keepalive packet. If so, eat it. */
- if (len == 1 && udpdata[0] == 0xff) {
- return -EINVAL;
- } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
- udpdata32[0] == 0 && udpdata32[1] == 0) {
-
- /* ESP Packet with Non-IKE marker */
- len = sizeof(struct udphdr) + 2 * sizeof(u32);
- } else
- /* Must be an IKE packet.. pass it through */
- return 1;
- break;
}
/* At this point we are sure that this is an ESPinUDP packet,
@@ -279,6 +270,13 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
if (!x)
continue;
+ if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
+ xfrm_state_put(x);
+ x = NULL;
+ continue;
+ }
+
spin_lock(&x->lock);
if ((!i || (x->props.flags & XFRM_STATE_WILDRECV)) &&
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5f7b1fdbffe6..b3d5d1f266ee 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -82,14 +82,14 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
toobig = skb->len > mtu && !skb_is_gso(skb);
- if (toobig && xfrm6_local_dontfrag(skb->sk)) {
+ if (toobig && xfrm6_local_dontfrag(sk)) {
xfrm6_local_rxpmtu(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
} else if (toobig && xfrm6_noneed_fragment(skb)) {
skb->ignore_df = 1;
goto skip_frag;
- } else if (!skb->ignore_df && toobig && skb->sk) {
+ } else if (!skb->ignore_df && toobig && sk) {
xfrm_local_error(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 42fb6996b077..1f19b6f14484 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -23,23 +23,24 @@
#include <net/ip6_route.h>
#include <net/l3mdev.h>
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- u32 mark)
+static struct dst_entry *xfrm6_dst_lookup(const struct xfrm_dst_lookup_params *params)
{
struct flowi6 fl6;
struct dst_entry *dst;
int err;
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
- fl6.flowi6_mark = mark;
- memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
- if (saddr)
- memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
+ fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(params->net,
+ params->oif);
+ fl6.flowi6_mark = params->mark;
+ memcpy(&fl6.daddr, params->daddr, sizeof(fl6.daddr));
+ if (params->saddr)
+ memcpy(&fl6.saddr, params->saddr, sizeof(fl6.saddr));
- dst = ip6_route_output(net, NULL, &fl6);
+ fl6.flowi6_proto = params->ipproto;
+ fl6.uli = params->uli;
+
+ dst = ip6_route_output(params->net, NULL, &fl6);
err = dst->error;
if (dst->error) {
@@ -50,19 +51,25 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
return dst;
}
-static int xfrm6_get_saddr(struct net *net, int oif,
- xfrm_address_t *saddr, xfrm_address_t *daddr,
- u32 mark)
+static int xfrm6_get_saddr(xfrm_address_t *saddr,
+ const struct xfrm_dst_lookup_params *params)
{
struct dst_entry *dst;
struct net_device *dev;
+ struct inet6_dev *idev;
- dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
+ dst = xfrm6_dst_lookup(params);
if (IS_ERR(dst))
return -EHOSTUNREACH;
- dev = ip6_dst_idev(dst)->dev;
- ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
+ idev = ip6_dst_idev(dst);
+ if (!idev) {
+ dst_release(dst);
+ return -EHOSTUNREACH;
+ }
+ dev = idev->dev;
+ ipv6_dev_get_saddr(dev_net(dev), dev, &params->daddr->in6, 0,
+ &saddr->in6);
dst_release(dst);
return 0;
}
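
struct xfrm_dst_lookup_params is defined in a header outside this diff; judging from how it is used here and in the IPv4 counterpart, it bundles at least the following fields (list inferred from usage, not copied from the header):

struct xfrm_dst_lookup_params {
        struct net *net;                /* namespace for the route lookup */
        int oif;                        /* output interface hint */
        u32 mark;                       /* skb/policy mark */
        xfrm_address_t *saddr;          /* optional source address */
        xfrm_address_t *daddr;          /* destination address */
        u8 ipproto;                     /* upper-layer protocol of the flow */
        union flowi_uli uli;            /* ports/SPI, copied into the flowi */
};
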
@@ -70,7 +77,7 @@ static int xfrm6_get_saddr(struct net *net, int oif,
static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
- struct rt6_info *rt = (struct rt6_info *)xdst->route;
+ struct rt6_info *rt = dst_rt6_info(xdst->route);
xdst->u.dst.dev = dev;
netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
@@ -184,7 +191,6 @@ static struct ctl_table xfrm6_policy_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
static int __net_init xfrm6_net_sysctl_init(struct net *net)
@@ -218,7 +224,7 @@ err_alloc:
static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
if (!net->ipv6.sysctl.xfrm6_hdr)
return;
@@ -285,8 +291,14 @@ int __init xfrm6_init(void)
ret = register_pernet_subsys(&xfrm6_net_ops);
if (ret)
goto out_protocol;
+
+ ret = xfrm_nat_keepalive_init(AF_INET6);
+ if (ret)
+ goto out_nat_keepalive;
out:
return ret;
+out_nat_keepalive:
+ unregister_pernet_subsys(&xfrm6_net_ops);
out_protocol:
xfrm6_protocol_fini();
out_state:
@@ -298,6 +310,7 @@ out_policy:
void xfrm6_fini(void)
{
+ xfrm_nat_keepalive_fini(AF_INET6);
unregister_pernet_subsys(&xfrm6_net_ops);
xfrm6_protocol_fini();
xfrm6_policy_fini();
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f6cb94f82cc3..bf140ef781c1 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -355,10 +355,7 @@ static int __init xfrm6_tunnel_init(void)
{
int rv;
- xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
- sizeof(struct xfrm6_tunnel_spi),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
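
The KMEM_CACHE() helper used above (include/linux/slab.h) derives the cache name, object size and alignment from the struct type itself; it expands to roughly:

#define KMEM_CACHE(__struct, __flags)                                   \
        kmem_cache_create(#__struct, sizeof(struct __struct),           \
                          __alignof__(struct __struct), (__flags), NULL)
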
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 498a0c35b7bb..7929df08d4e0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -184,7 +184,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
@@ -795,7 +795,7 @@ done:
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
@@ -809,7 +809,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
goto done;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
/* Wait for an incoming connection */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -1060,13 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
@@ -1162,13 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
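
dma32_t is an s390 typed wrapper for the 32-bit buffer addresses carried in IUCV parameter blocks, so conversions to and from kernel pointers go through explicit helpers rather than bare (u32)(addr_t) casts. Usage is roughly:

dma32_t addr;
void *buf = skb->data;

addr = virt_to_dma32(buf);      /* kernel virtual address -> 32-bit IUCV buffer address */
buf  = dma32_to_virt(addr);     /* and back, as in iucv_message_receive_iprmdata() */
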
@@ -1238,7 +1236,9 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
return -EOPNOTSUPP;
/* receive/dequeue next skb:
- * the function understands MSG_PEEK and, thus, does not dequeue skb */
+ * the function understands MSG_PEEK and, thus, does not dequeue skb
+ * only refcount is increased.
+ */
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -1254,9 +1254,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
cskb = skb;
if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return -EFAULT;
+ err = -EFAULT;
+ goto err_out;
}
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
@@ -1273,11 +1272,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class);
- if (err) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return err;
- }
+ if (err)
+ goto err_out;
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
@@ -1333,8 +1329,18 @@ done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
+ if (flags & MSG_PEEK)
+ skb_unref(skb);
return copied;
+
+err_out:
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ else
+ skb_unref(skb);
+
+ return err;
}
static inline __poll_t iucv_accept_poll(struct sock *parent)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index b0b3e9c5af44..d3e9efab7f4b 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -62,7 +62,7 @@
#define IUCV_IPNORPY 0x10
#define IUCV_IPALL 0x80
-static int iucv_bus_match(struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, const struct device_driver *drv)
{
return 0;
}
@@ -73,8 +73,44 @@ const struct bus_type iucv_bus = {
};
EXPORT_SYMBOL(iucv_bus);
-struct device *iucv_root;
-EXPORT_SYMBOL(iucv_root);
+static struct device *iucv_root;
+
+static void iucv_release_device(struct device *device)
+{
+ kfree(device);
+}
+
+struct device *iucv_alloc_device(const struct attribute_group **attrs,
+ struct device_driver *driver,
+ void *priv, const char *fmt, ...)
+{
+ struct device *dev;
+ va_list vargs;
+ char buf[20];
+ int rc;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto out_error;
+ va_start(vargs, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, vargs);
+ rc = dev_set_name(dev, "%s", buf);
+ va_end(vargs);
+ if (rc)
+ goto out_error;
+ dev->bus = &iucv_bus;
+ dev->parent = iucv_root;
+ dev->driver = driver;
+ dev->groups = attrs;
+ dev->release = iucv_release_device;
+ dev_set_drvdata(dev, priv);
+ return dev;
+
+out_error:
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(iucv_alloc_device);
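
A hypothetical caller of the new helper could look like the following (identifiers are illustrative only; the real conversions of the s390 IUCV drivers are in separate patches):

static const struct attribute_group *foo_attr_groups[] = { NULL };

static int foo_register(void *priv, int id)
{
        struct device *dev;
        int rc;

        /* allocates the device and fills in bus, parent, release and drvdata */
        dev = iucv_alloc_device(foo_attr_groups, NULL, priv, "foo%d", id);
        if (!dev)
                return -ENOMEM;

        rc = device_register(dev);
        if (rc)
                put_device(dev);        /* iucv_release_device() frees it */
        return rc;
}
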
static int iucv_available;
@@ -210,7 +246,7 @@ struct iucv_cmd_dpl {
u8 iprmmsg[8];
u32 ipsrccls;
u32 ipmsgtag;
- u32 ipbfadr2;
+ dma32_t ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
@@ -226,11 +262,11 @@ struct iucv_cmd_db {
u8 iprcode;
u32 ipmsgid;
u32 iptrgcls;
- u32 ipbfadr1;
+ dma32_t ipbfadr1;
u32 ipbfln1f;
u32 ipsrccls;
u32 ipmsgtag;
- u32 ipbfadr2;
+ dma32_t ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
@@ -286,6 +322,7 @@ static union iucv_param *iucv_param_irq[NR_CPUS];
*/
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
+ unsigned long reg1 = virt_to_phys(parm);
int cc;
asm volatile(
@@ -296,7 +333,7 @@ static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
" srl %[cc],28\n"
: [cc] "=&d" (cc), "+m" (*parm)
: [reg0] "d" ((unsigned long)command),
- [reg1] "d" ((unsigned long)parm)
+ [reg1] "d" (reg1)
: "cc", "0", "1");
return cc;
}
@@ -431,7 +468,7 @@ static void iucv_declare_cpu(void *data)
/* Declare interrupt buffer. */
parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
+ parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]);
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
if (rc) {
char *err = "Unknown";
@@ -519,7 +556,7 @@ static void iucv_setmask_mp(void)
*/
static void iucv_setmask_up(void)
{
- cpumask_t cpumask;
+ static cpumask_t cpumask;
int cpu;
/* Disable all cpu but the first in cpu_irq_cpumask. */
@@ -627,23 +664,33 @@ static int iucv_cpu_online(unsigned int cpu)
static int iucv_cpu_down_prep(unsigned int cpu)
{
- cpumask_t cpumask;
+ cpumask_var_t cpumask;
+ int ret = 0;
if (!iucv_path_table)
return 0;
- cpumask_copy(&cpumask, &iucv_buffer_cpumask);
- cpumask_clear_cpu(cpu, &cpumask);
- if (cpumask_empty(&cpumask))
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(cpumask, &iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, cpumask);
+ if (cpumask_empty(cpumask)) {
/* Can't offline last IUCV enabled cpu. */
- return -EINVAL;
+ ret = -EINVAL;
+ goto __free_cpumask;
+ }
iucv_retrieve_cpu(NULL);
if (!cpumask_empty(&iucv_irq_cpumask))
- return 0;
+ goto __free_cpumask;
+
smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
iucv_allow_cpu, NULL, 1);
- return 0;
+
+__free_cpumask:
+ free_cpumask_var(cpumask);
+ return ret;
}
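
The switch to cpumask_var_t follows the standard pattern for keeping potentially large CPU masks off the stack when CONFIG_CPUMASK_OFFSTACK is enabled, roughly:

cpumask_var_t mask;

if (!alloc_cpumask_var(&mask, GFP_KERNEL))      /* only allocates with CPUMASK_OFFSTACK */
        return -ENOMEM;

cpumask_copy(mask, cpu_online_mask);
/* ... operate on mask ... */
free_cpumask_var(mask);
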
/**
@@ -1080,8 +1127,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path,
size = (size < 8) ? size : 8;
for (array = buffer; size > 0; array++) {
copy = min_t(size_t, size, array->length);
- memcpy((u8 *)(addr_t) array->address,
- rmmsg, copy);
+ memcpy(dma32_to_virt(array->address), rmmsg, copy);
rmmsg += copy;
size -= copy;
}
@@ -1123,7 +1169,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
@@ -1241,7 +1287,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfadr1 = virt_to_dma32(reply);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
@@ -1293,7 +1339,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
@@ -1378,7 +1424,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
- parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfadr2 = virt_to_dma32(answer);
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
@@ -1387,9 +1433,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
- parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfadr2 = virt_to_dma32(answer);
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
@@ -1903,6 +1949,6 @@ static void __exit iucv_exit(void)
subsys_initcall(iucv_init);
module_exit(iucv_exit);
-MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1184d40167b8..24aec295a51c 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -627,7 +627,8 @@ retry:
skb = txm->frag_skb;
}
- if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
+ if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
+ WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
ret = -EINVAL;
goto out;
}
@@ -637,8 +638,8 @@ retry:
msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
- skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
- msize);
+ (const struct bio_vec *)skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->nr_frags, msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);
do {
@@ -754,6 +755,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
!(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
int err = -EPIPE;
+ mutex_lock(&kcm->tx_mutex);
lock_sock(sk);
/* Per tcp_sendmsg this should be in poll */
@@ -925,6 +927,7 @@ partial_message:
KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
release_sock(sk);
+ mutex_unlock(&kcm->tx_mutex);
return copied;
out_error:
@@ -950,6 +953,7 @@ out_error:
sk->sk_write_space(sk);
release_sock(sk);
+ mutex_unlock(&kcm->tx_mutex);
return err;
}
@@ -1152,10 +1156,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
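
The reordering above matters because min_t(unsigned int, ...) casts the user-supplied length to unsigned before comparing, so a negative value used to be clamped to sizeof(int) and the later sign check became dead code. Illustration only (not kernel code):

/* Why the sign check must precede the unsigned clamp. */
static int clamp_optlen(int len)
{
        if (len < 0)            /* must be tested on the raw signed value */
                return -EINVAL;
        return min_t(unsigned int, len, sizeof(int));
        /* with the old order, len == -1 became 0xffffffff, min_t() returned
         * sizeof(int), and the negative-length check could never trigger
         */
}
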
@@ -1202,6 +1207,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
spin_unlock_bh(&mux->lock);
INIT_WORK(&kcm->tx_work, kcm_tx_work);
+ mutex_init(&kcm->tx_mutex);
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
@@ -1578,14 +1584,6 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return err;
}
-static void free_mux(struct rcu_head *rcu)
-{
- struct kcm_mux *mux = container_of(rcu,
- struct kcm_mux, rcu);
-
- kmem_cache_free(kcm_muxp, mux);
-}
-
static void release_mux(struct kcm_mux *mux)
{
struct kcm_net *knet = mux->knet;
@@ -1613,7 +1611,7 @@ static void release_mux(struct kcm_mux *mux)
knet->count--;
mutex_unlock(&knet->mutex);
- call_rcu(&mux->rcu, free_mux);
+ kfree_rcu(mux, rcu);
}
static void kcm_done(struct kcm_sock *kcm)
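
kfree_rcu() is equivalent to a call_rcu() whose callback does nothing but free the enclosing object, so the dedicated free_mux() callback can be dropped; the only requirement is an embedded struct rcu_head, which kcm_mux already has:

struct kcm_mux {
        /* ... */
        struct rcu_head rcu;    /* consumed by kfree_rcu() below */
};

        /* frees the mux after an RCU grace period, no callback needed */
        kfree_rcu(mux, rcu);
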
@@ -1877,15 +1875,11 @@ static int __init kcm_init(void)
{
int err = -ENOMEM;
- kcm_muxp = kmem_cache_create("kcm_mux_cache",
- sizeof(struct kcm_mux), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_muxp = KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN);
if (!kcm_muxp)
goto fail;
- kcm_psockp = kmem_cache_create("kcm_psock_cache",
- sizeof(struct kcm_psock), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_psockp = KMEM_CACHE(kcm_psock, SLAB_HWCACHE_ALIGN);
if (!kcm_psockp)
goto fail;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f79fb99271ed..c56bb4f451e6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1354,7 +1354,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
}
if (hdr->sadb_msg_seq) {
- x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
+ x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq, UINT_MAX);
if (x && !xfrm_addr_equal(&x->id.daddr, xdaddr, family)) {
xfrm_state_put(x);
x = NULL;
@@ -1362,7 +1362,8 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
}
if (!x)
- x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, proto, xdaddr, xsaddr, 1, family);
+ x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, UINT_MAX,
+ proto, xdaddr, xsaddr, 1, family);
if (x == NULL)
return -ENOENT;
@@ -1417,7 +1418,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb
if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
return 0;
- x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
+ x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq, UINT_MAX);
if (x == NULL)
return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8d21ff25f160..369a2f2e459c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -39,7 +39,6 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
-#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
@@ -61,7 +60,6 @@
#include <linux/atomic.h>
#include "l2tp_core.h"
-#include "trace.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -88,6 +86,11 @@
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
+#define L2TP_DEPTH_NESTING 2
+#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "L2TP requires its own lockdep subclass"
+#endif
+
/* Private data stored for received packets in the skb.
*/
struct l2tp_skb_cb {
@@ -107,99 +110,106 @@ struct l2tp_net {
/* Lock for write access to l2tp_tunnel_idr */
spinlock_t l2tp_tunnel_idr_lock;
struct idr l2tp_tunnel_idr;
- struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
- /* Lock for write access to l2tp_session_hlist */
- spinlock_t l2tp_session_hlist_lock;
+ /* Lock for write access to l2tp_v[23]_session_idr/htable */
+ spinlock_t l2tp_session_idr_lock;
+ struct idr l2tp_v2_session_idr;
+ struct idr l2tp_v3_session_idr;
+ struct hlist_head l2tp_v3_session_htable[16];
};
-#if IS_ENABLED(CONFIG_IPV6)
-static bool l2tp_sk_is_v6(struct sock *sk)
+static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
{
- return sk->sk_family == PF_INET6 &&
- !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
+ return ((u32)tunnel_id) << 16 | session_id;
}
-#endif
-static inline struct l2tp_net *l2tp_pernet(const struct net *net)
+static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
{
- return net_generic(net, l2tp_net_id);
+ return ((unsigned long)sk) + session_id;
}
-/* Session hash global list for L2TPv3.
- * The session_id SHOULD be random according to RFC3931, but several
- * L2TP implementations use incrementing session_ids. So we do a real
- * hash on the session_id, rather than a simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
+#if IS_ENABLED(CONFIG_IPV6)
+static bool l2tp_sk_is_v6(struct sock *sk)
{
- return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
+ return sk->sk_family == PF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
}
+#endif
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
+static struct l2tp_net *l2tp_pernet(const struct net *net)
{
- return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
+ return net_generic(net, l2tp_net_id);
}
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
+ struct sock *sk = tunnel->sock;
+
trace_free_tunnel(tunnel);
- sock_put(tunnel->sock);
- /* the tunnel is freed in the socket destructor */
+
+ if (sk) {
+ /* Disable udp encapsulation */
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ udp_sk(sk)->encap_rcv = NULL;
+ udp_sk(sk)->encap_destroy = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ tunnel->sock = NULL;
+ sock_put(sk);
+ }
+
+ kfree_rcu(tunnel, rcu);
}
static void l2tp_session_free(struct l2tp_session *session)
{
trace_free_session(session);
if (session->tunnel)
- l2tp_tunnel_dec_refcount(session->tunnel);
- kfree(session);
+ l2tp_tunnel_put(session->tunnel);
+ kfree_rcu(session, rcu);
}
-struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
+struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
{
- struct l2tp_tunnel *tunnel = sk->sk_user_data;
+ const struct net *net = sock_net(sk);
+ unsigned long tunnel_id, tmp;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_net *pn;
- if (tunnel)
- if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
- return NULL;
+ rcu_read_lock_bh();
+ pn = l2tp_pernet(net);
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel &&
+ tunnel->sock == sk &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
- return tunnel;
+ return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
-void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
-{
- refcount_inc(&tunnel->ref_count);
-}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
-
-void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
{
if (refcount_dec_and_test(&tunnel->ref_count))
l2tp_tunnel_free(tunnel);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
+EXPORT_SYMBOL_GPL(l2tp_tunnel_put);
-void l2tp_session_inc_refcount(struct l2tp_session *session)
-{
- refcount_inc(&session->ref_count);
-}
-EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
-
-void l2tp_session_dec_refcount(struct l2tp_session *session)
+void l2tp_session_put(struct l2tp_session *session)
{
if (refcount_dec_and_test(&session->ref_count))
l2tp_session_free(session);
}
-EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
+EXPORT_SYMBOL_GPL(l2tp_session_put);
/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
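
The lookups added throughout this file share one lockless pattern: find the object under rcu_read_lock_bh() and return it only if a reference can still be taken, otherwise treat it as already gone. A condensed sketch of that pattern (illustrative, not one specific function from the patch):

rcu_read_lock_bh();
obj = idr_find(&pn->some_idr, id);              /* RCU-protected lookup */
if (obj && refcount_inc_not_zero(&obj->ref_count)) {
        rcu_read_unlock_bh();
        return obj;                             /* caller must later *_put() it */
}
rcu_read_unlock_bh();
return NULL;                                    /* absent, or already being freed */
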
@@ -219,92 +229,202 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
-struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
{
struct l2tp_net *pn = l2tp_pernet(net);
- unsigned long tunnel_id, tmp;
- struct l2tp_tunnel *tunnel;
- int count = 0;
+ struct l2tp_tunnel *tunnel = NULL;
rcu_read_lock_bh();
- idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
- if (tunnel && ++count > nth &&
- refcount_inc_not_zero(&tunnel->ref_count)) {
+again:
+ tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
+ if (tunnel) {
+ if (refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
}
+ (*key)++;
+ goto again;
}
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);
-struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
- u32 session_id)
+struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
- struct hlist_head *session_list;
+ const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_session *session;
- session_list = l2tp_session_id_hash(tunnel, session_id);
-
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, hlist)
- if (session->session_id == session_id) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
+ session = idr_find(&pn->l2tp_v3_session_idr, session_id);
+ if (session && !hash_hashed(&session->hlist) &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
- return session;
+ /* If we get here and session is non-NULL, the session_id
+ * collides with one in another tunnel. If sk is non-NULL,
+ * find the session matching sk.
+ */
+ if (session && sk) {
+ unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);
+
+ hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
+ hlist, key) {
+ /* session->tunnel may be NULL if another thread is in
+ * l2tp_session_register and has added an item to
+ * l2tp_v3_session_htable but hasn't yet added the
+ * session to its tunnel's session_list.
+ */
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (session->session_id == session_id &&
+ tunnel && tunnel->sock == sk &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
}
+ }
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
+EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
-struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
+struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
- struct hlist_head *session_list;
+ u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
+ const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_session *session;
- session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
+ rcu_read_lock_bh();
+ session = idr_find(&pn->l2tp_v2_session_idr, session_key);
+ if (session && refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
+
+struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, u32 session_id)
+{
+ if (pver == L2TP_HDR_VER_2)
+ return l2tp_v2_session_get(net, tunnel_id, session_id);
+ else
+ return l2tp_v3_session_get(net, sk, session_id);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
+ u16 tid,
+ unsigned long *key)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_session *session = NULL;
+
+ /* Start searching within the range of the tid */
+ if (*key == 0)
+ *key = l2tp_v2_session_key(tid, 0);
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, global_hlist)
- if (session->session_id == session_id) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
+again:
+ session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
+ if (session) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+ /* ignore sessions with id 0 as they are internal for pppol2tp */
+ if (session->session_id == 0) {
+ (*key)++;
+ goto again;
+ }
+
+ if (tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
return session;
}
+
+ (*key)++;
+ if (tunnel->tunnel_id == tid)
+ goto again;
+ }
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_get);
-struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
+static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
+ u32 tid, struct sock *sk,
+ unsigned long *key)
{
- int hash;
- struct l2tp_session *session;
- int count = 0;
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_session *session = NULL;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- if (++count > nth) {
- l2tp_session_inc_refcount(session);
+again:
+ session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
+ if (session && !hash_hashed(&session->hlist)) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (tunnel && tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+
+ (*key)++;
+ goto again;
+ }
+
+ /* If we get here and session is non-NULL, the IDR entry may be one
+ * where the session_id collides with one in another tunnel. Check
+ * session_htable for a match. There can only be one session of a given
+ * ID per tunnel so we can return as soon as a match is found.
+ */
+ if (session && hash_hashed(&session->hlist)) {
+ unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
+ u32 sid = session->session_id;
+
+ hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
+ hlist, hkey) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (session->session_id == sid &&
+ tunnel && tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
rcu_read_unlock_bh();
return session;
}
}
+
+ /* If no match found, the colliding session ID isn't in our
+ * tunnel so try the next session ID.
+ */
+ (*key)++;
+ goto again;
}
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+
+struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, unsigned long *key)
+{
+ if (pver == L2TP_HDR_VER_2)
+ return l2tp_v2_session_get_next(net, tunnel_id, key);
+ else
+ return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get_next);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
@@ -313,86 +433,190 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname)
{
struct l2tp_net *pn = l2tp_pernet(net);
- int hash;
+ unsigned long tunnel_id, tmp;
struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
- hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
- if (!strcmp(session->ifname, ifname)) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
-
- return session;
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel) {
+ list_for_each_entry_rcu(session, &tunnel->session_list, list) {
+ if (!strcmp(session->ifname, ifname)) {
+ refcount_inc(&session->ref_count);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
}
}
}
-
rcu_read_unlock_bh();
return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
+ struct l2tp_session *session)
+{
+ refcount_inc(&session->ref_count);
+ WARN_ON_ONCE(session->coll_list);
+ session->coll_list = clist;
+ spin_lock(&clist->lock);
+ list_add(&session->clist, &clist->list);
+ spin_unlock(&clist->lock);
+}
+
+static int l2tp_session_collision_add(struct l2tp_net *pn,
+ struct l2tp_session *session1,
+ struct l2tp_session *session2)
+{
+ struct l2tp_session_coll_list *clist;
+
+ lockdep_assert_held(&pn->l2tp_session_idr_lock);
+
+ if (!session2)
+ return -EEXIST;
+
+ /* If existing session is in IP-encap tunnel, refuse new session */
+ if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
+ return -EEXIST;
+
+ clist = session2->coll_list;
+ if (!clist) {
+ /* First collision. Allocate list to manage the collided sessions
+ * and add the existing session to the list.
+ */
+ clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
+ if (!clist)
+ return -ENOMEM;
+
+ spin_lock_init(&clist->lock);
+ INIT_LIST_HEAD(&clist->list);
+ refcount_set(&clist->ref_count, 1);
+ l2tp_session_coll_list_add(clist, session2);
+ }
+
+ /* If existing session isn't already in the session hlist, add it. */
+ if (!hash_hashed(&session2->hlist))
+ hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
+ session2->hlist_key);
+
+ /* Add new session to the hlist and collision list */
+ hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
+ session1->hlist_key);
+ refcount_inc(&clist->ref_count);
+ l2tp_session_coll_list_add(clist, session1);
+
+ return 0;
+}
+
+static void l2tp_session_collision_del(struct l2tp_net *pn,
+ struct l2tp_session *session)
+{
+ struct l2tp_session_coll_list *clist = session->coll_list;
+ unsigned long session_key = session->session_id;
+ struct l2tp_session *session2;
+
+ lockdep_assert_held(&pn->l2tp_session_idr_lock);
+
+ hash_del_rcu(&session->hlist);
+
+ if (clist) {
+ /* Remove session from its collision list. If there
+ * are other sessions with the same ID, replace this
+ * session's IDR entry with that session, otherwise
+ * remove the IDR entry. If this is the last session,
+ * the collision list data is freed.
+ */
+ spin_lock(&clist->lock);
+ list_del_init(&session->clist);
+ session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
+ if (session2) {
+ void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
+
+ WARN_ON_ONCE(IS_ERR_VALUE(old));
+ } else {
+ void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);
+
+ WARN_ON_ONCE(removed != session);
+ }
+ session->coll_list = NULL;
+ spin_unlock(&clist->lock);
+ if (refcount_dec_and_test(&clist->ref_count))
+ kfree(clist);
+ l2tp_session_put(session);
+ }
+}
+
int l2tp_session_register(struct l2tp_session *session,
struct l2tp_tunnel *tunnel)
{
- struct l2tp_session *session_walk;
- struct hlist_head *g_head;
- struct hlist_head *head;
- struct l2tp_net *pn;
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+ struct l2tp_session *other_session = NULL;
+ void *old = NULL;
+ u32 session_key;
int err;
- head = l2tp_session_id_hash(tunnel, session->session_id);
+ spin_lock_bh(&tunnel->list_lock);
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
- spin_lock_bh(&tunnel->hlist_lock);
if (!tunnel->acpt_newsess) {
err = -ENODEV;
- goto err_tlock;
+ goto out;
}
- hlist_for_each_entry(session_walk, head, hlist)
- if (session_walk->session_id == session->session_id) {
- err = -EEXIST;
- goto err_tlock;
- }
-
if (tunnel->version == L2TP_HDR_VER_3) {
- pn = l2tp_pernet(tunnel->l2tp_net);
- g_head = l2tp_session_id_hash_2(pn, session->session_id);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
-
+ session_key = session->session_id;
+ err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
+ &session_key, session_key, GFP_ATOMIC);
/* IP encap expects session IDs to be globally unique, while
- * UDP encap doesn't.
+ * UDP encap doesn't. This isn't per the RFC, which says that
+ * sessions are identified only by the session ID, but is to
+ * support existing userspace which depends on it.
*/
- hlist_for_each_entry(session_walk, g_head, global_hlist)
- if (session_walk->session_id == session->session_id &&
- (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
- tunnel->encap == L2TP_ENCAPTYPE_IP)) {
- err = -EEXIST;
- goto err_tlock_pnlock;
- }
-
- l2tp_tunnel_inc_refcount(tunnel);
- hlist_add_head_rcu(&session->global_hlist, g_head);
-
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ other_session = idr_find(&pn->l2tp_v3_session_idr,
+ session_key);
+ err = l2tp_session_collision_add(pn, session,
+ other_session);
+ }
} else {
- l2tp_tunnel_inc_refcount(tunnel);
+ session_key = l2tp_v2_session_key(tunnel->tunnel_id,
+ session->session_id);
+ err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
+ &session_key, session_key, GFP_ATOMIC);
}
- hlist_add_head_rcu(&session->hlist, head);
- spin_unlock_bh(&tunnel->hlist_lock);
+ if (err) {
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ goto out;
+ }
- trace_register_session(session);
+ refcount_inc(&tunnel->ref_count);
+ WRITE_ONCE(session->tunnel, tunnel);
+ list_add_rcu(&session->list, &tunnel->session_list);
- return 0;
+ /* this makes session available to lockless getters */
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ if (!other_session)
+ old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
+ } else {
+ old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
+ }
-err_tlock_pnlock:
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-err_tlock:
- spin_unlock_bh(&tunnel->hlist_lock);
+ /* old should be NULL, unless something removed or modified
+ * the IDR entry after our idr_alloc_u32 above (which shouldn't
+ * happen).
+ */
+ WARN_ON_ONCE(old);
+out:
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
+ spin_unlock_bh(&tunnel->list_lock);
+
+ if (!err)
+ trace_register_session(session);
return err;
}
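
l2tp_session_register() now publishes sessions in two phases so lockless readers never see a half-initialised object: the key is first reserved with a NULL pointer under the spinlocks, the session is linked to its tunnel, and only then is the real pointer installed. In outline:

/* phase 1: reserve the key; readers that find NULL treat it as "no session" */
err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL, &key, key, GFP_ATOMIC);
if (err)
        goto out;

/* ... grab a tunnel reference, set session->tunnel, add to session_list ... */

/* phase 2: publish the fully initialised session */
old = idr_replace(&pn->l2tp_v3_session_idr, session, key);
WARN_ON_ONCE(old);      /* the placeholder installed in phase 1 was NULL */
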
@@ -693,7 +917,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
if (!session->lns_mode && !session->send_seq) {
trace_session_seqnum_lns_enable(session);
session->send_seq = 1;
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version,
+ tunnel->encap);
}
} else {
/* No sequence numbers.
@@ -714,7 +939,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
if (!session->lns_mode && session->send_seq) {
trace_session_seqnum_lns_disable(session);
session->send_seq = 0;
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version,
+ tunnel->encap);
} else if (session->send_seq) {
pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
session->name);
@@ -785,18 +1011,14 @@ static void l2tp_session_queue_purge(struct l2tp_session *session)
}
}
-/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_session *session = NULL;
+ struct l2tp_tunnel *tunnel = NULL;
+ struct net *net = sock_net(sk);
unsigned char *ptr, *optr;
u16 hdrflags;
- u32 tunnel_id, session_id;
u16 version;
int length;
@@ -806,11 +1028,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
__skb_pull(skb, sizeof(struct udphdr));
/* Short packet? */
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
- pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
- tunnel->name, skb->len);
- goto invalid;
- }
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
+ goto pass;
/* Point to L2TP header */
optr = skb->data;
@@ -819,13 +1038,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
/* Get L2TP header flags */
hdrflags = ntohs(*(__be16 *)ptr);
- /* Check protocol version */
+ /* Get protocol version */
version = hdrflags & L2TP_HDR_VER_MASK;
- if (version != tunnel->version) {
- pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
- tunnel->name, version, tunnel->version);
- goto invalid;
- }
/* Get length of L2TP packet */
length = skb->len;
@@ -837,7 +1051,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
/* Skip flags */
ptr += 2;
- if (tunnel->version == L2TP_HDR_VER_2) {
+ if (version == L2TP_HDR_VER_2) {
+ u16 tunnel_id, session_id;
+
/* If length is present, skip it */
if (hdrflags & L2TP_HDRFLAG_L)
ptr += 2;
@@ -847,33 +1063,40 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
ptr += 2;
session_id = ntohs(*(__be16 *)ptr);
ptr += 2;
+
+ session = l2tp_v2_session_get(net, tunnel_id, session_id);
} else {
+ u32 session_id;
+
ptr += 2; /* skip reserved bits */
- tunnel_id = tunnel->tunnel_id;
session_id = ntohl(*(__be32 *)ptr);
ptr += 4;
+
+ session = l2tp_v3_session_get(net, sk, session_id);
}
- /* Find the session context */
- session = l2tp_tunnel_get_session(tunnel, session_id);
if (!session || !session->recv_skb) {
if (session)
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
/* Not found? Pass to userspace to deal with */
- pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
goto pass;
}
- if (tunnel->version == L2TP_HDR_VER_3 &&
+ tunnel = session->tunnel;
+
+ /* Check protocol version */
+ if (version != tunnel->version)
+ goto invalid;
+
+ if (version == L2TP_HDR_VER_3 &&
l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto invalid;
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -886,38 +1109,25 @@ pass:
return 1;
}
+EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
- */
-int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
+static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload)
{
- struct l2tp_tunnel *tunnel;
+ sk->sk_err = err;
+ sk_error_report(sk);
- /* Note that this is called from the encap_rcv hook inside an
- * RCU-protected region, but without the socket being locked.
- * Hence we use rcu_dereference_sk_user_data to access the
- * tunnel data structure rather the usual l2tp_sk_to_tunnel
- * accessor function.
- */
- tunnel = rcu_dereference_sk_user_data(sk);
- if (!tunnel)
- goto pass_up;
- if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
- goto pass_up;
-
- if (l2tp_udp_recv_core(tunnel, skb))
- goto pass_up;
-
- return 0;
-
-pass_up:
- return 1;
+ if (ip_hdr(skb)->version == IPVERSION) {
+ if (inet_test_bit(RECVERR, sk))
+ return ip_icmp_error(sk, skb, err, port, info, payload);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (inet6_test_bit(RECVERR6, sk))
+ return ipv6_icmp_error(sk, skb, err, port, info, payload);
+#endif
+ }
}
-EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
/************************************************************************
* Transmit handling
@@ -1041,7 +1251,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
nf_reset_ct(skb);
- bh_lock_sock_nested(sk);
+ /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
+ * nested socket calls on the same lockdep socket class. This can
+ * happen when data from a user socket is routed over l2tp, which uses
+ * another userspace socket.
+ */
+ spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
+
if (sock_owned_by_user(sk)) {
kfree_skb(skb);
ret = NET_XMIT_DROP;
@@ -1093,7 +1309,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
out_unlock:
- bh_unlock_sock(sk);
+ spin_unlock(&sk->sk_lock.slock);
return ret;
}
@@ -1124,66 +1340,38 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
* Tunnel and session create/destroy.
*****************************************************************************/
-/* Tunnel socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void l2tp_tunnel_destruct(struct sock *sk)
-{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
-
- if (!tunnel)
- goto end;
-
- /* Disable udp encapsulation */
- switch (tunnel->encap) {
- case L2TP_ENCAPTYPE_UDP:
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- WRITE_ONCE(udp_sk(sk)->encap_type, 0);
- udp_sk(sk)->encap_rcv = NULL;
- udp_sk(sk)->encap_destroy = NULL;
- break;
- case L2TP_ENCAPTYPE_IP:
- break;
- }
-
- /* Remove hooks into tunnel socket */
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
- write_unlock_bh(&sk->sk_callback_lock);
-
- /* Call the original destructor */
- if (sk->sk_destruct)
- (*sk->sk_destruct)(sk);
-
- kfree_rcu(tunnel, rcu);
-end:
- return;
-}
-
-/* Remove an l2tp session from l2tp_core's hash lists. */
+/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
struct l2tp_tunnel *tunnel = session->tunnel;
- /* Remove the session from core hashes */
if (tunnel) {
- /* Remove from the per-tunnel hash */
- spin_lock_bh(&tunnel->hlist_lock);
- hlist_del_init_rcu(&session->hlist);
- spin_unlock_bh(&tunnel->hlist_lock);
-
- /* For L2TPv3 we have a per-net hash: remove from there, too */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_del_init_rcu(&session->global_hlist);
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+ struct l2tp_session *removed = session;
+
+ spin_lock_bh(&tunnel->list_lock);
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+
+ /* Remove from the per-tunnel list */
+ list_del_init(&session->list);
+
+ /* Remove from per-net IDR */
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ if (hash_hashed(&session->hlist))
+ l2tp_session_collision_del(pn, session);
+ else
+ removed = idr_remove(&pn->l2tp_v3_session_idr,
+ session->session_id);
+ } else {
+ u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
+ session->session_id);
+ removed = idr_remove(&pn->l2tp_v2_session_idr,
+ session_key);
}
+ WARN_ON_ONCE(removed && removed != session);
- synchronize_rcu();
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
+ spin_unlock_bh(&tunnel->list_lock);
}
}
@@ -1192,37 +1380,24 @@ static void l2tp_session_unhash(struct l2tp_session *session)
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
struct l2tp_session *session;
- int hash;
- spin_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->list_lock);
tunnel->acpt_newsess = false;
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- hlist_del_init_rcu(&session->hlist);
-
- spin_unlock_bh(&tunnel->hlist_lock);
- l2tp_session_delete(session);
- spin_lock_bh(&tunnel->hlist_lock);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- goto again;
- }
- }
- spin_unlock_bh(&tunnel->hlist_lock);
+ list_for_each_entry(session, &tunnel->session_list, list)
+ l2tp_session_delete(session);
+ spin_unlock_bh(&tunnel->list_lock);
}
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel;
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
@@ -1256,10 +1431,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
/* drop initial ref */
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
/* drop workqueue ref */
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
@@ -1405,10 +1580,10 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
tunnel->tunnel_id = tunnel_id;
tunnel->peer_tunnel_id = peer_tunnel_id;
- tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
- spin_lock_init(&tunnel->hlist_lock);
+ spin_lock_init(&tunnel->list_lock);
tunnel->acpt_newsess = true;
+ INIT_LIST_HEAD(&tunnel->session_list);
tunnel->encap = encap;
@@ -1418,8 +1593,6 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
/* Init delete workqueue struct */
INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
- INIT_LIST_HEAD(&tunnel->list);
-
err = 0;
err:
if (tunnelp)
@@ -1432,6 +1605,8 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
enum l2tp_encap_type encap)
{
+ struct l2tp_tunnel *tunnel;
+
if (!net_eq(sock_net(sk), net))
return -EINVAL;
@@ -1445,9 +1620,15 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
(encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
return -EPROTONOSUPPORT;
- if (sk->sk_user_data)
+ if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
return -EBUSY;
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
+ l2tp_tunnel_put(tunnel);
+ return -EBUSY;
+ }
+
return 0;
}
@@ -1485,22 +1666,19 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
ret = l2tp_validate_socket(sk, net, tunnel->encap);
if (ret < 0)
goto err_inval_sock;
- rcu_assign_sk_user_data(sk, tunnel);
write_unlock_bh(&sk->sk_callback_lock);
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg = {
- .sk_user_data = tunnel,
.encap_type = UDP_ENCAP_L2TPINUDP,
.encap_rcv = l2tp_udp_encap_recv,
+ .encap_err_rcv = l2tp_udp_encap_err_recv,
.encap_destroy = l2tp_udp_encap_destroy,
};
setup_udp_tunnel_sock(net, sock, &udp_cfg);
}
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = &l2tp_tunnel_destruct;
sk->sk_allocation = GFP_ATOMIC;
release_sock(sk);
@@ -1539,7 +1717,7 @@ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
if (!test_and_set_bit(0, &tunnel->dead)) {
trace_delete_tunnel(tunnel);
- l2tp_tunnel_inc_refcount(tunnel);
+ refcount_inc(&tunnel->ref_count);
queue_work(l2tp_wq, &tunnel->del_work);
}
}
@@ -1547,23 +1725,37 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
void l2tp_session_delete(struct l2tp_session *session)
{
- if (test_and_set_bit(0, &session->dead))
- return;
+ if (!test_and_set_bit(0, &session->dead)) {
+ trace_delete_session(session);
+ refcount_inc(&session->ref_count);
+ queue_work(l2tp_wq, &session->del_work);
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+/* Workqueue session deletion function */
+static void l2tp_session_del_work(struct work_struct *work)
+{
+ struct l2tp_session *session = container_of(work, struct l2tp_session,
+ del_work);
- trace_delete_session(session);
l2tp_session_unhash(session);
l2tp_session_queue_purge(session);
if (session->session_close)
(*session->session_close)(session);
- l2tp_session_dec_refcount(session);
+ /* drop initial ref */
+ l2tp_session_put(session);
+
+ /* drop workqueue ref */
+ l2tp_session_put(session);
}
-EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_type parameters are set.
*/
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+ enum l2tp_encap_type encap)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1572,7 +1764,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
} else {
session->hdr_len = 4 + session->cookie_len;
session->hdr_len += l2tp_get_l2specific_len(session);
- if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ if (encap == L2TP_ENCAPTYPE_UDP)
session->hdr_len += 4;
}
}
@@ -1586,7 +1778,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
if (session) {
session->magic = L2TP_SESSION_MAGIC;
- session->tunnel = tunnel;
session->session_id = session_id;
session->peer_session_id = peer_session_id;
@@ -1606,8 +1797,11 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
skb_queue_head_init(&session->reorder_q);
+ session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
INIT_HLIST_NODE(&session->hlist);
- INIT_HLIST_NODE(&session->global_hlist);
+ INIT_LIST_HEAD(&session->clist);
+ INIT_LIST_HEAD(&session->list);
+ INIT_WORK(&session->del_work, l2tp_session_del_work);
if (cfg) {
session->pwtype = cfg->pw_type;
@@ -1622,7 +1816,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
}
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
refcount_set(&session->ref_count, 1);
@@ -1640,25 +1834,22 @@ EXPORT_SYMBOL_GPL(l2tp_session_create);
static __net_init int l2tp_init_net(struct net *net)
{
struct l2tp_net *pn = net_generic(net, l2tp_net_id);
- int hash;
idr_init(&pn->l2tp_tunnel_idr);
spin_lock_init(&pn->l2tp_tunnel_idr_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
- INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
-
- spin_lock_init(&pn->l2tp_session_hlist_lock);
+ idr_init(&pn->l2tp_v2_session_idr);
+ idr_init(&pn->l2tp_v3_session_idr);
+ spin_lock_init(&pn->l2tp_session_idr_lock);
return 0;
}
-static __net_exit void l2tp_exit_net(struct net *net)
+static __net_exit void l2tp_pre_exit_net(struct net *net)
{
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel = NULL;
unsigned long tunnel_id, tmp;
- int hash;
rcu_read_lock_bh();
idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
@@ -1667,18 +1858,50 @@ static __net_exit void l2tp_exit_net(struct net *net)
}
rcu_read_unlock_bh();
- if (l2tp_wq)
- flush_workqueue(l2tp_wq);
- rcu_barrier();
+ if (l2tp_wq) {
+ /* Run all TUNNEL_DELETE work items just queued. */
+ __flush_workqueue(l2tp_wq);
+
+ /* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
+ * work item for each session in the tunnel. Flush the
+ * workqueue again to process these.
+ */
+ __flush_workqueue(l2tp_wq);
+ }
+}
+
+static int l2tp_idr_item_unexpected(int id, void *p, void *data)
+{
+ const char *idr_name = data;
+
+ pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
+ WARN_ON_ONCE(1);
+ return 1;
+}
+
+static __net_exit void l2tp_exit_net(struct net *net)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
- WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
+ /* Our per-net IDRs should be empty. Check that is so, to
+ * help catch cleanup races or refcnt leaks.
+ */
+ idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
+ "v2_session");
+ idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
+ "v3_session");
+ idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
+ "tunnel");
+
+ idr_destroy(&pn->l2tp_v2_session_idr);
+ idr_destroy(&pn->l2tp_v3_session_idr);
idr_destroy(&pn->l2tp_tunnel_idr);
}
static struct pernet_operations l2tp_net_ops = {
.init = l2tp_init_net,
.exit = l2tp_exit_net,
+ .pre_exit = l2tp_pre_exit_net,
.id = &l2tp_net_id,
.size = sizeof(struct l2tp_net),
};
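
A .pre_exit handler runs for every registered pernet subsystem before any subsystem's .exit handler is called for the dying namespace, with an RCU grace period in between; that ordering is what lets l2tp flush its deletion workqueue here while pseudowire drivers are still alive. Skeleton of such an ops block (names illustrative):

static struct pernet_operations foo_net_ops = {
        .init     = foo_init_net,       /* set up per-namespace state */
        .pre_exit = foo_pre_exit_net,   /* runs before all .exit handlers */
        .exit     = foo_exit_net,       /* all objects must be gone by now */
        .id       = &foo_net_id,
        .size     = sizeof(struct foo_net),
};
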
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 91ebf0a3f499..ffd8ced3a51f 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -16,17 +16,8 @@
#endif
/* Random numbers used for internal consistency checks of tunnel and session structures */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D
-/* Per tunnel session hash table size */
-#define L2TP_HASH_BITS 4
-#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS)
-
-/* System-wide session hash table size */
-#define L2TP_HASH_BITS_2 8
-#define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2)
-
struct sk_buff;
struct l2tp_stats {
@@ -61,15 +52,21 @@ struct l2tp_session_cfg {
char *ifname;
};
+struct l2tp_session_coll_list {
+ spinlock_t lock; /* for access to list */
+ struct list_head list;
+ refcount_t ref_count;
+};
+
/* Represents a session (pseudowire) instance.
* Tracks runtime state including cookies, dataplane packet sequencing, and IO statistics.
- * Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into
- * an additional per-net ("global") hashlist.
+ * Is linked into a per-tunnel session list and a per-net ("global") IDR tree.
*/
#define L2TP_SESSION_NAME_MAX 32
struct l2tp_session {
int magic; /* should be L2TP_SESSION_MAGIC */
long dead;
+ struct rcu_head rcu;
struct l2tp_tunnel *tunnel; /* back pointer to tunnel context */
u32 session_id;
@@ -88,8 +85,12 @@ struct l2tp_session {
u32 nr_oos; /* NR of last OOS packet */
int nr_oos_count; /* for OOS recovery */
int nr_oos_count_max;
- struct hlist_node hlist; /* hash list node */
+ struct list_head list; /* per-tunnel list node */
refcount_t ref_count;
+ struct hlist_node hlist; /* per-net session hlist */
+ unsigned long hlist_key; /* key for session hlist */
+ struct l2tp_session_coll_list *coll_list; /* session collision list */
+ struct list_head clist; /* for coll_list */
char name[L2TP_SESSION_NAME_MAX]; /* for logging */
char ifname[IFNAMSIZ];
@@ -102,7 +103,7 @@ struct l2tp_session {
int reorder_skip; /* set if skip to next nr */
enum l2tp_pwtype pwtype;
struct l2tp_stats stats;
- struct hlist_node global_hlist; /* global hash list node */
+ struct work_struct del_work;
/* Session receive handler for data packets.
* Each pseudowire implementation should implement this callback in order to
@@ -114,7 +115,7 @@ struct l2tp_session {
/* Session close handler.
* Each pseudowire implementation may implement this callback in order to carry
* out pseudowire-specific shutdown actions.
- * The callback is called by core after unhashing the session and purging its
+ * The callback is called by core after unlisting the session and purging its
* reorder queue.
*/
void (*session_close)(struct l2tp_session *session);
@@ -150,22 +151,19 @@ struct l2tp_tunnel_cfg {
/* Represents a tunnel instance.
* Tracks runtime state including IO statistics.
* Holds the tunnel socket (either passed from userspace or directly created by the kernel).
- * Maintains a hashlist of sessions belonging to the tunnel instance.
+ * Maintains a list of sessions belonging to the tunnel instance.
* Is linked into a per-net list of tunnels.
*/
#define L2TP_TUNNEL_NAME_MAX 20
struct l2tp_tunnel {
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
-
unsigned long dead;
struct rcu_head rcu;
- spinlock_t hlist_lock; /* write-protection for session_hlist */
+ spinlock_t list_lock; /* write-protection for session_list */
bool acpt_newsess; /* indicates whether this tunnel accepts
- * new sessions. Protected by hlist_lock.
+ * new sessions. Protected by list_lock.
*/
- struct hlist_head session_hlist[L2TP_HASH_SIZE];
- /* hashed list of sessions, hashed by id */
+ struct list_head session_list; /* list of sessions */
u32 tunnel_id;
u32 peer_tunnel_id;
int version; /* 2=>L2TPv2, 3=>L2TPv3 */
@@ -174,11 +172,9 @@ struct l2tp_tunnel {
enum l2tp_encap_type encap;
struct l2tp_stats stats;
- struct list_head list; /* list node on per-namespace list of tunnels */
struct net *l2tp_net; /* the net we belong to */
refcount_t ref_count;
- void (*old_sk_destruct)(struct sock *sk);
struct sock *sock; /* parent socket */
int fd; /* parent fd, if tunnel socket was created
* by userspace
@@ -213,22 +209,22 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
}
/* Tunnel and session refcounts */
-void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel);
-void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel);
-void l2tp_session_inc_refcount(struct l2tp_session *session);
-void l2tp_session_dec_refcount(struct l2tp_session *session);
+void l2tp_tunnel_put(struct l2tp_tunnel *tunnel);
+void l2tp_session_put(struct l2tp_session *session);
/* Tunnel and session lookup.
* These functions take a reference on the instances they return, so
* the caller must ensure that the reference is dropped appropriately.
*/
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
-struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
-struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
- u32 session_id);
-
-struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id);
-struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key);
+
+struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id);
+struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id);
+struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, u32 session_id);
+struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, unsigned long *key);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
@@ -261,7 +257,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
/* Transmit path helpers for sending packets over the tunnel socket. */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+ enum l2tp_encap_type encap);
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb);
/* Pseudowire management.
@@ -274,10 +271,7 @@ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
/* IOCTL helper for IP encap modules. */
int l2tp_ioctl(struct sock *sk, int cmd, int *karg);
-/* Extract the tunnel structure from a socket's sk_user_data pointer,
- * validating the tunnel magic feather.
- */
-struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk);
+struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk);
static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
{
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 4595b56d175d..2d0c8275a3a8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -34,8 +34,8 @@ static struct dentry *rootdir;
struct l2tp_dfs_seq_data {
struct net *net;
netns_tracker ns_tracker;
- int tunnel_idx; /* current tunnel */
- int session_idx; /* index of session within current tunnel */
+ unsigned long tkey; /* lookup key of current tunnel */
+ unsigned long skey; /* lookup key of current session */
struct l2tp_tunnel *tunnel;
struct l2tp_session *session; /* NULL means get next tunnel */
};
@@ -44,23 +44,25 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->tunnel)
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
- pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
- pd->tunnel_idx++;
+ pd->tunnel = l2tp_tunnel_get_next(pd->net, &pd->tkey);
+ pd->tkey++;
}
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->session)
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
- pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
- pd->session_idx++;
+ pd->session = l2tp_session_get_next(pd->net, pd->tunnel->sock,
+ pd->tunnel->version,
+ pd->tunnel->tunnel_id, &pd->skey);
+ pd->skey++;
if (!pd->session) {
- pd->session_idx = 0;
+ pd->skey = 0;
l2tp_dfs_next_tunnel(pd);
}
}
@@ -109,11 +111,11 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
* or l2tp_dfs_next_tunnel().
*/
if (pd->session) {
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
pd->session = NULL;
}
if (pd->tunnel) {
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
pd->tunnel = NULL;
}
}
@@ -123,17 +125,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
struct l2tp_tunnel *tunnel = v;
struct l2tp_session *session;
int session_count = 0;
- int hash;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- /* Session ID of zero is a dummy/reserved value used by pppol2tp */
- if (session->session_id == 0)
- continue;
+ list_for_each_entry_rcu(session, &tunnel->session_list, list) {
+ /* Session ID of zero is a dummy/reserved value used by pppol2tp */
+ if (session->session_id == 0)
+ continue;
- session_count++;
- }
+ session_count++;
}
rcu_read_unlock_bh();
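As an illustrative aside (not part of the kernel patch): the l2tp_tunnel_get_nth()/l2tp_session_get_nth() walkers used by debugfs, procfs and the netlink dumpers are replaced by l2tp_tunnel_get_next()/l2tp_session_get_next(), which take a cursor key and behave like idr_get_next(): return the first live entry whose ID is >= *key, write that ID back through the pointer, and let the caller resume with key++. A small self-contained userspace C sketch of the cursor pattern follows; a sparse array stands in for the per-net IDR and all names and sizes are made up for the example.

#include <stdio.h>
#include <stddef.h>

#define NSLOTS 8

struct entry { int id; const char *name; };

/* sparse table standing in for the per-net IDR */
static struct entry *slots[NSLOTS];

static struct entry *get_next(unsigned long *key)
{
	for (unsigned long i = *key; i < NSLOTS; i++) {
		if (slots[i]) {
			*key = i;	/* remember where the entry was found */
			return slots[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct entry a = { 1, "tunl1" }, b = { 5, "tunl5" };
	struct entry *e;
	unsigned long key = 0;

	slots[1] = &a;
	slots[5] = &b;

	/* same shape as the l2tp_dfs_next_tunnel()/netlink dump loops above */
	while ((e = get_next(&key)) != NULL) {
		printf("id %lu -> %s\n", key, e->name);
		key++;		/* resume after the entry just returned */
	}
	return 0;
}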
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 25ca89f80414..e83691073496 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -72,35 +72,22 @@ static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev
unsigned int len = skb->len;
int ret = l2tp_xmit_skb(session, skb);
- if (likely(ret == NET_XMIT_SUCCESS)) {
- DEV_STATS_ADD(dev, tx_bytes, len);
- DEV_STATS_INC(dev, tx_packets);
- } else {
- DEV_STATS_INC(dev, tx_dropped);
- }
- return NETDEV_TX_OK;
-}
+ if (likely(ret == NET_XMIT_SUCCESS))
+ dev_dstats_tx_add(dev, len);
+ else
+ dev_dstats_tx_dropped(dev);
-static void l2tp_eth_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- stats->tx_bytes = DEV_STATS_READ(dev, tx_bytes);
- stats->tx_packets = DEV_STATS_READ(dev, tx_packets);
- stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
- stats->rx_bytes = DEV_STATS_READ(dev, rx_bytes);
- stats->rx_packets = DEV_STATS_READ(dev, rx_packets);
- stats->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ return NETDEV_TX_OK;
}
static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
- .ndo_get_stats64 = l2tp_eth_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
};
-static struct device_type l2tpeth_type = {
+static const struct device_type l2tpeth_type = {
.name = "l2tpeth",
};
@@ -109,9 +96,10 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &l2tpeth_type);
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->netdev_ops = &l2tp_eth_netdev_ops;
dev->needs_free_netdev = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
@@ -127,6 +115,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
/* checksums verified by L2TP */
skb->ip_summed = CHECKSUM_NONE;
+ /* drop outer flow-hash */
+ skb_clear_hash(skb);
+
skb_dst_drop(skb);
nf_reset_ct(skb);
@@ -135,12 +126,11 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
if (!dev)
goto error_rcu;
- if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
- DEV_STATS_INC(dev, rx_packets);
- DEV_STATS_ADD(dev, rx_bytes, data_len);
- } else {
+ if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS)
+ dev_dstats_rx_add(dev, data_len);
+ else
DEV_STATS_INC(dev, rx_errors);
- }
+
rcu_read_unlock();
return;
@@ -280,7 +270,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
spriv = l2tp_session_priv(session);
- l2tp_session_inc_refcount(session);
+ refcount_inc(&session->ref_count);
rtnl_lock();
@@ -298,7 +288,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
if (rc < 0) {
rtnl_unlock();
l2tp_session_delete(session);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
free_netdev(dev);
return rc;
@@ -309,17 +299,17 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
rtnl_unlock();
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
__module_get(THIS_MODULE);
return 0;
err_sess_dev:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
free_netdev(dev);
err_sess:
- kfree(session);
+ l2tp_session_put(session);
err:
return rc;
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 9a2a9ed3ba47..29795d2839e8 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -22,9 +22,19 @@
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "l2tp_core.h"
+/* per-net private data for this module */
+static unsigned int l2tp_ip_net_id;
+struct l2tp_ip_net {
+ rwlock_t l2tp_ip_lock;
+ struct hlist_head l2tp_ip_table;
+ struct hlist_head l2tp_ip_bind_table;
+};
+
struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
@@ -33,21 +43,23 @@ struct l2tp_ip_sock {
u32 peer_conn_id;
};
-static DEFINE_RWLOCK(l2tp_ip_lock);
-static struct hlist_head l2tp_ip_table;
-static struct hlist_head l2tp_ip_bind_table;
-
-static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+static struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
return (struct l2tp_ip_sock *)sk;
}
+static struct l2tp_ip_net *l2tp_ip_pernet(const struct net *net)
+{
+ return net_generic(net, l2tp_ip_net_id);
+}
+
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
__be32 raddr, int dif, u32 tunnel_id)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
struct sock *sk;
- sk_for_each_bound(sk, &l2tp_ip_bind_table) {
+ sk_for_each_bound(sk, &pn->l2tp_ip_bind_table) {
const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
int bound_dev_if;
@@ -113,6 +125,7 @@ found:
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct l2tp_ip_net *pn;
struct sock *sk;
u32 session_id;
u32 tunnel_id;
@@ -121,6 +134,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
struct iphdr *iph;
+ pn = l2tp_ip_pernet(net);
+
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -140,7 +155,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, session_id);
+ session = l2tp_v3_session_get(net, NULL, session_id);
if (!session)
goto discard;
@@ -152,7 +167,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
goto discard_sess;
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -167,15 +182,15 @@ pass_up:
tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
iph = (struct iphdr *)skb_network_header(skb);
- read_lock_bh(&l2tp_ip_lock);
+ read_lock_bh(&pn->l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
tunnel_id);
if (!sk) {
- read_unlock_bh(&l2tp_ip_lock);
+ read_unlock_bh(&pn->l2tp_ip_lock);
goto discard;
}
sock_hold(sk);
- read_unlock_bh(&l2tp_ip_lock);
+ read_unlock_bh(&pn->l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -185,7 +200,7 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
discard_sess:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto discard;
discard_put:
@@ -198,21 +213,25 @@ discard:
static int l2tp_ip_hash(struct sock *sk)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
if (sk_unhashed(sk)) {
- write_lock_bh(&l2tp_ip_lock);
- sk_add_node(sk, &l2tp_ip_table);
- write_unlock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
+ sk_add_node(sk, &pn->l2tp_ip_table);
+ write_unlock_bh(&pn->l2tp_ip_lock);
}
return 0;
}
static void l2tp_ip_unhash(struct sock *sk)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
if (sk_unhashed(sk))
return;
- write_lock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
}
static int l2tp_ip_open(struct sock *sk)
@@ -226,23 +245,26 @@ static int l2tp_ip_open(struct sock *sk)
static void l2tp_ip_close(struct sock *sk, long timeout)
{
- write_lock_bh(&l2tp_ip_lock);
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
+ write_lock_bh(&pn->l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
sk_common_release(sk);
}
static void l2tp_ip_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
- struct sk_buff *skb;
+ struct l2tp_tunnel *tunnel;
- while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&sk->sk_write_queue);
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -250,6 +272,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
struct net *net = sock_net(sk);
+ struct l2tp_ip_net *pn;
int ret;
int chk_addr_ret;
@@ -280,10 +303,11 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
- write_lock_bh(&l2tp_ip_lock);
+ pn = l2tp_ip_pernet(net);
+ write_lock_bh(&pn->l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
ret = -EADDRINUSE;
goto out;
}
@@ -291,9 +315,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
- sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
ret = 0;
sock_reset_flag(sk, SOCK_ZAPPED);
@@ -307,6 +331,7 @@ out:
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
int rc;
if (addr_len < sizeof(*lsa))
@@ -329,10 +354,10 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
- write_lock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
- sk_add_bind_node(sk, &l2tp_ip_bind_table);
- write_unlock_bh(&l2tp_ip_lock);
+ sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
+ write_unlock_bh(&pn->l2tp_ip_lock);
out_sk:
release_sock(sk);
@@ -400,7 +425,6 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int rc;
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = NULL;
- struct flowi4 *fl4;
int connected = 0;
__be32 daddr;
@@ -430,7 +454,6 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
- daddr = inet->inet_daddr;
connected = 1;
}
@@ -457,29 +480,24 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto error;
}
- fl4 = &inet->cork.fl.u.ip4;
if (connected)
- rt = (struct rtable *)__sk_dst_check(sk, 0);
+ rt = dst_rtable(__sk_dst_check(sk, 0));
rcu_read_lock();
if (!rt) {
- const struct ip_options_rcu *inet_opt;
+ struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
- inet_opt = rcu_dereference(inet->inet_opt);
+ inet_sk_init_flowi4(inet, fl4);
- /* Use correct destination address if we have options. */
- if (inet_opt && inet_opt->opt.srr)
- daddr = inet_opt->opt.faddr;
+ /* Overwrite ->daddr if msg->msg_name was provided */
+ if (!connected)
+ fl4->daddr = daddr;
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
* itself out.
*/
- rt = ip_route_output_ports(sock_net(sk), fl4, sk,
- daddr, inet->inet_saddr,
- inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
- sk->sk_bound_dev_if);
+ rt = ip_route_output_flow(sock_net(sk), fl4, sk);
if (IS_ERR(rt))
goto no_route;
if (connected) {
@@ -637,25 +655,58 @@ static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
};
+static __net_init int l2tp_ip_init_net(struct net *net)
+{
+ struct l2tp_ip_net *pn = net_generic(net, l2tp_ip_net_id);
+
+ rwlock_init(&pn->l2tp_ip_lock);
+ INIT_HLIST_HEAD(&pn->l2tp_ip_table);
+ INIT_HLIST_HEAD(&pn->l2tp_ip_bind_table);
+ return 0;
+}
+
+static __net_exit void l2tp_ip_exit_net(struct net *net)
+{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
+
+ write_lock_bh(&pn->l2tp_ip_lock);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_table) != 0);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_bind_table) != 0);
+ write_unlock_bh(&pn->l2tp_ip_lock);
+}
+
+static struct pernet_operations l2tp_ip_net_ops = {
+ .init = l2tp_ip_init_net,
+ .exit = l2tp_ip_exit_net,
+ .id = &l2tp_ip_net_id,
+ .size = sizeof(struct l2tp_ip_net),
+};
+
static int __init l2tp_ip_init(void)
{
int err;
pr_info("L2TP IP encapsulation support (L2TPv3)\n");
+ err = register_pernet_device(&l2tp_ip_net_ops);
+ if (err)
+ goto out;
+
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
- goto out;
+ goto out1;
err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
if (err)
- goto out1;
+ goto out2;
inet_register_protosw(&l2tp_ip_protosw);
return 0;
-out1:
+out2:
proto_unregister(&l2tp_ip_prot);
+out1:
+ unregister_pernet_device(&l2tp_ip_net_ops);
out:
return err;
}
@@ -665,6 +716,7 @@ static void __exit l2tp_ip_exit(void)
inet_unregister_protosw(&l2tp_ip_protosw);
inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip_prot);
+ unregister_pernet_device(&l2tp_ip_net_ops);
}
module_init(l2tp_ip_init);
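As an illustrative aside (not part of the kernel patch): the relabelled error paths in l2tp_ip_init() (and in l2tp_ip6_init() below) keep the usual rule that setup steps are undone in reverse order on failure, with the newly added unregister_pernet_device() as the last unwind step. A minimal userspace C sketch of that goto-unwind shape, using stub register/unregister functions that only print what they do:

#include <stdio.h>

static int register_pernet(void)    { puts("register pernet");   return 0; }
static void unregister_pernet(void) { puts("unregister pernet"); }
static int register_proto(void)     { puts("register proto");    return 0; }
static void unregister_proto(void)  { puts("unregister proto");  }
static int add_protocol(void)       { puts("add protocol");      return -1; /* simulate failure */ }

static int demo_init(void)
{
	int err;

	err = register_pernet();
	if (err)
		goto out;

	err = register_proto();
	if (err)
		goto out1;

	err = add_protocol();
	if (err)
		goto out2;	/* real code would go on to register the protosw */

	return 0;

out2:
	unregister_proto();	/* undo steps in reverse order of setup */
out1:
	unregister_pernet();
out:
	return err;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}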
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7bf14cf9ffaa..f4c1da070826 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -22,6 +22,8 @@
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
@@ -29,6 +31,14 @@
#include "l2tp_core.h"
+/* per-net private data for this module */
+static unsigned int l2tp_ip6_net_id;
+struct l2tp_ip6_net {
+ rwlock_t l2tp_ip6_lock;
+ struct hlist_head l2tp_ip6_table;
+ struct hlist_head l2tp_ip6_bind_table;
+};
+
struct l2tp_ip6_sock {
/* inet_sock has to be the first member of l2tp_ip6_sock */
struct inet_sock inet;
@@ -39,23 +49,25 @@ struct l2tp_ip6_sock {
struct ipv6_pinfo inet6;
};
-static DEFINE_RWLOCK(l2tp_ip6_lock);
-static struct hlist_head l2tp_ip6_table;
-static struct hlist_head l2tp_ip6_bind_table;
-
-static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+static struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
return (struct l2tp_ip6_sock *)sk;
}
+static struct l2tp_ip6_net *l2tp_ip6_pernet(const struct net *net)
+{
+ return net_generic(net, l2tp_ip6_net_id);
+}
+
static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *raddr,
int dif, u32 tunnel_id)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net);
struct sock *sk;
- sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
+ sk_for_each_bound(sk, &pn->l2tp_ip6_bind_table) {
const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
@@ -123,6 +135,7 @@ found:
static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct l2tp_ip6_net *pn;
struct sock *sk;
u32 session_id;
u32 tunnel_id;
@@ -131,6 +144,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
struct ipv6hdr *iph;
+ pn = l2tp_ip6_pernet(net);
+
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -150,7 +165,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, session_id);
+ session = l2tp_v3_session_get(net, NULL, session_id);
if (!session)
goto discard;
@@ -162,7 +177,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
goto discard_sess;
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -177,15 +192,15 @@ pass_up:
tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
iph = ipv6_hdr(skb);
- read_lock_bh(&l2tp_ip6_lock);
+ read_lock_bh(&pn->l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
inet6_iif(skb), tunnel_id);
if (!sk) {
- read_unlock_bh(&l2tp_ip6_lock);
+ read_unlock_bh(&pn->l2tp_ip6_lock);
goto discard;
}
sock_hold(sk);
- read_unlock_bh(&l2tp_ip6_lock);
+ read_unlock_bh(&pn->l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -195,7 +210,7 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
discard_sess:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto discard;
discard_put:
@@ -208,21 +223,25 @@ discard:
static int l2tp_ip6_hash(struct sock *sk)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
if (sk_unhashed(sk)) {
- write_lock_bh(&l2tp_ip6_lock);
- sk_add_node(sk, &l2tp_ip6_table);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
+ sk_add_node(sk, &pn->l2tp_ip6_table);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
}
return 0;
}
static void l2tp_ip6_unhash(struct sock *sk)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
if (sk_unhashed(sk))
return;
- write_lock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
}
static int l2tp_ip6_open(struct sock *sk)
@@ -236,24 +255,29 @@ static int l2tp_ip6_open(struct sock *sk)
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
- write_lock_bh(&l2tp_ip6_lock);
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
+ write_lock_bh(&pn->l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
sk_common_release(sk);
}
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel;
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -262,11 +286,14 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
struct net *net = sock_net(sk);
+ struct l2tp_ip6_net *pn;
__be32 v4addr = 0;
int bound_dev_if;
int addr_type;
int err;
+ pn = l2tp_ip6_pernet(net);
+
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
@@ -324,10 +351,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
rcu_read_unlock();
- write_lock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
addr->l2tp_conn_id)) {
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
err = -EADDRINUSE;
goto out_unlock;
}
@@ -340,9 +367,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
- sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
@@ -364,6 +391,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
struct in6_addr *daddr;
int addr_type;
int rc;
+ struct l2tp_ip6_net *pn;
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -395,10 +423,11 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
- write_lock_bh(&l2tp_ip6_lock);
+ pn = l2tp_ip6_pernet(sock_net(sk));
+ write_lock_bh(&pn->l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
- sk_add_bind_node(sk, &l2tp_ip6_bind_table);
- write_unlock_bh(&l2tp_ip6_lock);
+ sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
out_sk:
release_sock(sk);
@@ -630,7 +659,7 @@ back_from_confirm:
ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
- &fl6, (struct rt6_info *)dst,
+ &fl6, dst_rt6_info(dst),
msg->msg_flags);
if (err)
ip6_flush_pending_frames(sk);
@@ -765,25 +794,58 @@ static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
.handler = l2tp_ip6_recv,
};
+static __net_init int l2tp_ip6_init_net(struct net *net)
+{
+ struct l2tp_ip6_net *pn = net_generic(net, l2tp_ip6_net_id);
+
+ rwlock_init(&pn->l2tp_ip6_lock);
+ INIT_HLIST_HEAD(&pn->l2tp_ip6_table);
+ INIT_HLIST_HEAD(&pn->l2tp_ip6_bind_table);
+ return 0;
+}
+
+static __net_exit void l2tp_ip6_exit_net(struct net *net)
+{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net);
+
+ write_lock_bh(&pn->l2tp_ip6_lock);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_table) != 0);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_bind_table) != 0);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
+}
+
+static struct pernet_operations l2tp_ip6_net_ops = {
+ .init = l2tp_ip6_init_net,
+ .exit = l2tp_ip6_exit_net,
+ .id = &l2tp_ip6_net_id,
+ .size = sizeof(struct l2tp_ip6_net),
+};
+
static int __init l2tp_ip6_init(void)
{
int err;
pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+ err = register_pernet_device(&l2tp_ip6_net_ops);
+ if (err)
+ goto out;
+
err = proto_register(&l2tp_ip6_prot, 1);
if (err != 0)
- goto out;
+ goto out1;
err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
if (err)
- goto out1;
+ goto out2;
inet6_register_protosw(&l2tp_ip6_protosw);
return 0;
-out1:
+out2:
proto_unregister(&l2tp_ip6_prot);
+out1:
+ unregister_pernet_device(&l2tp_ip6_net_ops);
out:
return err;
}
@@ -793,6 +855,7 @@ static void __exit l2tp_ip6_exit(void)
inet6_unregister_protosw(&l2tp_ip6_protosw);
inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip6_prot);
+ unregister_pernet_device(&l2tp_ip6_net_ops);
}
module_init(l2tp_ip6_init);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index a901fd14fe3b..59457c0c14aa 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -61,8 +61,9 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info)
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (tunnel) {
- session = l2tp_tunnel_get_session(tunnel, session_id);
- l2tp_tunnel_dec_refcount(tunnel);
+ session = l2tp_session_get(net, tunnel->sock, tunnel->version,
+ tunnel_id, session_id);
+ l2tp_tunnel_put(tunnel);
}
}
@@ -115,7 +116,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
NLM_F_ACK, tunnel, cmd);
if (ret >= 0) {
- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
@@ -143,7 +144,7 @@ static int l2tp_session_notify(struct genl_family *family,
NLM_F_ACK, session, cmd);
if (ret >= 0) {
- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
@@ -241,7 +242,7 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
if (ret < 0)
goto out;
- l2tp_tunnel_inc_refcount(tunnel);
+ refcount_inc(&tunnel->ref_count);
ret = l2tp_tunnel_register(tunnel, net, &cfg);
if (ret < 0) {
kfree(tunnel);
@@ -249,7 +250,7 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
}
ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
L2TP_CMD_TUNNEL_CREATE);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -279,7 +280,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
l2tp_tunnel_delete(tunnel);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -307,7 +308,7 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -478,42 +479,48 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto err_nlmsg_tunnel;
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
return genlmsg_unicast(net, msg, info->snd_portid);
err_nlmsg_tunnel:
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
err_nlmsg:
nlmsg_free(msg);
err:
return ret;
}
+struct l2tp_nl_cb_data {
+ unsigned long tkey;
+ unsigned long skey;
+};
+
static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- int ti = cb->args[0];
+ struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
+ unsigned long key = cbd->tkey;
struct l2tp_tunnel *tunnel;
struct net *net = sock_net(skb->sk);
for (;;) {
- tunnel = l2tp_tunnel_get_nth(net, ti);
+ tunnel = l2tp_tunnel_get_next(net, &key);
if (!tunnel)
goto out;
if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
goto out;
}
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
- ti++;
+ key++;
}
out:
- cb->args[0] = ti;
+ cbd->tkey = key;
return skb->len;
}
@@ -635,16 +642,17 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
&cfg);
if (ret >= 0) {
- session = l2tp_tunnel_get_session(tunnel, session_id);
+ session = l2tp_session_get(net, tunnel->sock, tunnel->version,
+ tunnel_id, session_id);
if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
}
}
out_tunnel:
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
}
@@ -669,7 +677,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
l2tp_nl_cmd_ops[pw_type]->session_delete(session);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
out:
return ret;
@@ -690,8 +698,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
+ struct l2tp_tunnel *tunnel = session->tunnel;
+
session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
- l2tp_session_set_header_len(session, session->tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
}
if (info->attrs[L2TP_ATTR_LNS_MODE])
@@ -703,7 +713,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
out:
return ret;
@@ -814,57 +824,59 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return ret;
err_ref_msg:
nlmsg_free(msg);
err_ref:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
err:
return ret;
}
static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
struct net *net = sock_net(skb->sk);
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
- int ti = cb->args[0];
- int si = cb->args[1];
+ unsigned long tkey = cbd->tkey;
+ unsigned long skey = cbd->skey;
for (;;) {
if (!tunnel) {
- tunnel = l2tp_tunnel_get_nth(net, ti);
+ tunnel = l2tp_tunnel_get_next(net, &tkey);
if (!tunnel)
goto out;
}
- session = l2tp_session_get_nth(tunnel, si);
+ session = l2tp_session_get_next(net, tunnel->sock, tunnel->version,
+ tunnel->tunnel_id, &skey);
if (!session) {
- ti++;
- l2tp_tunnel_dec_refcount(tunnel);
+ tkey++;
+ l2tp_tunnel_put(tunnel);
tunnel = NULL;
- si = 0;
+ skey = 0;
continue;
}
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
session, L2TP_CMD_SESSION_GET) < 0) {
- l2tp_session_dec_refcount(session);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_session_put(session);
+ l2tp_tunnel_put(tunnel);
break;
}
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
- si++;
+ skey++;
}
out:
- cb->args[0] = ti;
- cb->args[1] = si;
+ cbd->tkey = tkey;
+ cbd->skey = skey;
return skb->len;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f011af6601c9..53baf2dd5d5d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -119,7 +119,6 @@ struct pppol2tp_session {
struct mutex sk_lock; /* Protects .sk */
struct sock __rcu *sk; /* Pointer to the session PPPoX socket */
struct sock *__sk; /* Copy of .sk, for cleanup */
- struct rcu_head rcu; /* For asynchronous release */
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
@@ -150,27 +149,23 @@ static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
/* Helpers to obtain tunnel/session contexts from sockets.
*/
-static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+static struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
{
struct l2tp_session *session;
if (!sk)
return NULL;
- sock_hold(sk);
- session = (struct l2tp_session *)(sk->sk_user_data);
- if (!session) {
- sock_put(sk);
- goto out;
- }
- if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) {
- session = NULL;
- sock_put(sk);
- goto out;
+ rcu_read_lock();
+ session = rcu_dereference_sk_user_data(sk);
+ if (session && refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock();
+ WARN_ON_ONCE(session->magic != L2TP_SESSION_MAGIC);
+ return session;
}
+ rcu_read_unlock();
-out:
- return session;
+ return NULL;
}
/*****************************************************************************
@@ -318,12 +313,12 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
l2tp_xmit_skb(session, skb);
local_bh_enable();
- sock_put(sk);
+ l2tp_session_put(session);
return total_len;
error_put_sess:
- sock_put(sk);
+ l2tp_session_put(session);
error:
return error;
}
@@ -377,12 +372,12 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
l2tp_xmit_skb(session, skb);
local_bh_enable();
- sock_put(sk);
+ l2tp_session_put(session);
return 1;
abort_put_sess:
- sock_put(sk);
+ l2tp_session_put(session);
abort:
/* Free the original skb */
kfree_skb(skb);
@@ -393,29 +388,32 @@ abort:
* Session (and tunnel control) socket create/destroy.
*****************************************************************************/
-static void pppol2tp_put_sk(struct rcu_head *head)
-{
- struct pppol2tp_session *ps;
-
- ps = container_of(head, typeof(*ps), rcu);
- sock_put(ps->__sk);
-}
-
/* Really kill the session socket. (Called from sock_put() if
* refcnt == 0.)
*/
static void pppol2tp_session_destruct(struct sock *sk)
{
- struct l2tp_session *session = sk->sk_user_data;
-
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
+}
- if (session) {
- sk->sk_user_data = NULL;
- if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
- return;
- l2tp_session_dec_refcount(session);
+static void pppol2tp_session_close(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps;
+
+ ps = l2tp_session_priv(session);
+ mutex_lock(&ps->sk_lock);
+ ps->__sk = rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock));
+ RCU_INIT_POINTER(ps->sk, NULL);
+ mutex_unlock(&ps->sk_lock);
+ if (ps->__sk) {
+ /* detach socket */
+ rcu_assign_sk_user_data(ps->__sk, NULL);
+ sock_put(ps->__sk);
+
+ /* drop ref taken when we referenced socket via sk_user_data */
+ l2tp_session_put(session);
}
}
@@ -444,30 +442,13 @@ static int pppol2tp_release(struct socket *sock)
session = pppol2tp_sock_to_session(sk);
if (session) {
- struct pppol2tp_session *ps;
-
l2tp_session_delete(session);
-
- ps = l2tp_session_priv(session);
- mutex_lock(&ps->sk_lock);
- ps->__sk = rcu_dereference_protected(ps->sk,
- lockdep_is_held(&ps->sk_lock));
- RCU_INIT_POINTER(ps->sk, NULL);
- mutex_unlock(&ps->sk_lock);
- call_rcu(&ps->rcu, pppol2tp_put_sk);
-
- /* Rely on the sock_put() call at the end of the function for
- * dropping the reference held by pppol2tp_sock_to_session().
- * The last reference will be dropped by pppol2tp_put_sk().
- */
+ /* drop ref taken by pppol2tp_sock_to_session */
+ l2tp_session_put(session);
}
release_sock(sk);
- /* This will delete the session context via
- * pppol2tp_session_destruct() if the socket's refcnt drops to
- * zero.
- */
sock_put(sk);
return 0;
@@ -506,6 +487,7 @@ static int pppol2tp_create(struct net *net, struct socket *sock, int kern)
goto out;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sock->state = SS_UNCONNECTED;
sock->ops = &pppol2tp_ops;
@@ -542,6 +524,7 @@ static void pppol2tp_session_init(struct l2tp_session *session)
struct pppol2tp_session *ps;
session->recv_skb = pppol2tp_recv;
+ session->session_close = pppol2tp_session_close;
if (IS_ENABLED(CONFIG_L2TP_DEBUGFS))
session->show = pppol2tp_show;
@@ -685,7 +668,7 @@ static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
if (error < 0)
return ERR_PTR(error);
- l2tp_tunnel_inc_refcount(tunnel);
+ refcount_inc(&tunnel->ref_count);
error = l2tp_tunnel_register(tunnel, net, &tcfg);
if (error < 0) {
kfree(tunnel);
@@ -701,7 +684,7 @@ static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
/* Error if socket is not prepped */
if (!tunnel->sock) {
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
return ERR_PTR(-ENOENT);
}
}
@@ -753,7 +736,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = info.peer_tunnel_id;
- session = l2tp_tunnel_get_session(tunnel, info.session_id);
+ session = l2tp_session_get(sock_net(sk), tunnel->sock, tunnel->version,
+ info.tunnel_id, info.session_id);
if (session) {
drop_refcnt = true;
@@ -786,18 +770,20 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
goto end;
}
+ drop_refcnt = true;
+
pppol2tp_session_init(session);
ps = l2tp_session_priv(session);
- l2tp_session_inc_refcount(session);
+ refcount_inc(&session->ref_count);
mutex_lock(&ps->sk_lock);
error = l2tp_session_register(session, tunnel);
if (error < 0) {
mutex_unlock(&ps->sk_lock);
- kfree(session);
+ l2tp_session_put(session);
goto end;
}
- drop_refcnt = true;
+
new_session = true;
}
@@ -829,12 +815,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
out_no_ppp:
/* This is how we get the session context from the socket. */
- sk->sk_user_data = session;
+ sock_hold(sk);
+ rcu_assign_sk_user_data(sk, session);
rcu_assign_pointer(ps->sk, sk);
mutex_unlock(&ps->sk_lock);
/* Keep the reference we've grabbed on the session: sk doesn't expect
- * the session to disappear. pppol2tp_session_destruct() is responsible
+ * the session to disappear. pppol2tp_session_close() is responsible
* for dropping it.
*/
drop_refcnt = false;
@@ -849,8 +836,8 @@ end:
l2tp_tunnel_delete(tunnel);
}
if (drop_refcnt)
- l2tp_session_dec_refcount(session);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_session_put(session);
+ l2tp_tunnel_put(tunnel);
release_sock(sk);
return error;
@@ -890,7 +877,7 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
return 0;
err_sess:
- kfree(session);
+ l2tp_session_put(session);
err:
return error;
}
@@ -1001,7 +988,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
error = len;
- sock_put(sk);
+ l2tp_session_put(session);
end:
return error;
}
@@ -1045,17 +1032,18 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats,
/* If session_id is set, search the corresponding session in the
* context of this tunnel and record the session's statistics.
*/
- session = l2tp_tunnel_get_session(tunnel, stats->session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel->sock, tunnel->version,
+ tunnel->tunnel_id, stats->session_id);
if (!session)
return -EBADR;
if (session->pwtype != L2TP_PWTYPE_PPP) {
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return -EBADR;
}
pppol2tp_copy_stats(stats, &session->stats);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
}
@@ -1203,7 +1191,8 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
- l2tp_session_set_header_len(session, session->tunnel->version);
+ l2tp_session_set_header_len(session, session->tunnel->version,
+ session->tunnel->encap);
break;
case PPPOL2TP_SO_LNSMODE:
@@ -1272,7 +1261,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
err = pppol2tp_session_setsockopt(sk, session, optname, val);
}
- sock_put(sk);
+ l2tp_session_put(session);
end:
return err;
}
@@ -1356,11 +1345,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
err = -ENOTCONN;
if (!sk->sk_user_data)
goto end;
@@ -1393,7 +1382,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
err = 0;
end_put_sess:
- sock_put(sk);
+ l2tp_session_put(session);
end:
return err;
}
@@ -1404,14 +1393,12 @@ end:
* L2TPv2, we dump only L2TPv2 tunnels and sessions here.
*****************************************************************************/
-static unsigned int pppol2tp_net_id;
-
#ifdef CONFIG_PROC_FS
struct pppol2tp_seq_data {
struct seq_net_private p;
- int tunnel_idx; /* current tunnel */
- int session_idx; /* index of session within current tunnel */
+ unsigned long tkey; /* lookup key of current tunnel */
+ unsigned long skey; /* lookup key of current session */
struct l2tp_tunnel *tunnel;
struct l2tp_session *session; /* NULL means get next tunnel */
};
@@ -1420,17 +1407,17 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->tunnel)
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
for (;;) {
- pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
- pd->tunnel_idx++;
+ pd->tunnel = l2tp_tunnel_get_next(net, &pd->tkey);
+ pd->tkey++;
/* Only accept L2TPv2 tunnels */
if (!pd->tunnel || pd->tunnel->version == 2)
return;
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
}
}
@@ -1438,13 +1425,15 @@ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->session)
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
- pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
- pd->session_idx++;
+ pd->session = l2tp_session_get_next(net, pd->tunnel->sock,
+ pd->tunnel->version,
+ pd->tunnel->tunnel_id, &pd->skey);
+ pd->skey++;
if (!pd->session) {
- pd->session_idx = 0;
+ pd->skey = 0;
pppol2tp_next_tunnel(net, pd);
}
}
@@ -1496,11 +1485,11 @@ static void pppol2tp_seq_stop(struct seq_file *p, void *v)
* or pppol2tp_next_tunnel().
*/
if (pd->session) {
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
pd->session = NULL;
}
if (pd->tunnel) {
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
pd->tunnel = NULL;
}
}
@@ -1511,7 +1500,7 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
seq_printf(m, "\nTUNNEL '%s', %c %d\n",
tunnel->name,
- (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
+ tunnel->sock ? 'Y' : 'N',
refcount_read(&tunnel->ref_count) - 1);
seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
0,
@@ -1639,7 +1628,6 @@ static __net_exit void pppol2tp_exit_net(struct net *net)
static struct pernet_operations pppol2tp_net_ops = {
.init = pppol2tp_init_net,
.exit = pppol2tp_exit_net,
- .id = &pppol2tp_net_id,
};
/*****************************************************************************
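As an illustrative aside (not part of the kernel patch): pppol2tp_sock_to_session() now reaches the session through an RCU-protected sk_user_data pointer and only uses it if refcount_inc_not_zero() succeeds, i.e. if the session has not already begun tearing down. Below is a userspace C11 sketch of just the inc-not-zero step; plain atomics stand in for the kernel's refcount_t, RCU is omitted, and all names are invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct session {
	atomic_int ref_count;
	const char *name;
};

/* stand-in for refcount_inc_not_zero(): take a reference unless it hit zero */
static bool get_ref_unless_zero(struct session *s)
{
	int old = atomic_load(&s->ref_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&s->ref_count, &old, old + 1))
			return true;
	}
	return false;
}

static void put_ref(struct session *s)
{
	if (atomic_fetch_sub(&s->ref_count, 1) == 1)
		printf("%s: last reference dropped, would free here\n", s->name);
}

int main(void)
{
	struct session s = { .ref_count = 1, .name = "sess" };

	if (get_ref_unless_zero(&s)) {	/* lookup succeeded, safe to use */
		printf("%s: using session\n", s.name);
		put_ref(&s);
	}

	put_ref(&s);			/* owner drops its original reference */

	/* a later lookup fails: the object is already dead */
	if (!get_ref_unless_zero(&s))
		printf("%s: lookup after teardown correctly fails\n", s.name);
	return 0;
}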
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index fde1140d899e..0259cde394ba 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -688,14 +688,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
* llc_ui_accept - accept a new incoming connection.
* @sock: Socket which connections arrive on.
* @newsock: Socket to move incoming connection to.
- * @flags: User specified operational flags.
- * @kern: If the socket is kernel internal
+ * @arg: User specified arguments
*
* Accept a new incoming connection.
* Returns 0 upon success, negative otherwise.
*/
-static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int llc_ui_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk, *newsk;
struct llc_sock *llc, *newllc;
@@ -1099,7 +1098,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
goto out;
- rc = copy_from_sockptr(&opt, optval, sizeof(opt));
+ rc = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (rc)
goto out;
rc = -EINVAL;
diff --git a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c
index 2467573b5f84..1c267db304df 100644
--- a/net/llc/llc_c_st.c
+++ b/net/llc/llc_c_st.c
@@ -42,7 +42,7 @@ static const llc_conn_action_t llc_common_actions_1[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_1 = {
+static const struct llc_conn_state_trans llc_common_state_trans_1 = {
.ev = llc_conn_ev_disc_req,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = NONE,
@@ -59,7 +59,7 @@ static const llc_conn_action_t llc_common_actions_2[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_2 = {
+static const struct llc_conn_state_trans llc_common_state_trans_2 = {
.ev = llc_conn_ev_rst_req,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -79,7 +79,7 @@ static const llc_conn_action_t llc_common_actions_3[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_3 = {
+static const struct llc_conn_state_trans llc_common_state_trans_3 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -95,7 +95,7 @@ static const llc_conn_action_t llc_common_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_4 = {
+static const struct llc_conn_state_trans llc_common_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -114,7 +114,7 @@ static const llc_conn_action_t llc_common_actions_5[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_5 = {
+static const struct llc_conn_state_trans llc_common_state_trans_5 = {
.ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -129,7 +129,7 @@ static const llc_conn_action_t llc_common_actions_6[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_6 = {
+static const struct llc_conn_state_trans llc_common_state_trans_6 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -145,7 +145,7 @@ static const llc_conn_action_t llc_common_actions_7a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_7a = {
+static const struct llc_conn_state_trans llc_common_state_trans_7a = {
.ev = llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -161,7 +161,7 @@ static const llc_conn_action_t llc_common_actions_7b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_7b = {
+static const struct llc_conn_state_trans llc_common_state_trans_7b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -177,7 +177,7 @@ static const llc_conn_action_t llc_common_actions_8a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8a = {
+static const struct llc_conn_state_trans llc_common_state_trans_8a = {
.ev = llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -193,7 +193,7 @@ static const llc_conn_action_t llc_common_actions_8b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8b = {
+static const struct llc_conn_state_trans llc_common_state_trans_8b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -209,7 +209,7 @@ static const llc_conn_action_t llc_common_actions_8c[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8c = {
+static const struct llc_conn_state_trans llc_common_state_trans_8c = {
.ev = llc_conn_ev_rx_bad_pdu,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -225,7 +225,7 @@ static const llc_conn_action_t llc_common_actions_9[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_9 = {
+static const struct llc_conn_state_trans llc_common_state_trans_9 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -247,7 +247,7 @@ static const llc_conn_action_t llc_common_actions_10[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_10 = {
+static const struct llc_conn_state_trans llc_common_state_trans_10 = {
.ev = llc_conn_ev_rx_xxx_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_common_ev_qfyrs_10,
@@ -270,7 +270,7 @@ static const llc_conn_action_t llc_common_actions_11a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11a = {
+static const struct llc_conn_state_trans llc_common_state_trans_11a = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11a,
@@ -292,7 +292,7 @@ static const llc_conn_action_t llc_common_actions_11b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11b = {
+static const struct llc_conn_state_trans llc_common_state_trans_11b = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11b,
@@ -314,7 +314,7 @@ static const llc_conn_action_t llc_common_actions_11c[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11c = {
+static const struct llc_conn_state_trans llc_common_state_trans_11c = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11c,
@@ -336,7 +336,7 @@ static const llc_conn_action_t llc_common_actions_11d[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11d = {
+static const struct llc_conn_state_trans llc_common_state_trans_11d = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11d,
@@ -347,7 +347,7 @@ static struct llc_conn_state_trans llc_common_state_trans_11d = {
* Common dummy state transition; must be last entry for all state
* transition groups - it'll be on .bss, so will be zeroed.
*/
-static struct llc_conn_state_trans llc_common_state_trans_end;
+static const struct llc_conn_state_trans llc_common_state_trans_end;
/* LLC_CONN_STATE_ADM transitions */
/* State transitions for LLC_CONN_EV_CONN_REQ event */
@@ -359,7 +359,7 @@ static const llc_conn_action_t llc_adm_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_1 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_1 = {
.ev = llc_conn_ev_conn_req,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = NONE,
@@ -378,7 +378,7 @@ static const llc_conn_action_t llc_adm_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_2 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_2 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -392,7 +392,7 @@ static const llc_conn_action_t llc_adm_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_3 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_3 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -406,7 +406,7 @@ static const llc_conn_action_t llc_adm_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_4 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_4 = {
.ev = llc_conn_ev_rx_xxx_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -419,7 +419,7 @@ static const llc_conn_action_t llc_adm_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_5 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_5 = {
.ev = llc_conn_ev_rx_any_frame,
.next_state = LLC_CONN_OUT_OF_SVC,
.ev_qualifiers = NONE,
@@ -430,7 +430,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_5 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_adm_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_adm_state_transitions[] = {
[0] = &llc_adm_state_trans_1, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* local_busy */
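
One detail visible in the array above: the const added by this patch qualifies the pointed-to transitions, not the array of pointers itself, so the table of pointers remains ordinary writable data unless a second, top-level const is added. The snippet below is a generic C illustration of that difference and is not part of this patch.

	/* Generic illustration, not from this patch:
	 * writable_tbl  - non-const pointers to const objects; the array
	 *                 itself still lives in writable data.
	 * readonly_tbl  - adds a top-level const, so the whole table can
	 *                 be placed in read-only memory.
	 */
	static const struct llc_conn_state_trans *writable_tbl[] = {
		&llc_adm_state_trans_1,
	};

	static const struct llc_conn_state_trans *const readonly_tbl[] = {
		&llc_adm_state_trans_1,
	};
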
@@ -453,7 +453,7 @@ static const llc_conn_action_t llc_setup_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_1 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = NONE,
@@ -477,7 +477,7 @@ static const llc_conn_action_t llc_setup_actions_2[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_2 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_setup_ev_qfyrs_2,
@@ -498,7 +498,7 @@ static const llc_conn_action_t llc_setup_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_3 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_3 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_setup_ev_qfyrs_3,
@@ -519,7 +519,7 @@ static const llc_conn_action_t llc_setup_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_4 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_4,
@@ -539,7 +539,7 @@ static const llc_conn_action_t llc_setup_actions_5[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_5 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_5 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_5,
@@ -560,7 +560,7 @@ static const llc_conn_action_t llc_setup_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_7 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = llc_setup_ev_qfyrs_7,
@@ -581,7 +581,7 @@ static const llc_conn_action_t llc_setup_actions_8[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_8 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_8,
@@ -592,7 +592,7 @@ static struct llc_conn_state_trans llc_setup_state_trans_8 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_setup_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_setup_state_transitions[] = {
[0] = &llc_common_state_trans_end, /* Request */
[1] = &llc_common_state_trans_end, /* local busy */
[2] = &llc_common_state_trans_end, /* init_pf_cycle */
@@ -622,7 +622,7 @@ static const llc_conn_action_t llc_normal_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_1 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_1,
@@ -643,7 +643,7 @@ static const llc_conn_action_t llc_normal_actions_2[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_2 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_2,
@@ -660,7 +660,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_normal_actions_2_1[1];
-static struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_2_1,
@@ -680,7 +680,7 @@ static const llc_conn_action_t llc_normal_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_3 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_3 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_normal_ev_qfyrs_3,
@@ -700,7 +700,7 @@ static const llc_conn_action_t llc_normal_actions_4[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_4 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_4 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_normal_ev_qfyrs_4,
@@ -723,7 +723,7 @@ static const llc_conn_action_t llc_normal_actions_5a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5a,
@@ -746,7 +746,7 @@ static const llc_conn_action_t llc_normal_actions_5b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5b,
@@ -769,7 +769,7 @@ static const llc_conn_action_t llc_normal_actions_5c[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5c,
@@ -790,7 +790,7 @@ static const llc_conn_action_t llc_normal_actions_6a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_6a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_6a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_6a,
@@ -811,7 +811,7 @@ static const llc_conn_action_t llc_normal_actions_6b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_6b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_6b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_6b,
@@ -827,7 +827,7 @@ static const llc_conn_action_t llc_normal_actions_7[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_7 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_7 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -850,7 +850,7 @@ static const llc_conn_action_t llc_normal_actions_8[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_8a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_8a,
@@ -863,7 +863,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_8b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_8b,
@@ -884,7 +884,7 @@ static const llc_conn_action_t llc_normal_actions_9a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_9a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_9a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_9a,
@@ -905,7 +905,7 @@ static const llc_conn_action_t llc_normal_actions_9b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_9b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_9b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_9b,
@@ -922,7 +922,7 @@ static const llc_conn_action_t llc_normal_actions_10[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_10 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_10 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -937,7 +937,7 @@ static const llc_conn_action_t llc_normal_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -952,7 +952,7 @@ static const llc_conn_action_t llc_normal_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -973,7 +973,7 @@ static const llc_conn_action_t llc_normal_actions_11c[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_11c,
@@ -990,7 +990,7 @@ static const llc_conn_action_t llc_normal_actions_12[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_12 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_12 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1005,7 +1005,7 @@ static const llc_conn_action_t llc_normal_actions_13a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1020,7 +1020,7 @@ static const llc_conn_action_t llc_normal_actions_13b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1040,7 +1040,7 @@ static const llc_conn_action_t llc_normal_actions_13c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_13c,
@@ -1057,7 +1057,7 @@ static const llc_conn_action_t llc_normal_actions_14[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_14 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_14 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1080,7 +1080,7 @@ static const llc_conn_action_t llc_normal_actions_15a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_15a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_15a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_15a,
@@ -1103,7 +1103,7 @@ static const llc_conn_action_t llc_normal_actions_15b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_15b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_15b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_15b,
@@ -1125,7 +1125,7 @@ static const llc_conn_action_t llc_normal_actions_16a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_16a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_16a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_16a,
@@ -1147,7 +1147,7 @@ static const llc_conn_action_t llc_normal_actions_16b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_16b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_16b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_16b,
@@ -1164,7 +1164,7 @@ static const llc_conn_action_t llc_normal_actions_17[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_17 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_17 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1183,7 +1183,7 @@ static const llc_conn_action_t llc_normal_actions_18[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_18 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_18 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_18,
@@ -1205,7 +1205,7 @@ static const llc_conn_action_t llc_normal_actions_19[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_19 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_19 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_19,
@@ -1228,7 +1228,7 @@ static const llc_conn_action_t llc_normal_actions_20a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_20a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_20a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_20a,
@@ -1251,7 +1251,7 @@ static const llc_conn_action_t llc_normal_actions_20b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_20b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_20b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_20b,
@@ -1270,7 +1270,7 @@ static const llc_conn_action_t llc_normal_actions_21[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_21 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_21 = {
.ev = llc_conn_ev_tx_buffer_full,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_21,
@@ -1281,7 +1281,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_21 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_normal_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_normal_state_transitions[] = {
[0] = &llc_normal_state_trans_1, /* Requests */
[1] = &llc_normal_state_trans_2,
[2] = &llc_normal_state_trans_2_1,
@@ -1354,7 +1354,7 @@ static const llc_conn_action_t llc_busy_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_1 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_1,
@@ -1374,7 +1374,7 @@ static const llc_conn_action_t llc_busy_actions_2[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_2 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_2,
@@ -1391,7 +1391,7 @@ static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_busy_actions_2_1[1];
-static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_2_1,
@@ -1411,7 +1411,7 @@ static const llc_conn_action_t llc_busy_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_3 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_3 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_3,
@@ -1431,7 +1431,7 @@ static const llc_conn_action_t llc_busy_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_4 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_4 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_4,
@@ -1450,7 +1450,7 @@ static const llc_conn_action_t llc_busy_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_5 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_5 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_busy_ev_qfyrs_5,
@@ -1469,7 +1469,7 @@ static const llc_conn_action_t llc_busy_actions_6[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_6 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_6 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_busy_ev_qfyrs_6,
@@ -1488,7 +1488,7 @@ static const llc_conn_action_t llc_busy_actions_7[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_7 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_7 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_7,
@@ -1507,7 +1507,7 @@ static const llc_conn_action_t llc_busy_actions_8[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_8 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_8 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_8,
@@ -1529,7 +1529,7 @@ static const llc_conn_action_t llc_busy_actions_9a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_9a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_9a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_9a,
@@ -1551,7 +1551,7 @@ static const llc_conn_action_t llc_busy_actions_9b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_9b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_9b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_9b,
@@ -1571,7 +1571,7 @@ static const llc_conn_action_t llc_busy_actions_10a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_10a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_10a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_10a,
@@ -1591,7 +1591,7 @@ static const llc_conn_action_t llc_busy_actions_10b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_10b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_10b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_10b,
@@ -1606,7 +1606,7 @@ static const llc_conn_action_t llc_busy_actions_11[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_11 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_11 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1624,7 +1624,7 @@ static const llc_conn_action_t llc_busy_actions_12[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_12 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_12 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1649,7 +1649,7 @@ static const llc_conn_action_t llc_busy_actions_13a[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_13a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_13a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_13a,
@@ -1674,7 +1674,7 @@ static const llc_conn_action_t llc_busy_actions_13b[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_13b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_13b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_13b,
@@ -1697,7 +1697,7 @@ static const llc_conn_action_t llc_busy_actions_14a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_14a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_14a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_14a,
@@ -1720,7 +1720,7 @@ static const llc_conn_action_t llc_busy_actions_14b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_14b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_14b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_14b,
@@ -1735,7 +1735,7 @@ static const llc_conn_action_t llc_busy_actions_15a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1750,7 +1750,7 @@ static const llc_conn_action_t llc_busy_actions_15b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1770,7 +1770,7 @@ static const llc_conn_action_t llc_busy_actions_15c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15c = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_15c,
@@ -1785,7 +1785,7 @@ static const llc_conn_action_t llc_busy_actions_16[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_16 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_16 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1800,7 +1800,7 @@ static const llc_conn_action_t llc_busy_actions_17a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1815,7 +1815,7 @@ static const llc_conn_action_t llc_busy_actions_17b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1835,7 +1835,7 @@ static const llc_conn_action_t llc_busy_actions_17c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17c = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_17c,
@@ -1850,7 +1850,7 @@ static const llc_conn_action_t llc_busy_actions_18[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_18 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_18 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1872,7 +1872,7 @@ static const llc_conn_action_t llc_busy_actions_19a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_19a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_19a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_19a,
@@ -1894,7 +1894,7 @@ static const llc_conn_action_t llc_busy_actions_19b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_19b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_19b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_19b,
@@ -1915,7 +1915,7 @@ static const llc_conn_action_t llc_busy_actions_20a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_20a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_20a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_20a,
@@ -1936,7 +1936,7 @@ static const llc_conn_action_t llc_busy_actions_20b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_20b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_20b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_20b,
@@ -1953,7 +1953,7 @@ static const llc_conn_action_t llc_busy_actions_21[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_21 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_21 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1972,7 +1972,7 @@ static const llc_conn_action_t llc_busy_actions_22[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_22 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_22 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_22,
@@ -1993,7 +1993,7 @@ static const llc_conn_action_t llc_busy_actions_23[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_23 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_23 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_23,
@@ -2015,7 +2015,7 @@ static const llc_conn_action_t llc_busy_actions_24a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_24a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_24a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_24a,
@@ -2037,7 +2037,7 @@ static const llc_conn_action_t llc_busy_actions_24b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_24b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_24b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_24b,
@@ -2060,7 +2060,7 @@ static const llc_conn_action_t llc_busy_actions_25[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_25 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_25 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_25,
@@ -2079,7 +2079,7 @@ static const llc_conn_action_t llc_busy_actions_26[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_26 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_26 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_26,
@@ -2090,7 +2090,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_26 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_busy_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_busy_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_busy_state_trans_1,
@@ -2166,7 +2166,7 @@ static const llc_conn_action_t llc_reject_actions_1[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_1 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_1,
@@ -2185,7 +2185,7 @@ static const llc_conn_action_t llc_reject_actions_2[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_2 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_2,
@@ -2202,7 +2202,7 @@ static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_reject_actions_2_1[1];
-static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_2_1,
@@ -2222,7 +2222,7 @@ static const llc_conn_action_t llc_reject_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_3 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_3 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_reject_ev_qfyrs_3,
@@ -2241,7 +2241,7 @@ static const llc_conn_action_t llc_reject_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_4 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_4 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_reject_ev_qfyrs_4,
@@ -2256,7 +2256,7 @@ static const llc_conn_action_t llc_reject_actions_5a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2271,7 +2271,7 @@ static const llc_conn_action_t llc_reject_actions_5b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2291,7 +2291,7 @@ static const llc_conn_action_t llc_reject_actions_5c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_5c,
@@ -2305,7 +2305,7 @@ static const llc_conn_action_t llc_reject_actions_6[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_6 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2330,7 +2330,7 @@ static const llc_conn_action_t llc_reject_actions_7a[] = {
};
-static struct llc_conn_state_trans llc_reject_state_trans_7a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_7a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_7a,
@@ -2354,7 +2354,7 @@ static const llc_conn_action_t llc_reject_actions_7b[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_7b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_7b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_7b,
@@ -2376,7 +2376,7 @@ static const llc_conn_action_t llc_reject_actions_8a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_8a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_8a,
@@ -2398,7 +2398,7 @@ static const llc_conn_action_t llc_reject_actions_8b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_8b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_8b,
@@ -2415,7 +2415,7 @@ static const llc_conn_action_t llc_reject_actions_9[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_9 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_9 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -2430,7 +2430,7 @@ static const llc_conn_action_t llc_reject_actions_10a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2445,7 +2445,7 @@ static const llc_conn_action_t llc_reject_actions_10b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2465,7 +2465,7 @@ static const llc_conn_action_t llc_reject_actions_10c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_10c,
@@ -2480,7 +2480,7 @@ static const llc_conn_action_t llc_reject_actions_11[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_11 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_11 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2495,7 +2495,7 @@ static const llc_conn_action_t llc_reject_actions_12a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2510,7 +2510,7 @@ static const llc_conn_action_t llc_reject_actions_12b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2530,7 +2530,7 @@ static const llc_conn_action_t llc_reject_actions_12c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_12c,
@@ -2545,7 +2545,7 @@ static const llc_conn_action_t llc_reject_actions_13[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_13 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2567,7 +2567,7 @@ static const llc_conn_action_t llc_reject_actions_14a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_14a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_14a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_14a,
@@ -2589,7 +2589,7 @@ static const llc_conn_action_t llc_reject_actions_14b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_14b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_14b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_14b,
@@ -2610,7 +2610,7 @@ static const llc_conn_action_t llc_reject_actions_15a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_15a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_15a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_15a,
@@ -2631,7 +2631,7 @@ static const llc_conn_action_t llc_reject_actions_15b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_15b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_15b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_15b,
@@ -2647,7 +2647,7 @@ static const llc_conn_action_t llc_reject_actions_16[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_16 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_16 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2666,7 +2666,7 @@ static const llc_conn_action_t llc_reject_actions_17[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_17 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_17 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_17,
@@ -2688,7 +2688,7 @@ static const llc_conn_action_t llc_reject_actions_18[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_18 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_18 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_18,
@@ -2710,7 +2710,7 @@ static const llc_conn_action_t llc_reject_actions_19[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_19 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_19 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_19,
@@ -2733,7 +2733,7 @@ static const llc_conn_action_t llc_reject_actions_20a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_20a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_20a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_20a,
@@ -2756,7 +2756,7 @@ static const llc_conn_action_t llc_reject_actions_20b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_20b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_20b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_20b,
@@ -2767,7 +2767,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_20b = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_reject_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_reject_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_common_state_trans_end,
@@ -2834,7 +2834,7 @@ static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_ev_qfyrs_1_0,
@@ -2848,7 +2848,7 @@ static const llc_conn_action_t llc_await_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_state_trans_1 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -2867,7 +2867,7 @@ static const llc_conn_action_t llc_await_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_2 = {
+static const struct llc_conn_state_trans llc_await_state_trans_2 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2883,7 +2883,7 @@ static const llc_conn_action_t llc_await_actions_3a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_3a = {
+static const struct llc_conn_state_trans llc_await_state_trans_3a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2899,7 +2899,7 @@ static const llc_conn_action_t llc_await_actions_3b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_3b = {
+static const struct llc_conn_state_trans llc_await_state_trans_3b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2916,7 +2916,7 @@ static const llc_conn_action_t llc_await_actions_4[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_state_trans_4 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2935,7 +2935,7 @@ static const llc_conn_action_t llc_await_actions_5[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_5 = {
+static const struct llc_conn_state_trans llc_await_state_trans_5 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -2952,7 +2952,7 @@ static const llc_conn_action_t llc_await_actions_6a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_6a = {
+static const struct llc_conn_state_trans llc_await_state_trans_6a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -2969,7 +2969,7 @@ static const llc_conn_action_t llc_await_actions_6b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_6b = {
+static const struct llc_conn_state_trans llc_await_state_trans_6b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -2986,7 +2986,7 @@ static const llc_conn_action_t llc_await_actions_7[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_7 = {
+static const struct llc_conn_state_trans llc_await_state_trans_7 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3003,7 +3003,7 @@ static const llc_conn_action_t llc_await_actions_8a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_state_trans_8a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3020,7 +3020,7 @@ static const llc_conn_action_t llc_await_actions_8b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_state_trans_8b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3035,7 +3035,7 @@ static const llc_conn_action_t llc_await_actions_9a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9a = {
+static const struct llc_conn_state_trans llc_await_state_trans_9a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3050,7 +3050,7 @@ static const llc_conn_action_t llc_await_actions_9b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9b = {
+static const struct llc_conn_state_trans llc_await_state_trans_9b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3065,7 +3065,7 @@ static const llc_conn_action_t llc_await_actions_9c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9c = {
+static const struct llc_conn_state_trans llc_await_state_trans_9c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3080,7 +3080,7 @@ static const llc_conn_action_t llc_await_actions_9d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9d = {
+static const struct llc_conn_state_trans llc_await_state_trans_9d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3096,7 +3096,7 @@ static const llc_conn_action_t llc_await_actions_10a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_10a = {
+static const struct llc_conn_state_trans llc_await_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3112,7 +3112,7 @@ static const llc_conn_action_t llc_await_actions_10b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_10b = {
+static const struct llc_conn_state_trans llc_await_state_trans_10b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3128,7 +3128,7 @@ static const llc_conn_action_t llc_await_actions_11[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_11 = {
+static const struct llc_conn_state_trans llc_await_state_trans_11 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3143,7 +3143,7 @@ static const llc_conn_action_t llc_await_actions_12a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_12a = {
+static const struct llc_conn_state_trans llc_await_state_trans_12a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3158,7 +3158,7 @@ static const llc_conn_action_t llc_await_actions_12b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_12b = {
+static const struct llc_conn_state_trans llc_await_state_trans_12b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3174,7 +3174,7 @@ static const llc_conn_action_t llc_await_actions_13[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3194,7 +3194,7 @@ static const llc_conn_action_t llc_await_actions_14[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_14 = {
+static const struct llc_conn_state_trans llc_await_state_trans_14 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_ev_qfyrs_14,
@@ -3205,7 +3205,7 @@ static struct llc_conn_state_trans llc_await_state_trans_14 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_await_state_trans_1_0,
@@ -3263,7 +3263,7 @@ static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_busy_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_await_busy_ev_qfyrs_1_0,
@@ -3282,7 +3282,7 @@ static const llc_conn_action_t llc_await_busy_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_busy_ev_qfyrs_1,
@@ -3300,7 +3300,7 @@ static const llc_conn_action_t llc_await_busy_actions_2[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_busy_ev_qfyrs_2,
@@ -3318,7 +3318,7 @@ static const llc_conn_action_t llc_await_busy_actions_3[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_busy_ev_qfyrs_3,
@@ -3337,7 +3337,7 @@ static const llc_conn_action_t llc_await_busy_actions_4[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3353,7 +3353,7 @@ static const llc_conn_action_t llc_await_busy_actions_5a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3369,7 +3369,7 @@ static const llc_conn_action_t llc_await_busy_actions_5b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_5b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3385,7 +3385,7 @@ static const llc_conn_action_t llc_await_busy_actions_6[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_6 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3406,7 +3406,7 @@ static const llc_conn_action_t llc_await_busy_actions_7[] = {
[9] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_7 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_7 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3424,7 +3424,7 @@ static const llc_conn_action_t llc_await_busy_actions_8a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3442,7 +3442,7 @@ static const llc_conn_action_t llc_await_busy_actions_8b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3460,7 +3460,7 @@ static const llc_conn_action_t llc_await_busy_actions_9[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_9 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_9 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3477,7 +3477,7 @@ static const llc_conn_action_t llc_await_busy_actions_10a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_10a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3494,7 +3494,7 @@ static const llc_conn_action_t llc_await_busy_actions_10b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_10b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_10b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3509,7 +3509,7 @@ static const llc_conn_action_t llc_await_busy_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3524,7 +3524,7 @@ static const llc_conn_action_t llc_await_busy_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3539,7 +3539,7 @@ static const llc_conn_action_t llc_await_busy_actions_11c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11c = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3554,7 +3554,7 @@ static const llc_conn_action_t llc_await_busy_actions_11d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11d = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3570,7 +3570,7 @@ static const llc_conn_action_t llc_await_busy_actions_12a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_12a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_12a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3586,7 +3586,7 @@ static const llc_conn_action_t llc_await_busy_actions_12b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_12b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_12b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3602,7 +3602,7 @@ static const llc_conn_action_t llc_await_busy_actions_13[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3617,7 +3617,7 @@ static const llc_conn_action_t llc_await_busy_actions_14a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_14a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_14a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3632,7 +3632,7 @@ static const llc_conn_action_t llc_await_busy_actions_14b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_14b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_14b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3648,7 +3648,7 @@ static const llc_conn_action_t llc_await_busy_actions_15[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_15 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_15 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3668,7 +3668,7 @@ static const llc_conn_action_t llc_await_busy_actions_16[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_await_busy_ev_qfyrs_16,
@@ -3679,7 +3679,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_busy_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_await_busy_state_trans_1_0,
@@ -3739,7 +3739,7 @@ static const llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_reject_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_reject_ev_qfyrs_1_0,
@@ -3753,7 +3753,7 @@ static const llc_conn_action_t llc_await_rejct_actions_1[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_1 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3767,7 +3767,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2a[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_2a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3781,7 +3781,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2b[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_2b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3796,7 +3796,7 @@ static const llc_conn_action_t llc_await_rejct_actions_3[] = {
[3] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_3 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3816,7 +3816,7 @@ static const llc_conn_action_t llc_await_rejct_actions_4[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_4 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3834,7 +3834,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_5a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3852,7 +3852,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_5b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3870,7 +3870,7 @@ static const llc_conn_action_t llc_await_rejct_actions_6[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3887,7 +3887,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3904,7 +3904,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3921,7 +3921,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7c[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3936,7 +3936,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3951,7 +3951,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3966,7 +3966,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3981,7 +3981,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3997,7 +3997,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_9a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4013,7 +4013,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_9b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4029,7 +4029,7 @@ static const llc_conn_action_t llc_await_rejct_actions_10[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_10 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -4044,7 +4044,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_11a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4059,7 +4059,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_11b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4075,7 +4075,7 @@ static const llc_conn_action_t llc_await_rejct_actions_12[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_12 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4095,7 +4095,7 @@ static const llc_conn_action_t llc_await_rejct_actions_13[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_rejct_ev_qfyrs_13,
@@ -4106,7 +4106,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = {
[0] = &llc_await_reject_state_trans_1_0,
[1] = &llc_common_state_trans_1, /* requests */
[2] = &llc_common_state_trans_2,
@@ -4171,7 +4171,7 @@ static const llc_conn_action_t llc_d_conn_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_1,
@@ -4194,7 +4194,7 @@ static const llc_conn_action_t llc_d_conn_actions_1_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_1_1,
@@ -4218,7 +4218,7 @@ static const llc_conn_action_t llc_d_conn_actions_2[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_2 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_2,
@@ -4241,7 +4241,7 @@ static const llc_conn_action_t llc_d_conn_actions_2_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_2_1,
@@ -4254,7 +4254,7 @@ static const llc_conn_action_t llc_d_conn_actions_3[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_3 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_3 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = NONE,
@@ -4277,7 +4277,7 @@ static const llc_conn_action_t llc_d_conn_actions_4[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_4 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_4 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_4,
@@ -4299,7 +4299,7 @@ static const llc_conn_action_t llc_d_conn_actions_4_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_4_1,
@@ -4318,7 +4318,7 @@ static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_d_conn_actions_5[1];
-static struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = llc_d_conn_ev_qfyrs_5,
@@ -4338,7 +4338,7 @@ static const llc_conn_action_t llc_d_conn_actions_6[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_6 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_6 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = llc_d_conn_ev_qfyrs_6,
@@ -4359,7 +4359,7 @@ static const llc_conn_action_t llc_d_conn_actions_7[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_7 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_7,
@@ -4379,7 +4379,7 @@ static const llc_conn_action_t llc_d_conn_actions_8[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_8,
@@ -4390,7 +4390,7 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_d_conn_state_transitions[] = {
[0] = &llc_d_conn_state_trans_5, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4419,7 +4419,7 @@ static const llc_conn_action_t llc_rst_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -4447,7 +4447,7 @@ static const llc_conn_action_t llc_rst_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_2 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_2,
@@ -4475,7 +4475,7 @@ static const llc_conn_action_t llc_rst_actions_2_1[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_2_1 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_2_1,
@@ -4495,7 +4495,7 @@ static const llc_conn_action_t llc_rst_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_3 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_3 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_3,
@@ -4518,7 +4518,7 @@ static const llc_conn_action_t llc_rst_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_4 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_4,
@@ -4541,7 +4541,7 @@ static const llc_conn_action_t llc_rst_actions_4_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_4_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_4_1 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_4_1,
@@ -4564,7 +4564,7 @@ static const llc_conn_action_t llc_rst_actions_5[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_5 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_5 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_5,
@@ -4586,7 +4586,7 @@ static const llc_conn_action_t llc_rst_actions_5_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_5_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_5_1 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_5_1,
@@ -4602,7 +4602,7 @@ static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_rst_actions_6[1];
-static struct llc_conn_state_trans llc_rst_state_trans_6 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_6 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_rst_ev_qfyrs_6,
@@ -4623,7 +4623,7 @@ static const llc_conn_action_t llc_rst_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_7 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_rst_ev_qfyrs_7,
@@ -4644,7 +4644,7 @@ static const llc_conn_action_t llc_rst_actions_8[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_8 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_8,
@@ -4665,7 +4665,7 @@ static const llc_conn_action_t llc_rst_actions_8_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_8_1,
@@ -4676,7 +4676,7 @@ static struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_rst_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_rst_state_transitions[] = {
[0] = &llc_rst_state_trans_6, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4710,7 +4710,7 @@ static const llc_conn_action_t llc_error_actions_1[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_1 = {
+static const struct llc_conn_state_trans llc_error_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -4726,7 +4726,7 @@ static const llc_conn_action_t llc_error_actions_2[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_2 = {
+static const struct llc_conn_state_trans llc_error_state_trans_2 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4741,7 +4741,7 @@ static const llc_conn_action_t llc_error_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_3 = {
+static const struct llc_conn_state_trans llc_error_state_trans_3 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4757,7 +4757,7 @@ static const llc_conn_action_t llc_error_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_4 = {
+static const struct llc_conn_state_trans llc_error_state_trans_4 = {
.ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -4770,7 +4770,7 @@ static const llc_conn_action_t llc_error_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_5 = {
+static const struct llc_conn_state_trans llc_error_state_trans_5 = {
.ev = llc_conn_ev_rx_xxx_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -4778,7 +4778,7 @@ static struct llc_conn_state_trans llc_error_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */
-static struct llc_conn_state_trans llc_error_state_trans_6 = {
+static const struct llc_conn_state_trans llc_error_state_trans_6 = {
.ev = llc_conn_ev_rx_xxx_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -4798,7 +4798,7 @@ static const llc_conn_action_t llc_error_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_7 = {
+static const struct llc_conn_state_trans llc_error_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_error_ev_qfyrs_7,
@@ -4820,7 +4820,7 @@ static const llc_conn_action_t llc_error_actions_8[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_8 = {
+static const struct llc_conn_state_trans llc_error_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_error_ev_qfyrs_8,
@@ -4836,7 +4836,7 @@ static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_error_actions_9[1];
-static struct llc_conn_state_trans llc_error_state_trans_9 = {
+static const struct llc_conn_state_trans llc_error_state_trans_9 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_error_ev_qfyrs_9,
@@ -4847,7 +4847,7 @@ static struct llc_conn_state_trans llc_error_state_trans_9 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_error_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_error_state_transitions[] = {
[0] = &llc_error_state_trans_9, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4873,7 +4873,7 @@ static const llc_conn_action_t llc_temp_actions_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_temp_state_trans_1 = {
+static const struct llc_conn_state_trans llc_temp_state_trans_1 = {
.ev = llc_conn_ev_disc_req,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4884,7 +4884,7 @@ static struct llc_conn_state_trans llc_temp_state_trans_1 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_temp_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_temp_state_transitions[] = {
[0] = &llc_temp_state_trans_1, /* requests */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* local busy */
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 0a3f5e0bec00..afc6974eafda 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -34,10 +34,10 @@ static int llc_find_offset(int state, int ev_type);
static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
- struct llc_conn_state_trans *trans,
+ const struct llc_conn_state_trans *trans,
struct sk_buff *ev);
-static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
- struct sk_buff *skb);
+static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
+ struct sk_buff *skb);
/* Offset table on connection states transition diagram */
static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
@@ -356,9 +356,9 @@ static void llc_conn_send_pdus(struct sock *sk)
*/
static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
{
- int rc = 1;
+ const struct llc_conn_state_trans *trans;
struct llc_sock *llc = llc_sk(sk);
- struct llc_conn_state_trans *trans;
+ int rc = 1;
if (llc->state > NBR_CONN_STATES)
goto out;
@@ -384,10 +384,10 @@ out:
* This function finds transition that matches with happened event.
* Returns pointer to found transition on success, %NULL otherwise.
*/
-static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
- struct sk_buff *skb)
+static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
+ struct sk_buff *skb)
{
- struct llc_conn_state_trans **next_trans;
+ const struct llc_conn_state_trans **next_trans;
const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
@@ -432,7 +432,7 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
* success, 1 to indicate failure of at least one action.
*/
static int llc_exec_conn_trans_actions(struct sock *sk,
- struct llc_conn_state_trans *trans,
+ const struct llc_conn_state_trans *trans,
struct sk_buff *skb)
{
int rc = 0;
@@ -635,8 +635,8 @@ u8 llc_data_accept_state(u8 state)
*/
static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset)
{
+ const struct llc_conn_state_trans **next_trans;
u16 cnt = 0;
- struct llc_conn_state_trans **next_trans;
for (next_trans = state->transitions + offset;
(*next_trans)->ev; next_trans++)
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 51bccfb00a9c..61b0159b2fbe 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, llc_len)))
return 0;
- skb->transport_header += llc_len;
skb_pull(skb, llc_len);
+ skb_reset_transport_header(skb);
if (skb->protocol == htons(ETH_P_802_2)) {
__be16 pdulen;
s32 data_size;
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 06fb8e6944b0..7a0cae9a8111 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -24,7 +24,7 @@
#include <net/llc_s_ac.h>
#include <net/llc_s_ev.h>
#include <net/llc_sap.h>
-
+#include <net/sock.h>
/**
* llc_sap_action_unitdata_ind - forward UI PDU to network layer
@@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
return 0;
}
+static int llc_prepare_and_xmit(struct sk_buff *skb)
+{
+ struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+ struct sk_buff *nskb;
+ int rc;
+
+ rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+ if (rc)
+ return rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+
+ return dev_queue_xmit(nskb);
+}
+
/**
* llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer
* @sap: SAP
@@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
/**
@@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
/**
@@ -133,17 +143,12 @@ out:
int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c
index 308c616883a4..acccc827c562 100644
--- a/net/llc/llc_s_st.c
+++ b/net/llc/llc_s_st.c
@@ -24,7 +24,7 @@
* last entry for this state
* all members are zeros, .bss zeroes it
*/
-static struct llc_sap_state_trans llc_sap_state_trans_end;
+static const struct llc_sap_state_trans llc_sap_state_trans_end;
/* state LLC_SAP_STATE_INACTIVE transition for
* LLC_SAP_EV_ACTIVATION_REQ event
@@ -34,14 +34,14 @@ static const llc_sap_action_t llc_sap_inactive_state_actions_1[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = {
+static const struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = {
.ev = llc_sap_ev_activation_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_inactive_state_actions_1,
};
/* array of pointers; one to each transition */
-static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = {
+static const struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = {
[0] = &llc_sap_inactive_state_trans_1,
[1] = &llc_sap_state_trans_end,
};
@@ -52,7 +52,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_1[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_1 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_1 = {
.ev = llc_sap_ev_rx_ui,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_1,
@@ -64,7 +64,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_2[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_2 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_2 = {
.ev = llc_sap_ev_unitdata_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_2,
@@ -76,7 +76,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_3[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_3 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_3 = {
.ev = llc_sap_ev_xid_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_3,
@@ -88,7 +88,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_4[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_4 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_4 = {
.ev = llc_sap_ev_rx_xid_c,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_4,
@@ -100,7 +100,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_5[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_5 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_5 = {
.ev = llc_sap_ev_rx_xid_r,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_5,
@@ -112,7 +112,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_6[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_6 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_6 = {
.ev = llc_sap_ev_test_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_6,
@@ -124,7 +124,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_7[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_7 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_7 = {
.ev = llc_sap_ev_rx_test_c,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_7
@@ -136,7 +136,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_8[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_8 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_8 = {
.ev = llc_sap_ev_rx_test_r,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_8,
@@ -150,14 +150,14 @@ static const llc_sap_action_t llc_sap_active_state_actions_9[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_9 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_9 = {
.ev = llc_sap_ev_deactivation_req,
.next_state = LLC_SAP_STATE_INACTIVE,
.ev_actions = llc_sap_active_state_actions_9
};
/* array of pointers; one to each transition */
-static struct llc_sap_state_trans *llc_sap_active_state_transitions[] = {
+static const struct llc_sap_state_trans *llc_sap_active_state_transitions[] = {
[0] = &llc_sap_active_state_trans_2,
[1] = &llc_sap_active_state_trans_1,
[2] = &llc_sap_active_state_trans_3,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 116c0e479183..6cd03c2ae7d5 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -114,12 +114,12 @@ void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb)
* Returns the pointer to found transition on success or %NULL for
* failure.
*/
-static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
- struct sk_buff *skb)
+static const struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
+ struct sk_buff *skb)
{
int i = 0;
- struct llc_sap_state_trans *rc = NULL;
- struct llc_sap_state_trans **next_trans;
+ const struct llc_sap_state_trans *rc = NULL;
+ const struct llc_sap_state_trans **next_trans;
struct llc_sap_state *curr_state = &llc_sap_state_table[sap->state - 1];
/*
* Search thru events for this state until list exhausted or until
@@ -143,7 +143,7 @@ static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
* Returns 0 for success and 1 for failure of at least one action.
*/
static int llc_exec_sap_trans_actions(struct llc_sap *sap,
- struct llc_sap_state_trans *trans,
+ const struct llc_sap_state_trans *trans,
struct sk_buff *skb)
{
int rc = 0;
@@ -166,8 +166,8 @@ static int llc_exec_sap_trans_actions(struct llc_sap *sap,
*/
static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb)
{
+ const struct llc_sap_state_trans *trans;
int rc = 1;
- struct llc_sap_state_trans *trans;
if (sap->state > LLC_NR_SAP_STATES)
goto out;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 8443a6d841b0..c8d88e2508fc 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -11,10 +11,6 @@
#include <net/net_namespace.h>
#include <net/llc.h>
-#ifndef CONFIG_SYSCTL
-#error This file should not be compiled without CONFIG_SYSCTL defined
-#endif
-
static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
@@ -44,11 +40,6 @@ static struct ctl_table llc2_timeout_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { },
-};
-
-static struct ctl_table llc_station_table[] = {
- { },
};
static struct ctl_table_header *llc2_timeout_header;
@@ -56,8 +47,9 @@ static struct ctl_table_header *llc_station_header;
int __init llc_sysctl_init(void)
{
+ struct ctl_table empty[1] = {};
llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
- llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
+ llc_station_header = register_net_sysctl_sz(&init_net, "net/llc/station", empty, 0);
if (!llc2_timeout_header || !llc_station_header) {
llc_sysctl_exit();
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 13438cc0a6b1..cf0f7780fb10 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,7 +96,7 @@ config MAC80211_DEBUGFS
config MAC80211_MESSAGE_TRACING
bool "Trace all mac80211 debug messages"
- depends on MAC80211
+ depends on MAC80211 && TRACING
help
Select this option to have mac80211 register the
mac80211_msg trace subsystem with tracepoints to
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4406b4f8f3b9..a33884967f21 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -29,7 +29,7 @@ mac80211-y := \
spectmgmt.o \
tx.o \
key.o \
- util.o \
+ util.o parse.o \
wme.o \
chan.o \
trace.o mlme.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9bffac7a4974..f3fbe5a4395e 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -170,28 +170,63 @@ static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
rcu_read_unlock();
}
-static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- const struct ieee80211_addba_ext_ie *req,
- u16 buf_size)
+void ieee80211_add_addbaext(struct sk_buff *skb,
+ const u8 req_addba_ext_data,
+ u16 buf_size)
{
- struct ieee80211_addba_ext_ie *resp;
+ struct ieee80211_addba_ext_ie *addba_ext;
u8 *pos;
pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie));
*pos++ = WLAN_EID_ADDBA_EXT;
*pos++ = sizeof(struct ieee80211_addba_ext_ie);
- resp = (struct ieee80211_addba_ext_ie *)pos;
- resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG;
+ addba_ext = (struct ieee80211_addba_ext_ie *)pos;
- resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT,
- IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
+ addba_ext->data = IEEE80211_ADDBA_EXT_NO_FRAG;
+ if (req_addba_ext_data)
+ addba_ext->data &= req_addba_ext_data;
+
+ addba_ext->data |=
+ u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT,
+ IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
+}
+
+u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta,
+ const void *elem_data, ssize_t elem_len,
+ u16 *buf_size)
+{
+ struct ieee802_11_elems *elems;
+ u8 buf_size_1k, data = 0;
+
+ if (!sta->sta.deflink.he_cap.has_he)
+ return 0;
+
+ if (elem_len <= 0)
+ return 0;
+
+ elems = ieee802_11_parse_elems(elem_data, elem_len, true, NULL);
+
+ if (elems && !elems->parse_error && elems->addba_ext_ie) {
+ data = elems->addba_ext_ie->data;
+
+ if (!sta->sta.deflink.eht_cap.has_eht || !buf_size)
+ goto free;
+
+ buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
+ IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
+ *buf_size |= (u16)buf_size_1k <<
+ IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
+ }
+free:
+ kfree(elems);
+
+ return data;
}
static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
u8 dialog_token, u16 status, u16 policy,
u16 buf_size, u16 timeout,
- const struct ieee80211_addba_ext_ie *addbaext)
+ const u8 req_addba_ext_data)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
@@ -207,20 +242,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = skb_put_zero(skb, 24);
- memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
-
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION);
+ mgmt = ieee80211_mgmt_ba(skb, da, sdata);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
mgmt->u.action.category = WLAN_CATEGORY_BACK;
@@ -236,8 +258,8 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
- if (sta->sta.deflink.he_cap.has_he && addbaext)
- ieee80211_add_addbaext(sdata, skb, addbaext, buf_size);
+ if (sta->sta.deflink.he_cap.has_he)
+ ieee80211_add_addbaext(skb, req_addba_ext_data, buf_size);
ieee80211_tx_skb(sdata, skb);
}
@@ -246,7 +268,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
u16 buf_size, bool tx, bool auto_seq,
- const struct ieee80211_addba_ext_ie *addbaext)
+ const u8 addba_ext_data)
{
struct ieee80211_local *local = sta->sdata->local;
struct tid_ampdu_rx *tid_agg_rx;
@@ -432,7 +454,7 @@ end:
if (tx)
ieee80211_send_addba_resp(sta, sta->sta.addr, tid,
dialog_token, status, 1, buf_size,
- timeout, addbaext);
+ timeout, addba_ext_data);
}
void ieee80211_process_addba_request(struct ieee80211_local *local,
@@ -441,9 +463,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
size_t len)
{
u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num;
- struct ieee802_11_elems *elems = NULL;
- u8 dialog_token;
- int ies_len;
+ u8 dialog_token, addba_ext_data;
/* extract session parameters from addba request frame */
dialog_token = mgmt->u.action.u.addba_req.dialog_token;
@@ -456,28 +476,17 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
- ies_len = len - offsetof(struct ieee80211_mgmt,
- u.action.u.addba_req.variable);
- if (ies_len) {
- elems = ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable,
- ies_len, true, NULL);
- if (!elems || elems->parse_error)
- goto free;
- }
-
- if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) {
- u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
- IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
-
- buf_size |= buf_size_1k << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
- }
+ addba_ext_data =
+ ieee80211_retrieve_addba_ext_data(sta,
+ mgmt->u.action.u.addba_req.variable,
+ len -
+ offsetof(typeof(*mgmt),
+ u.action.u.addba_req.variable),
+ &buf_size);
__ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
start_seq_num, ba_policy, tid,
- buf_size, true, false,
- elems ? elems->addba_ext_ie : NULL);
-free:
- kfree(elems);
+ buf_size, true, false, addba_ext_data);
}
void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b8a278355e18..61f2cac37728 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,36 +58,24 @@
* complete.
*/
-static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
- const u8 *da, u16 tid,
+static void ieee80211_send_addba_request(struct sta_info *sta, u16 tid,
u8 dialog_token, u16 start_seq_num,
u16 agg_size, u16 timeout)
{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u16 capab;
- skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
+ skb = dev_alloc_skb(sizeof(*mgmt) +
+ 2 + sizeof(struct ieee80211_addba_ext_ie) +
+ local->hw.extra_tx_headroom);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = skb_put_zero(skb, 24);
- memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
-
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION);
+ mgmt = ieee80211_mgmt_ba(skb, sta->sta.addr, sdata);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
@@ -106,6 +94,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
+ if (sta->sta.deflink.he_cap.has_he)
+ ieee80211_add_addbaext(skb, 0, agg_size);
+
ieee80211_tx_skb_tid(sdata, skb, tid, -1);
}
@@ -473,8 +464,11 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
- if (sta->sta.deflink.he_cap.has_he) {
+ if (sta->sta.deflink.eht_cap.has_eht) {
buf_size = local->hw.max_tx_aggregation_subframes;
+ } else if (sta->sta.deflink.he_cap.has_he) {
+ buf_size = min_t(u16, local->hw.max_tx_aggregation_subframes,
+ IEEE80211_MAX_AMPDU_BUF_HE);
} else {
/*
* We really should use what the driver told us it will
@@ -486,9 +480,8 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
}
/* send AddBA request */
- ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
- tid_tx->dialog_token, tid_tx->ssn,
- buf_size, tid_tx->timeout);
+ ieee80211_send_addba_request(sta, tid, tid_tx->dialog_token,
+ tid_tx->ssn, buf_size, tid_tx->timeout);
WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
}
@@ -616,7 +609,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
+ !pubsta->deflink.vht_cap.vht_supported &&
+ !pubsta->deflink.he_cap.has_he &&
+ !pubsta->deflink.eht_cap.has_eht)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
@@ -808,7 +803,7 @@ void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) {
ieee80211_send_addba_with_timeout(sta, tid_tx);
- /* RESPONSE_RECEIVED state whould trigger the flow again */
+ /* RESPONSE_RECEIVED state would trigger the flow again */
return;
}
@@ -981,6 +976,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
+
+ ieee80211_retrieve_addba_ext_data(sta,
+ mgmt->u.action.u.addba_resp.variable,
+ len - offsetof(typeof(*mgmt),
+ u.action.u.addba_resp.variable),
+ &buf_size);
+
buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
txq = sta->sta.txq[tid];
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index fdf8b658fede..c61df637232a 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -55,10 +55,21 @@
#define HE_DURATION_S(shift, streams, gi, bps) \
(HE_DURATION(streams, gi, bps) >> shift)
+/* gi in HE/EHT is identical. It matches enum nl80211_eht_gi as well */
+#define EHT_GI_08 HE_GI_08
+#define EHT_GI_16 HE_GI_16
+#define EHT_GI_32 HE_GI_32
+
+#define EHT_DURATION(streams, gi, bps) \
+ HE_DURATION(streams, gi, bps)
+#define EHT_DURATION_S(shift, streams, gi, bps) \
+ HE_DURATION_S(shift, streams, gi, bps)
+
#define BW_20 0
#define BW_40 1
#define BW_80 2
#define BW_160 3
+#define BW_320 4
/*
* Define group sort order: HT40 -> SGI -> #streams
@@ -68,17 +79,26 @@
#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
#define IEEE80211_HE_MAX_STREAMS 8
+#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */
+
+#define IEEE80211_EHT_MAX_STREAMS 8
+#define IEEE80211_EHT_STREAM_GROUPS 15 /* BW(=5) * GI(=3) */
#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_HT_STREAM_GROUPS)
#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_VHT_STREAM_GROUPS)
+#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \
+ IEEE80211_HE_STREAM_GROUPS)
+#define IEEE80211_EHT_GROUPS_NB (IEEE80211_EHT_MAX_STREAMS * \
+ IEEE80211_EHT_STREAM_GROUPS)
#define IEEE80211_HT_GROUP_0 0
#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB)
+#define IEEE80211_EHT_GROUP_0 (IEEE80211_HE_GROUP_0 + IEEE80211_HE_GROUPS_NB)
-#define MCS_GROUP_RATES 12
+#define MCS_GROUP_RATES 14
#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
IEEE80211_HT_GROUP_0 + \
@@ -203,6 +223,69 @@
#define HE_GROUP(_streams, _gi, _bw) \
__HE_GROUP(_streams, _gi, _bw, \
HE_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_BW2VBPS(_bw, r5, r4, r3, r2, r1) \
+ ((_bw) == BW_320 ? r5 : BW2VBPS(_bw, r4, r3, r2, r1))
+
+#define EHT_GROUP_IDX(_streams, _gi, _bw) \
+ (IEEE80211_EHT_GROUP_0 + \
+ IEEE80211_EHT_MAX_STREAMS * 3 * (_bw) + \
+ IEEE80211_EHT_MAX_STREAMS * (_gi) + \
+ (_streams) - 1)
+
+#define __EHT_GROUP(_streams, _gi, _bw, _s) \
+ [EHT_GROUP_IDX(_streams, _gi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 3920, 1960, 980, 468, 234)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 5880, 2937, 1470, 702, 351)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 7840, 3920, 1960, 936, 468)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 11760, 5880, 2940, 1404, 702)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 15680, 7840, 3920, 1872, 936)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 17640, 8820, 4410, 2106, 1053)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 19600, 9800, 4900, 2340, 1170)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 23520, 11760, 5880, 2808, 1404)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 26133, 13066, 6533, 3120, 1560)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 29400, 14700, 7350, 3510, 1755)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 32666, 16333, 8166, 3900, 1950)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 35280, 17640, 8820, 4212, 2106)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 39200, 19600, 9800, 4680, 2340)) \
+ } \
+}
+
+#define EHT_GROUP_SHIFT(_streams, _gi, _bw) \
+ GROUP_SHIFT(EHT_DURATION(_streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)))
+
+#define EHT_GROUP(_streams, _gi, _bw) \
+ __EHT_GROUP(_streams, _gi, _bw, \
+ EHT_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_GROUP_RANGE(_gi, _bw) \
+ EHT_GROUP(1, _gi, _bw), \
+ EHT_GROUP(2, _gi, _bw), \
+ EHT_GROUP(3, _gi, _bw), \
+ EHT_GROUP(4, _gi, _bw), \
+ EHT_GROUP(5, _gi, _bw), \
+ EHT_GROUP(6, _gi, _bw), \
+ EHT_GROUP(7, _gi, _bw), \
+ EHT_GROUP(8, _gi, _bw)
+
struct mcs_group {
u8 shift;
u16 duration[MCS_GROUP_RATES];
@@ -376,6 +459,26 @@ static const struct mcs_group airtime_mcs_groups[] = {
HE_GROUP(6, HE_GI_32, BW_160),
HE_GROUP(7, HE_GI_32, BW_160),
HE_GROUP(8, HE_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_20),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_40),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_80),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_320),
};
static u32
@@ -422,6 +525,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
case RATE_INFO_BW_160:
bw = BW_160;
break;
+ case RATE_INFO_BW_320:
+ bw = BW_320;
+ break;
default:
WARN_ON_ONCE(1);
return 0;
@@ -443,14 +549,27 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
idx = status->rate_idx;
group = HE_GROUP_IDX(streams, status->he_gi, bw);
break;
+ case RX_ENC_EHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = EHT_GROUP_IDX(streams, status->eht.gi, bw);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
- if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) ||
- (status->encoding == RX_ENC_HE && streams > 8)))
- return 0;
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ case RX_ENC_HE:
+ if (WARN_ON_ONCE(streams > 8))
+ return 0;
+ break;
+ default:
+ if (WARN_ON_ONCE(streams > 4))
+ return 0;
+ break;
+ }
if (idx >= MCS_GROUP_RATES)
return 0;
@@ -517,7 +636,9 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
stat->nss = ri->nss;
stat->rate_idx = ri->mcs;
- if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ if (ri->flags & RATE_INFO_FLAGS_EHT_MCS)
+ stat->encoding = RX_ENC_EHT;
+ else if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
stat->encoding = RX_ENC_HE;
else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
stat->encoding = RX_ENC_VHT;
@@ -529,7 +650,14 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat->he_gi = ri->he_gi;
+ switch (stat->encoding) {
+ case RX_ENC_EHT:
+ stat->eht.gi = ri->eht_gi;
+ break;
+ default:
+ stat->he_gi = ri->he_gi;
+ break;
+ }
if (stat->encoding != RX_ENC_LEGACY)
return true;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 327682995c92..9351c64608a9 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -105,8 +105,11 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
}
/* also validate MU-MIMO change */
- monitor_sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
+ if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
+ monitor_sdata = sdata;
+ else
+ monitor_sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
if (!monitor_sdata &&
(params->vht_mumimo_groups || params->vht_mumimo_follow_addr))
@@ -114,7 +117,9 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
/* apply all changes now - no failures allowed */
- if (monitor_sdata)
+ if (monitor_sdata &&
+ (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) ||
+ ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)))
ieee80211_set_mu_mimo_follow(monitor_sdata, params);
if (params->flags) {
@@ -138,7 +143,7 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
}
static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_mbssid_config params,
+ struct cfg80211_mbssid_config *params,
struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_sub_if_data *tx_sdata;
@@ -149,10 +154,10 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
- if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
+ if (sdata->vif.type != NL80211_IFTYPE_AP || !params->tx_wdev)
return -EINVAL;
- tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params.tx_wdev);
+ tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params->tx_wdev);
if (!tx_sdata)
return -EINVAL;
@@ -161,9 +166,9 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
} else {
sdata->vif.mbssid_tx_vif = &tx_sdata->vif;
link_conf->nontransmitted = true;
- link_conf->bssid_index = params.index;
+ link_conf->bssid_index = params->index;
}
- if (params.ema)
+ if (params->ema)
link_conf->ema_ap = true;
return 0;
@@ -194,6 +199,24 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
}
}
+ /* Let the driver know that an interface is going to be added.
+ * Indicate so only for interface types that will be added to the
+ * driver.
+ */
+ switch (type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) ||
+ !(params->flags & MONITOR_FLAG_ACTIVE))
+ break;
+ fallthrough;
+ default:
+ drv_prep_add_interface(local,
+ ieee80211_vif_type_p2p(&sdata->vif));
+ break;
+ }
+
return wdev;
}
@@ -263,7 +286,7 @@ static int ieee80211_start_p2p_device(struct wiphy *wiphy,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+ ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
if (ret < 0)
return ret;
@@ -285,7 +308,7 @@ static int ieee80211_start_nan(struct wiphy *wiphy,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+ ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
if (ret < 0)
return ret;
@@ -480,6 +503,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
if (IS_ERR(link))
return PTR_ERR(link);
+ if (WARN_ON(pairwise && link_id >= 0))
+ return -EINVAL;
+
if (pairwise && params->mode == NL80211_KEY_SET_TX)
return ieee80211_set_tx(sdata, mac_addr, key_idx);
@@ -502,10 +528,12 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
if (IS_ERR(key))
return PTR_ERR(key);
- key->conf.link_id = link_id;
-
- if (pairwise)
+ if (pairwise) {
key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
+ key->conf.link_id = -1;
+ } else {
+ key->conf.link_id = link->link_id;
+ }
if (params->mode == NL80211_KEY_NO_TX)
key->conf.flags |= IEEE80211_KEY_FLAG_NO_AUTO_TX;
@@ -742,9 +770,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
break;
}
- params.key = key->conf.key;
- params.key_len = key->conf.keylen;
-
callback(cookie, &params);
err = 0;
@@ -882,37 +907,40 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- int ret = 0;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
+ int ret;
lockdep_assert_wiphy(local->hw.wiphy);
- if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
- return 0;
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
+ if (cfg80211_chandef_identical(&local->monitor_chanreq.oper,
+ &chanreq.oper))
+ return 0;
- if (local->use_chanctx) {
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata) {
- ieee80211_link_release_channel(&sdata->deflink);
- ret = ieee80211_link_use_channel(&sdata->deflink,
- chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- }
- } else {
- if (local->open_count == local->monitors) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
+ sdata = wiphy_dereference(wiphy, local->monitor_sdata);
+ if (!sdata)
+ goto done;
}
- if (ret == 0)
- local->monitor_chandef = *chandef;
+ if (rcu_access_pointer(sdata->deflink.conf->chanctx_conf) &&
+ cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
+ &chanreq.oper))
+ return 0;
- return ret;
+ ieee80211_link_release_channel(&sdata->deflink);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ IEEE80211_CHANCTX_SHARED);
+ if (ret)
+ return ret;
+done:
+ local->monitor_chanreq = chanreq;
+ return 0;
}
static int
@@ -953,7 +981,8 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
struct cfg80211_fils_discovery *params,
struct ieee80211_link_data *link,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf,
+ u64 *changed)
{
struct fils_discovery_data *new, *old = NULL;
struct ieee80211_fils_discovery *fd;
@@ -980,7 +1009,8 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
}
- return BSS_CHANGED_FILS_DISCOVERY;
+ *changed |= BSS_CHANGED_FILS_DISCOVERY;
+ return 0;
}
static int
@@ -1063,13 +1093,13 @@ ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst,
{
int i, offset = 0;
+ dst->cnt = src->cnt;
for (i = 0; i < src->cnt; i++) {
memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
dst->elem[i].len = src->elem[i].len;
dst->elem[i].data = pos + offset;
offset += dst->elem[i].len;
}
- dst->cnt = src->cnt;
return offset;
}
@@ -1240,6 +1270,30 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static u8 ieee80211_num_beaconing_links(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_link_data *link;
+ u8 link_id, num = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_GO)
+ return num;
+
+ if (!sdata->vif.valid_links)
+ return num;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ if (sdata_dereference(link->u.ap.beacon, sdata))
+ num++;
+ }
+
+ return num;
+}
+
static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *params)
{
@@ -1258,6 +1312,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id = params->beacon.link_id;
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1271,9 +1326,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
- if (params->smps_mode != NL80211_SMPS_OFF)
- return -EOPNOTSUPP;
-
link->smps_mode = IEEE80211_SMPS_OFF;
link->needed_rx_chains = sdata->local->rx_chains;
@@ -1341,8 +1393,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
link_conf->eht_support = true;
- link_conf->eht_puncturing = params->punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
link_conf->eht_su_beamformer =
params->eht_cap->fixed.phy_cap_info[0] &
@@ -1355,6 +1405,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ);
+ link_conf->eht_80mhz_full_bw_ul_mumimo =
+ params->eht_cap->fixed.phy_cap_info[7] &
+ (IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ);
} else {
link_conf->eht_su_beamformer = false;
link_conf->eht_su_beamformee = false;
@@ -1364,13 +1419,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.type == NL80211_IFTYPE_AP &&
params->mbssid_config.tx_wdev) {
err = ieee80211_set_ap_mbssid_options(sdata,
- params->mbssid_config,
+ &params->mbssid_config,
link_conf);
if (err)
return err;
}
- err = ieee80211_link_use_channel(link, &params->chandef,
+ err = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (!err)
ieee80211_link_copy_chanctx_to_vlans(link, false);
@@ -1445,10 +1500,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
goto error;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1463,7 +1517,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
kfree_rcu(old, rcu_head);
RCU_INIT_POINTER(link->u.ap.beacon, NULL);
- sdata->u.ap.active = false;
+
+ if (ieee80211_num_beaconing_links(sdata) == 0)
+ sdata->u.ap.active = false;
+
goto error;
}
@@ -1471,7 +1528,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
ieee80211_link_info_change_notify(sdata, link, changed);
- netif_carrier_on(dev);
+ if (ieee80211_num_beaconing_links(sdata) <= 1)
+ netif_carrier_on(dev);
+
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_on(vlan->dev);
@@ -1519,10 +1578,9 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return err;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
return err;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1565,6 +1623,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link =
sdata_dereference(sdata->link[link_id], sdata);
struct ieee80211_bss_conf *link_conf = link->conf;
+ LIST_HEAD(keys);
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1582,21 +1641,20 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* abort any running channel switch or color change */
link_conf->csa_active = false;
link_conf->color_change_active = false;
- if (link->csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
ieee80211_free_next_beacon(link);
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
- netif_carrier_off(dev);
+
+ if (ieee80211_num_beaconing_links(sdata) <= 1) {
+ netif_carrier_off(dev);
+ sdata->u.ap.active = false;
+ }
/* remove beacon and probe response */
- sdata->u.ap.active = false;
RCU_INIT_POINTER(link->u.ap.beacon, NULL);
RCU_INIT_POINTER(link->u.ap.probe_resp, NULL);
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
@@ -1618,8 +1676,13 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
- __sta_info_flush(sdata, true);
- ieee80211_free_keys(sdata, true);
+ __sta_info_flush(sdata, true, link_id, NULL);
+
+ ieee80211_remove_link_keys(link, &keys);
+ if (!list_empty(&keys)) {
+ synchronize_net();
+ ieee80211_free_key_list(local, &keys);
+ }
link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
@@ -1628,12 +1691,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BEACON_ENABLED);
- if (sdata->wdev.cac_started) {
- chandef = link_conf->chandef;
+ if (sdata->wdev.links[link_id].cac_started) {
+ chandef = link_conf->chanreq.oper;
wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
drv_stop_ap(sdata->local, sdata, link_conf);
@@ -1671,7 +1734,7 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
* before drv_sta_state() is called.
*/
if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
- rate_control_rate_init(sta);
+ rate_control_rate_init_all_links(sta);
ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (ret)
@@ -1773,11 +1836,17 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
#endif
}
+enum sta_link_apply_mode {
+ STA_LINK_MODE_NEW,
+ STA_LINK_MODE_STA_MODIFY,
+ STA_LINK_MODE_LINK_MODIFY,
+};
+
static int sta_link_apply_parameters(struct ieee80211_local *local,
- struct sta_info *sta, bool new_link,
+ struct sta_info *sta,
+ enum sta_link_apply_mode mode,
struct link_station_parameters *params)
{
- int ret = 0;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
u32 link_id = params->link_id < 0 ? 0 : params->link_id;
@@ -1786,18 +1855,29 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
struct link_sta_info *link_sta =
rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&local->hw.wiphy->mtx));
-
- /*
- * If there are no changes, then accept a link that exist,
- * unless it's a new link.
- */
- if (params->link_id >= 0 && !new_link &&
- !params->link_mac && !params->txpwr_set &&
- !params->supported_rates_len &&
- !params->ht_capa && !params->vht_capa &&
- !params->he_capa && !params->eht_capa &&
- !params->opmode_notif_used)
- return 0;
+ bool changes = params->link_mac ||
+ params->txpwr_set ||
+ params->supported_rates_len ||
+ params->ht_capa ||
+ params->vht_capa ||
+ params->he_capa ||
+ params->eht_capa ||
+ params->opmode_notif_used;
+
+ switch (mode) {
+ case STA_LINK_MODE_NEW:
+ if (!params->link_mac)
+ return -EINVAL;
+ break;
+ case STA_LINK_MODE_LINK_MODIFY:
+ break;
+ case STA_LINK_MODE_STA_MODIFY:
+ if (params->link_id >= 0)
+ break;
+ if (!changes)
+ return 0;
+ break;
+ }
if (!link || !link_sta)
return -EINVAL;
@@ -1807,18 +1887,18 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
return -EINVAL;
if (params->link_mac) {
- if (new_link) {
+ if (mode == STA_LINK_MODE_NEW) {
memcpy(link_sta->addr, params->link_mac, ETH_ALEN);
memcpy(link_sta->pub->addr, params->link_mac, ETH_ALEN);
} else if (!ether_addr_equal(link_sta->addr,
params->link_mac)) {
return -EINVAL;
}
- } else if (new_link) {
- return -EINVAL;
}
if (params->txpwr_set) {
+ int ret;
+
link_sta->pub->txpwr.type = params->txpwr.type;
if (params->txpwr.type == NL80211_TX_POWER_LIMITED)
link_sta->pub->txpwr.power = params->txpwr.power;
@@ -1829,7 +1909,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
if (params->supported_rates &&
params->supported_rates_len) {
- ieee80211_parse_bitrates(link->conf->chandef.width,
+ ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
sband, params->supported_rates,
params->supported_rates_len,
&link_sta->pub->supp_rates[sband->band]);
@@ -1860,6 +1940,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->eht_capa_len,
link_sta);
+ ieee80211_sta_init_nss(link_sta);
+
if (params->opmode_notif_used) {
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
@@ -1869,9 +1951,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
- ieee80211_sta_set_rx_nss(link_sta);
-
- return ret;
+ return 0;
}
static int sta_apply_parameters(struct ieee80211_local *local,
@@ -1944,6 +2024,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
}
+ if (mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ sta->sta.spp_amsdu = set & BIT(NL80211_STA_FLAG_SPP_AMSDU);
+
/* mark TDLS channel switch support, if the AP allows it */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
!sdata->deflink.u.mgd.tdls_chan_switch_prohibited &&
@@ -1984,7 +2067,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (params->listen_interval >= 0)
sta->listen_interval = params->listen_interval;
- ret = sta_link_apply_parameters(local, sta, false,
+ ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_STA_MODIFY,
&params->link_sta_params);
if (ret)
return ret;
@@ -2080,7 +2163,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
*/
if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
test_sta_flag(sta, WLAN_STA_ASSOC))
- rate_control_rate_init(sta);
+ rate_control_rate_init_all_links(sta);
return sta_info_insert(sta);
}
@@ -2095,7 +2178,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (params->mac)
return sta_info_destroy_addr_bss(sdata, params->mac);
- sta_info_flush(sdata);
+ sta_info_flush(sdata, params->link_id);
return 0;
}
@@ -2164,15 +2247,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->u.vlan.sta) {
- ieee80211_clear_fast_rx(sta);
+ sta->sdata->u.vlan.sta)
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
- }
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);
sta->sdata = vlansdata;
+ ieee80211_check_fast_rx(sta);
ieee80211_check_fast_xmit(sta);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
@@ -2602,6 +2684,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
const struct mesh_setup *setup)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
int err;
@@ -2618,7 +2701,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -2661,7 +2744,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
return -EINVAL;
if (params->basic_rates) {
- if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+ if (!ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
@@ -2919,8 +3002,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
memcpy(sdata->vif.bss_conf.mcast_rate, rate,
sizeof(int) * NUM_NL80211_BANDS);
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_MCAST_RATE);
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+ BSS_CHANGED_MCAST_RATE);
return 0;
}
@@ -2991,102 +3075,147 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting txp_type = type;
bool update_txp_type = false;
bool has_monitor = false;
+ int user_power_level;
+ int old_power = local->user_power_level;
lockdep_assert_wiphy(local->hw.wiphy);
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ txp_type = NL80211_TX_POWER_LIMITED;
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ case NL80211_TX_POWER_FIXED:
+ if (mbm < 0 || (mbm % 100))
+ return -EOPNOTSUPP;
+ user_power_level = MBM_TO_DBM(mbm);
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (wdev) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return -EOPNOTSUPP;
+
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (!sdata)
return -EOPNOTSUPP;
}
- switch (type) {
- case NL80211_TX_POWER_AUTOMATIC:
- sdata->deflink.user_power_level =
- IEEE80211_UNSET_POWER_LEVEL;
- txp_type = NL80211_TX_POWER_LIMITED;
- break;
- case NL80211_TX_POWER_LIMITED:
- case NL80211_TX_POWER_FIXED:
- if (mbm < 0 || (mbm % 100))
- return -EOPNOTSUPP;
- sdata->deflink.user_power_level = MBM_TO_DBM(mbm);
- break;
- }
+ for (int link_id = 0;
+ link_id < ARRAY_SIZE(sdata->link);
+ link_id++) {
+ struct ieee80211_link_data *link =
+ wiphy_dereference(wiphy, sdata->link[link_id]);
- if (txp_type != sdata->vif.bss_conf.txpower_type) {
- update_txp_type = true;
- sdata->vif.bss_conf.txpower_type = txp_type;
- }
+ if (!link)
+ continue;
+
+ link->user_power_level = user_power_level;
- ieee80211_recalc_txpower(sdata, update_txp_type);
+ if (txp_type != link->conf->txpower_type) {
+ update_txp_type = true;
+ link->conf->txpower_type = txp_type;
+ }
+ ieee80211_recalc_txpower(link, update_txp_type);
+ }
return 0;
}
- switch (type) {
- case NL80211_TX_POWER_AUTOMATIC:
- local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
- txp_type = NL80211_TX_POWER_LIMITED;
- break;
- case NL80211_TX_POWER_LIMITED:
- case NL80211_TX_POWER_FIXED:
- if (mbm < 0 || (mbm % 100))
- return -EOPNOTSUPP;
- local->user_power_level = MBM_TO_DBM(mbm);
- break;
- }
+ local->user_power_level = user_power_level;
list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
has_monitor = true;
continue;
}
- sdata->deflink.user_power_level = local->user_power_level;
- if (txp_type != sdata->vif.bss_conf.txpower_type)
- update_txp_type = true;
- sdata->vif.bss_conf.txpower_type = txp_type;
+
+ for (int link_id = 0;
+ link_id < ARRAY_SIZE(sdata->link);
+ link_id++) {
+ struct ieee80211_link_data *link =
+ wiphy_dereference(wiphy, sdata->link[link_id]);
+
+ if (!link)
+ continue;
+
+ link->user_power_level = local->user_power_level;
+ if (txp_type != link->conf->txpower_type)
+ update_txp_type = true;
+ link->conf->txpower_type = txp_type;
+ }
}
list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
continue;
- ieee80211_recalc_txpower(sdata, update_txp_type);
+
+ for (int link_id = 0;
+ link_id < ARRAY_SIZE(sdata->link);
+ link_id++) {
+ struct ieee80211_link_data *link =
+ wiphy_dereference(wiphy, sdata->link[link_id]);
+
+ if (!link)
+ continue;
+
+ ieee80211_recalc_txpower(link, update_txp_type);
+ }
}
if (has_monitor) {
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
sdata->vif.bss_conf.txpower_type = txp_type;
- ieee80211_recalc_txpower(sdata, update_txp_type);
+ ieee80211_recalc_txpower(&sdata->deflink,
+ update_txp_type);
}
}
+ if (local->emulate_chanctx &&
+ (old_power != local->user_power_level))
+ ieee80211_hw_conf_chan(local);
+
return 0;
}
static int ieee80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
int *dbm)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_link_data *link_data;
- if (local->ops->get_txpower)
- return drv_get_txpower(local, sdata, dbm);
+ if (local->ops->get_txpower &&
+ (sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+ return drv_get_txpower(local, sdata, link_id, dbm);
- if (!local->use_chanctx)
+ if (local->emulate_chanctx) {
*dbm = local->hw.conf.power_level;
- else
- *dbm = sdata->vif.bss_conf.txpower;
+ } else {
+ link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+
+ if (link_data)
+ *dbm = link_data->conf->txpower;
+ else
+ return -ENOLINK;
+ }
/* INT_MIN indicates no power level was set yet */
if (*dbm == INT_MIN)
@@ -3153,8 +3282,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return -EINVAL;
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
old_req = link->u.mgd.req_smps;
@@ -3176,7 +3304,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
* the new value until we associate.
*/
if (!sdata->u.mgd.associated ||
- link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ link->conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
ap = sdata->vif.cfg.ap_addr;
@@ -3207,7 +3335,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (err)
link->u.mgd.req_smps = old_req;
else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
return err;
}
@@ -3254,33 +3382,57 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static void ieee80211_set_cqm_rssi_link(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ s32 rssi_thold, u32 rssi_hyst,
+ s32 rssi_low, s32 rssi_high)
+{
+ struct ieee80211_bss_conf *conf;
+
+ if (!link || !link->conf)
+ return;
+
+ conf = link->conf;
+
+ if (rssi_thold && rssi_hyst &&
+ rssi_thold == conf->cqm_rssi_thold &&
+ rssi_hyst == conf->cqm_rssi_hyst)
+ return;
+
+ conf->cqm_rssi_thold = rssi_thold;
+ conf->cqm_rssi_hyst = rssi_hyst;
+ conf->cqm_rssi_low = rssi_low;
+ conf->cqm_rssi_high = rssi_high;
+ link->u.mgd.last_cqm_event_signal = 0;
+
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return;
+
+ if (sdata->u.mgd.associated &&
+ (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_CQM);
+}
+
static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-
- if (rssi_thold == bss_conf->cqm_rssi_thold &&
- rssi_hyst == bss_conf->cqm_rssi_hyst)
- return 0;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
- !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_thold = rssi_thold;
- bss_conf->cqm_rssi_hyst = rssi_hyst;
- bss_conf->cqm_rssi_low = 0;
- bss_conf->cqm_rssi_high = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, rssi_thold, rssi_hyst,
+ 0, 0);
+ }
return 0;
}
@@ -3291,22 +3443,19 @@ static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_low = rssi_low;
- bss_conf->cqm_rssi_high = rssi_high;
- bss_conf->cqm_rssi_thold = 0;
- bss_conf->cqm_rssi_hyst = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, 0, 0,
+ rssi_low, rssi_high);
+ }
return 0;
}
@@ -3331,9 +3480,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
* so at a basic rate so that all clients can receive it.
*/
if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) &&
- sdata->vif.bss_conf.chandef.chan) {
+ sdata->vif.bss_conf.chanreq.oper.chan) {
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+ enum nl80211_band band;
+
+ band = sdata->vif.bss_conf.chanreq.oper.chan->band;
if (!(mask->control[band].legacy & basic_rates))
return -EINVAL;
@@ -3382,54 +3533,58 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning) {
- err = -EBUSY;
- goto out_unlock;
- }
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ return -ENOLINK;
/* whatever, but channel contexts should not complain about that one */
- sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
- sdata->deflink.needed_rx_chains = local->rx_chains;
+ link_data->smps_mode = IEEE80211_SMPS_OFF;
+ link_data->needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, chandef,
+ err = ieee80211_link_use_channel(link_data, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
- goto out_unlock;
+ return err;
- wiphy_delayed_work_queue(wiphy, &sdata->deflink.dfs_cac_timer_work,
+ wiphy_delayed_work_queue(wiphy, &link_data->dfs_cac_timer_work,
msecs_to_jiffies(cac_time_ms));
- out_unlock:
- return err;
+ return 0;
}
static void ieee80211_end_cac(struct wiphy *wiphy,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- /* it might be waiting for the local->mtx, but then
- * by the time it gets it, sdata->wdev.cac_started
- * will no longer be true
- */
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ continue;
+
wiphy_delayed_work_cancel(wiphy,
- &sdata->deflink.dfs_cac_timer_work);
+ &link_data->dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(&sdata->deflink);
- sdata->wdev.cac_started = false;
+ if (sdata->wdev.links[link_id].cac_started) {
+ ieee80211_link_release_channel(link_data);
+ sdata->wdev.links[link_id].cac_started = false;
}
}
}
@@ -3542,13 +3697,24 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
rcu_read_lock();
+ link_data = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link_data)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* TODO: MBSSID with MLO changes */
if (vif->mbssid_tx_vif == vif) {
/* Trigger ieee80211_csa_finish() on the non-transmitting
* interfaces when channel switch is received on
@@ -3564,41 +3730,41 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
continue;
wiphy_work_queue(iter->local->hw.wiphy,
- &iter->deflink.csa_finalize_work);
+ &iter->deflink.csa.finalize_work);
}
}
- wiphy_work_queue(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_queue(local->hw.wiphy, &link_data->csa.finalize_work);
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_csa_finish);
-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx)
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->deflink.csa_block_tx = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
}
EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
-static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_after_csa_beacon(struct ieee80211_link_data *link_data,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -EINVAL;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
- sdata->deflink.u.ap.next_beacon,
+ err = ieee80211_assign_beacon(sdata, link_data,
+ link_data->u.ap.next_beacon,
NULL, NULL, changed);
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
if (err < 0)
return err;
@@ -3627,6 +3793,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
{
struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *link_conf = link_data->conf;
u64 changed = 0;
int err;
@@ -3648,40 +3815,29 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
if (link_data->reserved_ready)
return 0;
- return ieee80211_link_use_reserved_context(&sdata->deflink);
+ return ieee80211_link_use_reserved_context(link_data);
}
- if (!cfg80211_chandef_identical(&link_data->conf->chandef,
- &link_data->csa_chandef))
+ if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
+ &link_data->csa.chanreq.oper))
return -EINVAL;
- sdata->vif.bss_conf.csa_active = false;
+ link_conf->csa_active = false;
- err = ieee80211_set_after_csa_beacon(sdata, &changed);
+ err = ieee80211_set_after_csa_beacon(link_data, &changed);
if (err)
return err;
- if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
- sdata->vif.bss_conf.eht_puncturing =
- sdata->vif.bss_conf.csa_punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-
ieee80211_link_info_change_notify(sdata, link_data, changed);
- if (link_data->csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- link_data->csa_block_tx = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
err = drv_post_channel_switch(link_data);
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chandef,
- link_data->link_id,
- link_data->conf->eht_puncturing);
+ cfg80211_ch_switch_notify(sdata->dev, &link_data->csa.chanreq.oper,
+ link_data->link_id);
return 0;
}
@@ -3691,7 +3847,8 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
struct ieee80211_sub_if_data *sdata = link_data->sdata;
if (__ieee80211_csa_finalize(link_data)) {
- sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
+ sdata_info(sdata, "failed to finalize CSA on link %d, disconnecting\n",
+ link_data->link_id);
cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
GFP_KERNEL);
}
@@ -3700,7 +3857,7 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ieee80211_link_data *link =
- container_of(work, struct ieee80211_link_data, csa_finalize_work);
+ container_of(work, struct ieee80211_link_data, csa.finalize_work);
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
@@ -3716,18 +3873,19 @@ void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
ieee80211_csa_finalize(link);
}
-static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_csa_beacon(struct ieee80211_link_data *link_data,
struct cfg80211_csa_settings *params,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_csa_settings csa = {};
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->deflink.u.ap.next_beacon =
+ link_data->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -ENOMEM;
/*
@@ -3753,7 +3911,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return -EINVAL;
}
@@ -3763,11 +3921,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
csa.count = params->count;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
+ err = ieee80211_assign_beacon(sdata, link_data,
&params->beacon_csa, &csa,
NULL, changed);
if (err < 0) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return err;
}
@@ -3814,7 +3972,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
/* changes into another band are not supported */
- if (sdata->vif.bss_conf.chandef.chan->band !=
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band !=
params->chandef.chan->band)
return -EINVAL;
@@ -3848,13 +4006,13 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static void ieee80211_color_change_abort(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_color_change_abort(struct ieee80211_link_data *link)
{
- sdata->vif.bss_conf.color_change_active = false;
+ link->conf->color_change_active = false;
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link);
- cfg80211_color_change_aborted_notify(sdata->dev);
+ cfg80211_color_change_aborted_notify(link->sdata->dev, link->link_id);
}
static int
@@ -3862,11 +4020,17 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
struct ieee80211_local *local = sdata->local;
- struct ieee80211_channel_switch ch_switch;
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = params->link_id,
+ };
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_data *link_data;
u64 changed = 0;
+ u8 link_id = params->link_id;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3874,19 +4038,26 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (!list_empty(&local->roc_list) || local->scanning)
return -EBUSY;
- if (sdata->wdev.cac_started)
+ if (sdata->wdev.links[link_id].cac_started)
return -EBUSY;
- if (cfg80211_chandef_identical(&params->chandef,
- &sdata->vif.bss_conf.chandef))
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return -EINVAL;
+
+ link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+ if (!link_data)
+ return -ENOLINK;
+
+ link_conf = link_data->conf;
+
+ if (chanreq.oper.punctured && !link_conf->eht_support)
return -EINVAL;
/* don't allow another channel switch if one is already active. */
- if (sdata->vif.bss_conf.csa_active)
+ if (link_conf->csa_active)
return -EBUSY;
- conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
if (!conf) {
err = -EBUSY;
goto out;
@@ -3903,60 +4074,52 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
ch_switch.block_tx = params->block_tx;
- ch_switch.chandef = params->chandef;
+ ch_switch.chandef = chanreq.oper;
ch_switch.count = params->count;
err = drv_pre_channel_switch(sdata, &ch_switch);
if (err)
goto out;
- err = ieee80211_link_reserve_chanctx(&sdata->deflink, &params->chandef,
+ err = ieee80211_link_reserve_chanctx(link_data, &chanreq,
chanctx->mode,
params->radar_required);
if (err)
goto out;
/* if reservation is invalid then this will fail */
- err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
+ err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0, -1);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
/* if there is a color change in progress, abort it */
- if (sdata->vif.bss_conf.color_change_active)
- ieee80211_color_change_abort(sdata);
+ if (link_conf->color_change_active)
+ ieee80211_color_change_abort(link_data);
- err = ieee80211_set_csa_beacon(sdata, params, &changed);
+ err = ieee80211_set_csa_beacon(link_data, params, &changed);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
- if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support)
- goto out;
-
- sdata->deflink.csa_chandef = params->chandef;
- sdata->deflink.csa_block_tx = params->block_tx;
- sdata->vif.bss_conf.csa_active = true;
- sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap;
+ link_data->csa.chanreq = chanreq;
+ link_conf->csa_active = true;
- if (sdata->deflink.csa_block_tx)
- ieee80211_stop_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
+ if (params->block_tx)
+ ieee80211_vif_block_queues_csa(sdata);
cfg80211_ch_switch_started_notify(sdata->dev,
- &sdata->deflink.csa_chandef, 0,
- params->count, params->block_tx,
- sdata->vif.bss_conf.csa_punct_bitmap);
+ &link_data->csa.chanreq.oper, link_id,
+ params->count, params->block_tx);
if (changed) {
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- changed);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ ieee80211_link_info_change_notify(sdata, link_data, changed);
+ drv_channel_switch_beacon(sdata, &link_data->csa.chanreq.oper);
} else {
/* if the beacon didn't change, we can finalize immediately */
- ieee80211_csa_finalize(&sdata->deflink);
+ ieee80211_csa_finalize(link_data);
}
out:
@@ -4206,15 +4369,13 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (chanctx_conf) {
- *chandef = link->conf->chandef;
+ *chandef = link->conf->chanreq.oper;
ret = 0;
- } else if (local->open_count > 0 &&
+ } else if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) &&
+ local->open_count > 0 &&
local->open_count == local->monitors &&
sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (local->use_chanctx)
- *chandef = local->monitor_chandef;
- else
- *chandef = local->_oper_chandef;
+ *chandef = local->monitor_chanreq.oper;
ret = 0;
}
out:
@@ -4262,12 +4423,13 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_link_data *link;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
int ret;
u64 changed = 0;
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_change_bandwidth(link, chandef, &changed);
+ ret = ieee80211_link_change_chanreq(link, &chanreq, &changed);
if (ret == 0)
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -4589,20 +4751,22 @@ static int ieee80211_set_sar_specs(struct wiphy *wiphy,
}
static int
-ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
+ieee80211_set_after_color_change_beacon(struct ieee80211_link_data *link,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP: {
int ret;
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link->u.ap.next_beacon)
return -EINVAL;
- ret = ieee80211_assign_beacon(sdata, &sdata->deflink,
- sdata->deflink.u.ap.next_beacon,
+ ret = ieee80211_assign_beacon(sdata, link,
+ link->u.ap.next_beacon,
NULL, NULL, changed);
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link);
if (ret < 0)
return ret;
@@ -4618,18 +4782,19 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
}
static int
-ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
+ieee80211_set_color_change_beacon(struct ieee80211_link_data *link,
struct cfg80211_color_change_settings *params,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_color_change_settings color_change = {};
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->deflink.u.ap.next_beacon =
+ link->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_next);
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link->u.ap.next_beacon)
return -ENOMEM;
if (params->count <= 1)
@@ -4641,11 +4806,11 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
params->counter_offset_presp;
color_change.count = params->count;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
+ err = ieee80211_assign_beacon(sdata, link,
&params->beacon_color_change,
NULL, &color_change, changed);
if (err < 0) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link);
return err;
}
break;
@@ -4657,16 +4822,18 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
}
static void
-ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
+ieee80211_color_change_bss_config_notify(struct ieee80211_link_data *link,
u8 color, int enable, u64 changed)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- sdata->vif.bss_conf.he_bss_color.color = color;
- sdata->vif.bss_conf.he_bss_color.enabled = enable;
+ link->conf->he_bss_color.color = color;
+ link->conf->he_bss_color.enabled = enable;
changed |= BSS_CHANGED_HE_BSS_COLOR;
- ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
+ ieee80211_link_info_change_notify(sdata, link, changed);
if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) {
struct ieee80211_sub_if_data *child;
@@ -4683,26 +4850,27 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
}
}
-static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_color_change_finalize(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
u64 changed = 0;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- sdata->vif.bss_conf.color_change_active = false;
+ link->conf->color_change_active = false;
- err = ieee80211_set_after_color_change_beacon(sdata, &changed);
+ err = ieee80211_set_after_color_change_beacon(link, &changed);
if (err) {
- cfg80211_color_change_aborted_notify(sdata->dev);
+ cfg80211_color_change_aborted_notify(sdata->dev, link->link_id);
return err;
}
- ieee80211_color_change_bss_config_notify(sdata,
- sdata->vif.bss_conf.color_change_color,
+ ieee80211_color_change_bss_config_notify(link,
+ link->conf->color_change_color,
1, changed);
- cfg80211_color_change_notify(sdata->dev);
+ cfg80211_color_change_notify(sdata->dev, link->link_id);
return 0;
}
@@ -4710,63 +4878,98 @@ static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
void ieee80211_color_change_finalize_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
- deflink.color_change_finalize_work);
+ struct ieee80211_link_data *link =
+ container_of(work, struct ieee80211_link_data,
+ color_change_finalize_work);
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_bss_conf *link_conf = link->conf;
struct ieee80211_local *local = sdata->local;
lockdep_assert_wiphy(local->hw.wiphy);
/* AP might have been stopped while waiting for the lock. */
- if (!sdata->vif.bss_conf.color_change_active)
+ if (!link_conf->color_change_active)
return;
if (!ieee80211_sdata_running(sdata))
return;
- ieee80211_color_change_finalize(sdata);
+ ieee80211_color_change_finalize(link);
}
-void ieee80211_color_collision_detection_work(struct work_struct *work)
+void ieee80211_color_collision_detection_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
{
- struct delayed_work *delayed_work = to_delayed_work(work);
struct ieee80211_link_data *link =
- container_of(delayed_work, struct ieee80211_link_data,
- color_collision_detect_work);
+ container_of(work, struct ieee80211_link_data,
+ color_collision_detect_work.work);
struct ieee80211_sub_if_data *sdata = link->sdata;
- cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap);
+ cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap,
+ link->link_id);
}
-void ieee80211_color_change_finish(struct ieee80211_vif *vif)
+void ieee80211_color_change_finish(struct ieee80211_vif *vif, u8 link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
+
+ rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link)) {
+ rcu_read_unlock();
+ return;
+ }
wiphy_work_queue(sdata->local->hw.wiphy,
- &sdata->deflink.color_change_finalize_work);
+ &link->color_change_finalize_work);
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap, u8 link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_link_data *link = &sdata->deflink;
+ struct ieee80211_link_data *link;
- if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active)
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
return;
- if (delayed_work_pending(&link->color_collision_detect_work))
+ rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link)) {
+ rcu_read_unlock();
return;
+ }
+
+ if (link->conf->color_change_active || link->conf->csa_active) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (wiphy_delayed_work_pending(sdata->local->hw.wiphy,
+ &link->color_collision_detect_work)) {
+ rcu_read_unlock();
+ return;
+ }
link->color_bitmap = color_bitmap;
/* queue the color collision detection event every 500 ms in order to
* avoid sending too many netlink messages to userspace.
*/
- ieee80211_queue_delayed_work(&sdata->local->hw,
- &link->color_collision_detect_work,
- msecs_to_jiffies(500));
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &link->color_collision_detect_work,
+ msecs_to_jiffies(500));
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_obss_color_collision_notify);
@@ -4776,36 +4979,48 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_data *link;
+ u8 link_id = params->link_id;
u64 changed = 0;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- if (sdata->vif.bss_conf.nontransmitted)
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return -EINVAL;
+
+ link = wiphy_dereference(wiphy, sdata->link[link_id]);
+ if (!link)
+ return -ENOLINK;
+
+ link_conf = link->conf;
+
+ if (link_conf->nontransmitted)
return -EINVAL;
/* don't allow another color change if one is already active or if csa
* is active
*/
- if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active) {
+ if (link_conf->color_change_active || link_conf->csa_active) {
err = -EBUSY;
goto out;
}
- err = ieee80211_set_color_change_beacon(sdata, params, &changed);
+ err = ieee80211_set_color_change_beacon(link, params, &changed);
if (err)
goto out;
- sdata->vif.bss_conf.color_change_active = true;
- sdata->vif.bss_conf.color_change_color = params->color;
+ link_conf->color_change_active = true;
+ link_conf->color_change_color = params->color;
- cfg80211_color_change_started_notify(sdata->dev, params->count);
+ cfg80211_color_change_started_notify(sdata->dev, params->count, link_id);
if (changed)
- ieee80211_color_change_bss_config_notify(sdata, 0, 0, changed);
+ ieee80211_color_change_bss_config_notify(link, 0, 0, changed);
else
/* if the beacon didn't change, we can finalize immediately */
- ieee80211_color_change_finalize(sdata);
+ ieee80211_color_change_finalize(link);
out:
@@ -4843,19 +5058,29 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ u16 new_links = wdev->valid_links & ~BIT(link_id);
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
+ /* During the link teardown process, certain functions require the
+ * link_id to remain in the valid_links bitmap. Therefore, instead
+ * of removing the link_id from the bitmap, pass a masked value to
+ * simulate as if link_id does not exist anymore.
+ */
+ ieee80211_vif_set_links(sdata, new_links, 0);
}
-static int sta_add_link_station(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct link_station_parameters *params)
+static int
+ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params)
{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
struct sta_info *sta;
int ret;
+ lockdep_assert_wiphy(local->hw.wiphy);
+
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -4870,34 +5095,33 @@ static int sta_add_link_station(struct ieee80211_local *local,
if (ret)
return ret;
- ret = sta_link_apply_parameters(local, sta, true, params);
+ ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_NEW, params);
if (ret) {
ieee80211_sta_free_link(sta, params->link_id);
return ret;
}
+ if (test_sta_flag(sta, WLAN_STA_ASSOC)) {
+ struct link_sta_info *link_sta;
+
+ link_sta = sdata_dereference(sta->link[params->link_id], sdata);
+ rate_control_rate_init(link_sta);
+ }
+
/* ieee80211_sta_activate_link frees the link upon failure */
return ieee80211_sta_activate_link(sta, params->link_id);
}
static int
-ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev,
+ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev,
struct link_station_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wiphy_priv(wiphy);
-
- lockdep_assert_wiphy(sdata->local->hw.wiphy);
-
- return sta_add_link_station(local, sdata, params);
-}
-
-static int sta_mod_link_station(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct link_station_parameters *params)
-{
struct sta_info *sta;
+ lockdep_assert_wiphy(local->hw.wiphy);
+
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -4905,26 +5129,19 @@ static int sta_mod_link_station(struct ieee80211_local *local,
if (!(sta->sta.valid_links & BIT(params->link_id)))
return -EINVAL;
- return sta_link_apply_parameters(local, sta, false, params);
+ return sta_link_apply_parameters(local, sta, STA_LINK_MODE_LINK_MODIFY,
+ params);
}
static int
-ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev,
- struct link_station_parameters *params)
+ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_del_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct sta_info *sta;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- return sta_mod_link_station(local, sdata, params);
-}
-
-static int sta_del_link_station(struct ieee80211_sub_if_data *sdata,
- struct link_station_del_parameters *params)
-{
- struct sta_info *sta;
-
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -4941,17 +5158,6 @@ static int sta_del_link_station(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int
-ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev,
- struct link_station_del_parameters *params)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- lockdep_assert_wiphy(sdata->local->hw.wiphy);
-
- return sta_del_link_station(sdata, params);
-}
-
static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_set_hw_timestamp *hwts)
@@ -4968,6 +5174,29 @@ static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts);
}
+static int
+ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ return ieee80211_req_neg_ttlm(sdata, params);
+}
+
+static int
+ieee80211_assoc_ml_reconf(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_link *add_links,
+ u16 rem_links)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ return ieee80211_mgd_assoc_ml_reconf(sdata, add_links, rem_links);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -5080,4 +5309,7 @@ const struct cfg80211_ops mac80211_config_ops = {
.mod_link_station = ieee80211_mod_link_station,
.del_link_station = ieee80211_del_link_station,
.set_hw_timestamp = ieee80211_set_hw_timestamp,
+ .set_ttlm = ieee80211_set_ttlm,
+ .get_radio_mask = ieee80211_get_radio_mask,
+ .assoc_ml_reconf = ieee80211_assoc_ml_reconf,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ef4c2cebc080..dc28f2b0957a 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
- * Copyright 2020 - 2022 Intel Corporation
+ * Copyright 2020 - 2024 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -47,24 +47,29 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
ieee80211_chanctx_num_reserved(local, ctx);
}
-static int ieee80211_num_chanctx(struct ieee80211_local *local)
+static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx)
{
struct ieee80211_chanctx *ctx;
int num = 0;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(ctx, &local->chanctx_list, list)
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (radio_idx >= 0 && ctx->conf.radio_idx != radio_idx)
+ continue;
num++;
+ }
return num;
}
-static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
+static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local,
+ int radio_idx)
{
lockdep_assert_wiphy(local->hw.wiphy);
- return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
+ return ieee80211_num_chanctx(local, radio_idx) <
+ ieee80211_max_num_channels(local, radio_idx);
}
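
Both helpers now take a radio index so that multi-radio wiphys can be accounted per underlying radio; as the new filter shows, a negative index preserves the old whole-wiphy counting. Illustrative call sites (not from this patch):

    /* room for one more chanctx on radio 0 specifically? */
    bool on_radio0 = ieee80211_can_create_new_chanctx(local, 0);

    /* room anywhere on the wiphy (radio_idx < 0 disables the filter) */
    bool anywhere = ieee80211_can_create_new_chanctx(local, -1);
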
static struct ieee80211_chanctx *
@@ -81,87 +86,122 @@ ieee80211_link_get_chanctx(struct ieee80211_link_data *link)
return container_of(conf, struct ieee80211_chanctx, conf);
}
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b)
+{
+ if (!cfg80211_chandef_identical(&a->oper, &b->oper))
+ return false;
+ if (!a->ap.chan && !b->ap.chan)
+ return true;
+ return cfg80211_chandef_identical(&a->ap, &b->ap);
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanreq_compatible(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct cfg80211_chan_def *compat;
+
+ if (a->ap.chan && b->ap.chan &&
+ !cfg80211_chandef_identical(&a->ap, &b->ap))
+ return NULL;
+
+ compat = cfg80211_chandef_compatible(&a->oper, &b->oper);
+ if (!compat)
+ return NULL;
+
+ /* Note: later code assumes this always fills & returns tmp if compat */
+ tmp->oper = *compat;
+ tmp->ap = a->ap.chan ? a->ap : b->ap;
+ return tmp;
+}
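
A channel request couples the operating chandef with an optional AP chandef (used for wider-bandwidth OFDMA); two requests are compatible only if any AP chandefs both carry are identical and their operating chandefs are compatible in the cfg80211 sense. A rough sketch, where req_40 and req_80 are assumed to be pre-built requests on the same control channel (hypothetical variables):

    struct ieee80211_chan_req tmp;
    const struct ieee80211_chan_req *res;

    /* 40 MHz oper vs. 80 MHz oper sharing the control channel: compatible;
     * res points at tmp, holding the wider cfg80211-computed oper chandef
     * plus whichever ->ap chandef was set. Incompatible requests yield NULL.
     */
    res = ieee80211_chanreq_compatible(&req_40, &req_80, &tmp);
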
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_compatible(struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret;
+ struct ieee80211_chan_req tmp2;
+
+ *tmp = (struct ieee80211_chan_req){
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+
+ ret = ieee80211_chanreq_compatible(tmp, req, &tmp2);
+ if (!ret)
+ return NULL;
+ *tmp = *ret;
+ return tmp;
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->reserved_links,
- reserved_chanctx_list) {
- if (!compat)
- compat = &link->reserved_chandef;
+ if (WARN_ON(!req))
+ return NULL;
- compat = cfg80211_chandef_compatible(&link->reserved_chandef,
- compat);
- if (!compat)
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) {
+ req = ieee80211_chanreq_compatible(&link->reserved, req, tmp);
+ if (!req)
break;
}
- return compat;
+ return req;
}
-static const struct cfg80211_chan_def *
+static const struct ieee80211_chan_req *
ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *compat,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
+ const struct ieee80211_chan_req *comp_def = compat;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->assigned_links,
- assigned_chanctx_list) {
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) {
struct ieee80211_bss_conf *link_conf = link->conf;
if (link->reserved_chanctx)
continue;
- if (!compat)
- compat = &link_conf->chandef;
-
- compat = cfg80211_chandef_compatible(
- &link_conf->chandef, compat);
- if (!compat)
+ comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ comp_def, tmp);
+ if (!comp_def)
break;
}
- return compat;
-}
-
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
-{
- lockdep_assert_wiphy(local->hw.wiphy);
-
- compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- return compat;
+ return comp_def;
}
static bool
-ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *def)
+ieee80211_chanctx_can_reserve(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req)
{
+ struct ieee80211_chan_req tmp;
+
lockdep_assert_wiphy(local->hw.wiphy);
- if (ieee80211_chanctx_combined_chandef(local, ctx, def))
- return true;
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
+ return false;
+
+ if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp))
+ return false;
if (!list_empty(&ctx->reserved_links) &&
- ieee80211_chanctx_reserved_chandef(local, ctx, def))
+ ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
return true;
return false;
@@ -169,7 +209,7 @@ ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -186,8 +226,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
- chandef))
+ if (!ieee80211_chanctx_can_reserve(local, ctx, chanreq))
continue;
return ctx;
@@ -202,12 +241,19 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
enum ieee80211_sta_rx_bandwidth width;
struct link_sta_info *link_sta;
- link_sta = rcu_dereference(sta->link[link_id]);
+ link_sta = wiphy_dereference(sta->local->hw.wiphy, sta->link[link_id]);
/* no effect if this STA has no presence on this link */
if (!link_sta)
return NL80211_CHAN_WIDTH_20_NOHT;
+ /*
+ * We assume that TX/RX might be asymmetric (so e.g. VHT operating
+ * mode notification changes what a STA wants to receive, but not
+ * necessarily what it will transmit to us), and therefore use the
+ * capabilities here. Calling it RX bandwidth capability is a bit
+ * wrong though, since capabilities are in fact symmetric.
+ */
width = ieee80211_sta_cap_rx_bw(link_sta);
switch (width) {
@@ -240,13 +286,16 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
}
static enum nl80211_chan_width
-ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
- unsigned int link_id)
+ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
struct sta_info *sta;
- list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ list_for_each_entry(sta, &sdata->local->sta_list, list) {
if (sdata != sta->sdata &&
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
@@ -258,53 +307,65 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
}
static enum nl80211_chan_width
-ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_vif *vif = &sdata->vif;
- int link_id;
- rcu_read_lock();
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_link_data *link =
- rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(check_reserved && rsvd_for))
+ return ctx->conf.def.width;
- if (!link)
- continue;
+ for_each_sdata_link(local, link) {
+ enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- if (link != rsvd_for &&
- rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
+ if (check_reserved) {
+ if (link->reserved_chanctx != ctx)
+ continue;
+ } else if (link != rsvd_for &&
+ rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
continue;
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- width = ieee80211_get_max_required_bw(sdata, link_id);
- break;
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
+ if (!link->sdata->vif.cfg.assoc) {
+ /*
+ * The AP's sta->bandwidth may not yet be set
+ * at this point (pre-association), so simply
+ * take the width from the chandef. We cannot
+ * have TDLS peers yet (only after association).
+ */
+ width = link->conf->chanreq.oper.width;
+ break;
+ }
/*
- * The ap's sta->bandwidth is not set yet at this
- * point, so take the width from the chandef, but
- * account also for TDLS peers
+ * otherwise just use min_def like in AP, depending on what
+ * we currently think the AP STA (and possibly TDLS peers)
+ * require(s)
*/
- width = max(link->conf->chandef.width,
- ieee80211_get_max_required_bw(sdata, link_id));
+ fallthrough;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ width = ieee80211_get_max_required_bw(link);
break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
continue;
+ case NL80211_IFTYPE_MONITOR:
+ WARN_ON_ONCE(!ieee80211_hw_check(&local->hw,
+ NO_VIRTUAL_MONITOR));
+ fallthrough;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
- width = link->conf->chandef.width;
+ width = link->conf->chanreq.oper.width;
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
WARN_ON_ONCE(1);
@@ -312,40 +373,13 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
max_bw = max(max_bw, width);
}
- rcu_read_unlock();
-
- return max_bw;
-}
-
-static enum nl80211_chan_width
-ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
-{
- struct ieee80211_sub_if_data *sdata;
- enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- enum nl80211_chan_width width;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
- rsvd_for);
-
- max_bw = max(max_bw, width);
- }
/* use the configured bandwidth in case of monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
max_bw = max(max_bw, ctx->conf.def.width);
- rcu_read_unlock();
-
return max_bw;
}
@@ -357,7 +391,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
static u32
_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
enum nl80211_chan_width max_bw;
struct cfg80211_chan_def min_def;
@@ -377,12 +412,13 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
return 0;
}
- max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for);
+ max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for,
+ check_reserved);
/* downgrade chandef up to max_bw */
min_def = ctx->conf.def;
while (min_def.width > max_bw)
- ieee80211_chandef_downgrade(&min_def);
+ ieee80211_chandef_downgrade(&min_def, NULL);
if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
return 0;
@@ -391,15 +427,12 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
if (!ctx->driver_present)
return 0;
- return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
+ return IEEE80211_CHANCTX_CHANGE_MIN_DEF;
}
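
Put differently, min_def starts from the full context chandef and is stepped down until its width matches the widest bandwidth any current user of the context needs; only if that result differs (and the driver already knows the context) is a change flagged. Roughly, with illustrative widths:

    /* context runs at 160 MHz but the widest STA/link only needs 80 MHz */
    struct cfg80211_chan_def min_def = ctx->conf.def;        /* 160 MHz */

    while (min_def.width > NL80211_CHAN_WIDTH_80)
        ieee80211_chandef_downgrade(&min_def, NULL);          /* 160 -> 80 */
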
-/* calling this function is assuming that station vif is updated to
- * lates changes by calling ieee80211_link_update_chandef
- */
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- bool narrowed)
+ bool reserved, bool narrowed)
{
struct sta_info *sta;
struct ieee80211_supported_band *sband =
@@ -416,13 +449,17 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
continue;
for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
+ struct ieee80211_link_data *link =
+ rcu_dereference(sdata->link[link_id]);
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *new_chandef;
struct link_sta_info *link_sta;
- if (!link_conf)
+ if (!link)
continue;
+ link_conf = link->conf;
+
if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf)
continue;
@@ -430,19 +467,25 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
if (!link_sta)
continue;
- new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta);
+ if (reserved)
+ new_chandef = &link->reserved.oper;
+ else
+ new_chandef = &link_conf->chanreq.oper;
+
+ new_sta_bw = _ieee80211_sta_cur_vht_bw(link_sta,
+ new_chandef);
/* nothing change */
if (new_sta_bw == link_sta->pub->bandwidth)
continue;
/* vif changed to narrow BW and narrow BW for station wasn't
- * requested or vise versa */
+ * requested or vice versa */
if ((new_sta_bw < link_sta->pub->bandwidth) == !narrowed)
continue;
link_sta->pub->bandwidth = new_sta_bw;
- rate_control_rate_update(local, sband, sta, link_id,
+ rate_control_rate_update(local, sband, link_sta,
IEEE80211_RC_BW_CHANGED);
}
}
@@ -456,29 +499,36 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
*/
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
- u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for,
+ check_reserved);
if (!changed)
return;
/* check is BW narrowed */
- ieee80211_chan_bw_change(local, ctx, true);
+ ieee80211_chan_bw_change(local, ctx, false, true);
drv_change_chanctx(local, ctx, changed);
/* check is BW wider */
- ieee80211_chan_bw_change(local, ctx, false);
+ ieee80211_chan_bw_change(local, ctx, false, false);
}
static void _ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
struct ieee80211_link_data *rsvd_for)
{
- u32 changed;
+ const struct cfg80211_chan_def *chandef = &chanreq->oper;
+ struct ieee80211_chan_req ctx_req = {
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+ u32 changed = 0;
/* expected to handle only 20/40/80/160/320 channel widths */
switch (chandef->width) {
@@ -498,49 +548,56 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
* due to maybe not returning from it, e.g in case new context was added
* first time with all parameters up to date.
*/
- ieee80211_chan_bw_change(local, old_ctx, true);
+ ieee80211_chan_bw_change(local, old_ctx, false, true);
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
- ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ if (ieee80211_chanreq_identical(&ctx_req, chanreq)) {
+ ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false);
return;
}
- WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+ WARN_ON(ieee80211_chanctx_refcount(local, ctx) > 1 &&
+ !cfg80211_chandef_compatible(&ctx->conf.def, &chanreq->oper));
ieee80211_remove_wbrf(local, &ctx->conf.def);
+ if (!cfg80211_chandef_identical(&ctx->conf.def, &chanreq->oper)) {
+ if (ctx->conf.def.width != chanreq->oper.width)
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ if (ctx->conf.def.punctured != chanreq->oper.punctured)
+ changed |= IEEE80211_CHANCTX_CHANGE_PUNCTURING;
+ }
+ if (!cfg80211_chandef_identical(&ctx->conf.ap, &chanreq->ap))
+ changed |= IEEE80211_CHANCTX_CHANGE_AP;
ctx->conf.def = *chandef;
+ ctx->conf.ap = chanreq->ap;
/* check if min chanctx also changed */
- changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
- _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false);
ieee80211_add_wbrf(local, &ctx->conf.def);
drv_change_chanctx(local, ctx, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
-
- /* check is BW wider */
- ieee80211_chan_bw_change(local, old_ctx, false);
+ /* check if BW is wider */
+ ieee80211_chan_bw_change(local, old_ctx, false, false);
}
static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef)
+ const struct ieee80211_chan_req *chanreq)
{
- _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+ _ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL);
}
+/* Note: if successful, the returned chanctx is reserved for the link */
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
+ struct ieee80211_chan_req tmp;
struct ieee80211_chanctx *ctx;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -548,8 +605,11 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
return NULL;
+ if (WARN_ON(link->reserved_chanctx))
+ return NULL;
+
list_for_each_entry(ctx, &local->chanctx_list, list) {
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE)
continue;
@@ -557,15 +617,25 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
+ compat = ieee80211_chanctx_compatible(ctx, chanreq, &tmp);
if (!compat)
continue;
- compat = ieee80211_chanctx_reserved_chandef(local, ctx,
- compat);
+ compat = ieee80211_chanctx_reserved_chanreq(local, ctx,
+ compat, &tmp);
if (!compat)
continue;
+ /*
+ * Reserve the chanctx temporarily, as the driver might change
+ * active links during callbacks we make into it below and/or
+ * later during assignment, which could (otherwise) cause the
+ * context to actually be removed.
+ */
+ link->reserved_chanctx = ctx;
+ list_add(&link->reserved_chanctx_list,
+ &ctx->reserved_links);
+
ieee80211_change_chanctx(local, ctx, ctx, compat);
return ctx;
@@ -576,26 +646,14 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (link && link->radar_required) {
- rcu_read_unlock();
- return true;
- }
- }
+ for_each_sdata_link(local, link) {
+ if (link->radar_required)
+ return true;
}
- rcu_read_unlock();
return false;
}
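
Several loops in this file move from RCU iteration over interfaces and their links to for_each_sdata_link(), relying on the wiphy mutex instead of rcu_read_lock(). The macro body is not part of this diff; conceptually it visits every valid link of every running interface, roughly equivalent to the pattern it replaces (a sketch under that assumption, not the real definition):

    struct ieee80211_sub_if_data *sdata;
    struct ieee80211_link_data *link;
    unsigned int link_id;

    lockdep_assert_wiphy(local->hw.wiphy);

    list_for_each_entry(sdata, &local->interfaces, list) {
        if (!ieee80211_sdata_running(sdata))
            continue;
        for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
            link = wiphy_dereference(local->hw.wiphy,
                                     sdata->link[link_id]);
            if (!link)
                continue;
            /* loop body, e.g. the link->radar_required check above */
        }
    }
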
@@ -605,44 +663,26 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- bool required = false;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- if (!ieee80211_sdata_running(sdata))
+ for_each_sdata_link(local, link) {
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
- continue;
- if (!link->radar_required)
- continue;
- required = true;
- break;
- }
-
- if (required)
- break;
+ if (!link->radar_required)
+ continue;
+ return true;
}
- rcu_read_unlock();
- return required;
+ return false;
}
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode)
+ const struct ieee80211_chan_req *chanreq,
+ enum ieee80211_chanctx_mode mode,
+ int radio_idx)
{
struct ieee80211_chanctx *ctx;
@@ -654,12 +694,15 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
INIT_LIST_HEAD(&ctx->assigned_links);
INIT_LIST_HEAD(&ctx->reserved_links);
- ctx->conf.def = *chandef;
+ ctx->conf.def = chanreq->oper;
+ ctx->conf.ap = chanreq->ap;
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
ctx->conf.radar_enabled = false;
- _ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ ctx->conf.radio_idx = radio_idx;
+ ctx->radar_detected = false;
+ _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
return ctx;
}
@@ -674,23 +717,15 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
ieee80211_add_wbrf(local, &ctx->conf.def);
- if (!local->use_chanctx)
- local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
-
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
ieee80211_hw_config(local, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = ctx->conf.def;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- err = drv_add_chanctx(local, ctx);
- if (err) {
- ieee80211_recalc_idle(local);
- return err;
- }
+ err = drv_add_chanctx(local, ctx);
+ if (err) {
+ ieee80211_recalc_idle(local);
+ return err;
}
return 0;
@@ -698,74 +733,56 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode)
+ const struct ieee80211_chan_req *chanreq,
+ enum ieee80211_chanctx_mode mode,
+ bool assign_on_failure,
+ int radio_idx)
{
struct ieee80211_chanctx *ctx;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ ctx = ieee80211_alloc_chanctx(local, chanreq, mode, radio_idx);
if (!ctx)
return ERR_PTR(-ENOMEM);
err = ieee80211_add_chanctx(local, ctx);
- if (err) {
+ if (!assign_on_failure && err) {
kfree(ctx);
return ERR_PTR(err);
}
+ /* We ignored a driver error, see _ieee80211_set_active_links */
+ WARN_ON_ONCE(err && !local->in_reconfig);
list_add_rcu(&ctx->list, &local->chanctx_list);
return ctx;
}
static void ieee80211_del_chanctx(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+ struct ieee80211_chanctx *ctx,
+ bool skip_idle_recalc)
{
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx) {
- struct cfg80211_chan_def *chandef = &local->_oper_chandef;
- /* S1G doesn't have 20MHz, so get the correct width for the
- * current channel.
- */
- if (chandef->chan->band == NL80211_BAND_S1GHZ)
- chandef->width =
- ieee80211_s1g_channel_width(chandef->chan);
- else
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = chandef->chan->center_freq;
- chandef->freq1_offset = chandef->chan->freq_offset;
- chandef->center_freq2 = 0;
-
- /* NOTE: Disabling radar is only valid here for
- * single channel context. To be sure, check it ...
- */
- WARN_ON(local->hw.conf.radar_enabled &&
- !list_empty(&local->chanctx_list));
-
- local->hw.conf.radar_enabled = false;
-
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- drv_remove_chanctx(local, ctx);
- }
+ drv_remove_chanctx(local, ctx);
- ieee80211_recalc_idle(local);
+ if (!skip_idle_recalc)
+ ieee80211_recalc_idle(local);
ieee80211_remove_wbrf(local, &ctx->conf.def);
}
static void ieee80211_free_chanctx(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+ struct ieee80211_chanctx *ctx,
+ bool skip_idle_recalc)
{
lockdep_assert_wiphy(local->hw.wiphy);
WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
list_del_rcu(&ctx->list);
- ieee80211_del_chanctx(local, ctx);
+ ieee80211_del_chanctx(local, ctx, skip_idle_recalc);
kfree_rcu(ctx, rcu_head);
}
@@ -773,64 +790,64 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- const struct cfg80211_chan_def *compat = NULL;
+ const struct ieee80211_chan_req *compat = NULL;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chan_req tmp;
struct sta_info *sta;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int link_id;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
+ for_each_sdata_link(local, link) {
+ struct ieee80211_bss_conf *link_conf;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
+ link_conf = link->conf;
- if (!link_conf)
- continue;
-
- if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
- continue;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
- if (!compat)
- compat = &link_conf->chandef;
+ if (!compat)
+ compat = &link_conf->chanreq;
- compat = cfg80211_chandef_compatible(&link_conf->chandef,
- compat);
- if (WARN_ON_ONCE(!compat))
- break;
- }
+ compat = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ compat, &tmp);
+ if (WARN_ON_ONCE(!compat))
+ return;
}
- if (WARN_ON_ONCE(!compat)) {
- rcu_read_unlock();
+ if (WARN_ON_ONCE(!compat))
return;
- }
/* TDLS peers can sometimes affect the chandef width */
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_chan_req tdls_chanreq = {};
+ int tdls_link_id;
+
if (!sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!sta->tdls_chandef.chan)
continue;
- compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
- compat);
+ tdls_link_id = ieee80211_tdls_sta_link_id(sta);
+ link = sdata_dereference(sdata->link[tdls_link_id], sdata);
+ if (!link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
+ continue;
+
+ tdls_chanreq.oper = sta->tdls_chandef;
+
+ /* note this always fills and returns &tmp if compat */
+ compat = ieee80211_chanreq_compatible(&tdls_chanreq,
+ compat, &tmp);
if (WARN_ON_ONCE(!compat))
- break;
+ return;
}
- rcu_read_unlock();
-
- if (!compat)
- return;
ieee80211_change_chanctx(local, ctx, ctx, compat);
}
@@ -849,22 +866,19 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
chanctx->conf.radar_enabled = radar_enabled;
- if (!local->use_chanctx) {
- local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
-
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}
static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
- struct ieee80211_chanctx *new_ctx)
+ struct ieee80211_chanctx *new_ctx,
+ bool assign_on_failure)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *curr_ctx = NULL;
- int ret = 0;
+ bool new_idle;
+ int ret;
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
return -EOPNOTSUPP;
@@ -882,37 +896,58 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
if (new_ctx) {
/* recalc considering the link we'll use it for now */
- ieee80211_recalc_chanctx_min_def(local, new_ctx, link);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false);
ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx);
- if (ret)
- goto out;
-
- conf = &new_ctx->conf;
- list_add(&link->assigned_chanctx_list,
- &new_ctx->assigned_links);
+ if (assign_on_failure || !ret) {
+ /* Need to continue, see _ieee80211_set_active_links */
+ WARN_ON_ONCE(ret && !local->in_reconfig);
+ ret = 0;
+
+ /* succeeded, so commit it to the data structures */
+ conf = &new_ctx->conf;
+ list_add(&link->assigned_chanctx_list,
+ &new_ctx->assigned_links);
+ }
+ } else {
+ ret = 0;
}
-out:
rcu_assign_pointer(link->conf->chanctx_conf, conf);
- sdata->vif.cfg.idle = !conf;
-
if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
ieee80211_recalc_chanctx_chantype(local, curr_ctx);
ieee80211_recalc_smps_chanctx(local, curr_ctx);
ieee80211_recalc_radar_chanctx(local, curr_ctx);
- ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL, false);
}
if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
- ieee80211_recalc_txpower(sdata, false);
- ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ ieee80211_recalc_txpower(link, false);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false);
+ }
+
+ if (conf) {
+ new_idle = false;
+ } else {
+ struct ieee80211_link_data *tmp;
+
+ new_idle = true;
+ for_each_sdata_link(local, tmp) {
+ if (rcu_access_pointer(tmp->conf->chanctx_conf)) {
+ new_idle = false;
+ break;
+ }
+ }
}
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR)
- ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+ if (new_idle != sdata->vif.cfg.idle) {
+ sdata->vif.cfg.idle = new_idle;
+
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+ }
ieee80211_check_fast_xmit_iface(sdata);
@@ -924,23 +959,23 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata;
u8 rx_chains_static, rx_chains_dynamic;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
rx_chains_static = 1;
rx_chains_dynamic = 1;
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ for_each_sdata_link(local, link) {
u8 needed_static, needed_dynamic;
- unsigned int link_id;
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- switch (sdata->vif.type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- if (!sdata->u.mgd.associated)
+ if (!link->sdata->u.mgd.associated)
+ continue;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
continue;
break;
case NL80211_IFTYPE_AP:
@@ -952,59 +987,43 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
continue;
}
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
- continue;
+ if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
+ continue;
- switch (link->smps_mode) {
- default:
- WARN_ONCE(1, "Invalid SMPS mode %d\n",
- link->smps_mode);
- fallthrough;
- case IEEE80211_SMPS_OFF:
- needed_static = link->needed_rx_chains;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- needed_static = 1;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_STATIC:
- needed_static = 1;
- needed_dynamic = 1;
- break;
- }
+ if (link->sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ rx_chains_dynamic = rx_chains_static = local->rx_chains;
+ break;
+ }
- rx_chains_static = max(rx_chains_static, needed_static);
- rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ switch (link->smps_mode) {
+ default:
+ WARN_ONCE(1, "Invalid SMPS mode %d\n",
+ link->smps_mode);
+ fallthrough;
+ case IEEE80211_SMPS_OFF:
+ needed_static = link->needed_rx_chains;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ needed_static = 1;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ needed_static = 1;
+ needed_dynamic = 1;
+ break;
}
+
+ rx_chains_static = max(rx_chains_static, needed_static);
+ rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
}
/* Disable SMPS for the monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf)
rx_chains_dynamic = rx_chains_static = local->rx_chains;
- rcu_read_unlock();
-
- if (!local->use_chanctx) {
- if (rx_chains_static > 1)
- local->smps_mode = IEEE80211_SMPS_OFF;
- else if (rx_chains_dynamic > 1)
- local->smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
- local->smps_mode = IEEE80211_SMPS_STATIC;
- ieee80211_hw_config(local, 0);
- }
-
if (rx_chains_static == chanctx->conf.rx_chains_static &&
rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
return;
@@ -1043,17 +1062,16 @@ __ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
if (clear)
conf = NULL;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
rcu_assign_pointer(vlan_conf->chanctx_conf, conf);
}
- rcu_read_unlock();
}
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -1095,104 +1113,151 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
list_del_rcu(&ctx->list);
kfree_rcu(ctx, rcu_head);
} else {
- ieee80211_free_chanctx(sdata->local, ctx);
+ ieee80211_free_chanctx(sdata->local, ctx, false);
}
}
return 0;
}
+static struct ieee80211_chanctx *
+ieee80211_replace_chanctx(struct ieee80211_local *local,
+ const struct ieee80211_chan_req *chanreq,
+ enum ieee80211_chanctx_mode mode,
+ struct ieee80211_chanctx *curr_ctx)
+{
+ struct ieee80211_chanctx *new_ctx, *ctx;
+ struct wiphy *wiphy = local->hw.wiphy;
+ const struct wiphy_radio *radio;
+
+ if (!curr_ctx || (curr_ctx->replace_state ==
+ IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+ !list_empty(&curr_ctx->reserved_links)) {
+ /*
+ * Another link already requested this context for a
+ * reservation. Find another one hoping all links assigned
+ * to it will also switch soon enough.
+ *
+ * TODO: This needs a little more work as some cases
+ * (more than 2 chanctx capable devices) may fail which could
+ * otherwise succeed provided some channel context juggling was
+ * performed.
+ *
+ * Consider ctx1..3, link1..6, each ctx has 2 links. link1 and
+ * link2 from ctx1 request new different chandefs starting 2
+ * in-place reservations with ctx4 and ctx5 replacing ctx1 and
+ * ctx2 respectively. Next link5 and link6 from ctx3 reserve
+ * ctx4. If link3 and link4 remain on ctx2 as they are then this
+ * fails unless `replace_ctx` from ctx5 is replaced with ctx3.
+ */
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (ctx->replace_state !=
+ IEEE80211_CHANCTX_REPLACE_NONE)
+ continue;
+
+ if (!list_empty(&ctx->reserved_links))
+ continue;
+
+ if (ctx->conf.radio_idx >= 0) {
+ radio = &wiphy->radio[ctx->conf.radio_idx];
+ if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper))
+ continue;
+ }
+
+ curr_ctx = ctx;
+ break;
+ }
+ }
+
+ /*
+ * If that's true then all available contexts already have reservations
+ * and cannot be used.
+ */
+ if (!curr_ctx || (curr_ctx->replace_state ==
+ IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+ !list_empty(&curr_ctx->reserved_links))
+ return ERR_PTR(-EBUSY);
+
+ new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1);
+ if (!new_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ new_ctx->replace_ctx = curr_ctx;
+ new_ctx->replace_state = IEEE80211_CHANCTX_REPLACES_OTHER;
+
+ curr_ctx->replace_ctx = new_ctx;
+ curr_ctx->replace_state = IEEE80211_CHANCTX_WILL_BE_REPLACED;
+
+ list_add_rcu(&new_ctx->list, &local->chanctx_list);
+
+ return new_ctx;
+}
+
+static bool
+ieee80211_find_available_radio(struct ieee80211_local *local,
+ const struct ieee80211_chan_req *chanreq,
+ u32 radio_mask, int *radio_idx)
+{
+ struct wiphy *wiphy = local->hw.wiphy;
+ const struct wiphy_radio *radio;
+ int i;
+
+ *radio_idx = -1;
+ if (!wiphy->n_radio)
+ return true;
+
+ for (i = 0; i < wiphy->n_radio; i++) {
+ if (!(radio_mask & BIT(i)))
+ continue;
+
+ radio = &wiphy->radio[i];
+ if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper))
+ continue;
+
+ if (!ieee80211_can_create_new_chanctx(local, i))
+ continue;
+
+ *radio_idx = i;
+ return true;
+ }
+
+ return false;
+}
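
The radio mask comes from the interface (sdata->wdev.radio_mask) and restricts which of the wiphy's radios may host the new context; wiphys without a radio array skip the check entirely and report -1. A sketch mirroring the call sites below (the mask value is illustrative):

    /* interface allowed on the first two radios of the wiphy */
    u32 radio_mask = BIT(0) | BIT(1);
    int radio_idx;

    if (ieee80211_find_available_radio(local, chanreq, radio_mask, &radio_idx))
        /* radio_idx is a usable radio, or -1 when the wiphy has no radios */
        ctx = ieee80211_new_chanctx(local, chanreq, mode, false, radio_idx);
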
+
int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
bool radar_required)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx;
+ struct ieee80211_chanctx *new_ctx, *curr_ctx;
+ int radio_idx;
lockdep_assert_wiphy(local->hw.wiphy);
curr_ctx = ieee80211_link_get_chanctx(link);
- if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
+ if (curr_ctx && !local->ops->switch_vif_chanctx)
return -EOPNOTSUPP;
- new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode);
if (!new_ctx) {
- if (ieee80211_can_create_new_chanctx(local)) {
- new_ctx = ieee80211_new_chanctx(local, chandef, mode);
- if (IS_ERR(new_ctx))
- return PTR_ERR(new_ctx);
- } else {
- if (!curr_ctx ||
- (curr_ctx->replace_state ==
- IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_links)) {
- /*
- * Another link already requested this context
- * for a reservation. Find another one hoping
- * all links assigned to it will also switch
- * soon enough.
- *
- * TODO: This needs a little more work as some
- * cases (more than 2 chanctx capable devices)
- * may fail which could otherwise succeed
- * provided some channel context juggling was
- * performed.
- *
- * Consider ctx1..3, link1..6, each ctx has 2
- * links. link1 and link2 from ctx1 request new
- * different chandefs starting 2 in-place
- * reserations with ctx4 and ctx5 replacing
- * ctx1 and ctx2 respectively. Next link5 and
- * link6 from ctx3 reserve ctx4. If link3 and
- * link4 remain on ctx2 as they are then this
- * fails unless `replace_ctx` from ctx5 is
- * replaced with ctx3.
- */
- list_for_each_entry(ctx, &local->chanctx_list,
- list) {
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACE_NONE)
- continue;
-
- if (!list_empty(&ctx->reserved_links))
- continue;
-
- curr_ctx = ctx;
- break;
- }
- }
-
- /*
- * If that's true then all available contexts already
- * have reservations and cannot be used.
- */
- if (!curr_ctx ||
- (curr_ctx->replace_state ==
- IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_links))
- return -EBUSY;
-
- new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
- if (!new_ctx)
- return -ENOMEM;
-
- new_ctx->replace_ctx = curr_ctx;
- new_ctx->replace_state =
- IEEE80211_CHANCTX_REPLACES_OTHER;
-
- curr_ctx->replace_ctx = new_ctx;
- curr_ctx->replace_state =
- IEEE80211_CHANCTX_WILL_BE_REPLACED;
-
- list_add_rcu(&new_ctx->list, &local->chanctx_list);
- }
+ if (ieee80211_can_create_new_chanctx(local, -1) &&
+ ieee80211_find_available_radio(local, chanreq,
+ sdata->wdev.radio_mask,
+ &radio_idx))
+ new_ctx = ieee80211_new_chanctx(local, chanreq, mode,
+ false, radio_idx);
+ else
+ new_ctx = ieee80211_replace_chanctx(local, chanreq,
+ mode, curr_ctx);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
}
list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
link->reserved_chanctx = new_ctx;
- link->reserved_chandef = *chandef;
+ link->reserved = *chanreq;
link->reserved_radar_required = radar_required;
link->reserved_ready = false;
@@ -1210,11 +1275,11 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
wiphy_work_queue(sdata->local->hw.wiphy,
- &link->csa_finalize_work);
+ &link->csa.finalize_work);
break;
case NL80211_IFTYPE_STATION:
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work, 0);
+ &link->u.mgd.csa.switch_work, 0);
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_AP_VLAN:
@@ -1231,29 +1296,28 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
}
static void
-ieee80211_link_update_chandef(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef)
+ieee80211_link_update_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
unsigned int link_id = link->link_id;
struct ieee80211_sub_if_data *vlan;
- link->conf->chandef = *chandef;
+ link->conf->chanreq = *chanreq;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(sdata->local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
- vlan_conf->chandef = *chandef;
+ vlan_conf->chanreq = *chanreq;
}
- rcu_read_unlock();
}
static int
@@ -1264,7 +1328,8 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
u64 changed = 0;
int err;
@@ -1286,17 +1351,18 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
- _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
+ _ieee80211_change_chanctx(local, new_ctx, old_ctx, chanreq, link);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1310,7 +1376,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
CHANCTX_SWMODE_REASSIGN_VIF);
if (err) {
if (ieee80211_chanctx_refcount(local, new_ctx) == 0)
- ieee80211_free_chanctx(local, new_ctx);
+ ieee80211_free_chanctx(local, new_ctx, false);
goto out;
}
@@ -1324,9 +1390,9 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
ieee80211_check_fast_xmit_iface(sdata);
if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
- ieee80211_free_chanctx(local, old_ctx);
+ ieee80211_free_chanctx(local, old_ctx, false);
- ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false);
ieee80211_recalc_smps_chanctx(local, new_ctx);
ieee80211_recalc_radar_chanctx(local, new_ctx);
@@ -1344,7 +1410,8 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
int err;
old_ctx = ieee80211_link_get_chanctx(link);
@@ -1363,20 +1430,21 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
+ ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq);
list_del(&link->reserved_chanctx_list);
link->reserved_chanctx = NULL;
- err = ieee80211_assign_link_chanctx(link, new_ctx);
+ err = ieee80211_assign_link_chanctx(link, new_ctx, false);
if (err) {
if (ieee80211_chanctx_refcount(local, new_ctx) == 0)
- ieee80211_free_chanctx(local, new_ctx);
+ ieee80211_free_chanctx(local, new_ctx, false);
goto out;
}
@@ -1412,24 +1480,6 @@ ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link)
return true;
}
-static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local,
- struct ieee80211_chanctx *new_ctx)
-{
- const struct cfg80211_chan_def *chandef;
-
- lockdep_assert_wiphy(local->hw.wiphy);
-
- chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
- if (WARN_ON(!chandef))
- return -EINVAL;
-
- local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled;
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
-
- return 0;
-}
-
static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
int n_vifs)
{
@@ -1491,7 +1541,7 @@ static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local)
if (!list_empty(&ctx->replace_ctx->assigned_links))
continue;
- ieee80211_del_chanctx(local, ctx->replace_ctx);
+ ieee80211_del_chanctx(local, ctx->replace_ctx, false);
err = ieee80211_add_chanctx(local, ctx);
if (err)
goto err;
@@ -1508,7 +1558,7 @@ err:
if (!list_empty(&ctx->replace_ctx->assigned_links))
continue;
- ieee80211_del_chanctx(local, ctx);
+ ieee80211_del_chanctx(local, ctx, false);
WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx));
}
@@ -1518,7 +1568,6 @@ err:
static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
- struct ieee80211_chanctx *new_ctx = NULL;
int err, n_assigned, n_reserved, n_ready;
int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
@@ -1551,9 +1600,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
- if (!local->use_chanctx)
- new_ctx = ctx;
-
n_ctx++;
n_assigned = 0;
@@ -1607,32 +1653,49 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
if (WARN_ON(n_ctx == 0) ||
WARN_ON(n_vifs_switch == 0 &&
n_vifs_assign == 0 &&
- n_vifs_ctxless == 0) ||
- WARN_ON(n_ctx > 1 && !local->use_chanctx) ||
- WARN_ON(!new_ctx && !local->use_chanctx)) {
+ n_vifs_ctxless == 0)) {
err = -EINVAL;
goto err;
}
+ /* update station rate control and min width before switch */
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ struct ieee80211_link_data *link;
+
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ continue;
+
+ if (WARN_ON(!ctx->replace_ctx)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_for_each_entry(link, &ctx->reserved_links,
+ reserved_chanctx_list) {
+ if (!ieee80211_link_has_in_place_reservation(link))
+ continue;
+
+ ieee80211_chan_bw_change(local,
+ ieee80211_link_get_chanctx(link),
+ true, true);
+ }
+
+ ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true);
+ }
+
/*
* All necessary vifs are ready. Perform the switch now depending on
* reservations and driver capabilities.
*/
- if (local->use_chanctx) {
- if (n_vifs_switch > 0) {
- err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
- if (err)
- goto err;
- }
+ if (n_vifs_switch > 0) {
+ err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
+ if (err)
+ goto err;
+ }
- if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
- err = ieee80211_chsw_switch_ctxs(local);
- if (err)
- goto err;
- }
- } else {
- err = ieee80211_chsw_switch_hwconf(local, new_ctx);
+ if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
+ err = ieee80211_chsw_switch_ctxs(local);
if (err)
goto err;
}
@@ -1672,22 +1735,22 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->radar_required = link->reserved_radar_required;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
if (changed)
ieee80211_link_info_change_notify(sdata,
link,
changed);
- ieee80211_recalc_txpower(sdata, false);
+ ieee80211_recalc_txpower(link, false);
}
ieee80211_recalc_chanctx_chantype(local, ctx);
ieee80211_recalc_smps_chanctx(local, ctx);
ieee80211_recalc_radar_chanctx(local, ctx);
- ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
reserved_chanctx_list) {
@@ -1700,6 +1763,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->reserved_chanctx = NULL;
ieee80211_link_chanctx_reservation_complete(link);
+ ieee80211_chan_bw_change(local, ctx, false, false);
}
/*
@@ -1772,7 +1836,8 @@ err:
return err;
}
-static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
+void __ieee80211_link_release_channel(struct ieee80211_link_data *link,
+ bool skip_idle_recalc)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
@@ -1798,9 +1863,9 @@ static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
}
- ieee80211_assign_link_chanctx(link, NULL);
+ ieee80211_assign_link_chanctx(link, NULL, false);
if (ieee80211_chanctx_refcount(local, ctx) == 0)
- ieee80211_free_chanctx(local, ctx);
+ ieee80211_free_chanctx(local, ctx, skip_idle_recalc);
link->radar_required = false;
@@ -1809,56 +1874,74 @@ static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
ieee80211_vif_use_reserved_switch(local);
}
-int ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode)
+int _ieee80211_link_use_channel(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
+ enum ieee80211_chanctx_mode mode,
+ bool assign_on_failure)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *ctx;
u8 radar_detect_width = 0;
+ bool reserved = false;
+ int radio_idx;
int ret;
lockdep_assert_wiphy(local->hw.wiphy);
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id))) {
- ieee80211_link_update_chandef(link, chandef);
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ ieee80211_link_update_chanreq(link, chanreq);
return 0;
}
ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
- chandef,
+ &chanreq->oper,
sdata->wdev.iftype);
if (ret < 0)
goto out;
if (ret > 0)
- radar_detect_width = BIT(chandef->width);
+ radar_detect_width = BIT(chanreq->oper.width);
link->radar_required = ret;
- ret = ieee80211_check_combinations(sdata, chandef, mode,
- radar_detect_width);
+ ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode,
+ radar_detect_width, -1);
if (ret < 0)
goto out;
- __ieee80211_link_release_channel(link);
-
- ctx = ieee80211_find_chanctx(local, chandef, mode);
- if (!ctx)
- ctx = ieee80211_new_chanctx(local, chandef, mode);
+ __ieee80211_link_release_channel(link, false);
+
+ ctx = ieee80211_find_chanctx(local, link, chanreq, mode);
+ /* Note: context is now reserved */
+ if (ctx)
+ reserved = true;
+ else if (!ieee80211_find_available_radio(local, chanreq,
+ sdata->wdev.radio_mask,
+ &radio_idx))
+ ctx = ERR_PTR(-EBUSY);
+ else
+ ctx = ieee80211_new_chanctx(local, chanreq, mode,
+ assign_on_failure, radio_idx);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
+
+ ret = ieee80211_assign_link_chanctx(link, ctx, assign_on_failure);
+
+ if (reserved) {
+ /* remove reservation */
+ WARN_ON(link->reserved_chanctx != ctx);
+ link->reserved_chanctx = NULL;
+ list_del(&link->reserved_chanctx_list);
+ }
- ret = ieee80211_assign_link_chanctx(link, ctx);
if (ret) {
/* if assign fails refcount stays the same */
if (ieee80211_chanctx_refcount(local, ctx) == 0)
- ieee80211_free_chanctx(local, ctx);
+ ieee80211_free_chanctx(local, ctx, false);
goto out;
}
@@ -1932,28 +2015,79 @@ int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
return 0;
}
-int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed)
+/*
+ * This is similar to ieee80211_chanctx_compatible(), but rechecks
+ * against all the links actually using it (except the one that's
+ * passed, since that one is changing).
+ * This is done in order to allow changes to the AP's bandwidth for
+ * wider bandwidth OFDMA purposes, which wouldn't be treated as
+ * compatible by ieee80211_chanctx_compatible() but is OK if the link
+ * requesting the update is the only one using it.
+ */
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_recheck(struct ieee80211_local *local,
+ struct ieee80211_link_data *skip_link,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret = req;
+ struct ieee80211_link_data *link;
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ for_each_sdata_link(local, link) {
+ if (link == skip_link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->conf->chanreq,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+
+ if (link->reserved_chanctx == ctx) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->reserved,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+ }
+
+ *tmp = *ret;
+ return tmp;
+}
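
As a concrete reading of the comment above: if a client link's AP changes its advertised bandwidth (say, the "ap" chandef used for wider-bandwidth OFDMA), the link's new request no longer matches what the context currently carries, so the plain compatibility check would refuse it; because the recheck skips the requesting link, the change is accepted whenever no other link still depends on the old parameters, and ieee80211_recalc_chanctx_chantype() then brings the context up to date.
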
+
+int ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
+ u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
+ struct ieee80211_chan_req tmp;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ &chanreq->oper,
IEEE80211_CHAN_DISABLED))
return -EINVAL;
- if (cfg80211_chandef_identical(chandef, &link_conf->chandef))
+ /* for non-HT 20 MHz the rest doesn't matter */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT &&
+ cfg80211_chandef_identical(&chanreq->oper, &link_conf->chanreq.oper))
return 0;
- if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
- link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ /* but you cannot switch to/from it */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return -EINVAL;
conf = rcu_dereference_protected(link_conf->chanctx_conf,
@@ -1963,13 +2097,14 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- compat = cfg80211_chandef_compatible(&conf->def, chandef);
+ compat = ieee80211_chanctx_recheck(local, link, ctx, chanreq, &tmp);
if (!compat)
return -EINVAL;
switch (ctx->replace_state) {
case IEEE80211_CHANCTX_REPLACE_NONE:
- if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat))
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, compat,
+ &tmp))
return -EBUSY;
break;
case IEEE80211_CHANCTX_WILL_BE_REPLACED:
@@ -1984,7 +2119,7 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
break;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ieee80211_recalc_chanctx_chantype(local, ctx);
@@ -1999,7 +2134,7 @@ void ieee80211_link_release_channel(struct ieee80211_link_data *link)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
if (rcu_access_pointer(link->conf->chanctx_conf))
- __ieee80211_link_release_channel(link);
+ __ieee80211_link_release_channel(link, false);
}
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
@@ -2019,12 +2154,11 @@ void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
- rcu_read_lock();
- ap_conf = rcu_dereference(ap->vif.link_conf[link_id]);
- conf = rcu_dereference_protected(ap_conf->chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ ap_conf = wiphy_dereference(local->hw.wiphy,
+ ap->vif.link_conf[link_id]);
+ conf = wiphy_dereference(local->hw.wiphy,
+ ap_conf->chanctx_conf);
rcu_assign_pointer(link_conf->chanctx_conf, conf);
- rcu_read_unlock();
}
void ieee80211_iter_chan_contexts_atomic(
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index d49894df2351..5b81998cb0c9 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#ifndef __MAC80211_DEBUG_H
#define __MAC80211_DEBUG_H
@@ -152,16 +152,25 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
-#define link_dbg(link, fmt, ...) \
+#define link_id_info(sdata, link_id, fmt, ...) \
do { \
- if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \
- _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \
- (link)->link_id, \
- ##__VA_ARGS__); \
+ if (ieee80211_vif_is_mld(&sdata->vif)) \
+ _sdata_info(sdata, "[link %d] " fmt, link_id, \
+ ##__VA_ARGS__); \
else \
- _sdata_dbg(1, (link)->sdata, fmt, \
- ##__VA_ARGS__); \
+ _sdata_info(sdata, fmt, ##__VA_ARGS__); \
} while (0)
+#define _link_id_dbg(print, sdata, link_id, fmt, ...) \
+ do { \
+ if (ieee80211_vif_is_mld(&(sdata)->vif)) \
+ _sdata_dbg(print, sdata, "[link %d] " fmt, \
+ link_id, ##__VA_ARGS__); \
+ else \
+ _sdata_dbg(print, sdata, fmt, ##__VA_ARGS__); \
+ } while (0)
+#define link_dbg(link, fmt, ...) \
+ _link_id_dbg(1, (link)->sdata, (link)->link_id, \
+ fmt, ##__VA_ARGS__)
#define ht_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_HT_DEBUG, \
@@ -226,6 +235,9 @@ do { \
#define mlme_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
+#define mlme_link_id_dbg(sdata, link_id, fmt, ...) \
+ _link_id_dbg(MAC80211_MLME_DEBUG, sdata, link_id, \
+ fmt, ##__VA_ARGS__)
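
The new _link_id_dbg() helper covers call sites that have an sdata and a link ID but no struct ieee80211_link_data; link_dbg() and mlme_link_id_dbg() are thin wrappers on top of it. A minimal usage sketch (the call sites and arguments below are illustrative, not from this patch):

	/* per-link MLME debug with only sdata + link_id at hand */
	mlme_link_id_dbg(sdata, link_id, "assoc on link failed, status %d\n",
			 status_code);

	/* with a full link pointer, link_dbg() supplies sdata and link_id itself */
	link_dbg(link, "channel switch scheduled in %d beacons\n", count);
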
#define mlme_dbg_ratelimited(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 74be49191e70..bf0a2902d93c 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021-2024 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -42,9 +42,8 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
}
#define DEBUGFS_READONLY_FILE_OPS(name) \
-static const struct file_operations name## _ops = { \
+static const struct debugfs_short_fops name## _ops = { \
.read = name## _read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
};
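
The debugfs conversions in this file replace struct file_operations plus `.open = simple_open` with struct debugfs_short_fops, for which debugfs provides the open/release handling internally. A rough sketch of the resulting pattern, assuming a kernel that has the short-fops support (all names below are illustrative):

	static ssize_t foo_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
	{
		static const char buf[] = "example\n";

		return simple_read_from_buffer(userbuf, count, ppos,
					       buf, sizeof(buf) - 1);
	}

	/* no .open/.release needed; debugfs fills those in for short fops */
	static const struct debugfs_short_fops foo_ops = {
		.read = foo_read,
		.llseek = generic_file_llseek,
	};

	static void foo_debugfs_init(struct dentry *parent)
	{
		/* debugfs_create_file() accepts a short-fops pointer directly */
		debugfs_create_file("foo", 0400, parent, NULL, &foo_ops);
	}
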
@@ -142,10 +141,9 @@ static ssize_t aqm_write(struct file *file,
return -EINVAL;
}
-static const struct file_operations aqm_ops = {
+static const struct debugfs_short_fops aqm_ops = {
.write = aqm_write,
.read = aqm_read,
- .open = simple_open,
.llseek = default_llseek,
};
@@ -194,10 +192,9 @@ static ssize_t airtime_flags_write(struct file *file,
return count;
}
-static const struct file_operations airtime_flags_ops = {
+static const struct debugfs_short_fops airtime_flags_ops = {
.write = airtime_flags_write,
.read = airtime_flags_read,
- .open = simple_open,
.llseek = default_llseek,
};
@@ -225,9 +222,8 @@ static ssize_t aql_pending_read(struct file *file,
buf, len);
}
-static const struct file_operations aql_pending_ops = {
+static const struct debugfs_short_fops aql_pending_ops = {
.read = aql_pending_read,
- .open = simple_open,
.llseek = default_llseek,
};
@@ -288,7 +284,8 @@ static ssize_t aql_txq_limit_write(struct file *file,
q_limit_low_old = local->aql_txq_limit_low[ac];
q_limit_high_old = local->aql_txq_limit_high[ac];
- wiphy_lock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
+
local->aql_txq_limit_low[ac] = q_limit_low;
local->aql_txq_limit_high[ac] = q_limit_high;
@@ -300,15 +297,13 @@ static ssize_t aql_txq_limit_write(struct file *file,
sta->airtime[ac].aql_limit_high = q_limit_high;
}
}
- wiphy_unlock(local->hw.wiphy);
return count;
}
-static const struct file_operations aql_txq_limit_ops = {
+static const struct debugfs_short_fops aql_txq_limit_ops = {
.write = aql_txq_limit_write,
.read = aql_txq_limit_read,
- .open = simple_open,
.llseek = default_llseek,
};
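
The guard(wiphy)(...) conversions here (and in ethtool.c below) switch from explicit wiphy_lock()/wiphy_unlock() pairs to a scope-based guard that releases the wiphy mutex automatically on every return path. Schematic before/after, not taken verbatim from the patch:

	/* before: each exit path must unlock explicitly */
	wiphy_lock(local->hw.wiphy);
	if (invalid_input) {
		wiphy_unlock(local->hw.wiphy);
		return -EINVAL;
	}
	wiphy_unlock(local->hw.wiphy);
	return count;

	/* after: the guard unlocks when it goes out of scope */
	guard(wiphy)(local->hw.wiphy);
	if (invalid_input)
		return -EINVAL;
	return count;
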
@@ -355,10 +350,9 @@ static ssize_t aql_enable_write(struct file *file, const char __user *user_buf,
return count;
}
-static const struct file_operations aql_enable_ops = {
+static const struct debugfs_short_fops aql_enable_ops = {
.write = aql_enable_write,
.read = aql_enable_read,
- .open = simple_open,
.llseek = default_llseek,
};
@@ -406,10 +400,9 @@ static ssize_t force_tx_status_write(struct file *file,
return count;
}
-static const struct file_operations force_tx_status_ops = {
+static const struct debugfs_short_fops force_tx_status_ops = {
.write = force_tx_status_write,
.read = force_tx_status_read,
- .open = simple_open,
.llseek = default_llseek,
};
@@ -434,9 +427,8 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
return count;
}
-static const struct file_operations reset_ops = {
+static const struct debugfs_short_fops reset_ops = {
.write = reset_write,
- .open = simple_open,
.llseek = noop_llseek,
};
#endif
@@ -456,6 +448,7 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_DYNAMIC_PS),
FLAG(MFP_CAPABLE),
FLAG(WANT_MONITOR_VIF),
+ FLAG(NO_VIRTUAL_MONITOR),
FLAG(NO_AUTO_VIF),
FLAG(SW_CRYPTO_CONTROL),
FLAG(SUPPORT_FAST_XMIT),
@@ -483,7 +476,6 @@ static const char *hw_flag_names[] = {
FLAG(REPORTS_LOW_ACK),
FLAG(SUPPORTS_TX_FRAG),
FLAG(SUPPORTS_TDLS_BUFFER_STA),
- FLAG(DEAUTH_NEED_MGD_TX_PREP),
FLAG(DOESNT_SUPPORT_QOS_NDP),
FLAG(BUFF_MMPDU_TXQ),
FLAG(SUPPORTS_VHT_EXT_NSS_BW),
@@ -498,6 +490,8 @@ static const char *hw_flag_names[] = {
FLAG(DETECTS_COLOR_COLLISION),
FLAG(MLO_MCAST_MULTI_LINK_TX),
FLAG(DISALLOW_PUNCTURING),
+ FLAG(DISALLOW_PUNCTURING_5GHZ),
+ FLAG(HANDLES_QUIET_CSA),
#undef FLAG
};
@@ -622,9 +616,8 @@ static ssize_t stats_ ##name## _read(struct file *file, \
print_devstats_##name); \
} \
\
-static const struct file_operations stats_ ##name## _ops = { \
+static const struct debugfs_short_fops stats_ ##name## _ops = { \
.read = stats_ ##name## _read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
};
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7e54da508765..117f58af5ff9 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -26,17 +26,15 @@ static ssize_t key_##name##_read(struct file *file, \
#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
#define KEY_OPS(name) \
-static const struct file_operations key_ ##name## _ops = { \
+static const struct debugfs_short_fops key_ ##name## _ops = { \
.read = key_##name##_read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
#define KEY_OPS_W(name) \
-static const struct file_operations key_ ##name## _ops = { \
+static const struct debugfs_short_fops key_ ##name## _ops = { \
.read = key_##name##_read, \
.write = key_##name##_write, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
@@ -49,9 +47,8 @@ static const struct file_operations key_ ##name## _ops = { \
#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n")
#define KEY_CONF_OPS(name) \
-static const struct file_operations key_ ##name## _ops = { \
+static const struct debugfs_short_fops key_ ##name## _ops = { \
.read = key_conf_##name##_read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
@@ -405,25 +402,6 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
}
}
-void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
-{
- char buf[50];
- struct ieee80211_key *key;
-
- if (!sdata->vif.debugfs_dir)
- return;
-
- key = wiphy_dereference(sdata->local->hw.wiphy,
- sdata->deflink.default_mgmt_key);
- if (key) {
- sprintf(buf, "../keys/%d", key->debugfs.cnt);
- sdata->debugfs.default_mgmt_key =
- debugfs_create_symlink("default_mgmt_key",
- sdata->vif.debugfs_dir, buf);
- } else
- ieee80211_debugfs_key_remove_mgmt_default(sdata);
-}
-
void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata)
{
if (!sdata)
@@ -434,27 +412,6 @@ void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sda
}
void
-ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata)
-{
- char buf[50];
- struct ieee80211_key *key;
-
- if (!sdata->vif.debugfs_dir)
- return;
-
- key = wiphy_dereference(sdata->local->hw.wiphy,
- sdata->deflink.default_beacon_key);
- if (key) {
- sprintf(buf, "../keys/%d", key->debugfs.cnt);
- sdata->debugfs.default_beacon_key =
- debugfs_create_symlink("default_beacon_key",
- sdata->vif.debugfs_dir, buf);
- } else {
- ieee80211_debugfs_key_remove_beacon_default(sdata);
- }
-}
-
-void
ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata)
{
if (!sdata)
@@ -463,10 +420,3 @@ ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata)
debugfs_remove(sdata->debugfs.default_beacon_key);
sdata->debugfs.default_beacon_key = NULL;
}
-
-void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
- struct sta_info *sta)
-{
- debugfs_remove(key->debugfs.stalink);
- key->debugfs.stalink = NULL;
-}
diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h
index af7cf495f8d1..e17a48d5c6cc 100644
--- a/net/mac80211/debugfs_key.h
+++ b/net/mac80211/debugfs_key.h
@@ -6,16 +6,10 @@
void ieee80211_debugfs_key_add(struct ieee80211_key *key);
void ieee80211_debugfs_key_remove(struct ieee80211_key *key);
void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_add_mgmt_default(
- struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_remove_mgmt_default(
struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_add_beacon_default(
- struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_remove_beacon_default(
struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
- struct sta_info *sta);
#else
static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key)
{}
@@ -24,21 +18,12 @@ static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
static inline void ieee80211_debugfs_key_update_default(
struct ieee80211_sub_if_data *sdata)
{}
-static inline void ieee80211_debugfs_key_add_mgmt_default(
- struct ieee80211_sub_if_data *sdata)
-{}
static inline void ieee80211_debugfs_key_remove_mgmt_default(
struct ieee80211_sub_if_data *sdata)
{}
-static inline void ieee80211_debugfs_key_add_beacon_default(
- struct ieee80211_sub_if_data *sdata)
-{}
static inline void ieee80211_debugfs_key_remove_beacon_default(
struct ieee80211_sub_if_data *sdata)
{}
-static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
- struct sta_info *sta)
-{}
#endif
#endif /* __MAC80211_DEBUGFS_KEY_H */
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 68596ef78b15..54c479910d05 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -221,10 +221,9 @@ static ssize_t ieee80211_if_fmt_##name( \
}
#define _IEEE80211_IF_FILE_OPS(name, _read, _write) \
-static const struct file_operations name##_ops = { \
+static const struct debugfs_short_fops name##_ops = { \
.read = (_read), \
.write = (_write), \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
@@ -728,7 +727,7 @@ static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sda
{
u16 active_links;
- if (kstrtou16(buf, 0, &active_links))
+ if (kstrtou16(buf, 0, &active_links) || !active_links)
return -EINVAL;
return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen;
@@ -1026,16 +1025,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
- struct dentry *dir;
- char buf[10 + IFNAMSIZ];
-
- dir = sdata->vif.debugfs_dir;
-
- if (IS_ERR_OR_NULL(dir))
- return;
-
- sprintf(buf, "netdev:%s", sdata->name);
- debugfs_rename(dir->d_parent, dir, dir->d_parent, buf);
+ debugfs_change_name(sdata->vif.debugfs_dir, "netdev:%s", sdata->name);
}
void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 1e9389c49a57..a67a9d316008 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -30,17 +30,15 @@ static ssize_t sta_ ##name## _read(struct file *file, \
#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
#define STA_OPS(name) \
-static const struct file_operations sta_ ##name## _ops = { \
+static const struct debugfs_short_fops sta_ ##name## _ops = { \
.read = sta_##name##_read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
#define STA_OPS_RW(name) \
-static const struct file_operations sta_ ##name## _ops = { \
+static const struct debugfs_short_fops sta_ ##name## _ops = { \
.read = sta_##name##_read, \
.write = sta_##name##_write, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
@@ -450,9 +448,8 @@ STA_OPS_RW(agg_status);
/* link sta attributes */
#define LINK_STA_OPS(name) \
-static const struct file_operations link_sta_ ##name## _ops = { \
+static const struct debugfs_short_fops link_sta_ ##name## _ops = { \
.read = link_sta_##name##_read, \
- .open = simple_open, \
.llseek = generic_file_llseek, \
}
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 3b7f70073fc3..35349a7f16cb 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -33,7 +33,7 @@ int drv_start(struct ieee80211_local *local)
return ret;
}
-void drv_stop(struct ieee80211_local *local)
+void drv_stop(struct ieee80211_local *local, bool suspend)
{
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -41,8 +41,8 @@ void drv_stop(struct ieee80211_local *local)
if (WARN_ON(!local->started))
return;
- trace_drv_stop(local);
- local->ops->stop(&local->hw);
+ trace_drv_stop(local, suspend);
+ local->ops->stop(&local->hw, suspend);
trace_drv_return_void(local);
/* sync away all work on the tasklet before clearing started */
@@ -65,6 +65,7 @@ int drv_add_interface(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) &&
!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))))
return -EINVAL;
@@ -115,8 +116,14 @@ void drv_remove_interface(struct ieee80211_local *local,
sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
- /* Remove driver debugfs entries */
- ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links);
+ /*
+ * Remove driver debugfs entries.
+ * The virtual monitor interface doesn't get a debugfs
+ * entry, so it's exempt here.
+ */
+ if (sdata != rcu_access_pointer(local->monitor_sdata))
+ ieee80211_debugfs_recreate_netdev(sdata,
+ sdata->vif.valid_links);
trace_drv_remove_interface(local, sdata);
local->ops->remove_interface(&local->hw, &sdata->vif);
@@ -181,9 +188,10 @@ int drv_sta_set_txpwr(struct ieee80211_local *local,
return ret;
}
-void drv_sta_rc_update(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, u32 changed)
+void drv_link_sta_rc_update(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
@@ -193,10 +201,10 @@ void drv_sta_rc_update(struct ieee80211_local *local,
(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
- trace_drv_sta_rc_update(local, sdata, sta, changed);
- if (local->ops->sta_rc_update)
- local->ops->sta_rc_update(&local->hw, &sdata->vif,
- sta, changed);
+ trace_drv_link_sta_rc_update(local, sdata, link_sta, changed);
+ if (local->ops->link_sta_rc_update)
+ local->ops->link_sta_rc_update(&local->hw, &sdata->vif,
+ link_sta, changed);
trace_drv_return_void(local);
}
@@ -214,8 +222,7 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
if (params->cw_min == 0 || params->cw_min > params->cw_max) {
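
Throughout driver-ops, the open-coded test `sdata->vif.active_links && !(sdata->vif.active_links & BIT(link_id))` is replaced by !ieee80211_vif_link_active(). The helper is assumed to carry the same semantics the open-coded form had; a sketch of that assumption (the real helper in mac80211.h may differ in detail):

	/* rough equivalent of the open-coded check being replaced */
	static inline bool link_active_sketch(const struct ieee80211_vif *vif,
					      unsigned int link_id)
	{
		if (!vif->active_links)	/* no per-link tracking: treat as active */
			return true;
		return vif->active_links & BIT(link_id);
	}
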
@@ -312,11 +319,22 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ /*
+ * We should perhaps push emulate chanctx down and only
+ * make it call ->config() when the chanctx is actually
+ * assigned here (and unassigned below), but that's yet
+ * another change to all drivers to add assign/unassign
+ * emulation callbacks. Maybe later.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return 0;
trace_drv_assign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -340,11 +358,15 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return;
trace_drv_unassign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -461,8 +483,7 @@ void drv_link_info_changed(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
return;
trace_drv_link_info_changed(local, sdata, info, changed);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eb482fb8c3af..5acecc7bd4a9 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation
+* Copyright (C) 2018-2019, 2021-2024 Intel Corporation
*/
#ifndef __MAC80211_DRIVER_OPS
@@ -88,7 +88,7 @@ static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata,
}
int drv_start(struct ieee80211_local *local);
-void drv_stop(struct ieee80211_local *local);
+void drv_stop(struct ieee80211_local *local, bool suspend);
#ifdef CONFIG_PM
static inline int drv_suspend(struct ieee80211_local *local,
@@ -594,9 +594,9 @@ int drv_sta_set_txpwr(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta);
-void drv_sta_rc_update(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, u32 changed);
+void drv_link_sta_rc_update(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta, u32 changed);
static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -724,6 +724,9 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
if (sdata && !check_sdata_in_driver(sdata))
return;
+ if (!sta->uploaded)
+ return;
+
trace_drv_flush_sta(local, sdata, &sta->sta);
if (local->ops->flush_sta)
local->ops->flush_sta(&local->hw, &sdata->vif, &sta->sta);
@@ -1150,6 +1153,9 @@ drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata,
if (!check_sdata_in_driver(sdata))
return -EIO;
+ if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id))
+ return 0;
+
trace_drv_pre_channel_switch(local, sdata, ch_switch);
if (local->ops->pre_channel_switch)
ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif,
@@ -1171,6 +1177,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link)
if (!check_sdata_in_driver(sdata))
return -EIO;
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return 0;
+
trace_drv_post_channel_switch(local, sdata);
if (local->ops->post_channel_switch)
ret = local->ops->post_channel_switch(&local->hw, &sdata->vif,
@@ -1180,8 +1189,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link)
}
static inline void
-drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
+drv_abort_channel_switch(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
might_sleep();
@@ -1190,10 +1200,14 @@ drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
if (!check_sdata_in_driver(sdata))
return;
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return;
+
trace_drv_abort_channel_switch(local, sdata);
if (local->ops->abort_channel_switch)
- local->ops->abort_channel_switch(&local->hw, &sdata->vif);
+ local->ops->abort_channel_switch(&local->hw, &sdata->vif,
+ link->conf);
}
static inline void
@@ -1208,6 +1222,9 @@ drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata,
if (!check_sdata_in_driver(sdata))
return;
+ if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id))
+ return;
+
trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch);
if (local->ops->channel_switch_rx_beacon)
local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif,
@@ -1259,7 +1276,8 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
}
static inline int drv_get_txpower(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata, int *dbm)
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int link_id, int *dbm)
{
int ret;
@@ -1269,8 +1287,8 @@ static inline int drv_get_txpower(struct ieee80211_local *local,
if (!local->ops->get_txpower)
return -EOPNOTSUPP;
- ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm);
- trace_drv_get_txpower(local, sdata, *dbm, ret);
+ ret = local->ops->get_txpower(&local->hw, &sdata->vif, link_id, dbm);
+ trace_drv_get_txpower(local, sdata, link_id, *dbm, ret);
return ret;
}
@@ -1695,4 +1713,35 @@ int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
+static inline enum ieee80211_neg_ttlm_res
+drv_can_neg_ttlm(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ enum ieee80211_neg_ttlm_res res = NEG_TTLM_RES_REJECT;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_can_neg_ttlm(local, sdata, neg_ttlm);
+ if (local->ops->can_neg_ttlm)
+ res = local->ops->can_neg_ttlm(&local->hw, &sdata->vif,
+ neg_ttlm);
+ trace_drv_neg_ttlm_res(local, sdata, res, neg_ttlm);
+
+ return res;
+}
+
+static inline void
+drv_prep_add_interface(struct ieee80211_local *local,
+ enum nl80211_iftype type)
+{
+ trace_drv_prep_add_interface(local, type);
+ if (local->ops->prep_add_interface)
+ local->ops->prep_add_interface(&local->hw, type);
+
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
index 12a6f0e9eca6..59e3ec4dc960 100644
--- a/net/mac80211/drop.h
+++ b/net/mac80211/drop.h
@@ -2,7 +2,7 @@
/*
* mac80211 drop reason list
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#ifndef MAC80211_DROP_H
@@ -66,6 +66,7 @@ typedef unsigned int __bitwise ieee80211_rx_result;
R(RX_DROP_U_UNEXPECTED_STA_4ADDR) \
R(RX_DROP_U_UNEXPECTED_VLAN_MCAST) \
R(RX_DROP_U_NOT_PORT_CONTROL) \
+ R(RX_DROP_U_UNKNOWN_ACTION_REJECTED) \
/* this line for the trailing \ - add before this */
/* having two enums allows for checking ieee80211_rx_result use with sparse */
diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c
index ddc7acc68335..fd41046e3b68 100644
--- a/net/mac80211/eht.c
+++ b/net/mac80211/eht.c
@@ -2,7 +2,7 @@
/*
* EHT handling
*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#include "ieee80211_i.h"
@@ -75,4 +75,30 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+
+ /*
+ * The MPDU length bits are reserved on all but 2.4 GHz and get set via
+ * VHT (5 GHz) or HE (6 GHz) capabilities.
+ */
+ if (sband->band != NL80211_BAND_2GHZ)
+ return;
+
+ switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ break;
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991:
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ break;
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895:
+ default:
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ break;
+ }
+
+ ieee80211_sta_recalc_aggregates(&link_sta->sta->sta);
}
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 99f6174a9d69..42f7ee142ce3 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -19,16 +19,13 @@ static int ieee80211_set_ringparam(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
- int ret;
if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
return -EINVAL;
- wiphy_lock(local->hw.wiphy);
- ret = drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
- wiphy_unlock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
- return ret;
+ return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
}
static void ieee80211_get_ringparam(struct net_device *dev,
@@ -40,10 +37,10 @@ static void ieee80211_get_ringparam(struct net_device *dev,
memset(rp, 0, sizeof(*rp));
- wiphy_lock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
+
drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
&rp->rx_pending, &rp->rx_max_pending);
- wiphy_unlock(local->hw.wiphy);
}
static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
@@ -109,7 +106,7 @@ static void ieee80211_get_stats(struct net_device *dev,
* network device.
*/
- wiphy_lock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid);
@@ -160,6 +157,10 @@ do_survey:
chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
if (chanctx_conf)
channel = chanctx_conf->def.chan;
+ else if (local->open_count > 0 &&
+ local->open_count == local->monitors &&
+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ channel = local->monitor_chanreq.oper.chan;
else
channel = NULL;
rcu_read_unlock();
@@ -205,13 +206,10 @@ do_survey:
else
data[i++] = -1LL;
- if (WARN_ON(i != STA_STATS_LEN)) {
- wiphy_unlock(local->hw.wiphy);
+ if (WARN_ON(i != STA_STATS_LEN))
return;
- }
drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
- wiphy_unlock(local->hw.wiphy);
}
static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 9f5ffdc9db28..5792ef77e986 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -3,10 +3,11 @@
* HE handling
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 - 2023 Intel Corporation
+ * Copyright(c) 2019 - 2024 Intel Corporation
*/
#include "ieee80211_i.h"
+#include "rate.h"
static void
ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
@@ -230,15 +231,137 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
if (!he_spr_ie_elem)
return;
+
+ he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
data = he_spr_ie_elem->optional;
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- data++;
+ he_obss_pd->non_srg_max_offset = *data++;
+
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
- he_obss_pd->max_offset = *data++;
he_obss_pd->min_offset = *data++;
+ he_obss_pd->max_offset = *data++;
+ memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+ data += 8;
+ memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
he_obss_pd->enable = true;
}
}
+
+static void ieee80211_link_sta_rc_update_omi(struct ieee80211_link_data *link,
+ struct link_sta_info *link_sta)
+{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_sta_rx_bandwidth new_bw;
+ enum nl80211_band band;
+
+ band = link->conf->chanreq.oper.chan->band;
+ sband = sdata->local->hw.wiphy->bands[band];
+
+ new_bw = ieee80211_sta_cur_vht_bw(link_sta);
+ if (link_sta->pub->bandwidth == new_bw)
+ return;
+
+ link_sta->pub->bandwidth = new_bw;
+ rate_control_rate_update(sdata->local, sband, link_sta,
+ IEEE80211_RC_BW_CHANGED);
+}
+
+bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta,
+ enum ieee80211_sta_rx_bandwidth bw)
+{
+ struct sta_info *sta = container_of(pub_link_sta->sta,
+ struct sta_info, sta);
+ struct ieee80211_local *local = sta->sdata->local;
+ struct link_sta_info *link_sta =
+ sdata_dereference(sta->link[pub_link_sta->link_id], sta->sdata);
+ struct ieee80211_link_data *link =
+ sdata_dereference(sta->sdata->link[pub_link_sta->link_id],
+ sta->sdata);
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *chanctx;
+ bool ret;
+
+ if (WARN_ON(!link || !link_sta || link_sta->pub != pub_link_sta))
+ return false;
+
+ conf = sdata_dereference(link->conf->chanctx_conf, sta->sdata);
+ if (WARN_ON(!conf))
+ return false;
+
+ trace_api_prepare_rx_omi_bw(local, sta->sdata, link_sta, bw);
+
+ chanctx = container_of(conf, typeof(*chanctx), conf);
+
+ if (link_sta->rx_omi_bw_staging == bw) {
+ ret = false;
+ goto trace;
+ }
+
+ /* must call this API in pairs */
+ if (WARN_ON(link_sta->rx_omi_bw_tx != link_sta->rx_omi_bw_staging ||
+ link_sta->rx_omi_bw_rx != link_sta->rx_omi_bw_staging)) {
+ ret = false;
+ goto trace;
+ }
+
+ if (bw < link_sta->rx_omi_bw_staging) {
+ link_sta->rx_omi_bw_tx = bw;
+ ieee80211_link_sta_rc_update_omi(link, link_sta);
+ } else {
+ link_sta->rx_omi_bw_rx = bw;
+ ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
+ }
+
+ link_sta->rx_omi_bw_staging = bw;
+ ret = true;
+trace:
+ trace_api_return_bool(local, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ieee80211_prepare_rx_omi_bw);
+
+void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta)
+{
+ struct sta_info *sta = container_of(pub_link_sta->sta,
+ struct sta_info, sta);
+ struct ieee80211_local *local = sta->sdata->local;
+ struct link_sta_info *link_sta =
+ sdata_dereference(sta->link[pub_link_sta->link_id], sta->sdata);
+ struct ieee80211_link_data *link =
+ sdata_dereference(sta->sdata->link[pub_link_sta->link_id],
+ sta->sdata);
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *chanctx;
+
+ if (WARN_ON(!link || !link_sta || link_sta->pub != pub_link_sta))
+ return;
+
+ conf = sdata_dereference(link->conf->chanctx_conf, sta->sdata);
+ if (WARN_ON(!conf))
+ return;
+
+ trace_api_finalize_rx_omi_bw(local, sta->sdata, link_sta);
+
+ chanctx = container_of(conf, typeof(*chanctx), conf);
+
+ if (link_sta->rx_omi_bw_tx != link_sta->rx_omi_bw_staging) {
+ /* rate control in finalize only when widening bandwidth */
+ WARN_ON(link_sta->rx_omi_bw_tx > link_sta->rx_omi_bw_staging);
+ link_sta->rx_omi_bw_tx = link_sta->rx_omi_bw_staging;
+ ieee80211_link_sta_rc_update_omi(link, link_sta);
+ }
+
+ if (link_sta->rx_omi_bw_rx != link_sta->rx_omi_bw_staging) {
+ /* channel context in finalize only when narrowing bandwidth */
+ WARN_ON(link_sta->rx_omi_bw_rx < link_sta->rx_omi_bw_staging);
+ link_sta->rx_omi_bw_rx = link_sta->rx_omi_bw_staging;
+ ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
+ }
+
+ trace_api_return_void(local);
+}
+EXPORT_SYMBOL_GPL(ieee80211_finalize_rx_omi_bw);
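
ieee80211_prepare_rx_omi_bw() and ieee80211_finalize_rx_omi_bw() must be called in pairs: prepare stages the new bandwidth and immediately applies the side that is safe to change first (rate control when narrowing, channel-context width when widening), and finalize applies the remaining side. A hedged sketch of the intended driver-side flow (the step in the middle is hypothetical, not defined by this patch):

	/* illustrative driver flow */
	if (ieee80211_prepare_rx_omi_bw(link_sta, IEEE80211_STA_RX_BW_80)) {
		/* ... driver transmits the operating-mode indication here
		 * and waits for it to take effect ...
		 */
		ieee80211_finalize_rx_omi_bw(link_sta);
	}
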
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 749f4ecab990..32390d8a9d75 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -257,7 +257,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!link_conf))
width = NL80211_CHAN_WIDTH_20_NOHT;
else
- width = link_conf->chandef.width;
+ width = link_conf->chanreq.oper.width;
switch (width) {
default:
@@ -379,7 +379,7 @@ void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work)
sta->ampdu_mlme.tid_rx_manage_offl))
__ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
IEEE80211_MAX_AMPDU_BUF_HT,
- false, true, NULL);
+ false, true, 0);
if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
sta->ampdu_mlme.tid_rx_manage_offl))
@@ -467,20 +467,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = skb_put_zero(skb, 24);
- memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
- else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
-
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION);
+ mgmt = ieee80211_mgmt_ba(skb, da, sdata);
skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
@@ -580,7 +567,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
/* we'll do more on status of this frame */
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- /* we have 12 bits, and need 6: link_id 4, smps 2 */
+ /* we have 13 bits, and need 6: link_id 4, smps 2 */
info->status_data = IEEE80211_STATUS_TYPE_SMPS |
u16_encode_bits(status_link_id << 2 | smps,
IEEE80211_STATUS_SUBDATA_MASK);
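
Worked example for the packing above (illustrative values): with status_link_id = 3 and smps = 2, the sub-data is (3 << 2) | 2 = 14; u16_encode_bits(14, IEEE80211_STATUS_SUBDATA_MASK) places it in bits 4..12, i.e. 14 << 4 = 0x0e0, so status_data = IEEE80211_STATUS_TYPE_SMPS | 0x0e0 = 0x0e1, comfortably inside the 13 usable bits (type mask 0x00f plus sub-data mask 0x1ff0).
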
@@ -603,6 +590,8 @@ void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
if (WARN_ON(!link))
goto out;
+ trace_api_request_smps(sdata->local, sdata, link, smps_mode);
+
if (link->u.mgd.driver_smps_mode == smps_mode)
goto out;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8f2b445a5ec3..05a945df3259 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -9,7 +9,7 @@
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018-2023 Intel Corporation
+ * Copyright(c) 2018-2024 Intel Corporation
*/
#include <linux/delay.h>
@@ -223,7 +223,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u64 bss_change;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq = {};
struct ieee80211_channel *chan;
struct beacon_data *presp;
struct cfg80211_inform_bss bss_meta = {};
@@ -237,7 +237,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
drv_reset_tsf(local, sdata);
if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.cfg.ibss_joined) {
@@ -245,6 +245,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.cfg.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
netif_carrier_off(sdata->dev);
+ synchronize_net();
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_IBSS |
BSS_CHANGED_BEACON_ENABLED);
@@ -257,22 +258,22 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
kfree_rcu(presp, rcu_head);
/* make a copy of the chandef, it could be modified below. */
- chandef = *req_chandef;
- chan = chandef.chan;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ chanreq.oper = *req_chandef;
+ chan = chanreq.oper.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10 ||
- chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- chandef.width == NL80211_CHAN_WIDTH_20) {
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
return;
}
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
+ chanreq.oper.width = NL80211_CHAN_WIDTH_20;
+ chanreq.oper.center_freq1 = chan->center_freq;
/* check again for downgraded chandef */
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
@@ -281,7 +282,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &chandef, NL80211_IFTYPE_ADHOC);
+ &chanreq.oper, NL80211_IFTYPE_ADHOC);
if (err < 0) {
sdata_info(sdata,
"Failed to join IBSS, invalid chandef\n");
@@ -295,7 +296,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = err;
- if (ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ if (ieee80211_link_use_channel(&sdata->deflink, &chanreq,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
@@ -307,7 +308,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(ifibss->bssid, bssid, ETH_ALEN);
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
- capability, tsf, &chandef,
+ capability, tsf, &chanreq.oper,
&have_higher_than_11mbit, NULL);
if (!presp)
return;
@@ -533,12 +534,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->deflink.csa_chandef.chan;
+ cbss->channel = sdata->deflink.csa.chanreq.oper.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->deflink.csa_chandef;
+ ifibss->chandef = sdata->deflink.csa.chanreq.oper;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
@@ -569,7 +570,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
if (!sta->sdata->u.ibss.control_port)
sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
- rate_control_rate_init(sta);
+ rate_control_rate_init(&sta->deflink);
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
@@ -682,7 +683,7 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifibss->incomplete_lock);
while (!list_empty(&ifibss->incomplete_stations)) {
@@ -757,21 +758,22 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
enum nl80211_channel_type ch_type;
int err;
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn = {
+ .mode = IEEE80211_CONN_MODE_HT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_40,
+ };
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- conn_flags = IEEE80211_CONN_DISABLE_VHT;
-
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
fallthrough;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
default:
break;
@@ -783,8 +785,9 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
- vht_cap_info,
- conn_flags, ifibss->bssid, &csa_ie);
+ vht_cap_info, &conn,
+ ifibss->bssid, false,
+ &csa_ie);
/* can't switch to destination channel, fail */
if (err < 0)
goto disconnect;
@@ -798,7 +801,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto disconnect;
params.count = csa_ie.count;
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -857,7 +860,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
ibss_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
@@ -1066,11 +1069,12 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
/* Force rx_nss recalculation */
sta->sta.deflink.rx_nss = 0;
- rate_control_rate_init(sta);
+ rate_control_rate_init(&sta->deflink);
if (sta->sta.deflink.rx_nss != rx_nss)
changed |= IEEE80211_RC_NSS_CHANGED;
- drv_sta_rc_update(local, sdata, &sta->sta, changed);
+ drv_link_sta_rc_update(local, sdata, &sta->sta.deflink,
+ changed);
}
rcu_read_unlock();
@@ -1744,7 +1748,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
- radar_detect_width);
+ radar_detect_width, -1);
if (ret < 0)
return ret;
@@ -1823,8 +1827,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- ieee80211_ibss_disconnect(sdata);
ifibss->ssid_len = 0;
+ ieee80211_ibss_disconnect(sdata);
eth_zero_addr(ifibss->bssid);
/* remove beacon */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0b2b53550bd9..e7dc3f0cfc9a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -26,6 +26,7 @@
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/rbtree.h>
+#include <kunit/visibility.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -89,7 +90,8 @@ enum ieee80211_status_data {
IEEE80211_STATUS_TYPE_MASK = 0x00f,
IEEE80211_STATUS_TYPE_INVALID = 0,
IEEE80211_STATUS_TYPE_SMPS = 1,
- IEEE80211_STATUS_SUBDATA_MASK = 0xff0,
+ IEEE80211_STATUS_TYPE_NEG_TTLM = 2,
+ IEEE80211_STATUS_SUBDATA_MASK = 0x1ff0,
};
static inline bool
@@ -131,7 +133,7 @@ struct ieee80211_bss {
};
/**
- * enum ieee80211_corrupt_data_flags - BSS data corruption flags
+ * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags
* @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
* @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
*
@@ -144,7 +146,7 @@ enum ieee80211_bss_corrupt_data_flags {
};
/**
- * enum ieee80211_valid_data_flags - BSS valid data flags
+ * enum ieee80211_bss_valid_data_flags - BSS valid data flags
* @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -370,25 +372,40 @@ enum ieee80211_sta_flags {
IEEE80211_STA_ENABLE_RRM = BIT(15),
};
-typedef u32 __bitwise ieee80211_conn_flags_t;
-
-enum ieee80211_conn_flags {
- IEEE80211_CONN_DISABLE_HT = (__force ieee80211_conn_flags_t)BIT(0),
- IEEE80211_CONN_DISABLE_40MHZ = (__force ieee80211_conn_flags_t)BIT(1),
- IEEE80211_CONN_DISABLE_VHT = (__force ieee80211_conn_flags_t)BIT(2),
- IEEE80211_CONN_DISABLE_80P80MHZ = (__force ieee80211_conn_flags_t)BIT(3),
- IEEE80211_CONN_DISABLE_160MHZ = (__force ieee80211_conn_flags_t)BIT(4),
- IEEE80211_CONN_DISABLE_HE = (__force ieee80211_conn_flags_t)BIT(5),
- IEEE80211_CONN_DISABLE_EHT = (__force ieee80211_conn_flags_t)BIT(6),
- IEEE80211_CONN_DISABLE_320MHZ = (__force ieee80211_conn_flags_t)BIT(7),
+enum ieee80211_conn_mode {
+ IEEE80211_CONN_MODE_S1G,
+ IEEE80211_CONN_MODE_LEGACY,
+ IEEE80211_CONN_MODE_HT,
+ IEEE80211_CONN_MODE_VHT,
+ IEEE80211_CONN_MODE_HE,
+ IEEE80211_CONN_MODE_EHT,
};
+#define IEEE80211_CONN_MODE_HIGHEST IEEE80211_CONN_MODE_EHT
+
+enum ieee80211_conn_bw_limit {
+ IEEE80211_CONN_BW_LIMIT_20,
+ IEEE80211_CONN_BW_LIMIT_40,
+ IEEE80211_CONN_BW_LIMIT_80,
+ IEEE80211_CONN_BW_LIMIT_160, /* also 80+80 */
+ IEEE80211_CONN_BW_LIMIT_320,
+};
+
+struct ieee80211_conn_settings {
+ enum ieee80211_conn_mode mode;
+ enum ieee80211_conn_bw_limit bw_limit;
+};
+
+extern const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited;
+
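
The ordered mode/bw_limit pair replaces the old "disable" flag bitmask, so tests that used to check a disable flag become simple comparisons. Rough correspondence, matching the ibss.c conversion earlier in this patch (exact call-site spelling may vary):

	/* old: conn_flags & IEEE80211_CONN_DISABLE_VHT    */
	/* new: conn.mode < IEEE80211_CONN_MODE_VHT        */

	/* old: conn_flags & IEEE80211_CONN_DISABLE_40MHZ  */
	/* new: conn.bw_limit < IEEE80211_CONN_BW_LIMIT_40 */
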
struct ieee80211_mgd_auth_data {
struct cfg80211_bss *bss;
unsigned long timeout;
int tries;
u16 algorithm, expected_transaction;
+ unsigned long userspace_selectors[BITS_TO_LONGS(128)];
+
u8 key[WLAN_KEY_LEN_WEP104];
u8 key_len, key_idx;
bool done, waiting;
@@ -416,7 +433,7 @@ struct ieee80211_mgd_assoc_data {
size_t elems_len;
u8 *elems; /* pointing to inside ie[] below */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
u16 status;
@@ -429,6 +446,8 @@ struct ieee80211_mgd_assoc_data {
const u8 *supp_rates;
u8 supp_rates_len;
+ unsigned long userspace_selectors[BITS_TO_LONGS(128)];
+
unsigned long timeout;
int tries;
@@ -441,6 +460,7 @@ struct ieee80211_mgd_assoc_data {
bool timeout_started;
bool comeback; /* whether the AP has requested association comeback */
bool s1g;
+ bool spp_amsdu;
unsigned int assoc_link_id;
@@ -509,6 +529,8 @@ struct ieee80211_if_managed {
unsigned int flags;
+ u16 mcast_seq_last;
+
bool status_acked;
bool status_received;
__le16 status_fc;
@@ -579,6 +601,20 @@ struct ieee80211_if_managed {
/* TID-to-link mapping support */
struct wiphy_delayed_work ttlm_work;
struct ieee80211_adv_ttlm_info ttlm_info;
+ struct wiphy_work teardown_ttlm_work;
+
+ /* dialog token enumerator for neg TTLM request */
+ u8 dialog_token_alloc;
+ struct wiphy_delayed_work neg_ttlm_timeout_work;
+
+ /* Locally initiated multi-link reconfiguration */
+ struct {
+ struct ieee80211_mgd_assoc_data *add_links_data;
+ struct wiphy_delayed_work wk;
+ u16 removed_links;
+ u16 added_links;
+ u8 dialog_token;
+ } reconf;
};
struct ieee80211_if_ibss {
@@ -664,7 +700,7 @@ struct mesh_csa_settings {
};
/**
- * struct mesh_table
+ * struct mesh_table - mesh hash table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
@@ -866,6 +902,12 @@ struct ieee80211_chanctx {
enum ieee80211_chanctx_mode mode;
bool driver_present;
+ /* temporary data for search algorithm etc. */
+ struct ieee80211_chan_req req;
+
+ bool radar_detected;
+
+ /* MUST be last - ends in a flexible-array member. */
struct ieee80211_chanctx_conf conf;
};
@@ -938,7 +980,7 @@ struct ieee80211_link_data_managed {
enum ieee80211_smps_mode req_smps, /* requested smps mode */
driver_smps_mode; /* smps mode request */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
s16 p2p_noa_index;
@@ -949,9 +991,15 @@ struct ieee80211_link_data_managed {
bool disable_wmm_tracking;
bool operating_11g_mode;
- bool csa_waiting_bcn;
- bool csa_ignored_same_chan;
- struct wiphy_delayed_work chswitch_work;
+ struct {
+ struct wiphy_delayed_work switch_work;
+ struct cfg80211_chan_def ap_chandef;
+ struct ieee80211_parsed_tpe tpe;
+ unsigned long time;
+ bool waiting_bcn;
+ bool ignored_same_chan;
+ bool blocked_tx;
+ } csa;
struct wiphy_work request_smps_work;
/* used to reconfigure hardware SM PS */
@@ -981,10 +1029,6 @@ struct ieee80211_link_data_managed {
int wmm_last_param_set;
int mu_edca_last_param_set;
-
- u8 bss_param_ch_cnt;
-
- struct cfg80211_bss *bss;
};
struct ieee80211_link_data_ap {
@@ -1012,20 +1056,21 @@ struct ieee80211_link_data {
struct ieee80211_key __rcu *default_mgmt_key;
struct ieee80211_key __rcu *default_beacon_key;
- struct wiphy_work csa_finalize_work;
- bool csa_block_tx;
bool operating_11g_mode;
- struct cfg80211_chan_def csa_chandef;
+ struct {
+ struct wiphy_work finalize_work;
+ struct ieee80211_chan_req chanreq;
+ } csa;
struct wiphy_work color_change_finalize_work;
- struct delayed_work color_collision_detect_work;
+ struct wiphy_delayed_work color_collision_detect_work;
u64 color_bitmap;
/* context reservation -- protected with wiphy mutex */
struct ieee80211_chanctx *reserved_chanctx;
- struct cfg80211_chan_def reserved_chandef;
+ struct ieee80211_chan_req reserved;
bool reserved_radar_required;
bool reserved_ready;
@@ -1137,6 +1182,8 @@ struct ieee80211_sub_if_data {
struct wiphy_work activate_links_work;
u16 desired_active_links;
+ u16 restart_active_links;
+
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *subdir_stations;
@@ -1160,6 +1207,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
#define sdata_dereference(p, sdata) \
wiphy_dereference(sdata->local->hw.wiphy, p)
+#define for_each_sdata_link(_local, _link) \
+ /* outer loop just to define the variables ... */ \
+ for (struct ieee80211_sub_if_data *___sdata = NULL; \
+ !___sdata; \
+ ___sdata = (void *)~0 /* always stop */) \
+ list_for_each_entry(___sdata, &(_local)->interfaces, list) \
+ if (ieee80211_sdata_running(___sdata)) \
+ for (int ___link_id = 0; \
+ ___link_id < ARRAY_SIZE(___sdata->link); \
+ ___link_id++) \
+ if ((_link = wiphy_dereference((_local)->hw.wiphy, \
+ ___sdata->link[___link_id])))
+
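
for_each_sdata_link() nests three loops: the outer for only declares the ___sdata iterator, list_for_each_entry() walks every interface (skipping ones that are not running), and the inner loop yields each valid link under the wiphy lock. The ieee80211_chanctx_recheck() hunk earlier in the patch uses it exactly this way; a condensed sketch (handle_link() is a placeholder):

	struct ieee80211_link_data *link;

	lockdep_assert_wiphy(local->hw.wiphy);

	for_each_sdata_link(local, link) {
		/* every link of every running interface, any iftype */
		if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf)
			handle_link(link);
	}
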
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -1320,7 +1380,7 @@ struct ieee80211_local {
spinlock_t queue_stop_reason_lock;
int open_count;
- int monitors, cooked_mntrs;
+ int monitors, cooked_mntrs, tx_mntrs;
/* number of interfaces with corresponding FIF_ flags */
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
fif_probe_req;
@@ -1330,7 +1390,8 @@ struct ieee80211_local {
bool wiphy_ciphers_allocated;
- bool use_chanctx;
+ struct cfg80211_chan_def dflt_chandef;
+ bool emulate_chanctx;
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -1456,8 +1517,6 @@ struct ieee80211_local {
enum mac80211_scan_state next_scan_state;
struct wiphy_delayed_work scan_work;
struct ieee80211_sub_if_data __rcu *scan_sdata;
- /* For backward compatibility only -- do not use */
- struct cfg80211_chan_def _oper_chandef;
/* Temporary remain-on-channel for off-channel operations */
struct ieee80211_channel *tmp_channel;
@@ -1531,8 +1590,6 @@ struct ieee80211_local {
int user_power_level; /* in dBm, for all interfaces */
- enum ieee80211_smps_mode smps_mode;
-
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1559,7 +1616,7 @@ struct ieee80211_local {
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
- struct cfg80211_chan_def monitor_chandef;
+ struct ieee80211_chan_req monitor_chanreq;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
@@ -1624,7 +1681,7 @@ ieee80211_get_link_sband(struct ieee80211_link_data *link)
/* this struct holds the value parsing from channel switch IE */
struct ieee80211_csa_ie {
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
u8 mode;
u8 count;
u8 ttl;
@@ -1633,6 +1690,14 @@ struct ieee80211_csa_ie {
u32 max_switch_time;
};
+enum ieee80211_elems_parse_error {
+ IEEE80211_PARSE_ERR_INVALID_END = BIT(0),
+ IEEE80211_PARSE_ERR_DUP_ELEM = BIT(1),
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = BIT(2),
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = BIT(3),
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = BIT(4),
+};
+
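
These bits replace the old boolean parse_error (see the struct ieee802_11_elems change below), so a plain truth test still means "something failed", while callers can now also check which failure occurred. Illustrative checks at a hypothetical call site:

	/* any parse problem at all — equivalent to the old bool test */
	if (elems->parse_error)
		return -EINVAL;

	/* or a specific failure, e.g. an element with a bad size */
	if (elems->parse_error & IEEE80211_PARSE_ERR_BAD_ELEM_SIZE)
		sdata_info(sdata, "bad element size in frame\n");
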
/* Parsed Information Elements */
struct ieee802_11_elems {
const u8 *ie_start;
@@ -1663,7 +1728,6 @@ struct ieee802_11_elems {
const struct ieee80211_he_spr *he_spr;
const struct ieee80211_mu_edca_param_set *mu_edca_param_set;
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
- const struct ieee80211_tx_pwr_env *tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
const u8 *uora_element;
const u8 *mesh_id;
const u8 *peering;
@@ -1698,9 +1762,14 @@ struct ieee802_11_elems {
const struct ieee80211_eht_operation *eht_operation;
const struct ieee80211_multi_link_elem *ml_basic;
const struct ieee80211_multi_link_elem *ml_reconf;
+ const struct ieee80211_multi_link_elem *ml_epcs;
const struct ieee80211_bandwidth_indication *bandwidth_indication;
const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT];
+ /* note: the order in the psd values is per element, not per chandef */
+ struct ieee80211_parsed_tpe tpe;
+ struct ieee80211_parsed_tpe csa_tpe;
+
/* length of them, respectively */
u8 ext_capab_len;
u8 ssid_len;
@@ -1719,19 +1788,12 @@ struct ieee802_11_elems {
u8 perr_len;
u8 country_elem_len;
u8 bssid_index_len;
- u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT];
- u8 tx_pwr_env_num;
u8 eht_cap_len;
/* mult-link element can be de-fragmented and thus u8 is not sufficient */
size_t ml_basic_len;
size_t ml_reconf_len;
-
- /* The basic Multi-Link element in the original IEs */
- const struct element *ml_basic_elem;
-
- /* The reconfiguration Multi-Link element in the original IEs */
- const struct element *ml_reconf_elem;
+ size_t ml_epcs_len;
u8 ttlm_num;
@@ -1743,16 +1805,8 @@ struct ieee802_11_elems {
struct ieee80211_mle_per_sta_profile *prof;
size_t sta_prof_len;
- /* whether a parse error occurred while retrieving these elements */
- bool parse_error;
-
- /*
- * scratch buffer that can be used for various element parsing related
- * tasks, e.g., element de-fragmentation etc.
- */
- size_t scratch_len;
- u8 *scratch_pos;
- u8 scratch[] __counted_by(scratch_len);
+ /* whether/which parse error occurred while retrieving these elements */
+ u8 parse_error;
};
static inline struct ieee80211_local *hw_to_local(
@@ -1782,6 +1836,9 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata);
+
/* This function returns the number of multicast stations connected to this
* interface. It returns -1 if that number is not tracked, that is for netdevs
* not in AP or AP_VLAN mode or when using 4addr.
@@ -1801,6 +1858,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
unsigned int mpdu_len,
unsigned int mpdu_offset);
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_conf_chan(struct ieee80211_local *local);
+void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed);
@@ -1812,6 +1871,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+void ieee80211_handle_queued_frames(struct ieee80211_local *local);
+
u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
u64 *cookie, gfp_t gfp);
@@ -1935,6 +1996,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
void ieee80211_offchannel_return(struct ieee80211_local *local);
void ieee80211_roc_setup(struct ieee80211_local *local);
void ieee80211_start_next_roc(struct ieee80211_local *local);
+void ieee80211_reconfig_roc(struct ieee80211_local *local);
void ieee80211_roc_purge(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
@@ -1955,7 +2017,8 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
/* color change handling */
void ieee80211_color_change_finalize_work(struct wiphy *wiphy,
struct wiphy_work *work);
-void ieee80211_color_collision_detection_work(struct work_struct *work);
+void ieee80211_color_collision_detection_work(struct wiphy *wiphy,
+ struct wiphy_work *work);
/* interface handling */
#define MAC80211_SUPPORTED_FEATURES_TX (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
@@ -1985,8 +2048,8 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
-bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+bool __ieee80211_recalc_txpower(struct ieee80211_link_data *link);
+void ieee80211_recalc_txpower(struct ieee80211_link_data *link,
bool update_bss);
void ieee80211_recalc_offload(struct ieee80211_local *local);
@@ -2023,8 +2086,6 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
u32 info_flags,
u32 ctrl_flags,
u64 *cookie);
-void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
- struct sk_buff_head *skbs);
struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags);
@@ -2063,16 +2124,19 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid, int link_id);
-bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
- enum ieee80211_smps_mode smps_mode_new);
-
+void ieee80211_add_addbaext(struct sk_buff *skb,
+ const u8 req_addba_ext_data,
+ u16 buf_size);
+u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta,
+ const void *elem_data, ssize_t elem_len,
+ u16 *buf_size);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason, bool stop);
void __ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
u16 buf_size, bool tx, bool auto_seq,
- const struct ieee80211_addba_ext_ie *addbaext);
+ const u8 addba_ext_data);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
enum ieee80211_agg_stop_reason reason);
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
@@ -2087,6 +2151,29 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt,
size_t len);
+static inline struct ieee80211_mgmt *
+ieee80211_mgmt_ba(struct sk_buff *skb, const u8 *da,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_mgmt *mgmt = skb_put_zero(skb, 24);
+
+ ether_addr_copy(mgmt->da, da);
+ ether_addr_copy(mgmt->sa, sdata->vif.addr);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ ether_addr_copy(mgmt->bssid, sdata->vif.addr);
+ else if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ ether_addr_copy(mgmt->bssid, sdata->vif.cfg.ap_addr);
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ ether_addr_copy(mgmt->bssid, sdata->u.ibss.bssid);
+
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ return mgmt;
+}
+
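[note] The new ieee80211_mgmt_ba() helper centralizes the common 24-byte management header setup for block-ack Action frames. A caller would typically look like the following sketch; the skb sizing shown is illustrative and not taken from this patch:

	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;

	/* sketch: reserve driver headroom, then let the helper fill
	 * DA/SA/BSSID and the Action frame_control for the iftype
	 */
	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
	if (!skb)
		return;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = ieee80211_mgmt_ba(skb, da, sdata);
	mgmt->u.action.category = WLAN_CATEGORY_BACK;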
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_agg_stop_reason reason);
void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
@@ -2109,12 +2196,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
-enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
+_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef);
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
+{
+ return _ieee80211_sta_cap_rx_bw(link_sta, NULL);
+}
enum ieee80211_sta_rx_bandwidth
-ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef);
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+{
+ return _ieee80211_sta_cur_vht_bw(link_sta, NULL);
+}
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
enum nl80211_chan_width
ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta);
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
@@ -2166,10 +2263,11 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
* @elems: parsed 802.11 elements received with the frame
* @current_band: indicates the current band
* @vht_cap_info: VHT capabilities of the transmitter
- * @conn_flags: contains information about own capabilities and restrictions
- * to decide which channel switch announcements can be accepted, using
- * flags from &enum ieee80211_conn_flags.
+ * @conn: contains information about own capabilities and restrictions
+ * to decide which channel switch announcements can be accepted
* @bssid: the currently connected bssid (for reporting)
+ * @unprot_action: whether the frame was an unprotected frame or not,
+ * used for reporting
* @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl.
* All of them will be filled in on success only.
* Return: 0 on success, <0 on error and >0 if there is nothing to parse.
@@ -2178,12 +2276,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid, bool unprot_action,
struct ieee80211_csa_ie *csa_ie);
/* Suspend/resume and hw reconfiguration */
int ieee80211_reconfig(struct ieee80211_local *local);
-void ieee80211_stop_device(struct ieee80211_local *local);
+void ieee80211_stop_device(struct ieee80211_local *local, bool suspend);
int __ieee80211_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
@@ -2202,11 +2301,15 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode);
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef);
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
int rate, int erp, int short_preamble);
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params *qparam,
int ac);
+void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe);
void ieee80211_set_wmm_default(struct ieee80211_link_data *link,
bool bss_notify, bool enable_qos);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
@@ -2243,6 +2346,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
/**
* struct ieee80211_elems_parse_params - element parsing parameters
+ * @mode: connection mode for parsing
* @start: pointer to the elements
* @len: length of the elements
* @action: %true if the elements came from an action frame
@@ -2260,6 +2364,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
* for EHT capabilities parsing)
*/
struct ieee80211_elems_parse_params {
+ enum ieee80211_conn_mode mode;
const u8 *start;
size_t len;
bool action;
@@ -2279,6 +2384,7 @@ ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct cfg80211_bss *bss)
{
struct ieee80211_elems_parse_params params = {
+ .mode = IEEE80211_CONN_MODE_HIGHEST,
.start = start,
.len = len,
.action = action,
@@ -2317,17 +2423,13 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
-
+unsigned int
+ieee80211_get_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
unsigned long queues,
enum queue_stop_reason reason,
bool refcounted);
-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason);
-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason);
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
unsigned long queues,
enum queue_stop_reason reason,
@@ -2338,6 +2440,43 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason,
bool refcounted);
+static inline void
+ieee80211_stop_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_stop_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, true);
+}
+
+static inline void
+ieee80211_wake_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_wake_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, true);
+}
+static inline void
+ieee80211_stop_vif_queues_norefcount(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_stop_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, false);
+}
+static inline void
+ieee80211_wake_vif_queues_norefcount(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_wake_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, false);
+}
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
@@ -2360,7 +2499,7 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
/*
* If quiescing is set, we are racing with __ieee80211_suspend.
* __ieee80211_suspend flushes the workers after setting quiescing,
- * and we check quiescing / suspended before enqueing new workers.
+ * and we check quiescing / suspended before enqueuing new workers.
* We should abort the worker to avoid the races below.
*/
if (local->quiescing)
@@ -2408,7 +2547,6 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *da, const u8 *bssid,
u16 stype, u16 reason,
bool send_frame, u8 *frame_buf);
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end);
enum {
IEEE80211_PROBE_FLAG_DIRECTED = BIT(0),
@@ -2453,32 +2591,36 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- u8 *end);
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb);
-u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef);
-u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef,
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata);
+u8 *ieee80211_ie_build_he_oper(u8 *pos, const struct cfg80211_chan_def *chandef);
+u8 *ieee80211_ie_build_eht_oper(u8 *pos, const struct cfg80211_chan_def *chandef,
const struct ieee80211_sta_eht_cap *eht_cap);
int ieee80211_parse_bitrates(enum nl80211_chan_width width,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_s1g_cap *caps,
struct sk_buff *skb);
void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap);
+
+/* element building in SKBs */
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id);
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2488,23 +2630,46 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef);
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef,
+ struct ieee80211_conn_settings *conn);
+static inline void
+ieee80211_chanreq_downgrade(struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn)
+{
+ ieee80211_chandef_downgrade(&chanreq->oper, conn);
+ if (WARN_ON(!conn))
+ return;
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ chanreq->ap.chan = NULL;
+}
+
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b);
int __must_check
+_ieee80211_link_use_channel(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *req,
+ enum ieee80211_chanctx_mode mode,
+ bool assign_on_failure);
+
+static inline int __must_check
ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode);
+ const struct ieee80211_chan_req *req,
+ enum ieee80211_chanctx_mode mode)
+{
+ return _ieee80211_link_use_channel(link, req, mode, false);
+}
+
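[note] The split between _ieee80211_link_use_channel() and the inline wrapper keeps existing callers unchanged: the wrapper passes assign_on_failure=false, while callers that cannot roll back (the link-activation path in link.c below) call the underscore variant with true. A minimal sketch of the two call styles:

	/* normal path: fail the operation if the chanctx cannot be assigned */
	ret = ieee80211_link_use_channel(link, &chanreq,
					 IEEE80211_CHANCTX_SHARED);

	/* activation path: keep mac80211 state consistent even on failure */
	ret = _ieee80211_link_use_channel(link, &link->conf->chanreq,
					  IEEE80211_CHANCTX_SHARED, true);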
int __must_check
ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode,
bool radar_required);
int __must_check
@@ -2512,9 +2677,11 @@ ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
-ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed);
+ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *req,
+ u64 *changed);
+void __ieee80211_link_release_channel(struct ieee80211_link_data *link,
+ bool skip_idle_recalc);
void ieee80211_link_release_channel(struct ieee80211_link_data *link);
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -2526,11 +2693,13 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx);
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for);
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved);
bool ieee80211_is_radar_required(struct ieee80211_local *local);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *chanctx);
void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
struct wiphy_work *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
@@ -2541,8 +2710,9 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
- u8 radar_detect);
-int ieee80211_max_num_channels(struct ieee80211_local *local);
+ u8 radar_detect, int radio_idx);
+int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx);
+u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev);
void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
@@ -2561,7 +2731,7 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason);
void
@@ -2589,12 +2759,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap);
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata);
void
ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
@@ -2603,16 +2768,33 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len,
struct link_sta_info *link_sta);
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params);
void ieee80211_check_wbrf_support(struct ieee80211_local *local);
void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
+int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_link *add_links,
+ u16 rem_links);
+
+void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
#if IS_ENABLED(CONFIG_MAC80211_KUNIT_TEST)
#define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
#define VISIBLE_IF_MAC80211_KUNIT
ieee80211_rx_result
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx);
+int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap,
+ u8 n_partial_subchans);
+void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used);
#else
#define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym)
#define VISIBLE_IF_MAC80211_KUNIT static
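[note] The KUnit plumbing follows the usual pattern: an internal function is given normal linkage for test builds and static linkage otherwise, and is exported to the test module only when CONFIG_MAC80211_KUNIT_TEST is set. A sketch of how the two macros pair up at a definition site (function body elided):

	VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result
	ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
	{
		/* ... */
	}
	EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt);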
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 11c4caa4748e..738de269e13f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -44,13 +44,13 @@
static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work);
-bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+bool __ieee80211_recalc_txpower(struct ieee80211_link_data *link)
{
struct ieee80211_chanctx_conf *chanctx_conf;
int power;
rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return false;
@@ -59,27 +59,26 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
power = ieee80211_chandef_max_power(&chanctx_conf->def);
rcu_read_unlock();
- if (sdata->deflink.user_power_level != IEEE80211_UNSET_POWER_LEVEL)
- power = min(power, sdata->deflink.user_power_level);
+ if (link->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
+ power = min(power, link->user_power_level);
- if (sdata->deflink.ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
- power = min(power, sdata->deflink.ap_power_level);
+ if (link->ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
+ power = min(power, link->ap_power_level);
- if (power != sdata->vif.bss_conf.txpower) {
- sdata->vif.bss_conf.txpower = power;
- ieee80211_hw_config(sdata->local, 0);
+ if (power != link->conf->txpower) {
+ link->conf->txpower = power;
return true;
}
return false;
}
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+void ieee80211_recalc_txpower(struct ieee80211_link_data *link,
bool update_bss)
{
- if (__ieee80211_recalc_txpower(sdata) ||
- (update_bss && ieee80211_sdata_running(sdata)))
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+ if (__ieee80211_recalc_txpower(link) ||
+ (update_bss && ieee80211_sdata_running(link->sdata)))
+ ieee80211_link_info_change_notify(link->sdata, link,
BSS_CHANGED_TXPOWER);
}
@@ -296,7 +295,6 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- int ret;
/*
* This happens during unregistration if there's a bond device
@@ -306,11 +304,9 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
if (!dev->ieee80211_ptr->registered)
return 0;
- wiphy_lock(local->hw.wiphy);
- ret = _ieee80211_change_mac(sdata, addr);
- wiphy_unlock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
- return ret;
+ return _ieee80211_change_mac(sdata, addr);
}
static inline int identical_mac_addr_allowed(int type1, int type2)
@@ -397,7 +393,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
}
}
- return ieee80211_check_combinations(sdata, NULL, 0, 0);
+ return ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
}
static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -446,22 +442,20 @@ static int ieee80211_open(struct net_device *dev)
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
- wiphy_lock(sdata->local->hw.wiphy);
+ guard(wiphy)(sdata->local->hw.wiphy);
+
err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
if (err)
- goto out;
+ return err;
- err = ieee80211_do_open(&sdata->wdev, true);
-out:
- wiphy_unlock(sdata->local->hw.wiphy);
-
- return err;
+ return ieee80211_do_open(&sdata->wdev, true);
}
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down)
{
struct ieee80211_local *local = sdata->local;
unsigned long flags;
+ struct sk_buff_head freeq;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i, flushed;
@@ -511,7 +505,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
* would have removed them, but in other modes there shouldn't
* be any stations.
*/
- flushed = sta_info_flush(sdata);
+ flushed = sta_info_flush(sdata, -1);
WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0);
/* don't count this interface for allmulti while it is down */
@@ -543,26 +537,22 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
- }
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
- wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa.finalize_work);
wiphy_work_cancel(local->hw.wiphy,
&sdata->deflink.color_change_finalize_work);
wiphy_delayed_work_cancel(local->hw.wiphy,
&sdata->deflink.dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ if (sdata->wdev.links[0].cac_started) {
+ chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -641,18 +631,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
skb_queue_purge(&sdata->status_queue);
}
+ /*
+ * Since ieee80211_free_txskb() may issue __dev_queue_xmit()
+ * which should be called with interrupts enabled, reclamation
+ * is done in two phases:
+ */
+ __skb_queue_head_init(&freeq);
+
+ /* unlink from local queues... */
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_walk_safe(&local->pending[i], skb, tmp) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
- ieee80211_free_txskb(&local->hw, skb);
+ __skb_queue_tail(&freeq, skb);
}
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ /* ... and perform actual reclamation with interrupts enabled. */
+ skb_queue_walk_safe(&freeq, skb, tmp) {
+ __skb_unlink(skb, &freeq);
+ ieee80211_free_txskb(&local->hw, skb);
+ }
+
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ieee80211_txq_remove_vlan(local, sdata);
@@ -686,14 +690,21 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
ieee80211_del_virtual_monitor(local);
ieee80211_recalc_idle(local);
+ ieee80211_recalc_offload(local);
- if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
break;
+ ieee80211_link_release_channel(&sdata->deflink);
fallthrough;
default:
- if (going_down)
- drv_remove_interface(local, sdata);
+ if (!going_down)
+ break;
+ drv_remove_interface(local, sdata);
+
+ /* Clear private driver data to prevent reuse */
+ memset(sdata->vif.drv_priv, 0, local->hw.vif_data_size);
}
ieee80211_recalc_ps(local);
@@ -702,7 +713,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
if (local->open_count == 0) {
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local, false);
/* no reconfiguring after stop! */
return;
@@ -758,11 +769,11 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_stop_mbssid(sdata);
}
- wiphy_lock(sdata->local->hw.wiphy);
+ guard(wiphy)(sdata->local->hw.wiphy);
+
wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->activate_links_work);
ieee80211_do_stop(sdata, true);
- wiphy_unlock(sdata->local->hw.wiphy);
return 0;
}
@@ -815,12 +826,6 @@ static void ieee80211_uninit(struct net_device *dev)
ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
}
-static void
-ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- dev_fetch_sw_netstats(stats, dev->tstats);
-}
-
static int ieee80211_netdev_setup_tc(struct net_device *dev,
enum tc_setup_type type, void *type_data)
{
@@ -837,7 +842,6 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_get_stats64 = ieee80211_get_stats64,
.ndo_setup_tc = ieee80211_netdev_setup_tc,
};
@@ -877,7 +881,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_monitor_select_queue,
- .ndo_get_stats64 = ieee80211_get_stats64,
};
static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx,
@@ -945,7 +948,6 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit_8023,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_get_stats64 = ieee80211_get_stats64,
.ndo_fill_forward_path = ieee80211_netdev_fill_forward_path,
.ndo_setup_tc = ieee80211_netdev_setup_tc,
};
@@ -1080,6 +1082,8 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
ADJUST(CONTROL, control);
ADJUST(CONTROL, pspoll);
ADJUST(OTHER_BSS, other_bss);
+ if (!(flags & MONITOR_FLAG_SKIP_TX))
+ local->tx_mntrs += offset;
#undef ADJUST
}
@@ -1121,13 +1125,11 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
- if (local->monitor_sdata)
+ if (local->monitor_sdata ||
+ ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
return 0;
sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
@@ -1145,11 +1147,13 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
ieee80211_set_default_queues(sdata);
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1164,7 +1168,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
rcu_assign_pointer(local->monitor_sdata, sdata);
mutex_unlock(&local->iflist_mtx);
- ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef,
+ ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chanreq,
IEEE80211_CHANCTX_EXCLUSIVE);
if (ret) {
mutex_lock(&local->iflist_mtx);
@@ -1187,7 +1191,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
return;
ASSERT_RTNL();
@@ -1202,15 +1206,17 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
return;
}
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+ ieee80211_link_release_channel(&sdata->deflink);
+
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ drv_remove_interface(local, sdata);
+
RCU_INIT_POINTER(local->monitor_sdata, NULL);
mutex_unlock(&local->iflist_mtx);
synchronize_net();
- ieee80211_link_release_channel(&sdata->deflink);
-
- drv_remove_interface(local, sdata);
-
kfree(sdata);
}
@@ -1252,7 +1258,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+ sdata->vif.bss_conf.chanreq = master->vif.bss_conf.chanreq;
sdata->crypto_tx_tailroom_needed_cnt +=
master->crypto_tx_tailroom_needed_cnt;
@@ -1288,8 +1294,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_start(local);
if (res)
goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1309,6 +1313,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
}
}
+ sdata->vif.addr_valid = sdata->vif.type != NL80211_IFTYPE_MONITOR ||
+ (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE);
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
/* no need to tell driver, but set carrier and chanctx */
@@ -1326,7 +1332,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
break;
}
- if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ if ((sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) ||
+ ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
@@ -1436,7 +1443,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags)
+ if (local->open_count == 1)
+ ieee80211_hw_conf_init(local);
+ else if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
ieee80211_recalc_ps(local);
@@ -1448,7 +1457,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
drv_remove_interface(local, sdata);
err_stop:
if (!local->open_count)
- drv_stop(local);
+ drv_stop(local, false);
err_del_bss:
sdata->bss = NULL;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1458,11 +1467,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
return res;
}
-static void ieee80211_if_free(struct net_device *dev)
-{
- free_percpu(dev->tstats);
-}
-
static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1470,7 +1474,6 @@ static void ieee80211_if_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &ieee80211_dataif_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = ieee80211_if_free;
}
static void ieee80211_iface_process_skb(struct ieee80211_local *local,
@@ -1546,6 +1549,26 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
default:
break;
}
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_EHT) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ ieee80211_process_neg_ttlm_req(sdata, mgmt,
+ skb->len);
+ break;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ ieee80211_process_neg_ttlm_res(sdata, mgmt,
+ skb->len);
+ break;
+ case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
+ ieee80211_process_ml_reconf_resp(sdata, mgmt,
+ skb->len);
+ break;
+ default:
+ break;
+ }
+ }
} else if (ieee80211_is_ext(mgmt->frame_control)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_queued_ext(sdata, skb);
@@ -1683,8 +1706,13 @@ static void ieee80211_activate_links_work(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
activate_links_work);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (local->in_reconfig)
+ return;
ieee80211_set_active_links(&sdata->vif, sdata->desired_active_links);
+ sdata->desired_active_links = 0;
}
/*
@@ -2080,11 +2108,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
- ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!ndev->tstats) {
- free_netdev(ndev);
- return -ENOMEM;
- }
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ndev->needed_headroom = local->tx_headroom +
4*6 /* four MAC addresses */
@@ -2097,7 +2121,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
- ieee80211_if_free(ndev);
free_netdev(ndev);
return ret;
}
@@ -2162,9 +2185,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ieee80211_set_default_queues(sdata);
- sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
- sdata->deflink.user_power_level = local->user_power_level;
-
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
@@ -2258,7 +2278,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
*/
cfg80211_shutdown_all_interfaces(local->hw.wiphy);
- wiphy_lock(local->hw.wiphy);
+ guard(wiphy)(local->hw.wiphy);
WARN(local->open_count, "%s: open count remains %d\n",
wiphy_name(local->hw.wiphy), local->open_count);
@@ -2288,7 +2308,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
if (!netdev)
kfree(sdata);
}
- wiphy_unlock(local->hw.wiphy);
}
static int netdev_notify(struct notifier_block *nb,
@@ -2342,3 +2361,22 @@ void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata)
else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
atomic_dec(&sdata->u.vlan.num_mcast_sta);
}
+
+void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA))
+ return;
+
+ ieee80211_stop_vif_queues_norefcount(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+}
+
+void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ ieee80211_wake_vif_queues_norefcount(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+}
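[note] These two helpers replace the open-coded csa_block_tx handling removed above: if the hardware declares HANDLES_QUIET_CSA nothing is stopped, otherwise the vif queues are stopped and woken for the CSA reason without reference counting. Usage is symmetric around the switch (sketch):

	/* when a channel switch is announced on the vif */
	ieee80211_vif_block_queues_csa(sdata);

	/* ... perform or abort the switch ... */

	/* once the switch completed or the interface is stopped */
	ieee80211_vif_unblock_queues_csa(sdata);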
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af74d7f9d94d..dcf8643a0baa 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020, 2022-2023 Intel Corporation
+ * Copyright 2018-2020, 2022-2024 Intel Corporation
*/
#include <crypto/utils.h>
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "debugfs_key.h"
@@ -925,6 +925,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
key->color = atomic_inc_return(&key_color);
+ /* keep this flag for easier access later */
+ if (sta && sta->sta.spp_amsdu)
+ key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU;
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key);
@@ -983,6 +987,26 @@ void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata)
}
}
+static void
+ieee80211_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key *key,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data)
+{
+ /* skip keys of station in removal process */
+ if (key->sta && key->sta->removed)
+ return;
+ if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ return;
+ iter(hw, vif, key->sta ? &key->sta->sta : NULL,
+ &key->conf, iter_data);
+}
+
void ieee80211_iter_keys(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void (*iter)(struct ieee80211_hw *hw,
@@ -1001,16 +1025,13 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
if (vif) {
sdata = vif_to_sdata(vif);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
- iter(hw, &sdata->vif,
- key->sta ? &key->sta->sta : NULL,
- &key->conf, iter_data);
+ ieee80211_key_iter(hw, vif, key, iter, iter_data);
} else {
list_for_each_entry(sdata, &local->interfaces, list)
list_for_each_entry_safe(key, tmp,
&sdata->key_list, list)
- iter(hw, &sdata->vif,
- key->sta ? &key->sta->sta : NULL,
- &key->conf, iter_data);
+ ieee80211_key_iter(hw, &sdata->vif, key,
+ iter, iter_data);
}
}
EXPORT_SYMBOL(ieee80211_iter_keys);
@@ -1027,17 +1048,8 @@ _ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
{
struct ieee80211_key *key;
- list_for_each_entry_rcu(key, &sdata->key_list, list) {
- /* skip keys of station in removal process */
- if (key->sta && key->sta->removed)
- continue;
- if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
- continue;
-
- iter(hw, &sdata->vif,
- key->sta ? &key->sta->sta : NULL,
- &key->conf, iter_data);
- }
+ list_for_each_entry_rcu(key, &sdata->key_list, list)
+ ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data);
}
void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
@@ -1368,12 +1380,19 @@ EXPORT_SYMBOL_GPL(ieee80211_remove_key);
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf)
+ struct ieee80211_key_conf *keyconf,
+ int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key;
int err;
+ struct ieee80211_link_data *link_data =
+ link_id < 0 ? &sdata->deflink :
+ sdata_dereference(sdata->link[link_id], sdata);
+
+ if (WARN_ON(!link_data))
+ return ERR_PTR(-EINVAL);
if (WARN_ON(!local->wowlan))
return ERR_PTR(-EINVAL);
@@ -1390,8 +1409,9 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
- /* FIXME: this function needs to get a link ID */
- err = ieee80211_key_link(key, &sdata->deflink, NULL);
+ key->conf.link_id = link_data->link_id;
+
+ err = ieee80211_key_link(key, link_data, NULL);
if (err)
return ERR_PTR(err);
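[note] With the added link_id parameter, WoWLAN rekey offload can install the new GTK on the correct link of an MLD instead of always using the default link. A hedged caller sketch; the driver-side variable names are assumptions:

	struct ieee80211_key_conf *keyconf;

	/* pass the link the GTK belongs to, or -1 on non-MLO interfaces */
	keyconf = ieee80211_gtk_rekey_add(vif, &new_gtk_conf, link_id);
	if (IS_ERR(keyconf))
		return PTR_ERR(keyconf);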
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d4f86955afa6..58a76bcd6ae6 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -2,7 +2,7 @@
/*
* MLO link handling
*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -36,13 +36,16 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
link->conf = link_conf;
link_conf->link_id = link_id;
link_conf->vif = &sdata->vif;
+ link->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
+ link->user_power_level = sdata->local->user_power_level;
+ link_conf->txpower = INT_MIN;
- wiphy_work_init(&link->csa_finalize_work,
+ wiphy_work_init(&link->csa.finalize_work,
ieee80211_csa_finalize_work);
wiphy_work_init(&link->color_change_finalize_work,
ieee80211_color_change_finalize_work);
- INIT_DELAYED_WORK(&link->color_collision_detect_work,
- ieee80211_color_collision_detection_work);
+ wiphy_delayed_work_init(&link->color_collision_detect_work,
+ ieee80211_color_collision_detection_work);
INIT_LIST_HEAD(&link->assigned_chanctx_list);
INIT_LIST_HEAD(&link->reserved_chanctx_list);
wiphy_delayed_work_init(&link->dfs_cac_timer_work,
@@ -72,7 +75,22 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_mgd_stop_link(link);
- cancel_delayed_work_sync(&link->color_collision_detect_work);
+ wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
+ &link->color_collision_detect_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->color_change_finalize_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->csa.finalize_work);
+
+ if (link->sdata->wdev.links[link->link_id].cac_started) {
+ wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+ cfg80211_cac_event(link->sdata->dev,
+ &link->conf->chanreq.oper,
+ NL80211_RADAR_CAC_ABORTED,
+ GFP_KERNEL, link->link_id);
+ }
+
ieee80211_link_release_channel(link);
}
@@ -354,9 +372,52 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- /* FIXME: kill TDLS connections on the link */
+ ieee80211_teardown_tdls_peers(link);
- ieee80211_link_release_channel(link);
+ __ieee80211_link_release_channel(link, true);
+
+ /*
+ * If CSA is (still) active while the link is deactivated,
+ * just schedule the channel switch work for the time we
+ * had previously calculated, and we'll take the process
+ * from there.
+ */
+ if (link->conf->csa_active)
+ wiphy_delayed_work_queue(local->hw.wiphy,
+ &link->u.mgd.csa.switch_work,
+ link->u.mgd.csa.time -
+ jiffies);
+ }
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_data *link;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+
+ /*
+ * This call really should not fail. Unfortunately, it appears
+ * that this may happen occasionally with some drivers. Should
+ * it happen, we are stuck in a bad place as going backwards is
+ * not really feasible.
+ *
+ * So let's just tell link_use_channel that it must not fail to
+ * assign the channel context (from mac80211's perspective) and
+ * assume the driver is going to trigger a recovery flow if it
+ * had a failure.
+ * That really is not great nor guaranteed to work. But at least
+ * the internal mac80211 state remains consistent and there is
+ * a chance that we can recover.
+ */
+ ret = _ieee80211_link_use_channel(link,
+ &link->conf->chanreq,
+ IEEE80211_CHANCTX_SHARED,
+ true);
+ WARN_ON_ONCE(ret);
+
+ /*
+ * inform about the link info changed parameters after all
+ * stations are also added
+ */
}
list_for_each_entry(sta, &local->sta_list, list) {
@@ -402,10 +463,6 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_use_channel(link, &link->conf->chandef,
- IEEE80211_CHANCTX_SHARED);
- WARN_ON_ONCE(ret);
-
ieee80211_mgd_set_link_qos_params(link);
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_ERP_CTS_PROT |
@@ -444,10 +501,16 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links)
lockdep_assert_wiphy(local->hw.wiphy);
- if (!drv_can_activate_links(local, sdata, active_links))
+ if (WARN_ON(!active_links))
return -EINVAL;
old_active = sdata->vif.active_links;
+ if (old_active == active_links)
+ return 0;
+
+ if (!drv_can_activate_links(local, sdata, active_links))
+ return -EINVAL;
+
if (old_active & active_links) {
/*
* if there's at least one link that stays active across
@@ -472,6 +535,9 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ if (WARN_ON(!active_links))
+ return;
+
if (!ieee80211_sdata_running(sdata))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f2ece7793573..53e5aee46885 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <net/mac80211.h>
@@ -93,16 +93,32 @@ static void ieee80211_reconfig_filter(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
+static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef = {};
+ struct cfg80211_chan_def *oper = NULL;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC;
u32 changed = 0;
int power;
u32 offchannel_flag;
+ if (!local->emulate_chanctx)
+ return 0;
+
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ if (ctx && !WARN_ON(!ctx->def.chan)) {
+ oper = &ctx->def;
+ if (ctx->rx_chains_static > 1)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else if (ctx->rx_chains_dynamic > 1)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_STATIC;
+ }
+
if (local->scan_chandef.chan) {
chandef = local->scan_chandef;
} else if (local->tmp_channel) {
@@ -110,25 +126,30 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
chandef.center_freq1 = chandef.chan->center_freq;
chandef.freq1_offset = chandef.chan->freq_offset;
- } else
- chandef = local->_oper_chandef;
+ } else if (oper) {
+ chandef = *oper;
+ } else {
+ chandef = local->dflt_chandef;
+ }
- WARN(!cfg80211_chandef_valid(&chandef),
- "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
- chandef.chan->center_freq, chandef.chan->freq_offset,
- chandef.width, chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
+ if (WARN(!cfg80211_chandef_valid(&chandef),
+ "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
+ chandef.chan ? chandef.chan->center_freq : -1,
+ chandef.chan ? chandef.chan->freq_offset : 0,
+ chandef.width, chandef.center_freq1, chandef.freq1_offset,
+ chandef.center_freq2))
+ return 0;
- if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef))
+ if (!oper || !cfg80211_chandef_identical(&chandef, oper))
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (offchannel_flag ||
- !cfg80211_chandef_identical(&local->hw.conf.chandef,
- &local->_oper_chandef)) {
+ /* force it also for scanning, since drivers might config differently */
+ if (offchannel_flag || local->scanning || local->in_reconfig ||
+ !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
@@ -140,12 +161,14 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
* that otherwise STATIC is used.
*/
local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
- } else if (local->hw.conf.smps_mode != local->smps_mode) {
- local->hw.conf.smps_mode = local->smps_mode;
+ } else if (local->hw.conf.smps_mode != smps_mode) {
+ local->hw.conf.smps_mode = smps_mode;
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
power = ieee80211_chandef_max_power(&chandef);
+ if (local->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
+ power = min(local->user_power_level, power);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -173,12 +196,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- if (!local->use_chanctx)
- changed |= ieee80211_hw_conf_chan(local);
- else
- changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_SMPS);
+ WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
ret = drv_config(local, changed);
@@ -202,13 +222,115 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
return ret;
}
+/* for scanning, offchannel and chanctx emulation only */
+static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u32 changed;
+
+ if (!local->open_count)
+ return 0;
+
+ changed = ieee80211_calc_hw_conf_chan(local, ctx);
+ if (!changed)
+ return 0;
+
+ return drv_config(local, changed);
+}
+
+int ieee80211_hw_conf_chan(struct ieee80211_local *local)
+{
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL);
+}
+
+void ieee80211_hw_conf_init(struct ieee80211_local *local)
+{
+ u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS);
+
+ if (WARN_ON(!local->open_count))
+ return;
+
+ if (local->emulate_chanctx) {
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ changed |= ieee80211_calc_hw_conf_chan(local,
+ ctx ? &ctx->conf : NULL);
+ }
+
+ WARN_ON(drv_config(local, changed));
+}
+
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ return _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_add_chanctx);
+
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = false;
+
+ _ieee80211_hw_conf_chan(local, NULL);
+}
+EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx);
+
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_change_chanctx);
+
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled;
+ _ieee80211_hw_conf_chan(local, vifs[0].new_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx);
+
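[note] Drivers without real channel-context support now opt into the emulation explicitly by pointing their ops at these helpers; ieee80211_alloc_hw_nm() below keys emulate_chanctx off exactly this wiring. A sketch of such an ops table (foo_ops and the elided callbacks are placeholders):

	static const struct ieee80211_ops foo_ops = {
		/* ... tx/start/stop/config/... as before ... */
		.add_chanctx = ieee80211_emulate_add_chanctx,
		.remove_chanctx = ieee80211_emulate_remove_chanctx,
		.change_chanctx = ieee80211_emulate_change_chanctx,
		.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
	};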
#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
BSS_CHANGED_IDLE |\
BSS_CHANGED_PS |\
BSS_CHANGED_IBSS |\
BSS_CHANGED_ARP_FILTER |\
BSS_CHANGED_SSID |\
- BSS_CHANGED_MLD_VALID_LINKS)
+ BSS_CHANGED_MLD_VALID_LINKS |\
+ BSS_CHANGED_MLD_TTLM)
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
@@ -217,6 +339,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
might_sleep();
+ WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif));
+
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
@@ -249,7 +373,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
- /* FIXME: should be for each link */
trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf,
changed);
if (local->ops->link_info_changed)
@@ -303,9 +426,9 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_ERP_SLOT;
}
-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+/* context: requires softirqs disabled */
+void ieee80211_handle_queued_frames(struct ieee80211_local *local)
{
- struct ieee80211_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -330,6 +453,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
}
}
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+{
+ struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+
+ ieee80211_handle_queued_frames(local);
+}
+
static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -596,8 +726,13 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
},
[NL80211_IFTYPE_P2P_DEVICE] = {
.tx = 0xffff,
+ /*
+ * To support P2P PASN pairing, let user space also register to
+ * receive AUTH frames on the P2P device interface.
+ */
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
},
};
@@ -644,7 +779,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
struct ieee80211_local *local;
int priv_size, i;
struct wiphy *wiphy;
- bool use_chanctx;
+ bool emulate_chanctx;
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
@@ -659,12 +794,26 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
return NULL;
/* check all or no channel context operations exist */
- i = !!ops->add_chanctx + !!ops->remove_chanctx +
- !!ops->change_chanctx + !!ops->assign_vif_chanctx +
- !!ops->unassign_vif_chanctx;
- if (WARN_ON(i != 0 && i != 5))
- return NULL;
- use_chanctx = i == 5;
+ if (ops->add_chanctx == ieee80211_emulate_add_chanctx &&
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx &&
+ ops->change_chanctx == ieee80211_emulate_change_chanctx) {
+ if (WARN_ON(ops->assign_vif_chanctx ||
+ ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = true;
+ } else {
+ if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx ||
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx ||
+ ops->change_chanctx == ieee80211_emulate_change_chanctx))
+ return NULL;
+ if (WARN_ON(!ops->add_chanctx ||
+ !ops->remove_chanctx ||
+ !ops->change_chanctx ||
+ !ops->assign_vif_chanctx ||
+ !ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = false;
+ }
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
@@ -698,7 +847,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- if (!use_chanctx || ops->remain_on_channel)
+ if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
@@ -734,8 +883,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
}
- if (!ops->set_key)
+ if (!ops->set_key) {
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+ }
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
@@ -752,7 +904,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
- local->use_chanctx = use_chanctx;
+ local->emulate_chanctx = emulate_chanctx;
+
+ if (emulate_chanctx)
+ ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA);
/*
* We need a bit of data queued to build aggregates properly, so
@@ -829,7 +984,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_dfs_radar_detected_work);
wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter);
- local->smps_mode = IEEE80211_SMPS_OFF;
wiphy_work_init(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -904,9 +1058,9 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
return 0;
/* Driver provides cipher suites, but we need to exclude WEP */
- suites = kmemdup(local->hw.wiphy->cipher_suites,
- sizeof(u32) * local->hw.wiphy->n_cipher_suites,
- GFP_KERNEL);
+ suites = kmemdup_array(local->hw.wiphy->cipher_suites,
+ local->hw.wiphy->n_cipher_suites,
+ sizeof(u32), GFP_KERNEL);
if (!suites)
return -ENOMEM;
@@ -944,6 +1098,27 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
return 0;
}
+static bool
+ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb)
+{
+ int i, j;
+
+ for (i = 0; i < n_comb; i++, c++) {
+ /* DFS is not supported with multi-channel combinations yet */
+ if (c->radar_detect_widths &&
+ c->num_different_channels > 1)
+ return false;
+
+ /* mac80211 doesn't support more than one IBSS interface */
+ for (j = 0; j < c->n_limits; j++)
+ if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
+ c->limits[j].max > 1)
+ return false;
+ }
+
+ return true;
+}
+
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -980,7 +1155,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* as much, e.g. monitoring beacons would be hard if we
* might not even know which link is active at which time.
*/
- if (WARN_ON(!local->use_chanctx))
+ if (WARN_ON(local->emulate_chanctx))
return -EINVAL;
if (WARN_ON(!local->ops->link_info_changed))
@@ -1014,9 +1189,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS)))
return -EINVAL;
-
- if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP)))
- return -EINVAL;
}
#ifdef CONFIG_PM
@@ -1024,7 +1196,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
return -EINVAL;
#endif
- if (!local->use_chanctx) {
+ if (local->emulate_chanctx) {
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
@@ -1033,17 +1205,20 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (comb->num_different_channels > 1)
return -EINVAL;
}
- } else {
- /* DFS is not supported with multi-channel combinations yet */
- for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *comb;
+ }
- comb = &local->hw.wiphy->iface_combinations[i];
+ if (hw->wiphy->n_radio) {
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ const struct wiphy_radio *radio = &hw->wiphy->radio[i];
- if (comb->radar_detect_widths &&
- comb->num_different_channels > 1)
+ if (!ieee80211_ifcomb_check(radio->iface_combinations,
+ radio->n_iface_combinations))
return -EINVAL;
}
+ } else {
+ if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations,
+ hw->wiphy->n_iface_combinations))
+ return -EINVAL;
}
/* Only HW csum features are currently compatible with mac80211 */
@@ -1090,11 +1265,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
&sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
- if (!local->use_chanctx && !local->_oper_chandef.chan) {
+ local->monitor_chanreq.oper = dflt_chandef;
+ if (local->emulate_chanctx) {
+ local->dflt_chandef = dflt_chandef;
local->hw.conf.chandef = dflt_chandef;
- local->_oper_chandef = dflt_chandef;
}
- local->monitor_chandef = dflt_chandef;
}
channels += sband->n_channels;
@@ -1115,8 +1290,31 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = supp_vht || sband->vht_cap.vht_supported;
for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+
supp_he = supp_he || iftd->he_cap.has_he;
supp_eht = supp_eht || iftd->eht_cap.has_eht;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+
+ /* currently no support for HE client where HT has 40 MHz but not HE */
+ if (iftd->he_cap.has_he &&
+ iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT)) &&
+ sband->ht_cap.ht_supported &&
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap))
+ return -EINVAL;
+
+ /* no support for per-band vendor elems with MLO */
+ if (WARN_ON(iftd->vendor_elems.len &&
+ hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+ return -EINVAL;
}
/* HT, VHT, HE require QoS, thus >= 4 queues */
@@ -1155,18 +1353,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
- /* mac80211 doesn't support more than one IBSS interface right now */
- for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
- int j;
-
- c = &hw->wiphy->iface_combinations[i];
-
- for (j = 0; j < c->n_limits; j++)
- if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
- c->limits[j].max > 1)
- return -EINVAL;
- }
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
sizeof(void *) * channels, GFP_KERNEL);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fccbcde3359a..974081324aa4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "mesh.h"
#include "wme.h"
@@ -97,7 +97,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
- cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+ cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chanreq.oper.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
@@ -107,10 +107,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
ie->vht_operation, ie->ht_operation,
&sta_chan_def);
- ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation,
+ ieee80211_chandef_he_6ghz_oper(sdata->local, ie->he_operation,
+ ie->eht_operation,
&sta_chan_def);
- if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
+ if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chanreq.oper,
&sta_chan_def))
return false;
@@ -435,9 +436,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->ht_cap.ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -476,16 +477,16 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!ht_cap->ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
- ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
+ ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chanreq.oper,
sdata->vif.bss_conf.ht_operation_mode,
false);
@@ -507,9 +508,9 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->vht_cap.vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
@@ -548,9 +549,9 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!vht_cap->vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
@@ -558,7 +559,7 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -566,29 +567,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
-
- if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
-
- return 0;
+ return ieee80211_put_he_cap(skb, sdata, sband, NULL);
}
int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
@@ -605,20 +595,20 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + sizeof(struct ieee80211_he_operation);
- if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
len += sizeof(struct ieee80211_he_6ghz_oper);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef);
+ ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -639,37 +629,25 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!iftd)
return 0;
- ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb);
+ ieee80211_put_he_6ghz_cap(skb, sdata, sdata->deflink.smps_mode);
return 0;
}
int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- if (!he_cap || !eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false);
-
- return 0;
+ return ieee80211_put_eht_cap(skb, sdata, sband, NULL);
}
int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
@@ -685,9 +663,9 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
@@ -697,7 +675,7 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap);
+ ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chanreq.oper, eht_cap);
return 0;
}
@@ -745,9 +723,9 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
return;
if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return;
sdata->vif.bss_conf.he_support = true;
@@ -769,6 +747,9 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_mesh_fast_tx_key key = {
+ .type = MESH_FAST_TX_TYPE_LOCAL
+ };
struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *meshhdr;
u8 sa[ETH_ALEN] __aligned(2);
@@ -804,7 +785,10 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
return false;
}
- entry = mesh_fast_tx_get(sdata, skb->data);
+ ether_addr_copy(key.addr, skb->data);
+ if (!ether_addr_equal(skb->data + ETH_ALEN, sdata->vif.addr))
+ key.type = MESH_FAST_TX_TYPE_PROXIED;
+ entry = mesh_fast_tx_get(sdata, &key);
if (!entry)
return false;
@@ -966,24 +950,22 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
int head_len, tail_len;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_chanctx_conf *chanctx_conf;
struct mesh_csa_settings *csa;
- enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
u8 ie_len_he_cap, ie_len_eht_cap;
u8 *pos;
struct ieee80211_sub_if_data *sdata;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon);
+ u32 rate_flags;
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
- band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ sband = ieee80211_get_sband(sdata);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
head_len = hdr_len +
2 + /* NULL SSID */
/* Channel Switch Announcement */
@@ -1107,7 +1089,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
}
rcu_read_unlock();
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_SUPP_RATES) ||
mesh_add_ds_params_ie(sdata, skb))
goto out_free;
@@ -1118,7 +1102,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
skb_trim(skb, 0);
bcn->tail = bcn->head + bcn->head_len;
- if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
@@ -1171,14 +1157,14 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- unsigned long bits = changed;
+ unsigned long bits[] = { BITMAP_FROM_U64(changed) };
u32 bit;
- if (!bits)
+ if (!changed)
return;
/* if we race with running work, worst case this work becomes a noop */
- for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
+ for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
set_bit(bit, ifmsh->mbss_changed);
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
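The switch to BITMAP_FROM_U64() above is what makes this loop safe on 32-bit builds: for_each_set_bit() walks an array of unsigned long, so a u64 change mask has to be expanded into such an array rather than truncated into a single long. A small illustrative sketch of the pattern (mask value and handler are hypothetical, not from this patch):

	u64 changed = BSS_CHANGED_BEACON | BIT_ULL(40);	/* hypothetical mask */
	unsigned long bits[] = { BITMAP_FROM_U64(changed) };
	u32 bit;

	/* bit 40 is still visited when BITS_PER_LONG == 32 */
	for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
		handle_changed_bit(bit);	/* hypothetical handler */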
@@ -1234,7 +1220,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
@@ -1276,11 +1262,12 @@ static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
* unavailable.
*/
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef,
+ &sdata->vif.bss_conf.chanreq.oper,
NL80211_IFTYPE_MESH_POINT);
if (err > 0)
cfg80211_radar_event(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+ &sdata->vif.bss_conf.chanreq.oper,
+ GFP_ATOMIC);
}
static bool
@@ -1292,7 +1279,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_supported_band *sband;
int err;
- ieee80211_conn_flags_t conn_flags = 0;
+ struct ieee80211_conn_settings conn = ieee80211_conn_settings_unlimited;
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -1301,15 +1288,18 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (!sband)
return false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_40:
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
default:
break;
@@ -1321,8 +1311,8 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
- vht_cap_info,
- conn_flags, sdata->vif.addr,
+ vht_cap_info, &conn,
+ sdata->vif.addr, false,
&csa_ie);
if (err < 0)
return false;
@@ -1335,7 +1325,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
ieee80211_mesh_csa_mark_radar(sdata);
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
@@ -1371,7 +1361,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
mcsa_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
@@ -1492,7 +1482,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (!elems)
return;
- /* ignore non-mesh or secure / unsecure mismatch */
+ /* ignore non-mesh or secure / insecure mismatch */
if ((!elems->mesh_id || !elems->mesh_config) ||
(elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
(!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
@@ -1551,7 +1541,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
*changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
- sdata->vif.bss_conf.chandef.chan->center_freq);
+ sdata->vif.bss_conf.chanreq.oper.chan->center_freq);
return 0;
}
@@ -1786,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+ ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ad8469293d71..3f9664e4e00c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -94,6 +94,7 @@ enum mesh_deferred_task_flags {
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
* @path_change_count: the number of path changes to destination
+ * @fast_tx_check: timestamp of last fast-xmit enable attempt
*
*
* The dst address is unique in the mesh path table. Since the mesh_path is
@@ -134,9 +135,38 @@ struct mesh_path {
#define MESH_FAST_TX_CACHE_TIMEOUT 8000 /* msecs */
/**
+ * enum ieee80211_mesh_fast_tx_type - cached mesh fast tx entry type
+ *
+ * @MESH_FAST_TX_TYPE_LOCAL: tx from the local vif address as SA
+ * @MESH_FAST_TX_TYPE_PROXIED: local tx with a different SA (e.g. bridged)
+ * @MESH_FAST_TX_TYPE_FORWARDED: forwarded from a different mesh point
+ * @NUM_MESH_FAST_TX_TYPE: number of entry types
+ */
+enum ieee80211_mesh_fast_tx_type {
+ MESH_FAST_TX_TYPE_LOCAL,
+ MESH_FAST_TX_TYPE_PROXIED,
+ MESH_FAST_TX_TYPE_FORWARDED,
+
+ /* must be last */
+ NUM_MESH_FAST_TX_TYPE
+};
+
+
+/**
+ * struct ieee80211_mesh_fast_tx_key - cached mesh fast tx entry key
+ *
+ * @addr: The Ethernet DA for this entry
+ * @type: cache entry type
+ */
+struct ieee80211_mesh_fast_tx_key {
+ u8 addr[ETH_ALEN] __aligned(2);
+ u16 type;
+};
+
+/**
* struct ieee80211_mesh_fast_tx - cached mesh fast tx entry
* @rhash: rhashtable pointer
- * @addr_key: The Ethernet DA which is the key for this entry
+ * @key: the lookup key for this cache entry
* @fast_tx: base fast_tx data
* @hdr: cached mesh and rfc1042 headers
* @hdrlen: length of mesh + rfc1042
@@ -147,7 +177,7 @@ struct mesh_path {
*/
struct ieee80211_mesh_fast_tx {
struct rhash_head rhash;
- u8 addr_key[ETH_ALEN] __aligned(2);
+ struct ieee80211_mesh_fast_tx_key key;
struct ieee80211_fast_tx fast_tx;
u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)];
@@ -333,7 +363,8 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr);
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mesh_fast_tx_key *key);
bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags);
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 024f48db6b05..4e9546e998b6 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "wme.h"
#include "mesh.h"
@@ -220,12 +220,12 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
/**
* mesh_path_error_tx - Sends a PERR mesh management frame
*
+ * @sdata: local mesh subif
* @ttl: allowed remaining hops
* @target: broken destination
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
- * @sdata: local mesh subif
*
* Note: This function may be called with driver locks taken that the driver
* also acquires in the TX path. To avoid a deadlock we don't transmit the
@@ -1137,8 +1137,8 @@ enddiscovery:
/**
* mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
*
- * @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
+ * @skb: 802.11 frame to be sent
*
* Lookup next hop for given skb and start path discovery if no
* forwarding information is found.
@@ -1245,8 +1245,8 @@ void mesh_path_refresh(struct ieee80211_sub_if_data *sdata,
* this function is considered "using" the associated mpath, so preempt a path
* refresh if this mpath expires soon.
*
- * @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
+ * @skb: 802.11 frame to be sent
*
* Returns: 0 if the next hop was found. Nonzero otherwise.
*/
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 735edde1bd81..9f9cb5af0a97 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -37,8 +37,8 @@ static const struct rhashtable_params mesh_rht_params = {
static const struct rhashtable_params fast_tx_rht_params = {
.nelem_hint = 10,
.automatic_shrinking = true,
- .key_len = ETH_ALEN,
- .key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
+ .key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
+ .key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
.hashfn = mesh_table_hash,
};
@@ -300,8 +300,8 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
- * @idx: index
* @sdata: local subif, or NULL for all entries
+ * @idx: index
*
* Returns: pointer to the mesh path structure, or NULL if not found.
*
@@ -315,8 +315,8 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
/**
* mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
- * @idx: index
* @sdata: local subif, or NULL for all entries
+ * @idx: index
*
* Returns: pointer to the proxy path structure, or NULL if not found.
*
@@ -431,20 +431,21 @@ static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
}
struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mesh_fast_tx_key *key)
{
struct ieee80211_mesh_fast_tx *entry;
struct mesh_tx_cache *cache;
cache = &sdata->u.mesh.tx_cache;
- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (!entry)
return NULL;
if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
mpath_expired(entry->mpath)) {
spin_lock_bh(&cache->walk_lock);
- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
@@ -489,18 +490,24 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
+ build.key.type = MESH_FAST_TX_TYPE_LOCAL;
if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
/* This is required to keep the mppath alive */
mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
if (!mppath)
return;
build.mppath = mppath;
+ if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
+ build.key.type = MESH_FAST_TX_TYPE_PROXIED;
} else if (ieee80211_has_a4(hdr->frame_control)) {
mppath = mpath;
} else {
return;
}
+ if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
+ build.key.type = MESH_FAST_TX_TYPE_FORWARDED;
+
/* rate limit, in case fast xmit can't be enabled */
if (mppath->fast_tx_check == jiffies)
return;
@@ -547,7 +554,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
}
}
- memcpy(build.addr_key, mppath->dst, ETH_ALEN);
+ memcpy(build.key.addr, mppath->dst, ETH_ALEN);
build.timestamp = jiffies;
build.fast_tx.band = info->band;
build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
@@ -573,7 +580,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
prev = rhashtable_lookup_get_insert_fast(&cache->rht,
&entry->rhash,
fast_tx_rht_params);
- if (unlikely(IS_ERR(prev))) {
+ if (IS_ERR(prev)) {
kfree(entry);
goto unlock_cache;
}
@@ -600,11 +607,10 @@ unlock_sta:
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
- struct mesh_tx_cache *cache;
+ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
return;
@@ -622,7 +628,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (entry->mpath == mpath)
@@ -637,7 +642,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (rcu_access_pointer(entry->mpath->next_hop) == sta)
@@ -649,20 +653,25 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
+ struct ieee80211_mesh_fast_tx_key key = {};
struct ieee80211_mesh_fast_tx *entry;
+ int i;
- cache = &sdata->u.mesh.tx_cache;
+ ether_addr_copy(key.addr, addr);
spin_lock_bh(&cache->walk_lock);
- entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
- if (entry)
- mesh_fast_tx_entry_free(cache, entry);
+ for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
+ key.type = i;
+ entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
+ if (entry)
+ mesh_fast_tx_entry_free(cache, entry);
+ }
spin_unlock_bh(&cache->walk_lock);
}
/**
* mesh_path_add - allocate and add a new path to the mesh path table
- * @dst: destination address of the path (ETH_ALEN length)
* @sdata: local subif
+ * @dst: destination address of the path (ETH_ALEN length)
*
* Returns: 0 on success
*
@@ -907,8 +916,8 @@ static int table_path_del(struct mesh_table *tbl,
/**
* mesh_path_del - delete a mesh path from the table
*
- * @addr: dst address (ETH_ALEN length)
* @sdata: local subif
+ * @addr: dst address (ETH_ALEN length)
*
* Returns: 0 if successful
*/
@@ -987,8 +996,8 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
- * @skb: frame to discard
* @sdata: network subif the frame was to be sent through
+ * @skb: frame to discard
*
 * Locking: the function must be called within a rcu_read_lock region
*/
@@ -1008,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct mesh_preq_queue *preq, *tmp;
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(mpath->sdata, skb);
+
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+ list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+ if (ether_addr_equal(mpath->dst, preq->dst)) {
+ list_del(&preq->list);
+ kfree(preq);
+ --ifmsh->preq_queue_len;
+ }
+ }
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}
/**
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 28bf794f67f8..5a0156e11c91 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
#include <linux/gfp.h>
@@ -163,7 +163,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
@@ -196,7 +196,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
else if (ht20_sta &&
- sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ sdata->vif.bss_conf.chanreq.oper.width > NL80211_CHAN_WIDTH_20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -226,10 +226,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot);
int err = -ENOMEM;
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
@@ -266,14 +264,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
struct ieee80211_supported_band *sband;
- enum nl80211_band band;
+ u32 rate_flags, basic_rates;
sband = ieee80211_get_sband(sdata);
if (!sband) {
err = -EINVAL;
goto free;
}
- band = sband->band;
/* capability info */
pos = skb_put_zero(skb, 2);
@@ -282,8 +279,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
put_unaligned_le16(sta->sta.aid, pos);
}
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+ basic_rates = sdata->vif.bss_conf.basic_rates;
+
+ if (ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_SUPP_RATES) ||
+ ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb))
@@ -426,15 +432,14 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- u32 rates, basic_rates = 0, changed = 0;
+ u32 rates, changed = 0;
enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth;
sband = ieee80211_get_sband(sdata);
if (!sband)
return;
- rates = ieee80211_sta_get_rates(sdata, elems, sband->band,
- &basic_rates);
+ rates = ieee80211_sta_get_rates(sdata, elems, sband->band, NULL);
spin_lock_bh(&sta->mesh->plink_lock);
sta->deflink.rx_stats.last_rx = jiffies;
@@ -480,10 +485,11 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
sta->sta.deflink.bandwidth = IEEE80211_STA_RX_BW_20;
}
+ /* FIXME: this check is wrong without SW rate control */
if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
- rate_control_rate_init(sta);
+ rate_control_rate_init(&sta->deflink);
else
- rate_control_rate_update(local, sband, sta, 0, changed);
+ rate_control_rate_update(local, sband, &sta->deflink, changed);
out:
spin_unlock_bh(&sta->mesh->plink_lock);
}
@@ -661,7 +667,7 @@ void mesh_plink_timer(struct timer_list *t)
/*
* This STA is valid because sta_info_destroy() will
* del_timer_sync() this timer after having made sure
- * it cannot be readded (by deleting the plink.)
+ * it cannot be re-added (by deleting the plink.)
*/
sta = mesh->plink_sta;
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 8cf3f395f52f..3a66b4cefca7 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -175,7 +175,7 @@ static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata,
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) {
- /* Since ajusting the tsf here would
+ /* Since adjusting the tsf here would
* require a possibly blocking call
* to the driver tsf setter, we punt
* the tsf adjustment to the mesh tasklet
diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c
index a57502d9ffec..8a1afc93e749 100644
--- a/net/mac80211/michael.c
+++ b/net/mac80211/michael.c
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/ieee80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "michael.h"
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2022a26eb881..36a9be9a66c8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -31,6 +31,8 @@
#include "led.h"
#include "fils_aead.h"
+#include <kunit/static_stub.h>
+
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
@@ -46,6 +48,8 @@
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00
+#define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5)
+
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
@@ -92,84 +96,6 @@ MODULE_PARM_DESC(probe_wait_ms,
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
- * Extract from the given disabled subchannel bitmap (raw format
- * from the EHT Operation Element) the bits for the subchannel
- * we're using right now.
- */
-static u16
-ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
- struct cfg80211_chan_def *chandef, u16 bitmap)
-{
- struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
- struct cfg80211_chan_def ap_chandef = *chandef;
- u32 ap_center_freq, local_center_freq;
- u32 ap_bw, local_bw;
- int ap_start_freq, local_start_freq;
- u16 shift, mask;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
- !(eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
- return 0;
-
- /* set 160/320 supported to get the full AP definition */
- ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- true, true, &ap_chandef);
- ap_center_freq = ap_chandef.center_freq1;
- ap_bw = 20 * BIT(u8_get_bits(info->control,
- IEEE80211_EHT_OPER_CHAN_WIDTH));
- ap_start_freq = ap_center_freq - ap_bw / 2;
- local_center_freq = chandef->center_freq1;
- local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
- local_start_freq = local_center_freq - local_bw / 2;
- shift = (local_start_freq - ap_start_freq) / 20;
- mask = BIT(local_bw / 20) - 1;
-
- return (bitmap >> shift) & mask;
-}
-
-/*
- * Handle the puncturing bitmap, possibly downgrading bandwidth to get a
- * valid bitmap.
- */
-static void
-ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u16 bitmap, u64 *changed)
-{
- struct cfg80211_chan_def *chandef = &link->conf->chandef;
- struct ieee80211_local *local = link->sdata->local;
- u16 extracted;
- u64 _changed = 0;
-
- if (!changed)
- changed = &_changed;
-
- while (chandef->width > NL80211_CHAN_WIDTH_40) {
- extracted =
- ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
- bitmap);
-
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw,
- DISALLOW_PUNCTURING)))
- break;
- link->u.mgd.conn_flags |=
- ieee80211_chandef_downgrade(chandef);
- *changed |= BSS_CHANGED_BANDWIDTH;
- }
-
- if (chandef->width <= NL80211_CHAN_WIDTH_40)
- extracted = 0;
-
- if (link->conf->eht_puncturing != extracted) {
- link->conf->eht_puncturing = extracted;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-}
-
-/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
@@ -223,77 +149,84 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-static ieee80211_conn_flags_t
-ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_link_data *link,
- ieee80211_conn_flags_t conn_flags,
- struct ieee80211_supported_band *sband,
- struct ieee80211_channel *channel,
- u32 vht_cap_info,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- struct cfg80211_chan_def *chandef, bool tracking)
+static enum ieee80211_conn_mode
+ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ u32 vht_cap_info,
+ const struct ieee802_11_elems *elems,
+ bool ignore_ht_channel_mismatch,
+ const struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
{
+ const struct ieee80211_ht_operation *ht_oper = elems->ht_operation;
+ const struct ieee80211_vht_operation *vht_oper = elems->vht_operation;
+ const struct ieee80211_he_operation *he_oper = elems->he_operation;
+ const struct ieee80211_eht_operation *eht_oper = elems->eht_operation;
+ struct ieee80211_supported_band *sband =
+ sdata->local->hw.wiphy->bands[channel->band];
struct cfg80211_chan_def vht_chandef;
- struct ieee80211_sta_ht_cap sta_ht_cap;
- ieee80211_conn_flags_t ret;
+ bool no_vht = false;
u32 ht_cfreq;
- memset(chandef, 0, sizeof(struct cfg80211_chan_def));
- chandef->chan = channel;
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = channel->center_freq;
- chandef->freq1_offset = channel->freq_offset;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = channel,
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .center_freq1 = channel->center_freq,
+ .freq1_offset = channel->freq_offset,
+ };
- if (channel->band == NL80211_BAND_6GHZ) {
- if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
- chandef)) {
- mlme_dbg(sdata,
- "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- } else {
- ret = 0;
- }
- vht_chandef = *chandef;
- goto out;
- } else if (sband->band == NL80211_BAND_S1GHZ) {
- if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
+ /* get special S1G case out of the way */
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) {
sdata_info(sdata,
"Missing S1G Operation Element? Trying operating == primary\n");
chandef->width = ieee80211_s1g_channel_width(channel);
}
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
- goto out;
+ return IEEE80211_CONN_MODE_S1G;
}
- memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+ /* get special 6 GHz case out of the way */
+ if (sband->band == NL80211_BAND_6GHZ) {
+ enum ieee80211_conn_mode mode = IEEE80211_CONN_MODE_EHT;
- if (!ht_oper || !sta_ht_cap.ht_supported) {
- mlme_dbg(sdata, "HT operation missing / HT not supported\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ /* this is an error */
+ if (conn->mode < IEEE80211_CONN_MODE_HE)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!elems->he_6ghz_capa || !elems->he_cap) {
+ sdata_info(sdata,
+ "HE 6 GHz AP is missing HE/HE 6 GHz band capability\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ if (!eht_oper || !elems->eht_cap) {
+ eht_oper = NULL;
+ mode = IEEE80211_CONN_MODE_HE;
+ }
+
+ if (!ieee80211_chandef_he_6ghz_oper(sdata->local, he_oper,
+ eht_oper, chandef)) {
+ sdata_info(sdata, "bad HE/EHT 6 GHz operation\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ return mode;
}
+ /* now we have the progression HT, VHT, ... */
+ if (conn->mode < IEEE80211_CONN_MODE_HT)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!ht_oper || !elems->ht_cap_elem)
+ return IEEE80211_CONN_MODE_LEGACY;
+
chandef->width = NL80211_CHAN_WIDTH_20;
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (!tracking && channel->center_freq != ht_cfreq) {
+ if (!ignore_ht_channel_mismatch && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -305,36 +238,22 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ return IEEE80211_CONN_MODE_LEGACY;
}
- /* check 40 MHz support, if we have it */
- if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- ieee80211_chandef_ht_oper(ht_oper, chandef);
- } else {
- mlme_dbg(sdata, "40 MHz not supported\n");
- /* 40 MHz (and 80 MHz) must be supported for VHT */
- ret = IEEE80211_CONN_DISABLE_VHT;
- /* also mark 40 MHz disabled */
- ret |= IEEE80211_CONN_DISABLE_40MHZ;
- goto out;
- }
+ ieee80211_chandef_ht_oper(ht_oper, chandef);
- if (!vht_oper || !sband->vht_cap.vht_supported) {
- mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
- }
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return IEEE80211_CONN_MODE_HT;
vht_chandef = *chandef;
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- he_oper &&
- (le32_to_cpu(he_oper->he_oper_params) &
- IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+
+ /*
+ * having he_cap/he_oper parsed out implies we're at
+ * least operating as HE STA
+ */
+ if (elems->he_cap && he_oper &&
+ he_oper->he_oper_params & cpu_to_le32(IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
struct ieee80211_vht_operation he_oper_vht_cap;
/*
@@ -347,257 +266,939 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
&he_oper_vht_cap, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE))
- sdata_info(sdata,
- "HE AP VHT information is invalid, disabling HE\n");
- ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "HE AP VHT information is invalid, disabling HE\n");
+ /* this will cause us to re-parse as VHT STA */
+ return IEEE80211_CONN_MODE_VHT;
+ }
+ } else if (!vht_oper || !elems->vht_cap_elem) {
+ if (sband->band == NL80211_BAND_5GHZ) {
+ sdata_info(sdata,
+ "VHT information is missing, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
+ no_vht = true;
+ } else if (sband->band == NL80211_BAND_2GHZ) {
+ no_vht = true;
} else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
vht_oper, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ sdata_info(sdata,
+ "AP VHT information is invalid, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+ sdata_info(sdata,
+ "AP VHT information doesn't match HT, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
- ret = 0;
- goto out;
- }
+ *chandef = vht_chandef;
- if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information doesn't match HT, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ /* stick to current max mode if we or the AP don't have HE */
+ if (conn->mode < IEEE80211_CONN_MODE_HE ||
+ !elems->he_operation || !elems->he_cap) {
+ if (no_vht)
+ return IEEE80211_CONN_MODE_HT;
+ return IEEE80211_CONN_MODE_VHT;
}
- *chandef = vht_chandef;
+ /* stick to HE if we or the AP don't have EHT */
+ if (conn->mode < IEEE80211_CONN_MODE_EHT ||
+ !eht_oper || !elems->eht_cap)
+ return IEEE80211_CONN_MODE_HE;
/*
* handle the case that the EHT operation indicates that it holds EHT
* operation information (in case that the channel width differs from
* the channel width reported in HT/VHT/HE).
*/
- if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
+ if (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
struct cfg80211_chan_def eht_chandef = *chandef;
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- eht_chandef.width ==
- NL80211_CHAN_WIDTH_160,
- false, &eht_chandef);
+ &eht_chandef);
+
+ eht_chandef.punctured =
+ ieee80211_eht_oper_dis_subchan_bitmap(eht_oper);
if (!cfg80211_chandef_valid(&eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is invalid, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information is invalid, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is incompatible, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information doesn't match HT/VHT/HE, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
*chandef = eht_chandef;
}
- ret = 0;
+ return IEEE80211_CONN_MODE_EHT;
+}
+
+static bool
+ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ int link_id,
+ const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_he_operation *he_op)
+{
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
+ u16 mcs_80_map_tx, mcs_80_map_rx;
+ u16 ap_min_req_set;
+ int nss;
+
+ if (!he_cap)
+ return false;
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+ mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+
+ /* P802.11-REVme/D0.3
+ * 27.1.1 Introduction to the HE PHY
+ * ...
+ * An HE STA shall support the following features:
+ * ...
+ * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
+ * supported channel widths for HE SU PPDUs
+ */
+ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ link_id_info(sdata, link_id,
+ "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
+ mcs_80_map_tx, mcs_80_map_rx);
+ return false;
+ }
+
+ if (!he_op)
+ return true;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
-out:
/*
- * When tracking the current AP, don't do any further checks if the
- * new chandef is identical to the one we're currently using for the
- * connection. This keeps us from playing ping-pong with regulatory,
- * without it the following can happen (for example):
- * - connect to an AP with 80 MHz, world regdom allows 80 MHz
- * - AP advertises regdom US
- * - CRDA loads regdom US with 80 MHz prohibited (old database)
- * - the code below detects an unsupported channel, downgrades, and
- * we disconnect from the AP in the caller
- * - disconnect causes CRDA to reload world regdomain and the game
- * starts anew.
- * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ */
+ if (!ap_min_req_set)
+ return true;
+
+ /* make sure the AP is consistent with itself
*
- * It seems possible that there are still scenarios with CSA or real
- * bandwidth changes where a this could happen, but those cases are
- * less common and wouldn't completely prevent using the AP.
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * A STA that is operating in an HE BSS shall be able to receive and
+ * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
+ * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
+ * MLME-START.request primitive and shall be able to receive at each of
+ * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
+ * NSS Set field in the HE Capabilities parameter of the MLME-START.request
+ * primitive
*/
- if (tracking &&
- cfg80211_chandef_identical(chandef, &link->conf->chandef))
- return ret;
+ for (nss = 8; nss > 0; nss--) {
+ u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ u8 ap_rx_val;
+ u8 ap_tx_val;
+
+ if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
+ ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+
+ if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
+ link_id_info(sdata, link_id,
+ "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
+ nss, ap_rx_val, ap_tx_val, ap_op_val);
+ return false;
+ }
+ }
- /* don't print the message below for VHT mismatch if VHT is disabled */
- if (ret & IEEE80211_CONN_DISABLE_VHT)
- vht_chandef = *chandef;
+ return true;
+}
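+
+/*
+ * Illustrative note, assuming the standard 2-bit HE MCS map encoding
+ * (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported): an RX
+ * map of 0xfffa decodes as (0xfffa >> 0) & 3 == 2 and (0xfffa >> 2) & 3 == 2,
+ * i.e. MCS 0-11 on NSS 1 and 2, with the NSS 3-8 fields set to 3 (not
+ * supported), so the mandatory single-stream check above passes.
+ */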
+
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ u16 ap_min_req_set;
+ int i;
+
+ if (!sta_he_cap || !he_op)
+ return false;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
- * Ignore the DISABLED flag when we're already connected and only
- * tracking the APs beacon for bandwidth changes - otherwise we
- * might get disconnected here if we connect to an AP, update our
- * regulatory information based on the AP's country IE and the
- * information we have is wrong/outdated and disables the channel
- * that we're actually using for the connection to the AP.
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
- while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- tracking ? 0 :
- IEEE80211_CHAN_DISABLED)) {
- if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- break;
+ if (!ap_min_req_set)
+ return true;
+
+ /* Need to check this for 80 MHz, 160 MHz and 80+80 MHz */
+ for (i = 0; i < 3; i++) {
+ const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+ &sta_he_cap->he_mcs_nss_supp;
+ u16 sta_mcs_map_rx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+ u16 sta_mcs_map_tx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+ u8 nss;
+ bool verified = true;
+
+ /*
+ * For each bandwidth there is a maximum of 8 spatial streams
+ * possible. Each of the sta_mcs_map_* values is a 16-bit map built
+ * of 2 bits per NSS (1-8), with the values defined in enum
+ * ieee80211_he_mcs_support. Make sure the STA TX and RX
+ * capabilities aren't less than the AP's minimum requirements
+ * for this HE BSS per spatial stream.
+ * It is enough to find one such bandwidth that meets the
+ * requirements.
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+ u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+ u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+ if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ /*
+ * Make sure the HE AP doesn't require MCSs that aren't
+ * supported by the client as required by spec
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
+ * a BSS, unless it supports (i.e., is able to both transmit and
+ * receive using) all of the <HE-MCS, NSS> tuples in the basic
+ * HE-MCS and NSS set.
+ */
+ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+ verified = false;
+ break;
+ }
}
- ret |= ieee80211_chandef_downgrade(chandef);
+ if (verified)
+ return true;
}
- if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_HE))
- ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
+ /* If here, STA doesn't meet AP's HE min requirements */
+ return false;
+}
- if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_EHT))
- ret |= IEEE80211_CONN_DISABLE_EHT;
+static u8
+ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
+ const struct ieee80211_sta_eht_cap *sta_eht_cap,
+ unsigned int idx, int bw)
+{
+ u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
+ u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
- if (chandef->width != vht_chandef.width && !tracking)
- sdata_info(sdata,
- "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ /* handle us being a 20 MHz-only EHT STA - with four values
+ * for MCS 0-7, 8-9, 10-11, 12-13.
+ */
+ if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
+ return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
- WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
- return ret;
+ /* the others have MCS 0-9 together, rather than separately from 0-7 */
+ if (idx > 0)
+ idx--;
+
+ switch (bw) {
+ case 0:
+ return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
+ case 1:
+ if (!(he_phy_cap0 &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
+ case 2:
+ if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static bool
+ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_eht_operation *eht_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *sta_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
+ unsigned int i;
+
+ if (!sta_he_cap || !sta_eht_cap || !eht_op)
+ return false;
+
+ req = &eht_op->basic_mcs_nss;
+
+ for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
+ u8 req_rx_nss, req_tx_nss;
+ unsigned int bw;
+
+ req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_RX);
+ req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ for (bw = 0; bw < 3; bw++) {
+ u8 have, have_rx_nss, have_tx_nss;
+
+ have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
+ sta_eht_cap,
+ i, bw);
+ have_rx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_RX);
+ have_tx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ if (req_rx_nss > have_rx_nss ||
+ req_tx_nss > have_tx_nss)
+ return false;
+ }
+ }
+
+ return true;
+}
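+
+/*
+ * Illustrative note: each rx_tx_max_nss octet packs the maximum RX and
+ * TX NSS into the two nibbles selected by IEEE80211_EHT_MCS_NSS_RX and
+ * IEEE80211_EHT_MCS_NSS_TX. This is why the "pass check" paths in
+ * ieee80211_get_eht_cap_mcs_nss() return 0xff: that decodes to NSS 15
+ * in both directions, so no basic-set requirement can exceed it.
+ */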
+
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+ const u8 *supp_rates,
+ unsigned int supp_rates_len,
+ const u8 *ext_supp_rates,
+ unsigned int ext_supp_rates_len,
+ u32 *rates, u32 *basic_rates,
+ unsigned long *unknown_rates_selectors,
+ bool *have_higher_than_11mbit,
+ int *min_rate, int *min_rate_index)
+{
+ int i, j;
+
+ for (i = 0; i < supp_rates_len + ext_supp_rates_len; i++) {
+ u8 supp_rate = i < supp_rates_len ?
+ supp_rates[i] :
+ ext_supp_rates[i - supp_rates_len];
+ int rate = supp_rate & 0x7f;
+ bool is_basic = !!(supp_rate & 0x80);
+
+ if ((rate * 5) > 110 && have_higher_than_11mbit)
+ *have_higher_than_11mbit = true;
+
+ /*
+ * Skip membership selectors since they're not rates.
+ *
+ * Note: Even though the membership selector and the basic
+ * rate flag share the same bit, they are not exactly
+ * the same.
+ */
+ if (is_basic && rate >= BSS_MEMBERSHIP_SELECTOR_MIN) {
+ if (unknown_rates_selectors)
+ set_bit(rate, unknown_rates_selectors);
+ continue;
+ }
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ struct ieee80211_rate *br;
+ int brate;
+
+ br = &sband->bitrates[j];
+
+ brate = DIV_ROUND_UP(br->bitrate, 5);
+ if (brate == rate) {
+ if (rates)
+ *rates |= BIT(j);
+ if (is_basic && basic_rates)
+ *basic_rates |= BIT(j);
+ if (min_rate && (rate * 5) < *min_rate) {
+ *min_rate = rate * 5;
+ if (min_rate_index)
+ *min_rate_index = j;
+ }
+ break;
+ }
+ }
+
+ /* Handle an unknown entry as if it is an unknown selector */
+ if (is_basic && unknown_rates_selectors && j == sband->n_bitrates)
+ set_bit(rate, unknown_rates_selectors);
+ }
+}
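+
+/*
+ * Illustrative example of the rate encoding handled above: each octet in
+ * the (Extended) Supported Rates element carries a rate in units of
+ * 500 kbps in bits 0-6, with bit 7 marking it as basic or, for values at
+ * or above BSS_MEMBERSHIP_SELECTOR_MIN, as a membership selector rather
+ * than a rate. For example, 0x96 decodes to rate value 22, i.e. a basic
+ * 11 Mbps rate, while 0x16 is the same rate as a non-basic one.
+ */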
+
+static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ chandef, prohibited_flags))
+ return false;
+
+ if (chandef->punctured &&
+ ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING))
+ return false;
+
+ if (chandef->punctured && chandef->chan->band == NL80211_BAND_5GHZ &&
+ ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING_5GHZ))
+ return false;
+
+ return true;
+}
+
+static int ieee80211_chandef_num_subchans(const struct cfg80211_chan_def *c)
+{
+ if (c->width == NL80211_CHAN_WIDTH_80P80)
+ return 4 + 4;
+
+ return nl80211_chan_width_to_mhz(c->width) / 20;
+}
+
+static int ieee80211_chandef_num_widths(const struct cfg80211_chan_def *c)
+{
+ switch (c->width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ case NL80211_CHAN_WIDTH_320:
+ return 5;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+VISIBLE_IF_MAC80211_KUNIT int
+ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap,
+ u8 n_partial_subchans)
+{
+ int n = ieee80211_chandef_num_subchans(ap);
+ struct cfg80211_chan_def tmp = *ap;
+ int offset = 0;
+
+ /*
+ * Given a chandef (in this context, it's the AP's) and a number
+ * of subchannels that we want to look at ('n_partial_subchans'),
+ * calculate the offset in number of subchannels between the full
+ * and the subset with the desired width.
+ */
+
+ /* same number of subchannels means no offset, obviously */
+ if (n == n_partial_subchans)
+ return 0;
+
+ /* don't WARN - misconfigured APs could cause this if their N > width */
+ if (n < n_partial_subchans)
+ return 0;
+
+ while (ieee80211_chandef_num_subchans(&tmp) > n_partial_subchans) {
+ u32 prev = tmp.center_freq1;
+
+ ieee80211_chandef_downgrade(&tmp, NULL);
+
+ /*
+ * if center_freq moved up, half the original channels
+ * are gone now but were below, so increase offset
+ */
+ if (prev < tmp.center_freq1)
+ offset += ieee80211_chandef_num_subchans(&tmp);
+ }
+
+ /*
+ * 80+80 with secondary 80 below primary - four subchannels for it
+ * (we cannot downgrade *to* 80+80, so no need to consider 'tmp')
+ */
+ if (ap->width == NL80211_CHAN_WIDTH_80P80 &&
+ ap->center_freq2 < ap->center_freq1)
+ offset += 4;
+
+ return offset;
+}
+EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_calc_chandef_subchan_offset);
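+
+/*
+ * Worked example, assuming a downgrade keeps the half that contains the
+ * primary channel: for a 160 MHz AP chandef whose primary 20 MHz lies in
+ * the upper 80 MHz, requesting n_partial_subchans == 4 downgrades
+ * 160 -> 80, center_freq1 moves up, and the offset becomes 4, i.e. the
+ * partial 80 MHz block starts at subchannel index 4 of the full 160 MHz.
+ * With the primary in the lower half, the offset stays 0.
+ */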
+
+VISIBLE_IF_MAC80211_KUNIT void
+ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used)
+{
+ u8 needed = ieee80211_chandef_num_subchans(used);
+ u8 have = ieee80211_chandef_num_subchans(ap);
+ u8 tmp[IEEE80211_TPE_PSD_ENTRIES_320MHZ];
+ u8 offset;
+
+ if (!psd->valid)
+ return;
+
+ /* if N is zero, all defaults were used, no point in rearranging */
+ if (!psd->n)
+ goto out;
+
+ BUILD_BUG_ON(sizeof(tmp) != sizeof(psd->power));
+
+ /*
+ * This assumes that 'N' is consistent with the HE channel, as
+ * it should be (otherwise the AP is broken).
+ *
+ * In psd->power we have values in the order 0..N, 0..K, where
+ * N+K should cover the entire channel per 'ap', but even if it
+ * doesn't then we've pre-filled 'unlimited' as defaults.
+ *
+ * But this is all the wrong order, we want to have them in the
+ * order of the 'used' channel.
+ *
+ * So for example, we could have a 320 MHz EHT AP, which has the
+ * HE channel as 80 MHz (e.g. due to puncturing, which doesn't
+ * seem to be considered for the TPE), as follows:
+ *
+ * EHT 320: | | | | | | | | | | | | | | | | |
+ * HE 80: | | | | |
+ * used 160: | | | | | | | | |
+ *
+ * N entries: |--|--|--|--|
+ * K entries: |--|--|--|--|--|--|--|--| |--|--|--|--|
+ * power idx: 4 5 6 7 8 9 10 11 0 1 2 3 12 13 14 15
+ * full chan: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * used chan: 0 1 2 3 4 5 6 7
+ *
+ * The idx in the power array ('power idx') is like this since it
+ * comes directly from the element's N and K entries in their
+ * element order, and those are this way for HE compatibility.
+ *
+ * Rearrange them as desired here, first by putting them into the
+ * 'full chan' order, and then selecting the necessary subset for
+ * the 'used chan'.
+ */
+
+ /* first reorder according to AP channel */
+ offset = ieee80211_calc_chandef_subchan_offset(ap, psd->n);
+ for (int i = 0; i < have; i++) {
+ if (i < offset)
+ tmp[i] = psd->power[i + psd->n];
+ else if (i < offset + psd->n)
+ tmp[i] = psd->power[i - offset];
+ else
+ tmp[i] = psd->power[i];
+ }
+
+ /*
+ * and then select the subset for the used channel
+ * (set everything to defaults first in case a driver is confused)
+ */
+ memset(psd->power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(psd->power));
+ offset = ieee80211_calc_chandef_subchan_offset(ap, needed);
+ for (int i = 0; i < needed; i++)
+ psd->power[i] = tmp[offset + i];
+
+out:
+ /* limit, but don't lie if there are defaults in the data */
+ if (needed < psd->count)
+ psd->count = needed;
+}
+EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_rearrange_tpe_psd);
+
+static void ieee80211_rearrange_tpe(struct ieee80211_parsed_tpe *tpe,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used)
+{
+ /* ignore this completely for narrow/invalid channels */
+ if (!ieee80211_chandef_num_subchans(ap) ||
+ !ieee80211_chandef_num_subchans(used)) {
+ ieee80211_clear_tpe(tpe);
+ return;
+ }
+
+ for (int i = 0; i < 2; i++) {
+ int needed_pwr_count;
+
+ ieee80211_rearrange_tpe_psd(&tpe->psd_local[i], ap, used);
+ ieee80211_rearrange_tpe_psd(&tpe->psd_reg_client[i], ap, used);
+
+ /* limit this to the widths we actually need */
+ needed_pwr_count = ieee80211_chandef_num_widths(used);
+ if (needed_pwr_count < tpe->max_local[i].count)
+ tpe->max_local[i].count = needed_pwr_count;
+ if (needed_pwr_count < tpe->max_reg_client[i].count)
+ tpe->max_reg_client[i].count = needed_pwr_count;
+ }
+}
+
+/*
+ * The AP part of the channel request is used to distinguish settings
+ * to the device used for wider bandwidth OFDMA. This is used in the
+ * channel context code to assign two channel contexts even if they're
+ * both for the same channel, if the AP bandwidths are incompatible.
+ * If not EHT (or driver override) then ap.chan == NULL indicates that
+ * there's no wider BW OFDMA used.
+ */
+static void ieee80211_set_chanreq_ap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *ap_chandef)
+{
+ chanreq->ap.chan = NULL;
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ return;
+ if (sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
+ return;
+
+ chanreq->ap = *ap_chandef;
+}
+
+static struct ieee802_11_elems *
+ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_bss *cbss, int link_id,
+ struct ieee80211_chan_req *chanreq,
+ struct cfg80211_chan_def *ap_chandef,
+ unsigned long *userspace_selectors)
+{
+ const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
+ struct ieee80211_bss *bss = (void *)cbss->priv;
+ struct ieee80211_channel *channel = cbss->channel;
+ struct ieee80211_elems_parse_params parse_params = {
+ .link_id = -1,
+ .from_ap = true,
+ .start = ies->data,
+ .len = ies->len,
+ };
+ struct ieee802_11_elems *elems;
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_conn_mode ap_mode;
+ unsigned long unknown_rates_selectors[BITS_TO_LONGS(128)] = {};
+ unsigned long sta_selectors[BITS_TO_LONGS(128)] = {};
+ int ret;
+
+again:
+ parse_params.mode = conn->mode;
+ elems = ieee802_11_parse_elems_full(&parse_params);
+ if (!elems)
+ return ERR_PTR(-ENOMEM);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
+ elems, false, conn, ap_chandef);
+
+ /* this should be impossible since parsing depends on our mode */
+ if (WARN_ON(ap_mode > conn->mode)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ if (conn->mode != ap_mode) {
+ conn->mode = ap_mode;
+ kfree(elems);
+ goto again;
+ }
+
+ mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+ cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
+ sband = sdata->local->hw.wiphy->bands[channel->band];
+
+ ieee80211_get_rates(sband, elems->supp_rates, elems->supp_rates_len,
+ elems->ext_supp_rates, elems->ext_supp_rates_len,
+ NULL, NULL, unknown_rates_selectors, NULL, NULL,
+ NULL);
+
+ switch (channel->band) {
+ case NL80211_BAND_S1GHZ:
+ if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ return elems;
+ case NL80211_BAND_6GHZ:
+ if (ap_mode < IEEE80211_CONN_MODE_HE) {
+ link_id_info(sdata, link_id,
+ "Rejecting non-HE 6/7 GHz connection");
+ ret = -EINVAL;
+ goto free;
+ }
+ break;
+ default:
+ if (WARN_ON(ap_mode == IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ switch (ap_mode) {
+ case IEEE80211_CONN_MODE_S1G:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto free;
+ case IEEE80211_CONN_MODE_LEGACY:
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
+ case IEEE80211_CONN_MODE_HT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_40);
+ break;
+ case IEEE80211_CONN_MODE_VHT:
+ case IEEE80211_CONN_MODE_HE:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ break;
+ case IEEE80211_CONN_MODE_EHT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_320);
+ break;
+ }
+
+ chanreq->oper = *ap_chandef;
+
+ bitmap_copy(sta_selectors, userspace_selectors, 128);
+ if (conn->mode >= IEEE80211_CONN_MODE_HT)
+ set_bit(BSS_MEMBERSHIP_SELECTOR_HT_PHY, sta_selectors);
+ if (conn->mode >= IEEE80211_CONN_MODE_VHT)
+ set_bit(BSS_MEMBERSHIP_SELECTOR_VHT_PHY, sta_selectors);
+ if (conn->mode >= IEEE80211_CONN_MODE_HE)
+ set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, sta_selectors);
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT)
+ set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, sta_selectors);
+
+ /*
+ * We do not support EPD or GLK so never add them.
+ * SAE_H2E is handled through userspace_selectors.
+ */
+
+ /* Check if we support all required features */
+ if (!bitmap_subset(unknown_rates_selectors, sta_selectors, 128)) {
+ link_id_info(sdata, link_id,
+ "required basic rate or BSS membership selectors not supported or disabled, rejecting connection\n");
+ ret = -EINVAL;
+ goto free;
+ }
+
+ ieee80211_set_chanreq_ap(sdata, chanreq, conn, ap_chandef);
+
+ while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
+ IEEE80211_CHAN_DISABLED)) {
+ if (WARN_ON(chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ ieee80211_chanreq_downgrade(chanreq, conn);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_HE)) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_EHT)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (chanreq->oper.width != ap_chandef->width || ap_mode != conn->mode)
+ link_id_info(sdata, link_id,
+ "regulatory prevented using AP config, downgraded\n");
+
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ (!ieee80211_verify_peer_he_mcs_support(sdata, link_id,
+ (void *)elems->he_cap,
+ elems->he_operation) ||
+ !ieee80211_verify_sta_he_mcs_support(sdata, sband,
+ elems->he_operation))) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ link_id_info(sdata, link_id,
+ "required MCSes not supported, disabling HE\n");
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !ieee80211_verify_sta_eht_mcs_support(sdata, sband,
+ elems->eht_operation)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ link_id_info(sdata, link_id,
+ "required MCSes not supported, disabling EHT\n");
+ }
+
+ /* the mode can only decrease, so this must terminate */
+ if (ap_mode != conn->mode) {
+ kfree(elems);
+ goto again;
+ }
+
+ mlme_link_id_dbg(sdata, link_id,
+ "connecting with %s mode, max bandwidth %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
+
+ if (WARN_ON_ONCE(!cfg80211_chandef_valid(&chanreq->oper))) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return elems;
+free:
+ kfree(elems);
+ return ERR_PTR(ret);
}
static int ieee80211_config_bw(struct ieee80211_link_data *link,
- const struct ieee80211_ht_cap *ht_cap,
- const struct ieee80211_vht_cap *vht_cap,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- const u8 *bssid, u64 *changed)
+ struct ieee802_11_elems *elems,
+ bool update, u64 *changed,
+ const char *frame)
{
+ struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_channel *chan = link->conf->chandef.chan;
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[chan->band];
- struct cfg80211_chan_def chandef;
- u16 ht_opmode;
- ieee80211_conn_flags_t flags;
+ struct ieee80211_chan_req chanreq = {};
+ struct cfg80211_chan_def ap_chandef;
+ enum ieee80211_conn_mode ap_mode;
u32 vht_cap_info = 0;
+ u16 ht_opmode;
int ret;
- /* if HT was/is disabled, don't track any bandwidth changes */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper)
+ /* don't track any bandwidth changes in legacy/S1G modes */
+ if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY ||
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G)
return 0;
- /* don't check VHT if we associated as non-VHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
- vht_oper = NULL;
+ if (elems->vht_cap_elem)
+ vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info,
+ elems, true, &link->u.mgd.conn,
+ &ap_chandef);
- /* don't check HE if we associated as non-HE station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
- !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- he_oper = NULL;
- eht_oper = NULL;
+ if (ap_mode != link->u.mgd.conn.mode) {
+ link_info(link,
+ "AP %pM appears to change mode (expected %s, found %s) in %s, disconnect\n",
+ link->u.mgd.bssid,
+ ieee80211_conn_mode_str(link->u.mgd.conn.mode),
+ ieee80211_conn_mode_str(ap_mode), frame);
+ return -EINVAL;
}
- /* don't check EHT if we associated as non-EHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
- !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
- eht_oper = NULL;
+ chanreq.oper = ap_chandef;
+ ieee80211_set_chanreq_ap(sdata, &chanreq, &link->u.mgd.conn,
+ &ap_chandef);
/*
- * if bss configuration changed store the new one -
+ * if HT operation mode changed store the new one -
* this may be applicable even if channel is identical
*/
- ht_opmode = le16_to_cpu(ht_oper->operation_mode);
- if (link->conf->ht_operation_mode != ht_opmode) {
- *changed |= BSS_CHANGED_HT;
- link->conf->ht_operation_mode = ht_opmode;
+ if (elems->ht_operation) {
+ ht_opmode = le16_to_cpu(elems->ht_operation->operation_mode);
+ if (link->conf->ht_operation_mode != ht_opmode) {
+ *changed |= BSS_CHANGED_HT;
+ link->conf->ht_operation_mode = ht_opmode;
+ }
}
- if (vht_cap)
- vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info);
-
- /* calculate new channel (type) based on HT/VHT/HE operation IEs */
- flags = ieee80211_determine_chantype(sdata, link,
- link->u.mgd.conn_flags,
- sband, chan, vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper, &chandef, true);
-
/*
* Downgrade the new channel if we associated with restricted
- * capabilities. For example, if we associated as a 20 MHz STA
- * to a 40 MHz AP (due to regulatory, capabilities or config
- * reasons) then switching to a 40 MHz channel now won't do us
- * any good -- we couldn't use it with the AP.
+ * bandwidth capabilities. For example, if we associated as a
+ * 20 MHz STA to a 40 MHz AP (due to regulatory, capabilities
+ * or config reasons) then switching to a 40 MHz channel now
+ * won't do us any good -- we couldn't use it with the AP.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_80P80)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_160)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ &&
- chandef.width > NL80211_CHAN_WIDTH_20)
- flags |= ieee80211_chandef_downgrade(&chandef);
-
- if (cfg80211_chandef_identical(&chandef, &link->conf->chandef))
+ while (link->u.mgd.conn.bw_limit <
+ ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
+ ieee80211_chandef_downgrade(&chanreq.oper, NULL);
+
+ if (ap_chandef.chan->band == NL80211_BAND_6GHZ &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_rearrange_tpe(&elems->tpe, &ap_chandef,
+ &chanreq.oper);
+ if (memcmp(&link->conf->tpe, &elems->tpe, sizeof(elems->tpe))) {
+ link->conf->tpe = elems->tpe;
+ *changed |= BSS_CHANGED_TPE;
+ }
+ }
+
+ if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq))
return 0;
link_info(link,
- "AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
- link->u.mgd.bssid, chandef.chan->center_freq,
- chandef.chan->freq_offset, chandef.width,
- chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
-
- if (flags != (link->u.mgd.conn_flags &
- (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT |
- IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ |
- IEEE80211_CONN_DISABLE_320MHZ)) ||
- !cfg80211_chandef_valid(&chandef)) {
+ "AP %pM changed bandwidth in %s, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
+ link->u.mgd.bssid, frame, chanreq.oper.chan->center_freq,
+ chanreq.oper.chan->freq_offset, chanreq.oper.width,
+ chanreq.oper.center_freq1, chanreq.oper.freq1_offset,
+ chanreq.oper.center_freq2);
+
+ if (!cfg80211_chandef_valid(&chanreq.oper)) {
sdata_info(sdata,
- "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
- link->u.mgd.bssid, flags, ifmgd->flags);
+ "AP %pM changed caps/bw in %s in a way we can't support - disconnect\n",
+ link->u.mgd.bssid, frame);
return -EINVAL;
}
- ret = ieee80211_link_change_bandwidth(link, &chandef, changed);
+ if (!update) {
+ link->conf->chanreq = chanreq;
+ return 0;
+ }
+ /*
+ * We're tracking the current AP here, so don't do any further checks
+ * here. This keeps us from playing ping-pong with regulatory, without
+ * it the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - we detect an unsupported channel and disconnect
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+
+ ret = ieee80211_link_change_chanreq(link, &chanreq, changed);
if (ret) {
sdata_info(sdata,
- "AP %pM changed bandwidth to incompatible one - disconnect\n",
- link->u.mgd.bssid);
+ "AP %pM changed bandwidth in %s to incompatible one - disconnect\n",
+ link->u.mgd.bssid, frame);
return ret;
}
@@ -612,7 +1213,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
u8 *pos;
u32 flags = channel->flags;
@@ -647,7 +1248,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
* capable of 40 MHz -- some broken APs will never fall
* back to trying to transmit in 20 MHz.
*/
- if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_20) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
@@ -686,7 +1287,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
u8 *pos;
@@ -703,16 +1304,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
/* determine capability flags */
cap = vht_cap.cap;
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) {
- u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
- bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
- cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- }
-
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_80) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
@@ -740,7 +1332,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
bool disable_mu_mimo = false;
struct ieee80211_sub_if_data *other;
- list_for_each_entry_rcu(other, &local->interfaces, list) {
+ list_for_each_entry(other, &local->interfaces, list) {
if (other->vif.bss_conf.mu_mimo_owner) {
disable_mu_mimo = true;
break;
@@ -769,79 +1361,12 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
return mu_mimo_owner;
}
-/* This function determines HE capability flags for the association
- * and builds the IE.
- */
-static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
- enum ieee80211_smps_mode smps_mode,
- ieee80211_conn_flags_t conn_flags)
-{
- u8 *pos, *pre_he_pos;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 he_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (WARN_ON(!he_cap))
- return;
-
- /* get a max size estimate */
- he_cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, he_cap_size);
- pre_he_pos = pos;
- pos = ieee80211_ie_build_he_cap(conn_flags,
- pos, he_cap, pos + he_cap_size);
- /* trim excess if any */
- skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
-
- ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb);
-}
-
-static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband)
-{
- u8 *pos;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 eht_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- /*
- * EHT capabilities element is only added if the HE capabilities element
- * was added so assume that 'he_cap' is valid and don't check it.
- */
- if (WARN_ON(!he_cap || !eht_cap))
- return;
-
- eht_cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, eht_cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size,
- false);
-}
-
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
- unsigned int rates_len, supp_rates_len;
- u32 rates = 0;
- int i, count;
- u8 *pos;
+ u32 rates;
if (assoc_data->supp_rates_len) {
/*
@@ -850,53 +1375,23 @@ static void ieee80211_assoc_add_rates(struct sk_buff *skb,
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_parse_bitrates(width, sband,
- assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- &rates);
+ ieee80211_parse_bitrates(width, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP did not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
- rates_len = sband->n_bitrates;
- for (i = 0; i < sband->n_bitrates; i++)
- rates |= BIT(i);
- }
-
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- if (++count == 8)
- break;
- }
+ rates = ~0;
}
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate;
-
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- }
- }
- }
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_EXT_SUPP_RATES);
}
static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb,
@@ -1016,23 +1511,25 @@ static size_t ieee80211_add_before_he_elems(struct sk_buff *skb,
#define PRESENT_ELEMS_MAX 8
#define PRESENT_ELEM_EXT_OFFS 0x100
-static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, u16 capab,
- const struct element *ext_capa,
- const u16 *present_elems);
-
-static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, u16 *capab,
- const struct element *ext_capa,
- const u8 *extra_elems,
- size_t extra_elems_len,
- unsigned int link_id,
- struct ieee80211_link_data *link,
- u16 *present_elems)
+static void
+ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u16 capab,
+ const struct element *ext_capa,
+ const u16 *present_elems,
+ struct ieee80211_mgd_assoc_data *assoc_data);
+
+static size_t
+ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u16 *capab,
+ const struct element *ext_capa,
+ const u8 *extra_elems,
+ size_t extra_elems_len,
+ unsigned int link_id,
+ struct ieee80211_link_data *link,
+ u16 *present_elems,
+ struct ieee80211_mgd_assoc_data *assoc_data)
{
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_channel *chan = cbss->channel;
const struct ieee80211_sband_iftype_data *iftd;
@@ -1133,11 +1630,11 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HT) {
ieee80211_add_ht_ie(sdata, skb,
assoc_data->link[link_id].ap_ht_param,
sband, chan, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY);
}
@@ -1147,37 +1644,28 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_VHT &&
+ sband->vht_cap.vht_supported) {
bool mu_mimo_owner =
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->link[link_id].ap_vht_cap,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
if (link)
link->conf->mu_mimo_owner = mu_mimo_owner;
ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY);
}
- /*
- * If AP doesn't support HT, mark HE and EHT as disabled.
- * If on the 5GHz band, make sure it supports VHT.
- */
- if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT ||
- (sband->band == NL80211_BAND_5GHZ &&
- assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT))
- assoc_data->link[link_id].conn_flags |=
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
-
/* if present, add any custom IEs that go before HE */
offset = ieee80211_add_before_he_elems(skb, extra_elems,
extra_elems_len,
offset);
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- ieee80211_add_he_ie(sdata, skb, sband, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_put_he_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
}
/*
@@ -1185,18 +1673,19 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
* calling ieee80211_assoc_add_ml_elem(), so add this one if
* we're going to put it after the ML element
*/
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY);
if (link_id == assoc_data->assoc_link_id)
ieee80211_assoc_add_ml_elem(sdata, skb, orig_capab, ext_capa,
- present_elems);
+ present_elems, assoc_data);
/* crash if somebody gets it wrong */
present_elems = NULL;
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
- ieee80211_add_eht_ie(sdata, skb, sband);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
+ ieee80211_put_eht_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
if (sband->band == NL80211_BAND_S1GHZ) {
ieee80211_add_aid_request_ie(sdata, skb);
@@ -1206,9 +1695,6 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
- if (link)
- link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags;
-
return offset;
}
@@ -1271,14 +1757,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
*len = skb->len - skb_len - 2;
}
-static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, u16 capab,
- const struct element *ext_capa,
- const u16 *outer_present_elems)
+static void
+ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u16 capab,
+ const struct element *ext_capa,
+ const u16 *outer_present_elems,
+ struct ieee80211_mgd_assoc_data *assoc_data)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct ieee80211_multi_link_elem *ml_elem;
struct ieee80211_mle_basic_common_info *common;
const struct wiphy_iftype_ext_capab *ift_ext_capa;
@@ -1318,8 +1804,6 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA);
skb_put_data(skb, &eml_capa, sizeof(eml_capa));
}
- /* need indication from userspace to support this */
- mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP);
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -1353,16 +1837,17 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
* (if applicable) are skipped. So we only have
* the capability field (remember the position and fill
* later), followed by the elements added below by
- * calling ieee80211_assoc_link_elems().
+ * calling ieee80211_add_link_elems().
*/
capab_pos = skb_put(skb, 2);
- extra_used = ieee80211_assoc_link_elems(sdata, skb, &capab,
- ext_capa,
- extra_elems,
- extra_elems_len,
- link_id, NULL,
- link_present_elems);
+ extra_used = ieee80211_add_link_elems(sdata, skb, &capab,
+ ext_capa,
+ extra_elems,
+ extra_elems_len,
+ link_id, NULL,
+ link_present_elems,
+ assoc_data);
if (extra_elems)
skb_put_data(skb, extra_elems + extra_used,
extra_elems_len - extra_used);
@@ -1379,6 +1864,55 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
}
+static int
+ieee80211_link_common_elems_size(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype iftype,
+ struct cfg80211_bss *cbss,
+ size_t elems_len)
+{
+ struct ieee80211_local *local = sdata->local;
+ const struct ieee80211_sband_iftype_data *iftd;
+ struct ieee80211_supported_band *sband;
+ size_t size = 0;
+
+ if (!cbss)
+ return size;
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ /* add STA profile elements length */
+ size += elems_len;
+
+ /* and supported rates length */
+ size += 4 + sband->n_bitrates;
+
+ /* supported channels */
+ size += 2 + 2 * sband->n_channels;
+
+ iftd = ieee80211_get_sband_iftype_data(sband, iftype);
+ if (iftd)
+ size += iftd->vendor_elems.len;
+
+ /* power capability */
+ size += 4;
+
+ /* HT, VHT, HE, EHT */
+ size += 2 + sizeof(struct ieee80211_ht_cap);
+ size += 2 + sizeof(struct ieee80211_vht_cap);
+ size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
+ sizeof(struct ieee80211_he_mcs_nss_supp) +
+ IEEE80211_HE_PPE_THRES_MAX_LEN;
+
+ if (sband->band == NL80211_BAND_6GHZ)
+ size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
+
+ size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
+ sizeof(struct ieee80211_eht_mcs_nss_supp) +
+ IEEE80211_EHT_PPE_THRES_MAX_LEN;
+
+ return size;
+}
+
static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -1417,42 +1951,15 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
- const struct ieee80211_sband_iftype_data *iftd;
- struct ieee80211_supported_band *sband;
+ size_t elems_len = assoc_data->link[link_id].elems_len;
if (!cbss)
continue;
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
n_links++;
- /* add STA profile elements length */
- size += assoc_data->link[link_id].elems_len;
- /* and supported rates length */
- size += 4 + sband->n_bitrates;
- /* supported channels */
- size += 2 + 2 * sband->n_channels;
-
- iftd = ieee80211_get_sband_iftype_data(sband, iftype);
- if (iftd)
- size += iftd->vendor_elems.len;
-
- /* power capability */
- size += 4;
-
- /* HT, VHT, HE, EHT */
- size += 2 + sizeof(struct ieee80211_ht_cap);
- size += 2 + sizeof(struct ieee80211_vht_cap);
- size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
- sizeof(struct ieee80211_he_mcs_nss_supp) +
- IEEE80211_HE_PPE_THRES_MAX_LEN;
-
- if (sband->band == NL80211_BAND_6GHZ)
- size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
-
- size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
- sizeof(struct ieee80211_eht_mcs_nss_supp) +
- IEEE80211_EHT_PPE_THRES_MAX_LEN;
+
+ size += ieee80211_link_common_elems_size(sdata, iftype, cbss,
+ elems_len);
/* non-inheritance element */
size += 2 + 2 + PRESENT_ELEMS_MAX;
@@ -1499,7 +2006,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
ext_capa && ext_capa->datalen >= 3)
ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
@@ -1544,17 +2051,17 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* for some reason check it and want it to be set, set the bit for all
* pre-EHT connections as we used to do.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
capab |= WLAN_CAPABILITY_ESS;
/* add the elements for the assoc (main) link */
link_capab = capab;
- offset = ieee80211_assoc_link_elems(sdata, skb, &link_capab,
- ext_capa,
- assoc_data->ie,
- assoc_data->ie_len,
- assoc_data->assoc_link_id, link,
- present_elems);
+ offset = ieee80211_add_link_elems(sdata, skb, &link_capab,
+ ext_capa,
+ assoc_data->ie,
+ assoc_data->ie_len,
+ assoc_data->assoc_link_id, link,
+ present_elems, assoc_data);
put_unaligned_le16(link_capab, capab_pos);
/* if present, add any custom non-vendor IEs */
@@ -1692,12 +2199,12 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
}
/* spectrum management related things */
-static void ieee80211_chswitch_work(struct wiphy *wiphy,
- struct wiphy_work *work)
+static void ieee80211_csa_switch_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
{
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
- u.mgd.chswitch_work.work);
+ u.mgd.csa.switch_work.work);
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1715,6 +2222,18 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
return;
/*
+ * If the link isn't active (now), we cannot wait for beacons, won't
+ * have a reserved chanctx, etc. Just switch over the chandef and
+ * update cfg80211 directly.
+ */
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ link->conf->chanreq = link->csa.chanreq;
+ cfg80211_ch_switch_notify(sdata->dev, &link->csa.chanreq.oper,
+ link->link_id);
+ return;
+ }
+
+ /*
* using reservation isn't immediate as it may be deferred until later
* with multi-vif. once reservation is complete it will re-schedule the
* work with no reserved_chanctx so verify chandef to check if it
@@ -1732,25 +2251,39 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
ret = ieee80211_link_use_reserved_context(link);
if (ret) {
- sdata_info(sdata,
- "failed to use reserved channel context, disconnecting (err=%d)\n",
- ret);
+ link_info(link,
+ "failed to use reserved channel context, disconnecting (err=%d)\n",
+ ret);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
}
return;
}
- if (!cfg80211_chandef_identical(&link->conf->chandef,
- &link->csa_chandef)) {
- sdata_info(sdata,
- "failed to finalize channel switch, disconnecting\n");
+ if (!ieee80211_chanreq_identical(&link->conf->chanreq,
+ &link->csa.chanreq)) {
+ link_info(link,
+ "failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
return;
}
- link->u.mgd.csa_waiting_bcn = true;
+ link->u.mgd.csa.waiting_bcn = true;
+
+ /* apply new TPE restrictions immediately on the new channel */
+ if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_rearrange_tpe(&link->u.mgd.csa.tpe,
+ &link->u.mgd.csa.ap_chandef,
+ &link->conf->chanreq.oper);
+ if (memcmp(&link->conf->tpe, &link->u.mgd.csa.tpe,
+ sizeof(link->u.mgd.csa.tpe))) {
+ link->conf->tpe = link->u.mgd.csa.tpe;
+ ieee80211_link_info_change_notify(sdata, link,
+ BSS_CHANGED_TPE);
+ }
+ }
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
@@ -1759,7 +2292,6 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int ret;
@@ -1767,31 +2299,23 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
WARN_ON(!link->conf->csa_active);
- if (link->csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
link->conf->csa_active = false;
- link->u.mgd.csa_waiting_bcn = false;
- /*
- * If the CSA IE is still present on the beacon after the switch,
- * we need to consider it as a new CSA (possibly to self).
- */
- link->u.mgd.beacon_crc_valid = false;
+ link->u.mgd.csa.blocked_tx = false;
+ link->u.mgd.csa.waiting_bcn = false;
ret = drv_post_channel_switch(link);
if (ret) {
- sdata_info(sdata,
- "driver post channel switch failed, disconnecting\n");
+ link_info(link,
+ "driver post channel switch failed, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef,
- link->link_id, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &link->conf->chanreq.oper,
+ link->link_id);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
@@ -1805,7 +2329,8 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
if (!success) {
sdata_info(sdata,
- "driver channel switch failed, disconnecting\n");
+ "driver channel switch failed (link %d), disconnecting\n",
+ link_id);
wiphy_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.csa_connection_drop_work);
} else {
@@ -1818,7 +2343,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
}
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work, 0);
+ &link->u.mgd.csa.switch_work, 0);
}
rcu_read_unlock();
@@ -1838,175 +2363,394 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
- if (link->csa_block_tx)
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
+ ieee80211_vif_unblock_queues_csa(sdata);
- link->csa_block_tx = false;
link->conf->csa_active = false;
+ link->u.mgd.csa.blocked_tx = false;
- drv_abort_channel_switch(sdata);
+ drv_abort_channel_switch(link);
}
+struct sta_csa_rnr_iter_data {
+ struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ u8 mld_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+ieee80211_sta_csa_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct sta_csa_rnr_iter_data *data = _data;
+ struct ieee80211_link_data *link = data->link;
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const struct ieee80211_tbtt_info_ge_11 *ti;
+ enum nl80211_band band;
+ unsigned int center_freq;
+ int link_id;
+
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
+
+ if (tbtt_info_len < sizeof(*ti))
+ return RNR_ITER_CONTINUE;
+
+ ti = (const void *)tbtt_info;
+
+ if (ti->mld_params.mld_id != data->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(ti->mld_params.params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ if (link_id != data->link->link_id)
+ return RNR_ITER_CONTINUE;
+
+ /* we found the entry for our link! */
+
+ /* this AP is confused, it had this right before ... just disconnect */
+ if (!ieee80211_operating_class_to_band(info->op_class, &band)) {
+ link_info(link,
+ "AP now has invalid operating class in RNR, disconnect\n");
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &ifmgd->csa_connection_drop_work);
+ return RNR_ITER_BREAK;
+ }
+
+ center_freq = ieee80211_channel_to_frequency(info->channel, band);
+ data->chan = ieee80211_get_channel(sdata->local->hw.wiphy, center_freq);
+
+ return RNR_ITER_BREAK;
+}
+
+static void
+ieee80211_sta_other_link_csa_disappeared(struct ieee80211_link_data *link,
+ struct ieee802_11_elems *elems)
+{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sta_csa_rnr_iter_data data = {
+ .link = link,
+ };
+
+ /*
+ * If we get here, we see a beacon from another link without
+ * CSA still being reported for it, so now we have to check
+ * if the CSA was aborted or completed. This may not even be
+ * perfectly possible if the CSA was only done for changing
+ * the puncturing, but in that case if the link is inactive
+ * we don't really care, and if it's an active link (or when
+ * it's activated later) we'll get a beacon and adjust.
+ */
+
+ if (WARN_ON(!elems->ml_basic))
+ return;
+
+ data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic);
+
+ /*
+ * So in order to do this, iterate the RNR element(s) and see
+ * what channel is reported now.
+ */
+ cfg80211_iter_rnr(elems->ie_start, elems->total_len,
+ ieee80211_sta_csa_rnr_iter, &data);
+
+ if (!data.chan) {
+ link_info(link,
+ "couldn't find (valid) channel in RNR for CSA, disconnect\n");
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &ifmgd->csa_connection_drop_work);
+ return;
+ }
+
+ /*
+ * If it doesn't match the CSA, then assume it aborted. This
+ * may erroneously detect that it was _not_ aborted when it
+ * was in fact aborted, but only changed the bandwidth or the
+ * puncturing configuration, but we don't have enough data to
+ * detect that.
+ */
+ if (data.chan != link->csa.chanreq.oper.chan)
+ ieee80211_sta_abort_chanswitch(link);
+}
+
+enum ieee80211_csa_source {
+ IEEE80211_CSA_SOURCE_BEACON,
+ IEEE80211_CSA_SOURCE_OTHER_LINK,
+ IEEE80211_CSA_SOURCE_PROT_ACTION,
+ IEEE80211_CSA_SOURCE_UNPROT_ACTION,
+};
+
static void
ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
u64 timestamp, u32 device_timestamp,
- struct ieee802_11_elems *elems,
- bool beacon)
+ struct ieee802_11_elems *full_elems,
+ struct ieee802_11_elems *csa_elems,
+ enum ieee80211_csa_source source)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = link->u.mgd.bss;
+ struct ieee80211_chanctx *chanctx = NULL;
struct ieee80211_chanctx_conf *conf;
- struct ieee80211_chanctx *chanctx;
- enum nl80211_band current_band;
- struct ieee80211_csa_ie csa_ie;
- struct ieee80211_channel_switch ch_switch;
- struct ieee80211_bss *bss;
- unsigned long timeout;
+ struct ieee80211_csa_ie csa_ie = {};
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = link->link_id,
+ .timestamp = timestamp,
+ .device_timestamp = device_timestamp,
+ };
+ unsigned long now;
int res;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cbss)
- return;
+ if (csa_elems) {
+ struct cfg80211_bss *cbss = link->conf->bss;
+ enum nl80211_band current_band;
+ struct ieee80211_bss *bss;
- current_band = cbss->channel->band;
- bss = (void *)cbss->priv;
- res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
- bss->vht_cap_info,
- link->u.mgd.conn_flags,
- link->u.mgd.bssid, &csa_ie);
+ if (WARN_ON(!cbss))
+ return;
- if (!res) {
- ch_switch.timestamp = timestamp;
- ch_switch.device_timestamp = device_timestamp;
- ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chandef;
- ch_switch.count = csa_ie.count;
- ch_switch.delay = csa_ie.max_switch_time;
+ current_band = cbss->channel->band;
+ bss = (void *)cbss->priv;
+
+ res = ieee80211_parse_ch_switch_ie(sdata, csa_elems,
+ current_band,
+ bss->vht_cap_info,
+ &link->u.mgd.conn,
+ link->u.mgd.bssid,
+ source == IEEE80211_CSA_SOURCE_UNPROT_ACTION,
+ &csa_ie);
+ if (res == 0) {
+ ch_switch.block_tx = csa_ie.mode;
+ ch_switch.chandef = csa_ie.chanreq.oper;
+ ch_switch.count = csa_ie.count;
+ ch_switch.delay = csa_ie.max_switch_time;
+ }
+
+ link->u.mgd.csa.tpe = csa_elems->csa_tpe;
+ } else {
+ /*
+ * If there was no per-STA profile for this link, we
+ * get called with csa_elems == NULL. This of course means
+ * there are no CSA elements, so set res=1 indicating
+ * no more CSA.
+ */
+ res = 1;
}
- if (res < 0)
+ if (res < 0) {
+ /* ignore this case, not a protected frame */
+ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION)
+ return;
goto drop_connection;
+ }
- if (beacon && link->conf->csa_active &&
- !link->u.mgd.csa_waiting_bcn) {
- if (res)
- ieee80211_sta_abort_chanswitch(link);
- else
- drv_channel_switch_rx_beacon(sdata, &ch_switch);
+ if (link->conf->csa_active) {
+ switch (source) {
+ case IEEE80211_CSA_SOURCE_PROT_ACTION:
+ case IEEE80211_CSA_SOURCE_UNPROT_ACTION:
+ /* already processing - disregard action frames */
+ return;
+ case IEEE80211_CSA_SOURCE_BEACON:
+ if (link->u.mgd.csa.waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA is still present after the switch
+ * we need to consider it as a new CSA (possibly
+ * to self). This happens by not returning here
+ * so we'll get to the check below.
+ */
+ } else if (res) {
+ ieee80211_sta_abort_chanswitch(link);
+ return;
+ } else {
+ drv_channel_switch_rx_beacon(sdata, &ch_switch);
+ return;
+ }
+ break;
+ case IEEE80211_CSA_SOURCE_OTHER_LINK:
+ /* active link: we want to see the beacon to continue */
+ if (ieee80211_vif_link_active(&sdata->vif,
+ link->link_id))
+ return;
+
+ /* switch work ran, so just complete the process */
+ if (link->u.mgd.csa.waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA is still present after the switch
+ * we need to consider it as a new CSA (possibly
+ * to self). This happens by not returning here
+ * so we'll get to the check below.
+ */
+ break;
+ }
+
+ /* link still has CSA but we already know, do nothing */
+ if (!res)
+ return;
+
+ /* check in the RNR if the CSA aborted */
+ ieee80211_sta_other_link_csa_disappeared(link,
+ full_elems);
+ return;
+ }
+ }
+
+ /* no active CSA nor a new one */
+ if (res) {
+ /*
+ * However, we may have stopped queues when receiving a public
+ * action frame that couldn't be protected, if it had the quiet
+ * bit set. This is a trade-off: we want to be quiet as soon as
+ * possible, but we also don't trust the public action frame much,
+ * as it can't be protected.
+ */
+ if (unlikely(link->u.mgd.csa.blocked_tx)) {
+ link->u.mgd.csa.blocked_tx = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
+ }
return;
- } else if (link->conf->csa_active || res) {
- /* disregard subsequent announcements if already processing */
+ }
+
+ /*
+ * We don't really trust public action frames, but block queues (go to
+ * quiet mode) for them anyway; we should get a beacon soon to either
+ * know what the CSA really is, or figure out the public action frame
+ * was actually an attack.
+ */
+ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) {
+ if (csa_ie.mode) {
+ link->u.mgd.csa.blocked_tx = true;
+ ieee80211_vif_block_queues_csa(sdata);
+ }
return;
}
- if (link->conf->chandef.chan->band !=
- csa_ie.chandef.chan->band) {
- sdata_info(sdata,
- "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
- link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.center_freq2);
+ if (link->conf->chanreq.oper.chan->band !=
+ csa_ie.chanreq.oper.chan->band) {
+ link_info(link,
+ "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+ link->u.mgd.bssid,
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
+ if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper,
IEEE80211_CHAN_DISABLED)) {
- sdata_info(sdata,
- "AP %pM switches to unsupported channel "
- "(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
- "disconnecting\n",
- link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.chan->freq_offset,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.freq1_offset,
- csa_ie.chandef.center_freq2);
+ link_info(link,
+ "AP %pM switches to unsupported channel (%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), disconnecting\n",
+ link->u.mgd.bssid,
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.chan->freq_offset,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.freq1_offset,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (cfg80211_chandef_identical(&csa_ie.chandef,
- &link->conf->chandef) &&
- (!csa_ie.mode || !beacon)) {
- if (link->u.mgd.csa_ignored_same_chan)
+ if (cfg80211_chandef_identical(&csa_ie.chanreq.oper,
+ &link->conf->chanreq.oper) &&
+ (!csa_ie.mode || source != IEEE80211_CSA_SOURCE_BEACON)) {
+ if (link->u.mgd.csa.ignored_same_chan)
return;
- sdata_info(sdata,
- "AP %pM tries to chanswitch to same channel, ignore\n",
- link->u.mgd.bssid);
- link->u.mgd.csa_ignored_same_chan = true;
+ link_info(link,
+ "AP %pM tries to chanswitch to same channel, ignore\n",
+ link->u.mgd.bssid);
+ link->u.mgd.csa.ignored_same_chan = true;
return;
}
/*
- * Drop all TDLS peers - either we disconnect or move to a different
- * channel from this point on. There's no telling what our peer will do.
+ * Drop all TDLS peers on the affected link - either we disconnect or
+ * move to a different channel from this point on. There's no telling
+ * what our peer will do.
* The TDLS WIDER_BW scenario is also problematic, as peers might now
* have an incompatible wider chandef.
*/
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
- if (!conf) {
- sdata_info(sdata,
- "no channel context assigned to vif?, disconnecting\n");
+ if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && !conf) {
+ link_info(link,
+ "no channel context assigned to vif?, disconnecting\n");
goto drop_connection;
}
- chanctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (conf)
+ chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (local->use_chanctx &&
- !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
- sdata_info(sdata,
- "driver doesn't support chan-switch with channel contexts\n");
+ if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
+ link_info(link,
+ "driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
}
if (drv_pre_channel_switch(sdata, &ch_switch)) {
- sdata_info(sdata,
- "preparing for channel switch failed, disconnecting\n");
+ link_info(link,
+ "preparing for channel switch failed, disconnecting\n");
goto drop_connection;
}
- res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef,
- chanctx->mode, false);
- if (res) {
- sdata_info(sdata,
- "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
- res);
- goto drop_connection;
+ link->u.mgd.csa.ap_chandef = csa_ie.chanreq.ap;
+
+ link->csa.chanreq.oper = csa_ie.chanreq.oper;
+ ieee80211_set_chanreq_ap(sdata, &link->csa.chanreq, &link->u.mgd.conn,
+ &csa_ie.chanreq.ap);
+
+ if (chanctx) {
+ res = ieee80211_link_reserve_chanctx(link, &link->csa.chanreq,
+ chanctx->mode, false);
+ if (res) {
+ link_info(link,
+ "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
+ res);
+ goto drop_connection;
+ }
}
link->conf->csa_active = true;
- link->csa_chandef = csa_ie.chandef;
- link->csa_block_tx = csa_ie.mode;
- link->u.mgd.csa_ignored_same_chan = false;
+ link->u.mgd.csa.ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
+ link->u.mgd.csa.blocked_tx = csa_ie.mode;
- if (link->csa_block_tx)
- ieee80211_stop_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
+ if (csa_ie.mode)
+ ieee80211_vif_block_queues_csa(sdata);
- cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+ cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
link->link_id, csa_ie.count,
- csa_ie.mode, 0);
+ csa_ie.mode);
+
+ /* we may have to handle the timeout for a deactivated link in software */
+ now = jiffies;
+ link->u.mgd.csa.time = now +
+ TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
+ link->conf->beacon_int);
- if (local->ops->channel_switch) {
- /* use driver's channel switch callback */
+ if (ieee80211_vif_link_active(&sdata->vif, link->link_id) &&
+ local->ops->channel_switch) {
+ /*
+ * Use the driver's channel switch callback; the driver will
+ * later call ieee80211_chswitch_done(). It may deactivate
+ * the link as well; we handle that elsewhere and queue
+ * the csa.switch_work for the calculated time then.
+ */
drv_channel_switch(local, sdata, &ch_switch);
return;
}
/* channel switch handled in software */
- timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
- cbss->beacon_interval);
wiphy_delayed_work_queue(local->hw.wiphy,
- &link->u.mgd.chswitch_work,
- timeout);
+ &link->u.mgd.csa.switch_work,
+ link->u.mgd.csa.time - now);
return;
drop_connection:
/*
@@ -2017,15 +2761,97 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
- link->csa_block_tx = csa_ie.mode;
+ link->u.mgd.csa.blocked_tx = csa_ie.mode;
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
}
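For reference, the software-handled path above arms csa.switch_work (count - 1) beacon intervals ahead, converting TUs to jiffies. A minimal sketch of that arithmetic, assuming mac80211's TU_TO_JIFFIES() treats 1 TU as 1024 usec (as in ieee80211_i.h); the helper name is made up purely for illustration:

/* Illustrative sketch only, not part of the patch. */
static unsigned long csa_sw_switch_delay(u8 count, u16 beacon_int_tu)
{
	/* the switch is expected one beacon before the count reaches 0 */
	unsigned int tus = (max_t(int, count, 1) - 1) * beacon_int_tu;

	/* e.g. count == 10, beacon_int == 100 TU -> 900 TU ~= 921.6 ms */
	return TU_TO_JIFFIES(tus);
}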
+struct sta_bss_param_ch_cnt_data {
+ struct ieee80211_sub_if_data *sdata;
+ u8 reporting_link_id;
+ u8 mld_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+ieee80211_sta_bss_param_ch_cnt_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct sta_bss_param_ch_cnt_data *data = _data;
+ struct ieee80211_sub_if_data *sdata = data->sdata;
+ const struct ieee80211_tbtt_info_ge_11 *ti;
+ u8 bss_param_ch_cnt;
+ int link_id;
+
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
+
+ if (tbtt_info_len < sizeof(*ti))
+ return RNR_ITER_CONTINUE;
+
+ ti = (const void *)tbtt_info;
+
+ if (ti->mld_params.mld_id != data->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(ti->mld_params.params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ bss_param_ch_cnt =
+ le16_get_bits(ti->mld_params.params,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ if (bss_param_ch_cnt != 255 &&
+ link_id < ARRAY_SIZE(sdata->link)) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
+
+ if (link && link->conf->bss_param_ch_cnt != bss_param_ch_cnt) {
+ link->conf->bss_param_ch_cnt = bss_param_ch_cnt;
+ link->conf->bss_param_ch_cnt_link_id =
+ data->reporting_link_id;
+ }
+ }
+
+ return RNR_ITER_CONTINUE;
+}
+
+static void
+ieee80211_mgd_update_bss_param_ch_cnt(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee802_11_elems *elems)
+{
+ struct sta_bss_param_ch_cnt_data data = {
+ .reporting_link_id = bss_conf->link_id,
+ .sdata = sdata,
+ };
+ int bss_param_ch_cnt;
+
+ if (!elems->ml_basic)
+ return;
+
+ data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic);
+
+ cfg80211_iter_rnr(elems->ie_start, elems->total_len,
+ ieee80211_sta_bss_param_ch_cnt_iter, &data);
+
+ bss_param_ch_cnt =
+ ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic);
+
+ /*
+ * Update bss_param_ch_cnt_link_id even if bss_param_ch_cnt
+ * didn't change, to indicate that we got a beacon on our own
+ * link.
+ */
+ if (bss_param_ch_cnt >= 0 && bss_param_ch_cnt != 255) {
+ bss_conf->bss_param_ch_cnt = bss_param_ch_cnt;
+ bss_conf->bss_param_ch_cnt_link_id =
+ bss_conf->link_id;
+ }
+}
+
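As an aside, the RNR iterator above unpacks the MLD-parameters subfields with le16_get_bits(); below is a tiny round-trip sketch using made-up values, assuming the IEEE80211_RNR_MLD_PARAMS_* masks are ordinary bitfield.h field masks as used in the iterator:

	__le16 params;
	u16 link_id, ch_cnt;

	/* illustration only: pack, then unpack, hypothetical values */
	params = le16_encode_bits(2, IEEE80211_RNR_MLD_PARAMS_LINK_ID) |
		 le16_encode_bits(7, IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);

	link_id = le16_get_bits(params, IEEE80211_RNR_MLD_PARAMS_LINK_ID);            /* -> 2 */
	ch_cnt = le16_get_bits(params, IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);    /* -> 7 */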
static bool
-ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *channel,
+ieee80211_find_80211h_pwr_constr(struct ieee80211_channel *channel,
const u8 *country_ie, u8 country_ie_len,
const u8 *pwr_constr_elem,
int *chan_pwr, int *pwr_reduction)
@@ -2095,8 +2921,7 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
return have_chan_pwr;
}
-static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *channel,
+static void ieee80211_find_cisco_dtpc(struct ieee80211_channel *channel,
const u8 *cisco_dtpc_ie,
int *pwr_level)
{
@@ -2130,7 +2955,7 @@ static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
(capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) ||
capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) {
has_80211h_pwr = ieee80211_find_80211h_pwr_constr(
- sdata, channel, country_ie, country_ie_len,
+ channel, country_ie, country_ie_len,
pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h);
pwr_level_80211h =
max_t(int, 0, chan_pwr - pwr_reduction_80211h);
@@ -2138,7 +2963,7 @@ static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
if (cisco_dtpc_ie) {
ieee80211_find_cisco_dtpc(
- sdata, channel, cisco_dtpc_ie, &pwr_level_cisco);
+ channel, cisco_dtpc_ie, &pwr_level_cisco);
has_cisco_pwr = true;
}
@@ -2171,7 +2996,7 @@ static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
}
link->ap_power_level = new_ap_level;
- if (__ieee80211_recalc_txpower(sdata))
+ if (__ieee80211_recalc_txpower(link))
return BSS_CHANGED_TXPOWER;
return 0;
}
@@ -2414,16 +3239,16 @@ void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = link->conf->chandef;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (sdata->wdev.cac_started) {
+ if (sdata->wdev.links[link->link_id].cac_started) {
ieee80211_link_release_channel(link);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, link->link_id);
}
}
@@ -2769,7 +3594,7 @@ static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link,
ieee80211_check_rate_mask(link);
- link->u.mgd.bss = cbss;
+ link->conf->bss = cbss;
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
if (sdata->vif.p2p ||
@@ -2892,20 +3717,58 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
netif_carrier_on(sdata->dev);
}
+static void ieee80211_ml_reconf_reset(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_mgd_assoc_data *add_links_data =
+ sdata->u.mgd.reconf.add_links_data;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->u.mgd.reconf.added_links |
+ sdata->u.mgd.reconf.removed_links))
+ return;
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.reconf.wk);
+ sdata->u.mgd.reconf.added_links = 0;
+ sdata->u.mgd.reconf.removed_links = 0;
+ sdata->u.mgd.reconf.dialog_token = 0;
+
+ if (add_links_data) {
+ struct cfg80211_mlo_reconf_done_data done_data = {};
+ u8 link_id;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++)
+ done_data.links[link_id].bss =
+ add_links_data->link[link_id].bss;
+
+ cfg80211_mlo_reconf_add_done(sdata->dev, &done_data);
+
+ kfree(sdata->u.mgd.reconf.add_links_data);
+ sdata->u.mgd.reconf.add_links_data = NULL;
+ }
+}
+
static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
u16 stype, u16 reason, bool tx,
u8 *frame_buf)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
+ struct sta_info *ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
unsigned int link_id;
u64 changed = 0;
struct ieee80211_prep_tx_info info = {
.subtype = stype,
+ .was_assoc = true,
+ .link_id = ffs(sdata->vif.active_links) - 1,
};
lockdep_assert_wiphy(local->hw.wiphy);
+ if (WARN_ON(!ap_sta))
+ return;
+
if (WARN_ON_ONCE(tx && !frame_buf))
return;
@@ -2917,7 +3780,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ifmgd->associated = false;
/* other links will be destroyed */
- sdata->deflink.u.mgd.bss = NULL;
+ sdata->deflink.conf->bss = NULL;
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
netif_carrier_off(sdata->dev);
@@ -2950,29 +3813,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* deauthenticate/disassociate now */
if (tx || frame_buf) {
- /*
- * In multi channel scenarios guarantee that the virtual
- * interface is granted immediate airtime to transmit the
- * deauthentication frame by calling mgd_prepare_tx, if the
- * driver requested so.
- */
- if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP)) {
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link);
- link_id++) {
- struct ieee80211_link_data *link;
-
- link = sdata_dereference(sdata->link[link_id],
- sdata);
- if (!link)
- continue;
- if (link->u.mgd.have_beacon)
- break;
- }
- if (link_id == IEEE80211_MLD_MAX_NUM_LINKS) {
- info.link_id = ffs(sdata->vif.active_links) - 1;
- drv_mgd_prepare_tx(sdata->local, sdata, &info);
- }
- }
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr,
sdata->vif.cfg.ap_addr, stype,
@@ -2991,8 +3832,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.cfg.ssid_len = 0;
- /* remove AP and TDLS peers */
- sta_info_flush(sdata);
+ /* Remove TDLS peers */
+ __sta_info_flush(sdata, false, -1, ap_sta);
+
+ if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
+ /* Only move the AP state */
+ sta_info_move_state(ap_sta, IEEE80211_STA_NONE);
+ } else {
+ /* Remove AP peer */
+ sta_info_flush(sdata, -1);
+ }
/* finally reset all BSS / config parameters */
if (!ieee80211_vif_is_mld(&sdata->vif))
@@ -3043,6 +3892,14 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_vif_cfg_change_notify(sdata, changed);
}
+ if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
+ /*
+ * After notifying the driver about the disassoc,
+ * remove the ap sta.
+ */
+ sta_info_flush(sdata, -1);
+ }
+
/* disassociated - set to defaults now */
ieee80211_set_wmm_default(&sdata->deflink, false, false);
@@ -3058,7 +3915,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->deflink.u.mgd.disable_wmm_tracking = false;
ifmgd->flags = 0;
- sdata->deflink.u.mgd.conn_flags = 0;
for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link;
@@ -3070,27 +3926,47 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
sdata->vif.bss_conf.csa_active = false;
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- sdata->deflink.u.mgd.csa_ignored_same_chan = false;
- if (sdata->deflink.csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
- }
+ sdata->deflink.u.mgd.csa.blocked_tx = false;
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ sdata->deflink.u.mgd.csa.ignored_same_chan = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
/* existing TX TSPEC sessions no longer exist */
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk);
+ sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP;
sdata->vif.bss_conf.pwr_reduction = 0;
- sdata->vif.bss_conf.tx_pwr_env_num = 0;
- memset(sdata->vif.bss_conf.tx_pwr_env, 0,
- sizeof(sdata->vif.bss_conf.tx_pwr_env));
+ ieee80211_clear_tpe(&sdata->vif.bss_conf.tpe);
+
+ sdata->vif.cfg.eml_cap = 0;
+ sdata->vif.cfg.eml_med_sync_delay = 0;
+ sdata->vif.cfg.mld_capa_op = 0;
memset(&sdata->u.mgd.ttlm_info, 0,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
+
+ sdata->u.mgd.removed_links = 0;
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.ml_reconf_work);
+
+ wiphy_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->teardown_ttlm_work);
+
ieee80211_vif_set_links(sdata, 0, 0);
+
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
+
+ /* if disconnection happens in the middle of the ML reconfiguration
+ * flow, cfg80211 must be called to release the BSS references obtained
+ * when the flow started.
+ */
+ ieee80211_ml_reconf_reset(sdata);
}
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3238,7 +4114,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
sdata->vif.cfg.ssid,
sdata->vif.cfg.ssid_len,
- sdata->deflink.u.mgd.bss->channel);
+ sdata->deflink.conf->bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -3321,7 +4197,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
return NULL;
if (ifmgd->associated)
- cbss = sdata->deflink.u.mgd.bss;
+ cbss = sdata->deflink.conf->bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss)
@@ -3371,16 +4247,32 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
- bool tx;
+ bool tx = false;
lockdep_assert_wiphy(local->hw.wiphy);
if (!ifmgd->associated)
return;
- /* in MLO assume we have a link where we can TX the frame */
- tx = ieee80211_vif_is_mld(&sdata->vif) ||
- !sdata->deflink.csa_block_tx;
+ /* only transmit if we have a link that makes that worthwhile */
+ for (unsigned int link_id = 0;
+ link_id < ARRAY_SIZE(sdata->link);
+ link_id++) {
+ struct ieee80211_link_data *link;
+
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (WARN_ON_ONCE(!link))
+ continue;
+
+ if (link->u.mgd.csa.blocked_tx)
+ continue;
+
+ tx = true;
+ break;
+ }
if (!ifmgd->driver_disconnect) {
unsigned int link_id;
@@ -3400,8 +4292,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
- cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss);
- link->u.mgd.bss = NULL;
+ cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss);
+ link->conf->bss = NULL;
}
}
@@ -3412,12 +4304,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
tx, frame_buf);
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
- }
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ sdata->deflink.u.mgd.csa.blocked_tx = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -3475,8 +4364,13 @@ EXPORT_SYMBOL(ieee80211_beacon_loss);
void ieee80211_connection_loss(struct ieee80211_vif *vif)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_hw *hw = &sdata->local->hw;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_hw *hw;
+
+ KUNIT_STATIC_STUB_REDIRECT(ieee80211_connection_loss, vif);
+
+ sdata = vif_to_sdata(vif);
+ hw = &sdata->local->hw;
trace_api_connection_loss(sdata);
@@ -3508,6 +4402,8 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ sdata->u.mgd.auth_data = NULL;
+
if (!assoc) {
/*
* we are not authenticated yet, the only timer that could be
@@ -3518,7 +4414,6 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -3530,7 +4425,6 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
kfree(auth_data);
- sdata->u.mgd.auth_data = NULL;
}
enum assoc_status {
@@ -3547,6 +4441,8 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ sdata->u.mgd.assoc_data = NULL;
+
if (status != ASSOC_SUCCESS) {
/*
* we are not associated yet, the only timer that could be
@@ -3556,7 +4452,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -3586,7 +4481,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
}
kfree(assoc_data);
- sdata->u.mgd.assoc_data = NULL;
}
static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
@@ -3889,57 +4783,6 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
false);
}
-static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
- u8 *supp_rates, unsigned int supp_rates_len,
- u32 *rates, u32 *basic_rates,
- bool *have_higher_than_11mbit,
- int *min_rate, int *min_rate_index)
-{
- int i, j;
-
- for (i = 0; i < supp_rates_len; i++) {
- int rate = supp_rates[i] & 0x7f;
- bool is_basic = !!(supp_rates[i] & 0x80);
-
- if ((rate * 5) > 110)
- *have_higher_than_11mbit = true;
-
- /*
- * Skip HT, VHT, HE, EHT and SAE H2E only BSS membership
- * selectors since they're not rates.
- *
- * Note: Even though the membership selector and the basic
- * rate flag share the same bit, they are not exactly
- * the same.
- */
- if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
- supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) ||
- supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) ||
- supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) ||
- supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
- continue;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- struct ieee80211_rate *br;
- int brate;
-
- br = &sband->bitrates[j];
-
- brate = DIV_ROUND_UP(br->bitrate, 5);
- if (brate == rate) {
- *rates |= BIT(j);
- if (is_basic)
- *basic_rates |= BIT(j);
- if ((rate * 5) < *min_rate) {
- *min_rate = rate * 5;
- *min_rate_index = j;
- }
- break;
- }
- }
- }
-}
-
static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct link_sta_info *link_sta,
@@ -4001,16 +4844,19 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+ struct ieee80211_mgd_assoc_data *assoc_data =
+ sdata->u.mgd.assoc_data ?: sdata->u.mgd.reconf.add_links_data;
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.start = elem_start,
.len = elem_len,
.link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
+ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
const struct cfg80211_bss_ies *bss_ies = NULL;
@@ -4034,21 +4880,25 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
if (elems->ml_basic) {
- if (!(elems->ml_basic->control &
- cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
+ int bss_param_ch_cnt =
+ ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic);
+
+ if (bss_param_ch_cnt < 0) {
ret = false;
goto out;
}
- link->u.mgd.bss_param_ch_cnt =
- ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
+ bss_conf->bss_param_ch_cnt = bss_param_ch_cnt;
+ bss_conf->bss_param_ch_cnt_link_id = link_id;
}
- } else if (!elems->prof ||
+ } else if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC ||
+ !elems->prof ||
!(elems->prof->control & prof_bss_param_ch_present)) {
ret = false;
goto out;
} else {
const u8 *ptr = elems->prof->variable +
elems->prof->sta_info_len - 1;
+ int bss_param_ch_cnt;
/*
* During parsing, we validated that these fields exist,
@@ -4056,8 +4906,10 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
capab_info = get_unaligned_le16(ptr);
assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2);
- link->u.mgd.bss_param_ch_cnt =
+ bss_param_ch_cnt =
ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof);
+ bss_conf->bss_param_ch_cnt = bss_param_ch_cnt;
+ bss_conf->bss_param_ch_cnt_link_id = link_id;
if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
link_info(link, "association response status code=%u\n",
@@ -4086,9 +4938,9 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
if (!is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems *bss_elems;
@@ -4107,6 +4959,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
parse_params.start = bss_ies->data;
parse_params.len = bss_ies->len;
parse_params.bss = cbss;
+ parse_params.link_id = -1;
bss_elems = ieee802_11_parse_elems_full(&parse_params);
if (!bss_elems) {
ret = false;
@@ -4125,38 +4978,43 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
* have to include the IEs in the (re)association response.
*/
if (!elems->ht_cap_elem && bss_elems->ht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_cap_elem = bss_elems->ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
if (!elems->ht_operation && bss_elems->ht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_operation = bss_elems->ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
- if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
- elems->vht_cap_elem = bss_elems->vht_cap_elem;
- sdata_info(sdata,
- "AP bug: VHT capa missing from AssocResp\n");
- }
- if (!elems->vht_operation && bss_elems->vht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
- elems->vht_operation = bss_elems->vht_operation;
- sdata_info(sdata,
- "AP bug: VHT operation missing from AssocResp\n");
- }
+ if (is_5ghz) {
+ if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
+ elems->vht_cap_elem = bss_elems->vht_cap_elem;
+ sdata_info(sdata,
+ "AP bug: VHT capa missing from AssocResp\n");
+ }
+
+ if (!elems->vht_operation && bss_elems->vht_operation &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
+ elems->vht_operation = bss_elems->vht_operation;
+ sdata_info(sdata,
+ "AP bug: VHT operation missing from AssocResp\n");
+ }
+ }
kfree(bss_elems);
}
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
+ * Note that the ieee80211_config_bw() below would also check
+ * for this (and more), but this has better error reporting.
*/
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ if (!is_6ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
@@ -4164,7 +5022,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ if (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
@@ -4172,36 +5030,28 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- !elems->he_6ghz_capa) {
- sdata_info(sdata,
- "HE 6 GHz AP is missing HE 6 GHz band capability\n");
- ret = false;
- goto out;
- }
-
- if (WARN_ON(!link->conf->chandef.chan)) {
+ /* check/update if AP changed anything in assoc response vs. scan */
+ if (ieee80211_config_bw(link, elems,
+ link_id == assoc_data->assoc_link_id,
+ changed, "assoc response")) {
ret = false;
goto out;
}
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
- if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- (!elems->he_cap || !elems->he_operation)) {
- sdata_info(sdata,
- "HE AP is missing HE capability/operation\n");
+ if (WARN_ON(!link->conf->chanreq.oper.chan)) {
ret = false;
goto out;
}
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
/* Set up internal HT/VHT capabilities */
- if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
link_sta);
if (elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
const struct ieee80211_vht_cap *bss_vht_cap = NULL;
const struct cfg80211_bss_ies *ies;
@@ -4228,7 +5078,8 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
rcu_read_unlock();
}
- if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ if (elems->he_operation &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
elems->he_cap) {
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
@@ -4249,7 +5100,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_EHT) {
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
@@ -4258,7 +5109,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
@@ -4406,7 +5256,9 @@ static int ieee80211_mgd_setup_link_sta(struct ieee80211_link_data *link,
sband = local->hw.wiphy->bands[cbss->channel->band];
ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len,
- &rates, &basic_rates, &have_higher_than_11mbit,
+ NULL, 0,
+ &rates, &basic_rates, NULL,
+ &have_higher_than_11mbit,
&min_rate, &min_rate_index);
/*
@@ -4456,7 +5308,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
bool support_160;
u8 chains = 1;
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HT)
return chains;
ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
@@ -4469,7 +5321,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
*/
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_VHT)
return chains;
vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
@@ -4488,7 +5340,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
chains = max(chains, nss);
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HE)
return chains;
ies = rcu_dereference(cbss->ies);
@@ -4539,533 +5391,347 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
return chains;
}
-static bool
-ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_bss_ies *ies,
- const struct ieee80211_he_operation *he_op)
+static void
+ieee80211_determine_our_sta_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
{
- const struct element *he_cap_elem;
- const struct ieee80211_he_cap_elem *he_cap;
- struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
- u16 mcs_80_map_tx, mcs_80_map_rx;
- u16 ap_min_req_set;
- int mcs_nss_size;
- int nss;
-
- he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
- ies->data, ies->len);
-
- if (!he_cap_elem)
- return false;
+ struct ieee80211_sta_ht_cap sta_ht_cap = sband->ht_cap;
+ bool is_5ghz = sband->band == NL80211_BAND_5GHZ;
+ bool is_6ghz = sband->band == NL80211_BAND_6GHZ;
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
- sdata_info(sdata,
- "Invalid HE elem, Disable HE\n");
- return false;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_dbg(sdata, "operating as S1G STA\n");
+ return;
}
- /* skip one byte ext_tag_id */
- he_cap = (void *)(he_cap_elem->data + 1);
- mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
- sdata_info(sdata,
- "Invalid HE elem with nss size, Disable HE\n");
- return false;
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ if (req && req->flags & ASSOC_REQ_DISABLE_HT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT disabled by flag, limiting to legacy\n");
+ goto out;
}
- /* mcs_nss is right after he_cap info */
- he_mcs_nss_supp = (void *)(he_cap + 1);
+ if (!wmm_used) {
+ mlme_link_id_dbg(sdata, link_id,
+ "WMM/QoS not supported, limiting to legacy\n");
+ goto out;
+ }
- mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
- mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+ if (req) {
+ unsigned int i;
- /* P802.11-REVme/D0.3
- * 27.1.1 Introduction to the HE PHY
- * ...
- * An HE STA shall support the following features:
- * ...
- * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
- * supported channel widths for HE SU PPDUs
- */
- if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
- sdata_info(sdata,
- "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
- mcs_80_map_tx, mcs_80_map_rx);
- return false;
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
+ if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
+ netdev_info(sdata->dev,
+ "WEP/TKIP use, limiting to legacy\n");
+ goto out;
+ }
+ }
}
- if (!he_op)
- return true;
-
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ if (!sta_ht_cap.ht_supported && !is_6ghz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT not supported (and not on 6 GHz), limiting to legacy\n");
+ goto out;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ /* HT is fine */
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_CONN_BW_LIMIT_40 :
+ IEEE80211_CONN_BW_LIMIT_20;
- /* make sure the AP is consistent with itself
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * A STA that is operating in an HE BSS shall be able to receive and
- * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
- * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
- * MLME-START.request primitive and shall be able to receive at each of
- * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
- * NSS Set field in the HE Capabilities parameter of the MLMESTART.request
- * primitive
- */
- for (nss = 8; nss > 0; nss--) {
- u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
- u8 ap_rx_val;
- u8 ap_tx_val;
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+ ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
- if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ if (req && req->flags & ASSOC_REQ_DISABLE_VHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "VHT disabled by flag, limiting to HT\n");
+ goto out;
+ }
- ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
- ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+ if (vht_cap.vht_supported && is_5ghz) {
+ bool have_80mhz = false;
+ unsigned int i;
- if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
- sdata_info(sdata,
- "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
- nss, ap_rx_val, ap_rx_val, ap_op_val);
- return false;
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz support on 5 GHz, limiting to HT\n");
+ goto out;
}
- }
- return true;
-}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
-static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_he_operation *he_op)
-{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- u16 ap_min_req_set;
- int i;
+ have_80mhz = true;
+ break;
+ }
- if (!sta_he_cap || !he_op)
- return false;
+ if (!have_80mhz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 80 MHz channel support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
+ } else if (is_5ghz) { /* !vht_supported but on 5 GHz */
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ /* VHT - if we have it - is fine, including 80 MHz; check 160 MHz again below */
+ if (sband->band != NL80211_BAND_2GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ if (is_5ghz &&
+ !(vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT 160 MHz capability on 5 GHz, limiting to 80 MHz");
+ }
- /* Need to go over for 80MHz, 160MHz and for 80+80 */
- for (i = 0; i < 3; i++) {
- const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
- &sta_he_cap->he_mcs_nss_supp;
- u16 sta_mcs_map_rx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
- u16 sta_mcs_map_tx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
- u8 nss;
- bool verified = true;
+ if (req && req->flags & ASSOC_REQ_DISABLE_HE) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HE disabled by flag, limiting to HT/VHT\n");
+ goto out;
+ }
- /*
- * For each band there is a maximum of 8 spatial streams
- * possible. Each of the sta_mcs_map_* is a 16-bit struct built
- * of 2 bits per NSS (1-8), with the values defined in enum
- * ieee80211_he_mcs_support. Need to make sure STA TX and RX
- * capabilities aren't less than the AP's minimum requirements
- * for this HE BSS per SS.
- * It is enough to find one such band that meets the reqs.
- */
- for (nss = 8; nss > 0; nss--) {
- u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
- u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
- u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap) {
+ WARN_ON(is_6ghz);
+ mlme_link_id_dbg(sdata, link_id,
+ "no HE support, limiting to HT/VHT\n");
+ goto out;
+ }
- if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ /* so we have HE */
+ conn->mode = IEEE80211_CONN_MODE_HE;
- /*
- * Make sure the HE AP doesn't require MCSs that aren't
- * supported by the client as required by spec
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * An HE STA shall not attempt to join * (MLME-JOIN.request primitive)
- * a BSS, unless it supports (i.e., is able to both transmit and
- * receive using) all of the <HE-MCS, NSS> tuples in the basic
- * HE-MCS and NSS set.
- */
- if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
- verified = false;
- break;
- }
+ /* check bandwidth */
+ switch (sband->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ break;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz HE cap in 2.4 GHz, limiting to 20 MHz\n");
+ break;
+ case NL80211_BAND_5GHZ:
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40/80 MHz HE cap in 5 GHz, limiting to 20 MHz\n");
+ break;
}
-
- if (verified)
- return true;
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)) {
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 5 GHz, limiting to 80 MHz\n");
+ }
+ break;
+ case NL80211_BAND_6GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ break;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 6 GHz, limiting to 80 MHz\n");
+ break;
}
- /* If here, STA doesn't meet AP's HE min requirements */
- return false;
-}
+ if (req && req->flags & ASSOC_REQ_DISABLE_EHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "EHT disabled by flag, limiting to HE\n");
+ goto out;
+ }
-static u8
-ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
- const struct ieee80211_sta_eht_cap *sta_eht_cap,
- unsigned int idx, int bw)
-{
- u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
- u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ if (!eht_cap) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT support, limiting to HE\n");
+ goto out;
+ }
- /* handle us being a 20 MHz-only EHT STA - with four values
- * for MCS 0-7, 8-9, 10-11, 12-13.
- */
- if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
- return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+ /* we have EHT */
- /* the others have MCS 0-9 together, rather than separately from 0-7 */
- if (idx > 0)
- idx--;
+ conn->mode = IEEE80211_CONN_MODE_EHT;
- switch (bw) {
- case 0:
- return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
- case 1:
- if (!(he_phy_cap0 &
- (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
- case 2:
- if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
- }
+ /* check bandwidth */
+ if (is_6ghz &&
+ eht_cap->eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_320;
+ else if (is_6ghz)
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT 320 MHz cap in 6 GHz, limiting to 160 MHz\n");
- WARN_ON(1);
- return 0;
+out:
+ mlme_link_id_dbg(sdata, link_id,
+ "determined local STA to be %s, BW limited to %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
}
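The closing debug message derives the limit in MHz as 20 * (1 << bw_limit); a short sketch of the mapping this assumes (namely that the bw_limit enumerators are consecutive, which is an assumption here, not stated by the patch):

/* illustration only, assuming consecutive enum values starting at 20 MHz */
static int conn_bw_limit_to_mhz(enum ieee80211_conn_bw_limit bw)
{
	/*
	 * IEEE80211_CONN_BW_LIMIT_20  ->  20 MHz
	 * IEEE80211_CONN_BW_LIMIT_40  ->  40 MHz
	 * IEEE80211_CONN_BW_LIMIT_80  ->  80 MHz
	 * IEEE80211_CONN_BW_LIMIT_160 -> 160 MHz
	 * IEEE80211_CONN_BW_LIMIT_320 -> 320 MHz
	 */
	return 20 * (1 << bw);
}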
-static bool
-ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_eht_operation *eht_op)
+static void
+ieee80211_determine_our_sta_mode_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_auth_request *req,
+ bool wmm_used,
+ struct ieee80211_conn_settings *conn)
{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_sta_eht_cap *sta_eht_cap =
- ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
- unsigned int i;
-
- if (!sta_he_cap || !sta_eht_cap || !eht_op)
- return false;
-
- req = &eht_op->basic_mcs_nss;
+ ieee80211_determine_our_sta_mode(sdata, sband, NULL, wmm_used,
+ req->link_id > 0 ? req->link_id : 0,
+ conn);
+}
- for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
- u8 req_rx_nss, req_tx_nss;
- unsigned int bw;
+static void
+ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
+{
+ struct ieee80211_conn_settings tmp;
- req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_RX);
- req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_TX);
+ WARN_ON(!req);
- for (bw = 0; bw < 3; bw++) {
- u8 have, have_rx_nss, have_tx_nss;
+ ieee80211_determine_our_sta_mode(sdata, sband, req, wmm_used, link_id,
+ &tmp);
- have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
- sta_eht_cap,
- i, bw);
- have_rx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_RX);
- have_tx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_TX);
+ conn->mode = min_t(enum ieee80211_conn_mode,
+ conn->mode, tmp.mode);
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit, tmp.bw_limit);
+}
- if (req_rx_nss > have_rx_nss ||
- req_tx_nss > have_tx_nss)
- return false;
- }
+static enum ieee80211_ap_reg_power
+ieee80211_ap_power_type(u8 control)
+{
+ switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
+ return IEEE80211_REG_LPI_AP;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ return IEEE80211_REG_SP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return IEEE80211_REG_VLP_AP;
+ default:
+ return IEEE80211_REG_UNSET_AP;
}
-
- return true;
}
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
- struct cfg80211_bss *cbss,
- bool mlo,
- ieee80211_conn_flags_t *conn_flags)
+ int link_id,
+ struct cfg80211_bss *cbss, bool mlo,
+ struct ieee80211_conn_settings *conn,
+ unsigned long *userspace_selectors)
{
struct ieee80211_local *local = sdata->local;
- const struct ieee80211_ht_cap *ht_cap = NULL;
- const struct ieee80211_ht_operation *ht_oper = NULL;
- const struct ieee80211_vht_operation *vht_oper = NULL;
- const struct ieee80211_he_operation *he_oper = NULL;
- const struct ieee80211_eht_operation *eht_oper = NULL;
- const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
- struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
- bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- bool supports_mlo = false;
- struct ieee80211_bss *bss = (void *)cbss->priv;
- struct ieee80211_elems_parse_params parse_params = {
- .link_id = -1,
- .from_ap = true,
- };
+ struct ieee80211_chan_req chanreq = {};
+ struct cfg80211_chan_def ap_chandef;
struct ieee802_11_elems *elems;
- const struct cfg80211_bss_ies *ies;
int ret;
- u32 i;
- bool have_80mhz;
lockdep_assert_wiphy(local->hw.wiphy);
rcu_read_lock();
+ elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
+ &chanreq, &ap_chandef,
+ userspace_selectors);
- ies = rcu_dereference(cbss->ies);
- parse_params.start = ies->data;
- parse_params.len = ies->len;
- elems = ieee802_11_parse_elems_full(&parse_params);
- if (!elems) {
+ if (IS_ERR(elems)) {
rcu_read_unlock();
- return -ENOMEM;
- }
-
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
- *conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ);
-
- /* disable HT/VHT/HE if we don't support them */
- if (!sband->ht_cap.ht_supported && !is_6ghz) {
- mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!sband->vht_cap.vht_supported && is_5ghz) {
- mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ return PTR_ERR(elems);
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) {
- ht_oper = elems->ht_operation;
- ht_cap = elems->ht_cap_elem;
-
- if (!ht_cap) {
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- ht_oper = NULL;
- }
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) {
- vht_oper = elems->vht_operation;
- if (vht_oper && !ht_oper) {
- vht_oper = NULL;
- sdata_info(sdata,
- "AP advertised VHT without HT, disabling HT/VHT/HE\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!elems->vht_cap_elem) {
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- vht_oper = NULL;
- }
+ if (mlo && !elems->ml_basic) {
+ sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
+ rcu_read_unlock();
+ kfree(elems);
+ return -EINVAL;
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- he_oper = elems->he_operation;
-
- if (link && is_6ghz) {
- struct ieee80211_bss_conf *bss_conf;
- u8 j = 0;
+ if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
- bss_conf = link->conf;
+ if (elems->pwr_constr_elem)
+ link->conf->pwr_reduction = *elems->pwr_constr_elem;
- if (elems->pwr_constr_elem)
- bss_conf->pwr_reduction = *elems->pwr_constr_elem;
-
- BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
- ARRAY_SIZE(elems->tx_pwr_env));
-
- for (i = 0; i < elems->tx_pwr_env_num; i++) {
- if (elems->tx_pwr_env_len[i] >
- sizeof(bss_conf->tx_pwr_env[j]))
- continue;
-
- bss_conf->tx_pwr_env_num++;
- memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
- elems->tx_pwr_env_len[i]);
- j++;
- }
- }
-
- if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
- !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
-
- /*
- * EHT requires HE to be supported as well. Specifically for 6 GHz
- * channels, the operation channel information can only be deduced from
- * both the 6 GHz operation information (from the HE operation IE) and
- * EHT operation.
- */
- if (!(*conn_flags &
- (IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT)) &&
- he_oper) {
- const struct cfg80211_bss_ies *cbss_ies;
- const struct element *eht_ml_elem;
- const u8 *eht_oper_ie;
-
- cbss_ies = rcu_dereference(cbss->ies);
- eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
- cbss_ies->data, cbss_ies->len);
- if (eht_oper_ie && eht_oper_ie[1] >=
- 1 + sizeof(struct ieee80211_eht_operation))
- eht_oper = (void *)(eht_oper_ie + 3);
+ he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
+ if (he_6ghz_oper)
+ link->conf->power_type =
+ ieee80211_ap_power_type(he_6ghz_oper->control);
else
- eht_oper = NULL;
-
- if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
-
- eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
- cbss_ies->data, cbss_ies->len);
-
- /* data + 1 / datalen - 1 since it's an extended element */
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
- eht_ml_elem &&
- ieee80211_mle_type_ok(eht_ml_elem->data + 1,
- IEEE80211_ML_CONTROL_TYPE_BASIC,
- eht_ml_elem->datalen - 1)) {
- supports_mlo = true;
-
- sdata->vif.cfg.eml_cap =
- ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
- sdata->vif.cfg.eml_med_sync_delay =
- ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
- }
- }
-
- /* Allow VHT if at least one channel on the sband supports 80 MHz */
- have_80mhz = false;
- for (i = 0; i < sband->n_channels; i++) {
- if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_NO_80MHZ))
- continue;
-
- have_80mhz = true;
- break;
- }
-
- if (!have_80mhz) {
- sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
+ link_info(link,
+ "HE 6 GHz operation missing (on %d MHz), expect issues\n",
+ cbss->channel->center_freq);
- if (sband->band == NL80211_BAND_S1GHZ) {
- s1g_oper = elems->s1g_oper;
- if (!s1g_oper)
- sdata_info(sdata,
- "AP missing S1G operation element?\n");
+ link->conf->tpe = elems->tpe;
+ ieee80211_rearrange_tpe(&link->conf->tpe, &ap_chandef,
+ &chanreq.oper);
}
-
- *conn_flags |=
- ieee80211_determine_chantype(sdata, link, *conn_flags,
- sband,
- cbss->channel,
- bss->vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper,
- &chandef, false);
-
- if (link)
- link->needed_rx_chains =
- min(ieee80211_max_rx_chains(link, cbss),
- local->rx_chains);
-
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
kfree(elems);
elems = NULL;
- if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) {
- sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection");
- return -EINVAL;
- }
-
- if (mlo && !supports_mlo) {
- sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
- return -EINVAL;
- }
-
if (!link)
return 0;
+ rcu_read_lock();
+ link->needed_rx_chains = min(ieee80211_max_rx_chains(link, cbss),
+ local->rx_chains);
+ rcu_read_unlock();
+
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10)
- goto out;
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10)
+ return ret;
+
+ while (ret && chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ ieee80211_chanreq_downgrade(&chanreq, conn);
- while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
- *conn_flags |=
- ieee80211_chandef_downgrade(&chandef);
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
}
- out:
+
return ret;
}
@@ -5126,6 +5792,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sta))
goto out_err;
+ sta->sta.spp_amsdu = assoc_data->spp_amsdu;
+
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
@@ -5189,8 +5857,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
if (link_id != assoc_data->assoc_link_id) {
- err = ieee80211_prep_channel(sdata, link, cbss, true,
- &link->u.mgd.conn_flags);
+ link->u.mgd.conn = assoc_data->link[link_id].conn;
+
+ err = ieee80211_prep_channel(sdata, link, link_id, cbss,
+ true, &link->u.mgd.conn,
+ assoc_data->userspace_selectors);
if (err) {
link_info(link, "prep_channel failed\n");
goto out_err;
@@ -5224,7 +5895,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
/* links might have changed due to rejected ones, set them again */
ieee80211_vif_set_links(sdata, valid_links, dormant_links);
- rate_control_rate_init(sta);
+ rate_control_rate_init_all_links(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
set_sta_flag(sta, WLAN_STA_MFP);
@@ -5308,6 +5979,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return;
+ info.link_id = assoc_data->assoc_link_id;
+
+ parse_params.mode =
+ assoc_data->link[assoc_data->assoc_link_id].conn.mode;
+
if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) ||
!ether_addr_equal(assoc_data->ap_addr, mgmt->sa))
return;
@@ -5424,6 +6100,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
assoc_data->ap_addr);
goto abandon_assoc;
}
+
+ sdata->vif.cfg.eml_cap =
+ ieee80211_mle_get_eml_cap((const void *)elems->ml_basic);
+ sdata->vif.cfg.eml_med_sync_delay =
+ ieee80211_mle_get_eml_med_sync_delay((const void *)elems->ml_basic);
+ sdata->vif.cfg.mld_capa_op =
+ ieee80211_mle_get_mld_capa_op((const void *)elems->ml_basic);
}
sdata->vif.cfg.aid = aid;
@@ -5686,49 +6369,6 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
-static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u64 *changed)
-{
- struct ieee80211_local *local = link->sdata->local;
- u16 bitmap = 0, extracted;
-
- if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- }
-
- extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
- &link->conf->chandef,
- bitmap);
-
- /* accept if there are no changes */
- if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
- extracted == link->conf->eht_puncturing)
- return true;
-
- if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef)) {
- link_info(link,
- "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
- link->u.mgd.bssid,
- bitmap,
- link->conf->chandef.width);
- return false;
- }
-
- if (bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING))
- return false;
-
- ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
- return true;
-}
-
static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5792,9 +6432,7 @@ out:
static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems)
{
- const struct ieee80211_multi_link_elem *ml;
const struct element *sub;
- ssize_t ml_len;
unsigned long removed_links = 0;
u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u8 link_id;
@@ -5803,24 +6441,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf)
return;
- ml_len = cfg80211_defragment_element(elems->ml_reconf_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch + elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
- if (ml_len < 0)
- return;
-
- elems->ml_reconf = (const void *)elems->scratch_pos;
- elems->ml_reconf_len = ml_len;
- ml = elems->ml_reconf;
-
/* Directly parse the sub elements as the common information doesn't
* hold any useful information.
*/
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ for_each_mle_subelement(sub, (const u8 *)elems->ml_reconf,
+ elems->ml_reconf_len) {
struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
u8 *pos = prof->variable;
u16 control;
@@ -5848,7 +6473,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
*/
if (control &
IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
- link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
+ link_removal_timeout[link_id] = get_unaligned_le16(pos);
}
removed_links &= sdata->vif.valid_links;
@@ -5873,8 +6498,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
continue;
}
- link_delay = link_conf->beacon_int *
- link_removal_timeout[link_id];
+ if (link_removal_timeout[link_id] < 1)
+ link_delay = 0;
+ else
+ link_delay = link_conf->beacon_int *
+ (link_removal_timeout[link_id] - 1);
if (!delay)
delay = link_delay;
@@ -5888,6 +6516,64 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
TU_TO_JIFFIES(delay));
}
+static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 active_links, u16 dormant_links,
+ u16 suspended_links)
+{
+ u64 changed = 0;
+ int ret;
+
+ if (!active_links) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If there is an active negotiated TTLM, the new negotiated/advertised
+ * TTLM replaces it, so discard the old one.
+ */
+ if (sdata->vif.neg_ttlm.valid) {
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ sdata->vif.suspended_links = 0;
+ changed = BSS_CHANGED_MLD_TTLM;
+ }
+
+ if (sdata->vif.active_links != active_links) {
+ /* usable links are affected when active_links are changed,
+ * so notify the driver about the status change
+ */
+ changed |= BSS_CHANGED_MLD_VALID_LINKS;
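+ /* keep only links that are currently active; if none remains,
+ * fall back to the first valid non-dormant link
+ */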
+ active_links &= sdata->vif.active_links;
+ if (!active_links)
+ active_links =
+ BIT(__ffs(sdata->vif.valid_links &
+ ~dormant_links));
+ ret = ieee80211_set_active_links(&sdata->vif, active_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM active links\n");
+ goto out;
+ }
+ }
+
+ ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+ dormant_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM dormant links\n");
+ goto out;
+ }
+
+ sdata->vif.suspended_links = suspended_links;
+ if (sdata->vif.suspended_links)
+ changed |= BSS_CHANGED_MLD_TTLM;
+
+ ieee80211_vif_cfg_change_notify(sdata, changed);
+
+out:
+ if (ret)
+ ieee80211_disconnect(&sdata->vif, false);
+
+ return ret;
+}
+
static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5895,30 +6581,19 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.ttlm_work.work);
- int ret;
new_active_links = sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
- if (!new_active_links) {
- ieee80211_disconnect(&sdata->vif, false);
- return;
- }
ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0);
- new_active_links = BIT(ffs(new_active_links) - 1);
- ieee80211_set_active_links(&sdata->vif, new_active_links);
-
- ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
- new_dormant_links);
+ if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links,
+ 0))
+ return;
sdata->u.mgd.ttlm_info.active = true;
sdata->u.mgd.ttlm_info.switch_time = 0;
-
- if (!ret)
- ieee80211_vif_cfg_change_notify(sdata,
- BSS_CHANGED_MLD_VALID_LINKS);
}
static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
@@ -6104,13 +6779,140 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
}
}
+static void
+ieee80211_mgd_check_cross_link_csa(struct ieee80211_sub_if_data *sdata,
+ int reporting_link_id,
+ struct ieee802_11_elems *elems)
+{
+ const struct element *sta_profiles[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ ssize_t sta_profiles_len[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ const struct element *sub;
+ const u8 *subelems;
+ size_t subelems_len;
+ u8 common_size;
+ int link_id;
+
+ if (!ieee80211_mle_size_ok((u8 *)elems->ml_basic, elems->ml_basic_len))
+ return;
+
+ common_size = ieee80211_mle_common_size((u8 *)elems->ml_basic);
+ subelems = (u8 *)elems->ml_basic + common_size;
+ subelems_len = elems->ml_basic_len - common_size;
+
+ for_each_element_id(sub, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE,
+ subelems, subelems_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ struct ieee80211_link_data *link;
+ ssize_t len;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ continue;
+
+ link_id = le16_get_bits(prof->control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID);
+ /* need a valid link ID, but also not our own, both AP bugs */
+ if (link_id == reporting_link_id ||
+ link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ len = cfg80211_defragment_element(sub, subelems, subelems_len,
+ NULL, 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ if (WARN_ON(len < 0))
+ continue;
+
+ sta_profiles[link_id] = sub;
+ sta_profiles_len[link_id] = len;
+ }
+
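+ /* Second pass: defragment each collected per-STA profile (if any)
+ * and feed it to the channel switch handling for every other link,
+ * so a CSA advertised on this link is applied to the affected link.
+ */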
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee802_11_elems *prof_elems;
+ struct ieee80211_link_data *link;
+ ssize_t len;
+
+ if (link_id == reporting_link_id)
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ if (!sta_profiles[link_id]) {
+ prof_elems = NULL;
+ goto handle;
+ }
+
+ /* we can defragment in-place, won't use the buffer again */
+ len = cfg80211_defragment_element(sta_profiles[link_id],
+ subelems, subelems_len,
+ (void *)sta_profiles[link_id],
+ sta_profiles_len[link_id],
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ if (WARN_ON(len != sta_profiles_len[link_id]))
+ continue;
+
+ prof = (void *)sta_profiles[link_id];
+ prof_elems = ieee802_11_parse_elems(prof->variable +
+ (prof->sta_info_len - 1),
+ len -
+ (prof->sta_info_len - 1),
+ false, NULL);
+
+ /* memory allocation failed - let's hope that's transient */
+ if (!prof_elems)
+ continue;
+
+handle:
+ /*
+ * FIXME: the timings here are obviously incorrect,
+ * but only older Intel drivers seem to care, and
+ * those don't have MLO. If you really need this,
+ * the problem is having to calculate it with the
+ * TSF offset etc. The device_timestamp is still
+ * correct, of course.
+ */
+ ieee80211_sta_process_chanswitch(link, 0, 0, elems, prof_elems,
+ IEEE80211_CSA_SOURCE_OTHER_LINK);
+ kfree(prof_elems);
+ }
+}
+
+static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata,
+ const struct ieee802_11_elems *elems)
+{
+ struct ieee80211_vif_cfg *cfg = &sdata->vif.cfg;
+ static u8 zero_ssid[IEEE80211_MAX_SSID_LEN];
+
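+ /* only a genuine mismatch counts; hidden-SSID beacons (zero length
+ * or all-zero SSID) are never treated as one
+ */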
+ if (!elems->ssid)
+ return false;
+
+ /* hidden SSID: zero length */
+ if (elems->ssid_len == 0)
+ return false;
+
+ if (elems->ssid_len != cfg->ssid_len)
+ return true;
+
+ /* hidden SSID: zeroed out */
+ if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len))
+ return false;
+
+ return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
+}
+
static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
struct ieee80211_mgmt *mgmt = (void *) hdr;
size_t baselen;
@@ -6128,6 +6930,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.link_id = -1,
.from_ap = true,
};
@@ -6153,7 +6956,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
parse_params.len = len - baselen;
rcu_read_lock();
- chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
+ chanctx_conf = rcu_dereference(bss_conf->chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return;
@@ -6181,12 +6984,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
link->u.mgd.dtim_period = elems->dtim_period;
link->u.mgd.have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
- if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
- link->conf->sync_tsf =
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
+ !ieee80211_is_s1g_beacon(hdr->frame_control)) {
+ bss_conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
- link->conf->sync_device_ts =
+ bss_conf->sync_device_ts =
rx_status->device_timestamp;
- link->conf->sync_dtim_count = elems->dtim_count;
+ bss_conf->sync_dtim_count = elems->dtim_count;
}
if (elems->mbssid_config_ie)
@@ -6210,7 +7014,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, link->u.mgd.bss))
+ !ieee80211_rx_our_beacon(bssid, bss_conf->bss))
return;
bssid = link->u.mgd.bssid;
@@ -6237,12 +7041,21 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- parse_params.bss = link->u.mgd.bss;
+ parse_params.bss = bss_conf->bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return;
+
+ if (rx_status->flag & RX_FLAG_DECRYPTED &&
+ ieee80211_mgd_ssid_mismatch(sdata, elems)) {
+ sdata_info(sdata, "SSID mismatch for AP %pM, disconnect\n",
+ sdata->vif.cfg.ap_addr);
+ __ieee80211_disconnect(sdata);
+ return;
+ }
+
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
@@ -6299,9 +7112,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
}
- if (link->u.mgd.csa_waiting_bcn)
- ieee80211_chswitch_post_beacon(link);
-
/*
* Update beacon timing and dtim count on every beacon appearance. This
* will allow the driver to use the most updated values. Do it before
@@ -6312,11 +7122,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
!ieee80211_is_s1g_beacon(hdr->frame_control)) {
- link->conf->sync_tsf =
+ bss_conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
- link->conf->sync_device_ts =
+ bss_conf->sync_device_ts =
rx_status->device_timestamp;
- link->conf->sync_dtim_count = elems->dtim_count;
+ bss_conf->sync_dtim_count = elems->dtim_count;
}
if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) ||
@@ -6329,7 +7139,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
ieee80211_sta_process_chanswitch(link, rx_status->mactime,
rx_status->device_timestamp,
- elems, true);
+ elems, elems,
+ IEEE80211_CSA_SOURCE_BEACON);
+
+ /* note that after this elems->ml_basic can no longer be used fully */
+ ieee80211_mgd_check_cross_link_csa(sdata, rx_status->link_id, elems);
+
+ ieee80211_mgd_update_bss_param_ch_cnt(sdata, bss_conf, elems);
if (!link->u.mgd.disable_wmm_tracking &&
ieee80211_sta_wmm_params(local, link, elems->wmm_param,
@@ -6375,21 +7191,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!bss_conf->chanreq.oper.chan))
goto free;
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
+ sband = local->hw.wiphy->bands[bss_conf->chanreq.oper.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
- if (ieee80211_config_bw(link, elems->ht_cap_elem,
- elems->vht_cap_elem, elems->ht_operation,
- elems->vht_operation, elems->he_operation,
- elems->eht_operation,
- elems->s1g_oper, bssid, &changed)) {
- sdata_info(sdata,
- "failed to follow AP %pM bandwidth change, disconnect\n",
- bssid);
+ if (ieee80211_config_bw(link, elems, true, &changed, "beacon")) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -6411,21 +7220,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
- if (elems->eht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
- if (!ieee80211_config_puncturing(link, elems->eht_operation,
- &changed)) {
- ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DEAUTH_LEAVING,
- true, deauth_buf);
- ieee80211_report_disconnect(sdata, deauth_buf,
- sizeof(deauth_buf), true,
- WLAN_REASON_DEAUTH_LEAVING,
- false);
- goto free;
- }
- }
-
ieee80211_ml_reconfiguration(sdata, elems);
ieee80211_process_adv_ttlm(sdata, elems,
le64_to_cpu(mgmt->u.beacon.timestamp));
@@ -6435,6 +7229,430 @@ free:
kfree(elems);
}
+static void ieee80211_apply_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm neg_ttlm)
+{
+ u16 new_active_links, new_dormant_links, new_suspended_links, map = 0;
+ u8 i;
+
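+ /* a link is usable if any TID maps to it in either direction */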
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++)
+ map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i];
+
+ /* If there is an active TTLM, unset previously suspended links */
+ if (sdata->vif.neg_ttlm.valid)
+ sdata->vif.dormant_links &= ~sdata->vif.suspended_links;
+
+ /* exclude links that are already disabled by advertised TTLM */
+ new_active_links =
+ map & sdata->vif.valid_links & ~sdata->vif.dormant_links;
+ new_suspended_links =
+ (~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links;
+ new_dormant_links = sdata->vif.dormant_links | new_suspended_links;
+ if (ieee80211_ttlm_set_links(sdata, new_active_links,
+ new_dormant_links, new_suspended_links))
+ return;
+
+ sdata->vif.neg_ttlm = neg_ttlm;
+ sdata->vif.neg_ttlm.valid = true;
+}
+
+static void ieee80211_neg_ttlm_timeout_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.neg_ttlm_timeout_work.work);
+
+ sdata_info(sdata,
+ "No negotiated TTLM response from AP, disconnecting.\n");
+
+ __ieee80211_disconnect(sdata);
+}
+
+static void
+ieee80211_neg_ttlm_add_suggested_map(struct sk_buff *skb,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u8 i, direction[IEEE80211_TTLM_MAX_CNT];
+
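+ /* if the DL and UL maps differ, build one element per direction;
+ * otherwise a single "both directions" element is enough
+ */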
+ if (memcmp(neg_ttlm->downlink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->downlink))) {
+ direction[0] = IEEE80211_TTLM_DIRECTION_DOWN;
+ direction[1] = IEEE80211_TTLM_DIRECTION_UP;
+ } else {
+ direction[0] = IEEE80211_TTLM_DIRECTION_BOTH;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(direction); i++) {
+ u8 tid, len, map_ind = 0, *len_pos, *map_ind_pos, *pos;
+ __le16 map;
+
+ len = sizeof(struct ieee80211_ttlm_elem) + 1 + 1;
+
+ pos = skb_put(skb, len + 2);
+ *pos++ = WLAN_EID_EXTENSION;
+ len_pos = pos++;
+ *pos++ = WLAN_EID_EXT_TID_TO_LINK_MAPPING;
+ *pos++ = direction[i];
+ map_ind_pos = pos++;
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ?
+ cpu_to_le16(neg_ttlm->uplink[tid]) :
+ cpu_to_le16(neg_ttlm->downlink[tid]);
+ if (!map)
+ continue;
+
+ len += 2;
+ map_ind |= BIT(tid);
+ skb_put_data(skb, &map, sizeof(map));
+ }
+
+ *map_ind_pos = map_ind;
+ *len_pos = len;
+
+ if (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH)
+ break;
+ }
+}
+
+static void
+ieee80211_send_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 dialog_token)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_req);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_req.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ;
+ mgmt->u.action.u.ttlm_req.dialog_token = dialog_token;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ ieee80211_tx_skb(sdata, skb);
+}
+
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+ u8 i;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->vif.cfg.mld_capa_op &
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((params->dlink[i] & ~sdata->vif.valid_links) ||
+ (params->ulink[i] & ~sdata->vif.valid_links))
+ return -EINVAL;
+
+ neg_ttlm.downlink[i] = params->dlink[i];
+ neg_ttlm.uplink[i] = params->ulink[i];
+ }
+
+ if (drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm) !=
+ NEG_TTLM_RES_ACCEPT)
+ return -EINVAL;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+ sdata->u.mgd.dialog_token_alloc++;
+ ieee80211_send_neg_ttlm_req(sdata, &sdata->vif.neg_ttlm,
+ sdata->u.mgd.dialog_token_alloc);
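+ /* (re)arm the response timeout; ieee80211_neg_ttlm_timeout_work()
+ * disconnects if the AP never replies
+ */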
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work,
+ IEEE80211_NEG_TTLM_REQ_TIMEOUT);
+ return 0;
+}
+
+static void
+ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res ttlm_res,
+ u8 dialog_token,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_res.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES;
+ mgmt->u.action.u.ttlm_res.dialog_token = dialog_token;
+ switch (ttlm_res) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case NEG_TTLM_RES_REJECT:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ break;
+ case NEG_TTLM_RES_ACCEPT:
+ mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
+ break;
+ case NEG_TTLM_RES_SUGGEST_PREFERRED:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ break;
+ }
+
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static int
+ieee80211_parse_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_ttlm_elem *ttlm,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 *direction)
+{
+ u8 control, link_map_presence, map_size, tid;
+ u8 *pos;
+
+ /* The element size was already validated in
+ * ieee80211_tid_to_link_map_size_ok()
+ */
+ pos = (void *)ttlm->optional;
+
+ control = ttlm->control;
+
+ /* the mapping switch time and expected duration fields must not be
+ * present in a negotiated TTLM
+ */
+ if (control & (IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT |
+ IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)) {
+ mlme_dbg(sdata,
+ "Invalid TTLM element in negotiated TTLM request\n");
+ return -EINVAL;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ neg_ttlm->downlink[tid] = sdata->vif.valid_links;
+ neg_ttlm->uplink[tid] = sdata->vif.valid_links;
+ }
+ *direction = IEEE80211_TTLM_DIRECTION_BOTH;
+ return 0;
+ }
+
+ *direction = u8_get_bits(control, IEEE80211_TTLM_CONTROL_DIRECTION);
+ if (*direction != IEEE80211_TTLM_DIRECTION_DOWN &&
+ *direction != IEEE80211_TTLM_DIRECTION_UP &&
+ *direction != IEEE80211_TTLM_DIRECTION_BOTH)
+ return -EINVAL;
+
+ link_map_presence = *pos;
+ pos++;
+
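+ /* a set link-mapping-size bit means 1-octet per-TID link mappings,
+ * otherwise they are 2 octets each
+ */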
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ map_size = 1;
+ else
+ map_size = 2;
+
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ u16 map;
+
+ if (link_map_presence & BIT(tid)) {
+ map = ieee80211_get_ttlm(map_size, pos);
+ if (!map) {
+ mlme_dbg(sdata,
+ "No active links for TID %d", tid);
+ return -EINVAL;
+ }
+ } else {
+ map = 0;
+ }
+
+ switch (*direction) {
+ case IEEE80211_TTLM_DIRECTION_BOTH:
+ neg_ttlm->downlink[tid] = map;
+ neg_ttlm->uplink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_DOWN:
+ neg_ttlm->downlink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_UP:
+ neg_ttlm->uplink[tid] = map;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pos += map_size;
+ }
+ return 0;
+}
+
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u8 dialog_token, direction[IEEE80211_TTLM_MAX_CNT] = {}, i;
+ size_t ies_len;
+ enum ieee80211_neg_ttlm_res ttlm_res = NEG_TTLM_RES_ACCEPT;
+ struct ieee802_11_elems *elems = NULL;
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+
+ BUILD_BUG_ON(ARRAY_SIZE(direction) != ARRAY_SIZE(elems->ttlm));
+
+ if (!ieee80211_vif_is_mld(&sdata->vif))
+ return;
+
+ dialog_token = mgmt->u.action.u.ttlm_req.dialog_token;
+ ies_len = len - offsetof(struct ieee80211_mgmt,
+ u.action.u.ttlm_req.variable);
+ elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable,
+ ies_len, true, NULL);
+ if (!elems) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
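+ /* validate the combination: a "both directions" TTLM must be the only
+ * element, and two elements must cover different directions
+ */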
+ for (i = 0; i < elems->ttlm_num; i++) {
+ if (ieee80211_parse_neg_ttlm(sdata, elems->ttlm[i],
+ &neg_ttlm, &direction[i]) ||
+ (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH &&
+ elems->ttlm_num != 1)) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ if (!elems->ttlm_num ||
+ (elems->ttlm_num == 2 && direction[0] == direction[1])) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((neg_ttlm.downlink[i] &&
+ (neg_ttlm.downlink[i] & ~sdata->vif.valid_links)) ||
+ (neg_ttlm.uplink[i] &&
+ (neg_ttlm.uplink[i] & ~sdata->vif.valid_links))) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ ttlm_res = drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm);
+
+ if (ttlm_res != NEG_TTLM_RES_ACCEPT)
+ goto out;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+out:
+ kfree(elems);
+ ieee80211_send_neg_ttlm_res(sdata, ttlm_res, dialog_token, &neg_ttlm);
+}
+
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ mgmt->u.action.u.ttlm_req.dialog_token !=
+ sdata->u.mgd.dialog_token_alloc)
+ return;
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+
+ /* An MLD station sends a TID-to-link mapping request mainly to handle
+ * a BTM (BSS transition management) request, in which case it needs to
+ * restrict the set of active links.
+ * In that case the MLD AP is not expected to reject the negotiated
+ * TTLM request, so simply disconnect on failure for now; handling
+ * request rejections can be implemented better in the future.
+ */
+ if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
+ __ieee80211_disconnect(sdata);
+}
+
+static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ u16 new_dormant_links;
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.teardown_ttlm_work);
+
+ if (!sdata->vif.neg_ttlm.valid)
+ return;
+
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ new_dormant_links =
+ sdata->vif.dormant_links & ~sdata->vif.suspended_links;
+ sdata->vif.suspended_links = 0;
+ ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+ new_dormant_links);
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_TTLM |
+ BSS_CHANGED_MLD_VALID_LINKS);
+}
+
+void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int frame_len = offsetofend(struct ieee80211_mgmt,
+ u.action.u.ttlm_tear_down);
+ struct ieee80211_tx_info *info;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + frame_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ mgmt = skb_put_zero(skb, frame_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_tear_down.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN;
+
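+ /* request TX status and tag the frame so the status path can finish
+ * the local teardown (see teardown_ttlm_work)
+ */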
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->status_data = IEEE80211_STATUS_TYPE_NEG_TTLM;
+ ieee80211_tx_skb(sdata, skb);
+}
+EXPORT_SYMBOL(ieee80211_send_teardown_neg_ttlm);
+
void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -6461,6 +7679,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_link_data *link = &sdata->deflink;
struct ieee80211_rx_status *rx_status;
+ struct ieee802_11_elems *elems;
struct ieee80211_mgmt *mgmt;
u16 fc;
int ies_len;
@@ -6504,9 +7723,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr))
break;
- if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
- struct ieee802_11_elems *elems;
-
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_SPECTRUM_MGMT:
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
@@ -6519,15 +7737,20 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.chan_switch.variable,
ies_len, true, NULL);
- if (elems && !elems->parse_error)
+ if (elems && !elems->parse_error) {
+ enum ieee80211_csa_source src =
+ IEEE80211_CSA_SOURCE_PROT_ACTION;
+
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
- elems, false);
+ elems, elems,
+ src);
+ }
kfree(elems);
- } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
- struct ieee802_11_elems *elems;
-
+ break;
+ case WLAN_CATEGORY_PUBLIC:
+ case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION:
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.ext_chan_switch.variable);
@@ -6544,6 +7767,14 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ies_len, true, NULL);
if (elems && !elems->parse_error) {
+ enum ieee80211_csa_source src;
+
+ if (mgmt->u.action.category ==
+ WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ src = IEEE80211_CSA_SOURCE_PROT_ACTION;
+ else
+ src = IEEE80211_CSA_SOURCE_UNPROT_ACTION;
+
/* for the handling code pretend it was an IE */
elems->ext_chansw_ie =
&mgmt->u.action.u.ext_chan_switch.data;
@@ -6551,10 +7782,12 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
- elems, false);
+ elems, elems,
+ src);
}
kfree(elems);
+ break;
}
break;
}
@@ -6665,6 +7898,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
assoc_data->tries++;
+ assoc_data->comeback = false;
if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
sdata_info(sdata, "association with %pM timed out\n",
assoc_data->ap_addr);
@@ -6874,7 +8108,7 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
return;
if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa_waiting_bcn)
+ !sdata->deflink.u.mgd.csa.waiting_bcn)
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
@@ -6898,7 +8132,7 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
return;
if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa_waiting_bcn)
+ !sdata->deflink.u.mgd.csa.waiting_bcn)
return;
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
@@ -7043,6 +8277,25 @@ static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy,
link->u.mgd.driver_smps_mode);
}
+static void ieee80211_ml_sta_reconf_timeout(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.reconf.wk.work);
+
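+ /* nothing pending anymore, the reconfiguration completed in time */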
+ if (!sdata->u.mgd.reconf.added_links &&
+ !sdata->u.mgd.reconf.removed_links)
+ return;
+
+ sdata_info(sdata,
+ "mlo: reconf: timeout: added=0x%x, removed=0x%x\n",
+ sdata->u.mgd.reconf.added_links,
+ sdata->u.mgd.reconf.removed_links);
+
+ __ieee80211_disconnect(sdata);
+}
+
/* interface setup */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
@@ -7057,6 +8310,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_tdls_peer_del_work);
wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
ieee80211_ml_reconf_work);
+ wiphy_delayed_work_init(&ifmgd->reconf.wk,
+ ieee80211_ml_sta_reconf_timeout);
timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
@@ -7064,6 +8319,10 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_handle_tspec_ac_params_wk);
wiphy_delayed_work_init(&ifmgd->ttlm_work,
ieee80211_tid_to_link_map_work);
+ wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work,
+ ieee80211_neg_ttlm_timeout_work);
+ wiphy_work_init(&ifmgd->teardown_ttlm_work,
+ ieee80211_teardown_ttlm_work);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
@@ -7073,6 +8332,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
spin_lock_init(&ifmgd->teardown_lock);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_recalc_smps_work(struct wiphy *wiphy,
@@ -7092,7 +8352,6 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
unsigned int link_id = link->link_id;
link->u.mgd.p2p_noa_index = -1;
- link->u.mgd.conn_flags = 0;
link->conf->bssid = link->u.mgd.bssid;
link->smps_mode = IEEE80211_SMPS_OFF;
@@ -7105,12 +8364,17 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
else
link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
- wiphy_delayed_work_init(&link->u.mgd.chswitch_work,
- ieee80211_chswitch_work);
+ wiphy_delayed_work_init(&link->u.mgd.csa.switch_work,
+ ieee80211_csa_switch_work);
+
+ ieee80211_clear_tpe(&link->conf->tpe);
if (sdata->u.mgd.assoc_data)
ether_addr_copy(link->conf->addr,
sdata->u.mgd.assoc_data->link[link_id].addr);
+ else if (sdata->u.mgd.reconf.add_links_data)
+ ether_addr_copy(link->conf->addr,
+ sdata->u.mgd.reconf.add_links_data->link[link_id].addr);
else if (!is_valid_ether_addr(link->conf->addr))
eth_random_addr(link->conf->addr);
}
@@ -7132,7 +8396,9 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, s8 link_id,
const u8 *ap_mld_addr, bool assoc,
- bool override)
+ struct ieee80211_conn_settings *conn,
+ bool override,
+ unsigned long *userspace_selectors)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -7263,13 +8529,23 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (new_sta || override) {
- err = ieee80211_prep_channel(sdata, link, cbss, mlo,
- &link->u.mgd.conn_flags);
+ /*
+ * Only set this if we're also going to calculate the AP
+ * settings etc.; otherwise it was already set in a
+ * previous call. Note that override is set to %true in
+ * assoc if the settings were changed.
+ */
+ link->u.mgd.conn = *conn;
+ err = ieee80211_prep_channel(sdata, link, link->link_id, cbss,
+ mlo, &link->u.mgd.conn,
+ userspace_selectors);
if (err) {
if (new_sta)
sta_info_free(local, new_sta);
goto out_err;
}
+ /* pass out for use in assoc */
+ *conn = link->u.mgd.conn;
}
if (new_sta) {
@@ -7291,7 +8567,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"failed to insert STA entry for the AP (error %d)\n",
err);
- goto out_err;
+ goto out_release_chan;
}
} else
WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid));
@@ -7302,8 +8578,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return 0;
+out_release_chan:
+ ieee80211_link_release_channel(link);
out_err:
- ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0, 0);
return err;
}
@@ -7377,6 +8654,22 @@ out:
return ret;
}
+static void ieee80211_parse_cfg_selectors(unsigned long *userspace_selectors,
+ const u8 *supported_selectors,
+ u8 supported_selectors_len)
+{
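+ /* record, as a bitmap, which BSS membership selectors userspace
+ * claims to support
+ */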
+ if (supported_selectors) {
+ for (int i = 0; i < supported_selectors_len; i++) {
+ set_bit(supported_selectors[i],
+ userspace_selectors);
+ }
+ } else {
+ /* Assume SAE_H2E support for backward compatibility. */
+ set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E,
+ userspace_selectors);
+ }
+}
+
/* config hooks */
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
@@ -7384,10 +8677,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data;
+ struct ieee80211_conn_settings conn;
struct ieee80211_link_data *link;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss;
u16 auth_alg;
int err;
- bool cont_auth;
+ bool cont_auth, wmm_used;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -7475,6 +8771,10 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
memcpy(auth_data->key, req->key, req->key_len);
}
+ ieee80211_parse_cfg_selectors(auth_data->userspace_selectors,
+ req->supported_selectors,
+ req->supported_selectors_len);
+
auth_data->algorithm = auth_alg;
/* try to authenticate/probe */
@@ -7518,15 +8818,25 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* needed for transmitting the auth frame(s) properly */
memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
+ bss = (void *)req->bss->priv;
+ wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ sband = local->hw.wiphy->bands[req->bss->channel->band];
+
+ ieee80211_determine_our_sta_mode_auth(sdata, sband, req, wmm_used,
+ &conn);
+
err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
- req->ap_mld_addr, cont_auth, false);
+ req->ap_mld_addr, cont_auth,
+ &conn, false,
+ auth_data->userspace_selectors);
if (err)
goto err_clear;
- if (req->link_id > 0)
+ if (req->link_id >= 0)
link = sdata_dereference(sdata->link[req->link_id], sdata);
else
- link = sdata_dereference(sdata->link[0], sdata);
+ link = &sdata->deflink;
if (WARN_ON(!link)) {
err = -ENOLINK;
@@ -7558,38 +8868,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return err;
}
-static ieee80211_conn_flags_t
+static void
ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
struct cfg80211_assoc_request *req,
- ieee80211_conn_flags_t conn_flags,
+ struct ieee80211_conn_settings *conn,
unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
const struct cfg80211_bss_ies *bss_ies;
struct ieee80211_supported_band *sband;
- const struct element *ht_elem, *vht_elem;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- bool is_5ghz, is_6ghz;
cbss = assoc_data->link[link_id].bss;
if (WARN_ON(!cbss))
- return 0;
+ return;
bss = (void *)cbss->priv;
sband = local->hw.wiphy->bands[cbss->channel->band];
if (WARN_ON(!sband))
- return 0;
+ return;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
- return 0;
-
- is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ return;
/* for MLO connections assume advertising all rates is OK */
if (!req->ap_mld_addr) {
@@ -7606,40 +8911,18 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
assoc_data->ie_pos += req->links[link_id].elems_len;
}
- rcu_read_lock();
- ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
- if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation))
- assoc_data->link[link_id].ap_ht_param =
- ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param;
- else if (!is_6ghz)
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
- if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
- memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data,
- sizeof(struct ieee80211_vht_cap));
- } else if (is_5ghz) {
- link_info(link,
- "VHT capa missing/short, disabling VHT/HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
- rcu_read_unlock();
-
link->u.mgd.beacon_crc_valid = false;
link->u.mgd.dtim_period = 0;
link->u.mgd.have_beacon = false;
- /* override HT/VHT configuration only if the AP and we support it */
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ /* override HT configuration only if the AP and we support it */
+ if (conn->mode >= IEEE80211_CONN_MODE_HT) {
struct ieee80211_sta_ht_cap sta_ht_cap;
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
- link->conf->eht_puncturing = 0;
-
rcu_read_lock();
bss_ies = rcu_dereference(cbss->beacon_ies);
if (bss_ies) {
@@ -7660,7 +8943,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
}
if (bss_ies) {
- const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
@@ -7677,32 +8959,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
-
- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
- bss_ies->data, bss_ies->len);
- eht_oper = (const void *)(elem->data + 1);
-
- if (elem &&
- ieee80211_eht_oper_size_ok((const void *)(elem->data + 1),
- elem->datalen - 1) &&
- (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
- u16 bitmap;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING)))
- ieee80211_handle_puncturing_bitmap(link,
- eht_oper,
- bitmap,
- NULL);
- else
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
}
rcu_read_unlock();
@@ -7729,8 +8985,67 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
} else {
link->smps_mode = link->u.mgd.req_smps;
}
+}
+
+static int
+ieee80211_mgd_get_ap_ht_vht_capa(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_assoc_data *assoc_data,
+ int link_id)
+{
+ struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
+ enum nl80211_band band = cbss->channel->band;
+ struct ieee80211_supported_band *sband;
+ const struct element *elem;
+ int err;
+
+ /* neither HT nor VHT elements used on 6 GHz */
+ if (band == NL80211_BAND_6GHZ)
+ return 0;
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_HT)
+ return 0;
+
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
+ if (!elem || elem->datalen < sizeof(struct ieee80211_ht_operation)) {
+ mlme_link_id_dbg(sdata, link_id, "no HT operation on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ assoc_data->link[link_id].ap_ht_param =
+ ((struct ieee80211_ht_operation *)(elem->data))->ht_param;
+ rcu_read_unlock();
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_VHT)
+ return 0;
+
+ /* some drivers want to support VHT even on 2.4 GHz */
+ sband = sdata->local->hw.wiphy->bands[band];
+ if (!sband->vht_cap.vht_supported)
+ return 0;
- return conn_flags;
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
+ /* but even then, accept the element being absent on the AP */
+ if (!elem && band == NL80211_BAND_2GHZ) {
+ err = 0;
+ goto out_rcu;
+ }
+ if (!elem || elem->datalen < sizeof(struct ieee80211_vht_cap)) {
+ mlme_link_id_dbg(sdata, link_id, "no VHT capa on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ memcpy(&assoc_data->link[link_id].ap_vht_cap, elem->data,
+ sizeof(struct ieee80211_vht_cap));
+ rcu_read_unlock();
+
+ return 0;
+out_rcu:
+ rcu_read_unlock();
+ return err;
}
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
@@ -7742,11 +9057,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data;
const struct element *ssid_elem;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
- ieee80211_conn_flags_t conn_flags = 0;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
- bool override;
+ bool override, uapsd_supported;
+ bool match_auth;
int i, err;
size_t size = sizeof(*assoc_data) + req->ie_len;
@@ -7765,44 +9079,26 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (ieee80211_mgd_csa_in_process(sdata, cbss)) {
sdata_info(sdata, "AP is in CSA process, reject assoc\n");
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
rcu_read_lock();
ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
- memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
- vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
- if (req->ap_mld_addr) {
- for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (!req->links[i].bss)
- continue;
- link = sdata_dereference(sdata->link[i], sdata);
- if (link)
- ether_addr_copy(assoc_data->link[i].addr,
- link->conf->addr);
- else
- eth_random_addr(assoc_data->link[i].addr);
- }
- } else {
- memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
- }
-
- assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
-
- memcpy(assoc_data->ap_addr,
- req->ap_mld_addr ?: req->bss->bssid,
- ETH_ALEN);
+ if (req->ap_mld_addr)
+ memcpy(assoc_data->ap_addr, req->ap_mld_addr, ETH_ALEN);
+ else
+ memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -7820,98 +9116,152 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
false);
}
- if (ifmgd->auth_data && !ifmgd->auth_data->done) {
- err = -EBUSY;
- goto err_free;
- }
+ ieee80211_parse_cfg_selectors(assoc_data->userspace_selectors,
+ req->supported_selectors,
+ req->supported_selectors_len);
- if (ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(ifmgd->ht_capa_mask));
- if (ifmgd->auth_data) {
- bool match;
+ memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
+ memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
+ sizeof(ifmgd->vht_capa_mask));
+
+ memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
+ memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
+ sizeof(ifmgd->s1g_capa_mask));
- /* keep sta info, bssid if matching */
- match = ether_addr_equal(ifmgd->auth_data->ap_addr,
- assoc_data->ap_addr) &&
- ifmgd->auth_data->link_id == req->link_id;
+ /* keep some setup (AP STA, channel, ...) if matching */
+ match_auth = ifmgd->auth_data &&
+ ether_addr_equal(ifmgd->auth_data->ap_addr,
+ assoc_data->ap_addr) &&
+ ifmgd->auth_data->link_id == req->link_id;
- /* Cleanup is delayed if auth_data matches */
- if (!match)
- ieee80211_destroy_auth_data(sdata, false);
- }
+ if (req->ap_mld_addr) {
+ uapsd_supported = true;
- /* prepare assoc data */
+ if (req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE |
+ ASSOC_REQ_DISABLE_EHT)) {
+ err = -EINVAL;
+ goto err_free;
+ }
- bss = (void *)cbss->priv;
- assoc_data->wmm = bss->wmm_used &&
- (local->hw.queues >= IEEE80211_NUM_ACS);
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *link_cbss = req->links[i].bss;
+ struct ieee80211_bss *bss;
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
- * We still associate in non-HT mode (11a/b/g) if any one of these
- * ciphers is configured as pairwise.
- * We can set this to true for non-11n hardware, that'll be checked
- * separately along with the peer capabilities.
- */
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
- if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE due to WEP/TKIP use\n");
+ if (!link_cbss)
+ continue;
+
+ bss = (void *)link_cbss->priv;
+
+ if (!bss->wmm_used) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
+
+ if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
+
+ link = sdata_dereference(sdata->link[i], sdata);
+ if (link)
+ ether_addr_copy(assoc_data->link[i].addr,
+ link->conf->addr);
+ else
+ eth_random_addr(assoc_data->link[i].addr);
+ sband = local->hw.wiphy->bands[link_cbss->channel->band];
+
+ if (match_auth && i == assoc_link_id && link)
+ assoc_data->link[i].conn = link->u.mgd.conn;
+ else
+ assoc_data->link[i].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband,
+ req, true, i,
+ &assoc_data->link[i].conn);
+ assoc_data->link[i].bss = link_cbss;
+ assoc_data->link[i].disabled = req->links[i].disabled;
+
+ if (!bss->uapsd_supported)
+ uapsd_supported = false;
+
+ if (assoc_data->link[i].conn.mode < IEEE80211_CONN_MODE_EHT) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata,
+ assoc_data, i);
+ if (err) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
}
- }
- /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
- if (!bss->wmm_used) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
- }
+ assoc_data->wmm = true;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
- if (req->flags & ASSOC_REQ_DISABLE_HT) {
- mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
+ memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
+ assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
- if (req->flags & ASSOC_REQ_DISABLE_VHT) {
- mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ if (cbss->channel->band == NL80211_BAND_6GHZ &&
+ req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ assoc_data->link[0].bss = cbss;
+
+ if (match_auth)
+ assoc_data->link[0].conn = sdata->deflink.u.mgd.conn;
+ else
+ assoc_data->link[0].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband, req,
+ assoc_data->wmm, 0,
+ &assoc_data->link[0].conn);
- if (req->flags & ASSOC_REQ_DISABLE_HE) {
- mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ uapsd_supported = bss->uapsd_supported;
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, assoc_data, 0);
+ if (err)
+ goto err_free;
}
- if (req->flags & ASSOC_REQ_DISABLE_EHT)
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ assoc_data->spp_amsdu = req->flags & ASSOC_REQ_SPP_AMSDU;
- memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
- memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
- sizeof(ifmgd->ht_capa_mask));
+ if (ifmgd->auth_data && !ifmgd->auth_data->done) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
- memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
- sizeof(ifmgd->vht_capa_mask));
+ if (ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
- memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
- sizeof(ifmgd->s1g_capa_mask));
+ /* Cleanup is delayed if auth_data matches */
+ if (ifmgd->auth_data && !match_auth)
+ ieee80211_destroy_auth_data(sdata, false);
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
@@ -7943,19 +9293,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->assoc_link_id = assoc_link_id;
if (req->ap_mld_addr) {
- for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
- assoc_data->link[i].conn_flags = conn_flags;
- assoc_data->link[i].bss = req->links[i].bss;
- assoc_data->link[i].disabled = req->links[i].disabled;
- }
-
/* if there was no authentication, set up the link */
err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
if (err)
goto err_clear;
- } else {
- assoc_data->link[0].conn_flags = conn_flags;
- assoc_data->link[0].bss = cbss;
}
link = sdata_dereference(sdata->link[assoc_link_id], sdata);
@@ -7964,19 +9305,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
goto err_clear;
}
- /* keep old conn_flags from ieee80211_prep_channel() from auth */
- conn_flags |= link->u.mgd.conn_flags;
- conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req,
- conn_flags, assoc_link_id);
- override = link->u.mgd.conn_flags != conn_flags;
- link->u.mgd.conn_flags |= conn_flags;
+ override = link->u.mgd.conn.mode !=
+ assoc_data->link[assoc_link_id].conn.mode ||
+ link->u.mgd.conn.bw_limit !=
+ assoc_data->link[assoc_link_id].conn.bw_limit;
+ link->u.mgd.conn = assoc_data->link[assoc_link_id].conn;
+
+ ieee80211_setup_assoc_link(sdata, assoc_data, req, &link->u.mgd.conn,
+ assoc_link_id);
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
- if (bss->wmm_used && bss->uapsd_supported &&
+ if (assoc_data->wmm && uapsd_supported &&
(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
@@ -8020,27 +9363,31 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
continue;
if (i == assoc_data->assoc_link_id)
continue;
- /* only calculate the flags, hence link == NULL */
- err = ieee80211_prep_channel(sdata, NULL,
+ /* only calculate the mode, hence link == NULL */
+ err = ieee80211_prep_channel(sdata, NULL, i,
assoc_data->link[i].bss, true,
- &assoc_data->link[i].conn_flags);
+ &assoc_data->link[i].conn,
+ assoc_data->userspace_selectors);
if (err) {
req->links[i].error = err;
goto err_clear;
}
}
+ memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ vif_cfg->ssid_len = assoc_data->ssid_len;
+
/* needed for transmitting the assoc frames properly */
memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, cbss, req->link_id,
- req->ap_mld_addr, true, override);
+ req->ap_mld_addr, true,
+ &assoc_data->link[assoc_link_id].conn,
+ override,
+ assoc_data->userspace_selectors);
if (err)
goto err_clear;
- assoc_data->link[assoc_data->assoc_link_id].conn_flags =
- link->u.mgd.conn_flags;
-
if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) {
const struct cfg80211_bss_ies *beacon_ies;
@@ -8181,7 +9528,7 @@ void ieee80211_mgd_stop_link(struct ieee80211_link_data *link)
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.recalc_smps);
wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work);
+ &link->u.mgd.csa.switch_work);
}
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
@@ -8201,9 +9548,6 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
&ifmgd->csa_connection_drop_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->tdls_peer_del_work);
- wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
- &ifmgd->ml_reconf_work);
- wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
@@ -8284,3 +9628,697 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
_ieee80211_enable_rssi_reports(sdata, 0, 0);
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+
+static void ieee80211_ml_reconf_selectors(unsigned long *userspace_selectors)
+{
+ *userspace_selectors = 0;
+
+ /* these selectors are mandatory for ML reconfiguration */
+ set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, userspace_selectors);
+ set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, userspace_selectors);
+ set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, userspace_selectors);
+}
+
+void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_assoc_data *add_links_data =
+ ifmgd->reconf.add_links_data;
+ struct sta_info *sta;
+ struct cfg80211_mlo_reconf_done_data done_data = {};
+ u16 sta_changed_links = sdata->u.mgd.reconf.added_links |
+ sdata->u.mgd.reconf.removed_links;
+ u16 link_mask, valid_links;
+ unsigned int link_id;
+ unsigned long userspace_selectors;
+ size_t orig_len = len;
+ u8 i, group_key_data_len;
+ u8 *pos;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ len < offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp) ||
+ mgmt->u.action.u.ml_reconf_resp.dialog_token !=
+ sdata->u.mgd.reconf.dialog_token ||
+ !sta_changed_links)
+ return;
+
+ pos = mgmt->u.action.u.ml_reconf_resp.variable;
+ len -= offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp);
+
+	/* each status duple is 3 octets: a link ID octet followed by a
+	 * little-endian 16-bit status code
+	 */
+ if (len < mgmt->u.action.u.ml_reconf_resp.count * 3) {
+ sdata_info(sdata,
+ "mlo: reconf: unexpected len=%zu, count=%u\n",
+ len, mgmt->u.action.u.ml_reconf_resp.count);
+ goto disconnect;
+ }
+
+ link_mask = sta_changed_links;
+ for (i = 0; i < mgmt->u.action.u.ml_reconf_resp.count; i++) {
+ u16 status = get_unaligned_le16(pos + 1);
+
+ link_id = *pos;
+
+ if (!(link_mask & BIT(link_id))) {
+ sdata_info(sdata,
+ "mlo: reconf: unexpected link: %u, changed=0x%x\n",
+ link_id, sta_changed_links);
+ goto disconnect;
+ }
+
+ /* clear the corresponding link, to detect the case that
+ * the same link was included more than one time
+ */
+ link_mask &= ~BIT(link_id);
+
+		/* Handle failure to remove links here. Failure to add links is
+		 * handled later in the flow.
+		 */
+ if (status != WLAN_STATUS_SUCCESS) {
+ sdata_info(sdata,
+ "mlo: reconf: failed on link=%u, status=%u\n",
+ link_id, status);
+
+ /* The AP MLD failed to remove a link that was already
+ * removed locally. As this is not expected behavior,
+ * disconnect
+ */
+ if (sdata->u.mgd.reconf.removed_links & BIT(link_id))
+ goto disconnect;
+
+ /* The AP MLD failed to add a link. Remove it from the
+ * added links.
+ */
+ sdata->u.mgd.reconf.added_links &= ~BIT(link_id);
+ }
+
+ pos += 3;
+ len -= 3;
+ }
+
+ if (link_mask) {
+ sdata_info(sdata,
+ "mlo: reconf: no response for links=0x%x\n",
+ link_mask);
+ goto disconnect;
+ }
+
+ if (!sdata->u.mgd.reconf.added_links)
+ goto out;
+
+ if (len < 1 || len < 1 + *pos) {
+ sdata_info(sdata,
+ "mlo: reconf: invalid group key data length");
+ goto disconnect;
+ }
+
+ /* The Group Key Data field must be present when links are added. This
+ * field should be processed by userland.
+ */
+ group_key_data_len = *pos++;
+
+ pos += group_key_data_len;
+ len -= group_key_data_len + 1;
+
+ /* Process the information for the added links */
+ sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+ if (WARN_ON(!sta))
+ goto disconnect;
+
+ valid_links = sdata->vif.valid_links;
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		if (!add_links_data->link[link_id].bss ||
+		    !(sdata->u.mgd.reconf.added_links & BIT(link_id)))
+			continue;
+
+ valid_links |= BIT(link_id);
+ if (ieee80211_sta_allocate_link(sta, link_id))
+ goto disconnect;
+ }
+
+ ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links);
+ ieee80211_ml_reconf_selectors(&userspace_selectors);
+ link_mask = 0;
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct cfg80211_bss *cbss = add_links_data->link[link_id].bss;
+ struct ieee80211_link_data *link;
+ struct link_sta_info *link_sta;
+ u64 changed = 0;
+
+ if (!cbss)
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (WARN_ON(!link))
+ goto disconnect;
+
+ link_info(link,
+ "mlo: reconf: local address %pM, AP link address %pM\n",
+ add_links_data->link[link_id].addr,
+ add_links_data->link[link_id].bss->bssid);
+
+ link_sta = rcu_dereference_protected(sta->link[link_id],
+ lockdep_is_held(&local->hw.wiphy->mtx));
+ if (WARN_ON(!link_sta))
+ goto disconnect;
+
+ if (!link->u.mgd.have_beacon) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ ies = rcu_dereference(cbss->beacon_ies);
+ if (ies)
+ link->u.mgd.have_beacon = true;
+ else
+ ies = rcu_dereference(cbss->ies);
+ ieee80211_get_dtim(ies,
+ &link->conf->sync_dtim_count,
+ &link->u.mgd.dtim_period);
+ link->conf->beacon_int = cbss->beacon_interval;
+ rcu_read_unlock();
+ }
+
+ link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
+
+ link->u.mgd.conn = add_links_data->link[link_id].conn;
+ if (ieee80211_prep_channel(sdata, link, link_id, cbss,
+ true, &link->u.mgd.conn,
+ &userspace_selectors)) {
+ link_info(link, "mlo: reconf: prep_channel failed\n");
+ goto disconnect;
+ }
+
+ if (ieee80211_mgd_setup_link_sta(link, sta, link_sta,
+ add_links_data->link[link_id].bss))
+ goto disconnect;
+
+ if (!ieee80211_assoc_config_link(link, link_sta,
+ add_links_data->link[link_id].bss,
+ mgmt, pos, len,
+ &changed))
+ goto disconnect;
+
+ /* The AP MLD indicated success for this link, but the station
+ * profile status indicated otherwise. Since there is an
+ * inconsistency in the ML reconfiguration response, disconnect
+ */
+ if (add_links_data->link[link_id].status != WLAN_STATUS_SUCCESS)
+ goto disconnect;
+
+ ieee80211_sta_init_nss(link_sta);
+ if (ieee80211_sta_activate_link(sta, link_id))
+ goto disconnect;
+
+ changed |= ieee80211_link_set_associated(link, cbss);
+ ieee80211_link_info_change_notify(sdata, link, changed);
+
+ ieee80211_recalc_smps(sdata, link);
+ link_mask |= BIT(link_id);
+ }
+
+ sdata_info(sdata,
+ "mlo: reconf: current valid_links=0x%x, added=0x%x\n",
+ valid_links, link_mask);
+
+ /* links might have changed due to rejected ones, set them again */
+ ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links);
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS);
+
+ ieee80211_recalc_ps(local);
+ ieee80211_recalc_ps_vif(sdata);
+
+ done_data.buf = (const u8 *)mgmt;
+ done_data.len = orig_len;
+ done_data.added_links = link_mask;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
+ done_data.links[link_id].bss = add_links_data->link[link_id].bss;
+
+ cfg80211_mlo_reconf_add_done(sdata->dev, &done_data);
+ kfree(sdata->u.mgd.reconf.add_links_data);
+ sdata->u.mgd.reconf.add_links_data = NULL;
+out:
+ ieee80211_ml_reconf_reset(sdata);
+ return;
+
+disconnect:
+ __ieee80211_disconnect(sdata);
+}
+
+static struct sk_buff *
+ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_assoc_data *add_links_data,
+ u16 removed_links)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_multi_link_elem *ml_elem;
+ struct ieee80211_mle_basic_common_info *common;
+ enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
+ struct sk_buff *skb;
+ size_t size;
+ unsigned int link_id;
+ __le16 eml_capa = 0, mld_capa_ops = 0;
+ struct ieee80211_tx_info *info;
+ u8 common_size, var_common_size;
+ u8 *ml_elem_len;
+ u16 capab = 0;
+
+ size = local->hw.extra_tx_headroom + sizeof(*mgmt);
+
+ /* Consider the maximal length of the reconfiguration ML element */
+ size += sizeof(struct ieee80211_multi_link_elem);
+
+	/* The Basic ML element and the Reconfiguration ML element have the same
+	 * fixed common information fields in the context of an ML reconfiguration
+	 * action frame. The MLD MAC address must always be present.
+	 */
+ common_size = sizeof(*common);
+
+ /* when adding links, the MLD capabilities must be present */
+ var_common_size = 0;
+ if (add_links_data) {
+ const struct wiphy_iftype_ext_capab *ift_ext_capa =
+ cfg80211_get_iftype_ext_capa(local->hw.wiphy,
+ ieee80211_vif_type_p2p(&sdata->vif));
+
+ if (ift_ext_capa) {
+ eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities);
+ mld_capa_ops =
+ cpu_to_le16(ift_ext_capa->mld_capa_and_ops);
+ }
+
+ /* MLD capabilities and operation */
+ var_common_size += 2;
+
+ /* EML capabilities */
+ if (eml_capa & cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT)))
+ var_common_size += 2;
+ }
+
+ /* Add the common information length */
+ size += common_size + var_common_size;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct cfg80211_bss *cbss;
+ size_t elems_len;
+
+ if (removed_links & BIT(link_id)) {
+ size += sizeof(struct ieee80211_mle_per_sta_profile) +
+ ETH_ALEN;
+ continue;
+ }
+
+ if (!add_links_data || !add_links_data->link[link_id].bss)
+ continue;
+
+ elems_len = add_links_data->link[link_id].elems_len;
+ cbss = add_links_data->link[link_id].bss;
+
+ /* should be the same across all BSSes */
+ if (cbss->capability & WLAN_CAPABILITY_PRIVACY)
+ capab |= WLAN_CAPABILITY_PRIVACY;
+
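+		/* subelement header, fixed per-STA profile fields and the
+		 * link STA MAC address
+		 */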
+ size += 2 + sizeof(struct ieee80211_mle_per_sta_profile) +
+ ETH_ALEN;
+
+ /* SSID element + WMM */
+ size += 2 + sdata->vif.cfg.ssid_len + 9;
+ size += ieee80211_link_common_elems_size(sdata, iftype, cbss,
+ elems_len);
+ }
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ mgmt = skb_put_zero(skb, offsetofend(struct ieee80211_mgmt,
+ u.action.u.ml_reconf_req));
+
+ /* Add the MAC header */
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ /* Add the action frame fixed fields */
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ml_reconf_req.action_code =
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ;
+
+ /* allocate a dialog token and store it */
+ sdata->u.mgd.reconf.dialog_token = ++sdata->u.mgd.dialog_token_alloc;
+ mgmt->u.action.u.ml_reconf_req.dialog_token =
+ sdata->u.mgd.reconf.dialog_token;
+
+ /* Add the ML reconfiguration element and the common information */
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ ml_elem_len = skb_put(skb, 1);
+ skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK);
+ ml_elem = skb_put(skb, sizeof(*ml_elem));
+ ml_elem->control =
+ cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_RECONF |
+ IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR);
+ common = skb_put(skb, common_size);
+ common->len = common_size + var_common_size;
+ memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN);
+
+ if (add_links_data) {
+ if (eml_capa &
+ cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT))) {
+ ml_elem->control |=
+ cpu_to_le16(IEEE80211_MLC_RECONF_PRES_EML_CAPA);
+ skb_put_data(skb, &eml_capa, sizeof(eml_capa));
+ }
+
+ ml_elem->control |=
+ cpu_to_le16(IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP);
+
+ skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
+ }
+
+ if (sdata->u.mgd.flags & IEEE80211_STA_ENABLE_RRM)
+ capab |= WLAN_CAPABILITY_RADIO_MEASURE;
+
+ /* Add the per station profile */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ u8 *subelem_len = NULL;
+ u16 ctrl;
+ const u8 *addr;
+
+ /* Skip links that are not changing */
+ if (!(removed_links & BIT(link_id)) &&
+ (!add_links_data || !add_links_data->link[link_id].bss))
+ continue;
+
+ ctrl = link_id |
+ IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT;
+
+ if (removed_links & BIT(link_id)) {
+ struct ieee80211_bss_conf *conf =
+ sdata_dereference(sdata->vif.link_conf[link_id],
+ sdata);
+ if (!conf)
+ continue;
+
+ addr = conf->addr;
+ ctrl |= u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK,
+ IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE);
+ } else {
+ addr = add_links_data->link[link_id].addr;
+ ctrl |= IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE |
+ u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK,
+ IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE);
+ }
+
+ skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE);
+ subelem_len = skb_put(skb, 1);
+
+ put_unaligned_le16(ctrl, skb_put(skb, sizeof(ctrl)));
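+		/* STA Info: length octet (covering itself and the MAC address)
+		 * followed by the per-link local address
+		 */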
+ skb_put_u8(skb, 1 + ETH_ALEN);
+ skb_put_data(skb, addr, ETH_ALEN);
+
+ if (!(removed_links & BIT(link_id))) {
+ u16 link_present_elems[PRESENT_ELEMS_MAX] = {};
+ size_t extra_used;
+ void *capab_pos;
+ u8 qos_info;
+
+ capab_pos = skb_put(skb, 2);
+
+ skb_put_u8(skb, WLAN_EID_SSID);
+ skb_put_u8(skb, sdata->vif.cfg.ssid_len);
+ skb_put_data(skb, sdata->vif.cfg.ssid,
+ sdata->vif.cfg.ssid_len);
+
+ extra_used =
+ ieee80211_add_link_elems(sdata, skb, &capab, NULL,
+ add_links_data->link[link_id].elems,
+ add_links_data->link[link_id].elems_len,
+ link_id, NULL,
+ link_present_elems,
+ add_links_data);
+
+ if (add_links_data->link[link_id].elems)
+ skb_put_data(skb,
+ add_links_data->link[link_id].elems +
+ extra_used,
+ add_links_data->link[link_id].elems_len -
+ extra_used);
+ if (sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED) {
+ qos_info = sdata->u.mgd.uapsd_queues;
+ qos_info |= (sdata->u.mgd.uapsd_max_sp_len <<
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
+ } else {
+ qos_info = 0;
+ }
+
+ ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
+ put_unaligned_le16(capab, capab_pos);
+ }
+
+ ieee80211_fragment_element(skb, subelem_len,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ }
+
+ ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ return skb;
+}
+
+int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_link *add_links,
+ u16 rem_links)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgd_assoc_data *data = NULL;
+ struct sta_info *sta;
+ struct sk_buff *skb;
+ u16 added_links, new_valid_links;
+ int link_id, err;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->vif.cfg.mld_capa_op &
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT))
+ return -EINVAL;
+
+ /* No support for concurrent ML reconfiguration operation */
+ if (sdata->u.mgd.reconf.added_links ||
+ sdata->u.mgd.reconf.removed_links)
+ return -EBUSY;
+
+ added_links = 0;
+ for (link_id = 0; add_links && link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ if (!add_links[link_id].bss)
+ continue;
+
+ added_links |= BIT(link_id);
+ }
+
+ sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+ if (WARN_ON(!sta))
+ return -ENOLINK;
+
+ if (rem_links & BIT(sta->sta.deflink.link_id))
+ return -EINVAL;
+
+	/* Adding links to the set of valid links is done only after a successful
+	 * ML reconfiguration frame exchange. Here, prepare the data for the ML
+	 * reconfiguration frame construction and allocate the required
+	 * resources.
+	 */
+ if (added_links) {
+ bool uapsd_supported;
+ unsigned long userspace_selectors;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ uapsd_supported = true;
+ ieee80211_ml_reconf_selectors(&userspace_selectors);
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *link_cbss = add_links[link_id].bss;
+ struct ieee80211_bss *bss;
+
+ if (!link_cbss)
+ continue;
+
+ bss = (void *)link_cbss->priv;
+
+ if (!bss->wmm_used) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ eth_random_addr(data->link[link_id].addr);
+ data->link[link_id].conn =
+ ieee80211_conn_settings_unlimited;
+ sband =
+ local->hw.wiphy->bands[link_cbss->channel->band];
+
+ ieee80211_determine_our_sta_mode(sdata, sband,
+ NULL, true, link_id,
+ &data->link[link_id].conn);
+
+ data->link[link_id].bss = link_cbss;
+ data->link[link_id].disabled =
+ add_links[link_id].disabled;
+ data->link[link_id].elems =
+ (u8 *)add_links[link_id].elems;
+ data->link[link_id].elems_len =
+ add_links[link_id].elems_len;
+
+ if (!bss->uapsd_supported)
+ uapsd_supported = false;
+
+ if (data->link[link_id].conn.mode <
+ IEEE80211_CONN_MODE_EHT) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, data,
+ link_id);
+ if (err) {
+ err = -EINVAL;
+ goto err_free;
+ }
+ }
+
+		/* Require U-APSD support on the new links to match the current
+		 * U-APSD setting of the connection
+		 */
+ if (uapsd_supported !=
+ !!(sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ if (!data->link[link_id].bss)
+ continue;
+
+ /* only used to verify the mode, nothing is allocated */
+ err = ieee80211_prep_channel(sdata, NULL, link_id,
+ data->link[link_id].bss,
+ true,
+ &data->link[link_id].conn,
+ &userspace_selectors);
+ if (err)
+ goto err_free;
+ }
+ }
+
+	/* link removal is done before the ML reconfiguration frame exchange so
+	 * that these links will not be used between their removal by the AP MLD
+	 * and the reception of the ML reconfiguration response by the station.
+	 * Based on Section 35.3.6.4 in Draft P802.11be_D7.0 the AP MLD should
+	 * accept the link removal request.
+	 */
+ if (rem_links) {
+ u16 new_active_links = sdata->vif.active_links & ~rem_links;
+
+ new_valid_links = sdata->vif.valid_links & ~rem_links;
+
+ /* Should not be left with no valid links to perform the
+ * ML reconfiguration
+ */
+ if (!new_valid_links ||
+ !(new_valid_links & ~sdata->vif.dormant_links)) {
+ sdata_info(sdata, "mlo: reconf: no valid links\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (new_active_links != sdata->vif.active_links) {
+ if (!new_active_links)
+ new_active_links =
+ BIT(__ffs(new_valid_links &
+ ~sdata->vif.dormant_links));
+
+ err = ieee80211_set_active_links(&sdata->vif,
+ new_active_links);
+ if (err) {
+ sdata_info(sdata,
+ "mlo: reconf: failed set active links\n");
+ goto err_free;
+ }
+ }
+ }
+
+	/* Build the SKB before the link removal, as constructing the station
+	 * info for removed links requires the local address. Invalidate the
+	 * removed links so that the ML reconfiguration request frame is not
+	 * transmitted on them; the AP is expected to send the ML reconfiguration
+	 * response frame on the link on which the request was received.
+	 */
+ skb = ieee80211_build_ml_reconf_req(sdata, data, rem_links);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (rem_links) {
+ u16 new_dormant_links = sdata->vif.dormant_links & ~rem_links;
+
+ err = ieee80211_vif_set_links(sdata, new_valid_links,
+ new_dormant_links);
+ if (err) {
+ sdata_info(sdata,
+ "mlo: reconf: failed set valid links\n");
+ kfree_skb(skb);
+ goto err_free;
+ }
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ if (!(rem_links & BIT(link_id)))
+ continue;
+
+ ieee80211_sta_remove_link(sta, link_id);
+ }
+
+ /* notify the driver and upper layers */
+ ieee80211_vif_cfg_change_notify(sdata,
+ BSS_CHANGED_MLD_VALID_LINKS);
+ cfg80211_links_removed(sdata->dev, rem_links);
+ }
+
+ sdata_info(sdata, "mlo: reconf: adding=0x%x, removed=0x%x\n",
+ added_links, rem_links);
+
+ ieee80211_tx_skb(sdata, skb);
+
+ sdata->u.mgd.reconf.added_links = added_links;
+ sdata->u.mgd.reconf.add_links_data = data;
+ sdata->u.mgd.reconf.removed_links = rem_links;
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.reconf.wk,
+ IEEE80211_ASSOC_TIMEOUT_SHORT);
+ return 0;
+
+ err_free:
+ kfree(data);
+ return err;
+}
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 449af4e1cca4..6218abc3e441 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -4,7 +4,7 @@
*
* Copyright: (c) 2014 Czech Technical University in Prague
* (c) 2014 Volkswagen Group Research
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
* Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
* Funded by: Volkswagen Group Research
*/
@@ -16,7 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -96,7 +96,7 @@ static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta)
sta_info_move_state(sta, IEEE80211_STA_ASSOC);
sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
- rate_control_rate_init(sta);
+ rate_control_rate_init(&sta->deflink);
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
@@ -168,6 +168,7 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
struct ocb_setup *setup)
{
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
@@ -182,7 +183,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -207,7 +208,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
ifocb->joined = false;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6c4080202573..29fab7ae47b4 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -8,7 +8,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/export.h>
#include <net/mac80211.h>
@@ -86,7 +86,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
/*
@@ -136,7 +136,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -351,10 +351,13 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
* other ROC operations won't interfere with this one.
+ *
+ * Note: scan can't run, tmp_channel is what we use, so this
+ * must be the currently active channel.
*/
- roc->on_channel = roc->chan == local->_oper_chandef.chan &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
+ roc->on_channel = roc->chan == local->hw.conf.chandef.chan &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_5 &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_10;
/* start this ROC */
ieee80211_recalc_idle(local);
@@ -363,7 +366,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
ieee80211_offchannel_stop_vifs(local);
local->tmp_channel = roc->chan;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
}
wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
@@ -410,6 +413,39 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
}
}
+void ieee80211_reconfig_roc(struct ieee80211_local *local)
+{
+ struct ieee80211_roc_work *roc, *tmp;
+
+	/*
+	 * In the software implementation we can just continue after the
+	 * interruption due to reconfig; roc_work is still queued if
+	 * needed.
+	 */
+ if (!local->ops->remain_on_channel)
+ return;
+
+ /* flush work so nothing from the driver is still pending */
+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (!roc->started)
+ break;
+
+ if (!roc->hw_begun) {
+ /* it didn't start in HW yet, so we can restart it */
+ roc->started = false;
+ continue;
+ }
+
+ /* otherwise destroy it and tell userspace */
+ ieee80211_roc_notify_destroy(roc);
+ }
+
+ ieee80211_start_next_roc(local);
+}
+
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
struct ieee80211_roc_work *roc;
@@ -426,7 +462,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
return;
if (!roc->started) {
- WARN_ON(local->use_chanctx);
+ WARN_ON(!local->emulate_chanctx);
_ieee80211_start_next_roc(local);
} else {
on_channel = roc->on_channel;
@@ -439,7 +475,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
ieee80211_offchannel_return(local);
}
@@ -539,7 +575,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* this may work, but is untested */
return -EOPNOTSUPP;
- if (local->use_chanctx && !local->ops->remain_on_channel)
+ if (!local->emulate_chanctx && !local->ops->remain_on_channel)
return -EOPNOTSUPP;
roc = kzalloc(sizeof(*roc), GFP_KERNEL);
@@ -894,8 +930,18 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
break;
}
- if (ether_addr_equal(conf->addr, mgmt->sa))
+ if (ether_addr_equal(conf->addr, mgmt->sa)) {
+			/* If userspace requested Tx on a specific link,
+			 * use the same link id if the link BSS matches
+			 * the requested channel.
+			 */
+ if (sdata->vif.valid_links &&
+ params->link_id >= 0 && params->link_id == i &&
+ params->chan == chanctx_conf->def.chan)
+ link_id = i;
+
break;
+ }
chanctx_conf = NULL;
}
@@ -951,6 +997,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
}
IEEE80211_SKB_CB(skb)->flags = flags;
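+	/* don't let the configured rate mask restrict this frame */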
+ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
skb->dev = sdata->dev;
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
new file mode 100644
index 000000000000..6da39c864f45
--- /dev/null
+++ b/net/mac80211/parse.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2024 Intel Corporation
+ *
+ * element parsing for mac80211
+ */
+
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <net/net_namespace.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <kunit/visibility.h>
+
+#include "ieee80211_i.h"
+#include "driver-ops.h"
+#include "rate.h"
+#include "mesh.h"
+#include "wme.h"
+#include "led.h"
+#include "wep.h"
+
+struct ieee80211_elems_parse {
+ /* must be first for kfree to work */
+ struct ieee802_11_elems elems;
+
+ /* The basic Multi-Link element in the original elements */
+ const struct element *ml_basic_elem;
+
+ /* The reconfiguration Multi-Link element in the original elements */
+ const struct element *ml_reconf_elem;
+
+ /* The EPCS Multi-Link element in the original elements */
+ const struct element *ml_epcs_elem;
+
+ bool multi_link_inner;
+ bool skip_vendor;
+
+ /*
+ * scratch buffer that can be used for various element parsing related
+ * tasks, e.g., element de-fragmentation etc.
+ */
+ size_t scratch_len;
+ u8 *scratch_pos;
+ u8 scratch[] __counted_by(scratch_len);
+};
+
+static void
+ieee80211_parse_extension_element(u32 *crc,
+ const struct element *elem,
+ struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const void *data = elem->data + 1;
+ bool calc_crc = false;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
+ len = elem->datalen - 1;
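+	/* data/len skip over the element ID extension octet */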
+
+ switch (elem->data[0]) {
+ case WLAN_EID_EXT_HE_MU_EDCA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->mu_edca_param_set))
+ elems->mu_edca_param_set = data;
+ break;
+ case WLAN_EID_EXT_HE_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (ieee80211_he_capa_size_ok(data, len)) {
+ elems->he_cap = data;
+ elems->he_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_HE_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->he_operation) &&
+ len >= ieee80211_he_oper_size(data) - 1)
+ elems->he_operation = data;
+ break;
+ case WLAN_EID_EXT_UORA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= 1)
+ elems->uora_element = data;
+ break;
+ case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+ if (len == 3)
+ elems->max_channel_switch_time = data;
+ break;
+ case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+ if (len >= sizeof(*elems->mbssid_config_ie))
+ elems->mbssid_config_ie = data;
+ break;
+ case WLAN_EID_EXT_HE_SPR:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_spr) &&
+ len >= ieee80211_he_spr_size(data) - 1)
+ elems->he_spr = data;
+ break;
+ case WLAN_EID_EXT_HE_6GHZ_CAPA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_6ghz_capa))
+ elems->he_6ghz_capa = data;
+ break;
+ case WLAN_EID_EXT_EHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_capa_size_ok(elems->he_cap,
+ data, len,
+ params->from_ap)) {
+ elems->eht_cap = data;
+ elems->eht_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_EHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_oper_size_ok(data, len))
+ elems->eht_operation = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_EHT_MULTI_LINK:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+
+ if (ieee80211_mle_size_ok(data, len)) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)data;
+
+ switch (le16_get_bits(mle->control,
+ IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ if (elems_parse->multi_link_inner) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
+ break;
+ }
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ elems_parse->ml_reconf_elem = elem;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ elems_parse->ml_epcs_elem = elem;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case WLAN_EID_EXT_BANDWIDTH_INDICATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_bandwidth_indication_size_ok(data, len))
+ elems->bandwidth_indication = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+ if (ieee80211_tid_to_link_map_size_ok(data, len) &&
+ elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
+ elems->ttlm[elems->ttlm_num] = (void *)data;
+ elems->ttlm_num++;
+ }
+ break;
+ }
+
+ if (crc && calc_crc)
+ *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
+}
+
+static void ieee80211_parse_tpe(struct ieee80211_parsed_tpe *tpe,
+ const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 *out, N, *cnt_out = NULL, *N_out = NULL;
+
+ if (!ieee80211_valid_tpe_element(data, len))
+ return;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ out = tpe->max_local[category].power;
+ cnt_out = &tpe->max_local[category].count;
+ tpe->max_local[category].valid = true;
+ break;
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ out = tpe->max_reg_client[category].power;
+ cnt_out = &tpe->max_reg_client[category].count;
+ tpe->max_reg_client[category].valid = true;
+ break;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ out = tpe->psd_local[category].power;
+ cnt_out = &tpe->psd_local[category].count;
+ N_out = &tpe->psd_local[category].n;
+ tpe->psd_local[category].valid = true;
+ break;
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ out = tpe->psd_reg_client[category].power;
+ cnt_out = &tpe->psd_reg_client[category].count;
+ N_out = &tpe->psd_reg_client[category].n;
+ tpe->psd_reg_client[category].valid = true;
+ break;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ /* count was validated <= 3, plus 320 MHz */
+ BUILD_BUG_ON(IEEE80211_TPE_EIRP_ENTRIES_320MHZ < 5);
+ memcpy(out, env->variable, count + 1);
+ *cnt_out = count + 1;
+ /* separately take 320 MHz if present */
+ if (count == 3 && len > sizeof(*env) + count + 1) {
+ out[4] = env->variable[4];
+ *cnt_out = 5;
+ }
+ break;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (!count) {
+ memset(out, env->variable[0],
+ IEEE80211_TPE_PSD_ENTRIES_320MHZ);
+ *cnt_out = IEEE80211_TPE_PSD_ENTRIES_320MHZ;
+ break;
+ }
+
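+		/* otherwise the count field encodes 2^(count - 1) PSD values */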
+ N = 1 << (count - 1);
+ memcpy(out, env->variable, N);
+ *cnt_out = N;
+ *N_out = N;
+
+ if (len > sizeof(*env) + N) {
+ int K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ K = min(K, IEEE80211_TPE_PSD_ENTRIES_320MHZ - N);
+ memcpy(out + N, env->variable + N + 1, K);
+ (*cnt_out) += K;
+ }
+ break;
+ }
+}
+
+static u32
+_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
+ struct ieee80211_elems_parse *elems_parse,
+ const struct element *check_inherit)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct element *elem;
+ bool calc_crc = params->filter != 0;
+ DECLARE_BITMAP(seen_elems, 256);
+ u32 crc = params->crc;
+
+ bitmap_zero(seen_elems, 256);
+
+ for_each_element(elem, params->start, params->len) {
+ const struct element *subelem;
+ u8 elem_parse_failed;
+ u8 id = elem->id;
+ u8 elen = elem->datalen;
+ const u8 *pos = elem->data;
+
+ if (check_inherit &&
+ !cfg80211_is_element_inherited(elem,
+ check_inherit))
+ continue;
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+ case WLAN_EID_FH_PARAMS:
+ case WLAN_EID_DS_PARAMS:
+ case WLAN_EID_CF_PARAMS:
+ case WLAN_EID_TIM:
+ case WLAN_EID_IBSS_PARAMS:
+ case WLAN_EID_CHALLENGE:
+ case WLAN_EID_RSN:
+ case WLAN_EID_ERP_INFO:
+ case WLAN_EID_EXT_SUPP_RATES:
+ case WLAN_EID_HT_CAPABILITY:
+ case WLAN_EID_HT_OPERATION:
+ case WLAN_EID_VHT_CAPABILITY:
+ case WLAN_EID_VHT_OPERATION:
+ case WLAN_EID_MESH_ID:
+ case WLAN_EID_MESH_CONFIG:
+ case WLAN_EID_PEER_MGMT:
+ case WLAN_EID_PREQ:
+ case WLAN_EID_PREP:
+ case WLAN_EID_PERR:
+ case WLAN_EID_RANN:
+ case WLAN_EID_CHANNEL_SWITCH:
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ case WLAN_EID_COUNTRY:
+ case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ case WLAN_EID_EXT_CAPABILITY:
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ case WLAN_EID_LINK_ID:
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ case WLAN_EID_RSNX:
+ case WLAN_EID_S1G_BCN_COMPAT:
+ case WLAN_EID_S1G_CAPABILITIES:
+ case WLAN_EID_S1G_OPERATION:
+ case WLAN_EID_AID_RESPONSE:
+ case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
+ /*
+ * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
+ * that if the content gets bigger it might be needed more than once
+ */
+ if (test_bit(id, seen_elems)) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_ELEM;
+ continue;
+ }
+ break;
+ }
+
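+		/* the filter is a 64-bit bitmap, so only element IDs below 64
+		 * can be included in the CRC
+		 */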
+ if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elem_parse_failed = 0;
+
+ switch (id) {
+ case WLAN_EID_LINK_ID:
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->lnk_id = (void *)(pos - 2);
+ break;
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_sw_timing = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ elems->ext_capab = pos;
+ elems->ext_capab_len = elen;
+ break;
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ if (elen >= 1)
+ elems->ds_params = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ } else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elems_parse->skip_vendor)
+ break;
+
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ if (elen >= 1)
+ elems->erp_info = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_operation))
+ elems->ht_operation = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_cap))
+ elems->vht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ elems->vht_operation = (void *)pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_OPMODE_NOTIF:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen > 0) {
+ elems->opmode_notif = pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_MESH_AWAKE_WINDOW:
+ if (elen >= 2)
+ elems->awake_window = (void *)pos;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_switch_ie = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ext_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->sec_chan_offs = (void *)pos;
+ break;
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ if (elen <
+ sizeof(*elems->mesh_chansw_params_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->mesh_chansw_params_ie = (void *)pos;
+ break;
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+
+ if (!params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+
+ if (elen < sizeof(*elems->wide_bw_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->wide_bw_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+ /*
+ * This is a bit tricky, but as we only care about
+ * a few elements, parse them out manually.
+ */
+ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+ pos, elen);
+ if (subelem) {
+ if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
+ elems->wide_bw_chansw_ie =
+ (void *)subelem->data;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
+ pos, elen);
+ if (subelem) {
+ const void *edata = subelem->data + 1;
+ u8 edatalen = subelem->datalen - 1;
+
+ if (ieee80211_bandwidth_indication_size_ok(edata,
+ edatalen))
+ elems->bandwidth_indication = edata;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_TX_POWER_ENVELOPE,
+ pos, elen);
+ if (subelem)
+ ieee80211_parse_tpe(&elems->csa_tpe,
+ subelem->data + 1,
+ subelem->datalen - 1);
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ if (elen != 1) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->pwr_constr_elem = pos;
+ break;
+ case WLAN_EID_CISCO_VENDOR_SPECIFIC:
+ /* Lots of different options exist, but we only care
+ * about the Dynamic Transmit Power Control element.
+ * First check for the Cisco OUI, then for the DTPC
+ * tag (0x00).
+ */
+ if (elen < 4) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (pos[0] != 0x00 || pos[1] != 0x40 ||
+ pos[2] != 0x96 || pos[3] != 0x00)
+ break;
+
+ if (elen != 6) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elems->cisco_dtpc_elem = pos;
+ break;
+ case WLAN_EID_ADDBA_EXT:
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->addba_ext_ie = (void *)pos;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
+ elems->timeout_int = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ if (elen >= sizeof(*elems->max_idle_period_ie))
+ elems->max_idle_period_ie = (void *)pos;
+ break;
+ case WLAN_EID_RSNX:
+ elems->rsnx = pos;
+ elems->rsnx_len = elen;
+ break;
+ case WLAN_EID_TX_POWER_ENVELOPE:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ ieee80211_parse_tpe(&elems->tpe, pos, elen);
+ break;
+ case WLAN_EID_EXTENSION:
+ ieee80211_parse_extension_element(calc_crc ?
+ &crc : NULL,
+ elem, elems_parse,
+ params);
+ break;
+ case WLAN_EID_S1G_CAPABILITIES:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen >= sizeof(*elems->s1g_capab))
+ elems->s1g_capab = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_OPERATION:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_oper))
+ elems->s1g_oper = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_BCN_COMPAT:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_bcn_compat))
+ elems->s1g_bcn_compat = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_AID_RESPONSE:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(struct ieee80211_aid_response_ie))
+ elems->aid_resp = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ if (elem_parse_failed)
+ elems->parse_error |= elem_parse_failed;
+ else
+ __set_bit(id, seen_elems);
+ }
+
+ if (!for_each_element_completed(elem, params->start, params->len))
+ elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END;
+
+ return crc;
+}
+
+static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ struct cfg80211_bss *bss,
+ u8 *nontransmitted_profile)
+{
+ const struct element *elem, *sub;
+ size_t profile_len = 0;
+ bool found = false;
+
+ if (!bss || !bss->transmitted_bss)
+ return profile_len;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
+ if (elem->datalen < 2)
+ continue;
+ if (elem->data[0] < 1 || elem->data[0] > 8)
+ continue;
+
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ u8 new_bssid[ETH_ALEN];
+ const u8 *index;
+
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element of the
+ * Nontransmitted BSSID Profile is not
+ * the Nontransmitted BSSID Capability
+ * element.
+ */
+ continue;
+ }
+
+ memset(nontransmitted_profile, 0, len);
+ profile_len = cfg80211_merge_profile(start, len,
+ elem,
+ sub,
+ nontransmitted_profile,
+ len);
+
+ /* found a Nontransmitted BSSID Profile */
+ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ nontransmitted_profile,
+ profile_len);
+ if (!index || index[1] < 1 || index[2] == 0) {
+ /* Invalid MBSSID Index element */
+ continue;
+ }
+
+ cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
+ elem->data[0],
+ index[2],
+ new_bssid);
+ if (ether_addr_equal(new_bssid, bss->bssid)) {
+ found = true;
+ elems->bssid_index_len = index[1];
+ elems->bssid_index = (void *)&index[2];
+ break;
+ }
+ }
+ }
+
+ return found ? profile_len : 0;
+}
+
+static void
+ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse,
+ u8 link_id)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *sub;
+
+ for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ ssize_t sta_prof_len;
+ u16 control;
+
+ if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ continue;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ return;
+
+ control = le16_to_cpu(prof->control);
+
+ if (link_id != u16_get_bits(control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID))
+ continue;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+ return;
+
+ /* the sub element can be fragmented */
+ sta_prof_len =
+ cfg80211_defragment_element(sub,
+ (u8 *)ml, ml_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+
+ if (sta_prof_len < 0)
+ return;
+
+ elems->prof = (void *)elems_parse->scratch_pos;
+ elems->sta_prof_len = sta_prof_len;
+ elems_parse->scratch_pos += sta_prof_len;
+
+ return;
+ }
+}
+
+static const struct element *
+ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params,
+ struct ieee80211_elems_parse_params *sub)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ struct ieee80211_mle_per_sta_profile *prof;
+ const struct element *tmp;
+ ssize_t ml_len;
+ const u8 *end;
+
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ return NULL;
+
+ for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK,
+ elems->ie_start, elems->total_len) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)tmp->data + 1;
+
+ if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen - 1))
+ continue;
+
+ if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) !=
+ IEEE80211_ML_CONTROL_TYPE_BASIC)
+ continue;
+
+ elems_parse->ml_basic_elem = tmp;
+ break;
+ }
+
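+	/* de-fragment the basic Multi-Link element into the scratch buffer */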
+ ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+
+ if (ml_len < 0)
+ return NULL;
+
+ elems->ml_basic = (const void *)elems_parse->scratch_pos;
+ elems->ml_basic_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+
+ if (params->link_id == -1)
+ return NULL;
+
+ ieee80211_mle_get_sta_prof(elems_parse, params->link_id);
+ prof = elems->prof;
+
+ if (!prof)
+ return NULL;
+
+ /* check if we have the 4 bytes for the fixed part in assoc response */
+ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ elems->prof = NULL;
+ elems->sta_prof_len = 0;
+ return NULL;
+ }
+
+ /*
+ * Skip the capability information and the status code that are expected
+ * as part of the station profile in association response frames. Note
+ * the -1 is because the 'sta_info_len' is accounted to as part of the
+ * per-STA profile, but not part of the 'u8 variable[]' portion.
+ */
+ sub->start = prof->variable + prof->sta_info_len - 1 + 4;
+ end = (const u8 *)prof + elems->sta_prof_len;
+ sub->len = end - sub->start;
+
+ sub->mode = params->mode;
+ sub->action = params->action;
+ sub->from_ap = params->from_ap;
+ sub->link_id = -1;
+
+ return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub->start, sub->len);
+}
+
+static void
+ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ ssize_t ml_len;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_reconf_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+ if (ml_len < 0)
+ return;
+ elems->ml_reconf = (void *)elems_parse->scratch_pos;
+ elems->ml_reconf_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+}
+
+static void
+ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ ssize_t ml_len;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_epcs_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+ if (ml_len < 0)
+ return;
+ elems->ml_epcs = (void *)elems_parse->scratch_pos;
+ elems->ml_epcs_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+}
+
+struct ieee802_11_elems *
+ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+{
+ struct ieee80211_elems_parse_params sub = {};
+ struct ieee80211_elems_parse *elems_parse;
+ const struct element *non_inherit = NULL;
+ struct ieee802_11_elems *elems;
+ size_t scratch_len = 3 * params->len;
+ bool multi_link_inner = false;
+
+ BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0);
+
+ /* cannot parse for both a specific link and non-transmitted BSS */
+ if (WARN_ON(params->link_id >= 0 && params->bss))
+ return NULL;
+
+ elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len),
+ GFP_ATOMIC);
+ if (!elems_parse)
+ return NULL;
+
+ elems_parse->scratch_len = scratch_len;
+ elems_parse->scratch_pos = elems_parse->scratch;
+
+ elems = &elems_parse->elems;
+ elems->ie_start = params->start;
+ elems->total_len = params->len;
+
+ /* set all TPE entries to unlimited (but invalid) */
+ ieee80211_clear_tpe(&elems->tpe);
+ ieee80211_clear_tpe(&elems->csa_tpe);
+
+ /*
+ * If we're looking for a non-transmitted BSS then we cannot at
+ * the same time be looking for a second link as the two can only
+ * appear in the same frame carrying info for different BSSes.
+ *
+ * In any case, we only look for one at a time, as encoded by
+ * the WARN_ON above.
+ */
+ if (params->bss) {
+ int nontx_len =
+ ieee802_11_find_bssid_profile(params->start,
+ params->len,
+ elems, params->bss,
+ elems_parse->scratch_pos);
+ sub.start = elems_parse->scratch_pos;
+ sub.mode = params->mode;
+ sub.len = nontx_len;
+ sub.action = params->action;
+ sub.link_id = params->link_id;
+
+ /* consume the space used for non-transmitted profile */
+ elems_parse->scratch_pos += nontx_len;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, nontx_len);
+ } else {
+ /* must always parse to get elems_parse->ml_basic_elem */
+ non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params,
+ &sub);
+ multi_link_inner = true;
+ }
+
+ elems_parse->skip_vendor =
+ cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC,
+ sub.start, sub.len);
+ elems->crc = _ieee802_11_parse_elems_full(params, elems_parse,
+ non_inherit);
+
+ /* Override with nontransmitted/per-STA profile if found */
+ if (sub.len) {
+ elems_parse->multi_link_inner = multi_link_inner;
+ elems_parse->skip_vendor = false;
+ _ieee802_11_parse_elems_full(&sub, elems_parse, NULL);
+ }
+
+ ieee80211_mle_defrag_reconf(elems_parse);
+
+ ieee80211_mle_defrag_epcs(elems_parse);
+
+ if (elems->tim && !elems->parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems->tim;
+
+ elems->dtim_period = tim_ie->dtim_period;
+ elems->dtim_count = tim_ie->dtim_count;
+ }
+
+ /* Override DTIM period and count if needed */
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_period))
+ elems->dtim_period = elems->bssid_index->dtim_period;
+
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_count))
+ elems->dtim_count = elems->bssid_index->dtim_count;
+
+ return elems;
+}
+EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
+
+int ieee80211_parse_bitrates(enum nl80211_chan_width width,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index c1fa26e09479..7be52345f218 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Portions
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <net/mac80211.h>
#include <net/rtnetlink.h>
@@ -32,7 +32,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_scan_cancel(local);
- ieee80211_dfs_cac_cancel(local);
+ ieee80211_dfs_cac_cancel(local, NULL);
ieee80211_roc_purge(local, NULL);
@@ -171,7 +171,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local, true);
suspend:
local->suspended = true;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9d33fd2377c8..0d056db9f81e 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,7 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -28,8 +28,9 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
-void rate_control_rate_init(struct sta_info *sta)
+void rate_control_rate_init(struct link_sta_info *link_sta)
{
+ struct sta_info *sta = link_sta->sta;
struct ieee80211_local *local = sta->sdata->local;
struct rate_control_ref *ref = sta->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
@@ -37,11 +38,15 @@ void rate_control_rate_init(struct sta_info *sta)
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
- ieee80211_sta_set_rx_nss(&sta->deflink);
+ ieee80211_sta_init_nss(link_sta);
if (!ref)
return;
+ /* SW rate control isn't supported with MLO right now */
+ if (WARN_ON(ieee80211_vif_is_mld(&sta->sdata->vif)))
+ return;
+
rcu_read_lock();
chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf);
@@ -67,6 +72,21 @@ void rate_control_rate_init(struct sta_info *sta)
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
}
+void rate_control_rate_init_all_links(struct sta_info *sta)
+{
+ int link_id;
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
+ struct link_sta_info *link_sta;
+
+ link_sta = sdata_dereference(sta->link[link_id], sta->sdata);
+ if (!link_sta)
+ continue;
+
+ rate_control_rate_init(link_sta);
+ }
+}
+
void rate_control_tx_status(struct ieee80211_local *local,
struct ieee80211_tx_status *st)
{
@@ -93,16 +113,15 @@ void rate_control_tx_status(struct ieee80211_local *local,
void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta, unsigned int link_id,
+ struct link_sta_info *link_sta,
u32 changed)
{
struct rate_control_ref *ref = local->rate_ctrl;
+ struct sta_info *sta = link_sta->sta;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
struct ieee80211_chanctx_conf *chanctx_conf;
- WARN_ON(link_id != 0);
-
if (ref && ref->ops->rate_update) {
rcu_read_lock();
@@ -120,7 +139,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
}
if (sta->uploaded)
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+ drv_link_sta_rc_update(local, sta->sdata, link_sta->pub,
+ changed);
}
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
@@ -229,9 +249,8 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf,
ref->ops->name, len);
}
-const struct file_operations rcname_ops = {
+const struct debugfs_short_fops rcname_ops = {
.read = rcname_read,
- .open = simple_open,
.llseek = default_llseek,
};
#endif
@@ -279,10 +298,10 @@ void ieee80211_check_rate_mask(struct ieee80211_link_data *link)
u32 user_mask, basic_rates = link->conf->basic_rates;
enum nl80211_band band;
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
return;
- band = link->conf->chandef.chan->band;
+ band = link->conf->chanreq.oper.chan->band;
if (band == NL80211_BAND_S1GHZ) {
/* TODO */
return;
@@ -762,7 +781,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
u32 i, flags;
*mask = sdata->rc_rateidx_mask[sband->band];
- flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
for (i = 0; i < sband->n_bitrates; i++) {
if ((flags & sband->bitrates[i].flags) != flags)
*mask &= ~BIT(i);
@@ -818,7 +837,7 @@ rate_control_apply_mask_ratetbl(struct sta_info *sta,
mcs_mask, vht_mask))
return;
- chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ chan_width = sta->sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
if (rates->rate[i].idx < 0)
break;
@@ -855,7 +874,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
* included in the configured mask and change the rate indexes
* if needed.
*/
- chan_width = sdata->vif.bss_conf.chandef.width;
+ chan_width = sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < max_rates; i++) {
/* Skip invalid rates */
if (rates[i].idx < 0)
@@ -877,6 +896,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
struct ieee80211_sub_if_data *sdata;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_supported_band *sband;
+ u32 mask = ~0;
rate_control_fill_sta_table(sta, info, dest, max_rates);
@@ -889,9 +909,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
if (ieee80211_is_tx_data(skb))
rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
+ if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ mask = sdata->rc_rateidx_mask[info->band];
+
if (dest[0].idx < 0)
__rate_control_send_low(&sdata->local->hw, sband, sta, info,
- sdata->rc_rateidx_mask[info->band]);
+ mask);
if (sta)
rate_fixup_ratelist(vif, sband, info, dest, max_rates);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index d6190f10fe7c..5e4bde598212 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022, 2024 Intel Corporation
*/
#ifndef IEEE80211_RATE_H
@@ -29,11 +29,11 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
void rate_control_tx_status(struct ieee80211_local *local,
struct ieee80211_tx_status *st);
-void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_init(struct link_sta_info *link_sta);
+void rate_control_rate_init_all_links(struct sta_info *sta);
void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta,
- unsigned int link_id,
+ struct link_sta_info *link_sta,
u32 changed);
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
@@ -62,7 +62,7 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
#endif
}
-extern const struct file_operations rcname_ops;
+extern const struct debugfs_short_fops rcname_ops;
static inline void rate_control_add_debugfs(struct ieee80211_local *local)
{
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 6bf3b4444a43..706cbc99f718 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1053,7 +1053,7 @@ minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
* - as long as the max prob rate has a probability of more than 75%, pick
- * higher throughput rates, even if the probablity is a bit lower
+ * higher throughput rates, even if the probability is a bit lower
*/
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 25b8a67a63a4..85149c774505 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -187,7 +187,6 @@ static const struct file_operations minstrel_ht_stat_fops = {
.open = minstrel_ht_stats_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
static char *
@@ -323,7 +322,6 @@ static const struct file_operations minstrel_ht_stat_csv_fops = {
.open = minstrel_ht_stats_csv_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
void
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0bf72928ccfc..0659ec892ec6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -22,7 +22,7 @@
#include <kunit/visibility.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -508,18 +508,13 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
- if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
- flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
put_unaligned_le16(flags, pos);
pos += 2;
- if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
- *pos++ = status->ampdu_delimiter_crc;
- else
- *pos++ = 0;
+ *pos++ = 0;
*pos++ = 0;
}
@@ -767,8 +762,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct ieee80211_rate *rate)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
- struct ieee80211_sub_if_data *sdata;
- struct sk_buff *monskb = NULL;
+ struct ieee80211_sub_if_data *sdata, *prev_sdata = NULL;
+ struct sk_buff *skb, *monskb = NULL;
int present_fcs_len = 0;
unsigned int rtap_space = 0;
struct ieee80211_sub_if_data *monitor_sdata =
@@ -842,40 +837,52 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
- bool last_monitor = list_is_last(&sdata->u.mntr.list,
- &local->mon_list);
+ struct cfg80211_chan_def *chandef;
+
+ chandef = &sdata->vif.bss_conf.chanreq.oper;
+ if (chandef->chan &&
+ chandef->chan->center_freq != status->freq)
+ continue;
+
+ if (!prev_sdata) {
+ prev_sdata = sdata;
+ continue;
+ }
+
+ if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
+ ieee80211_handle_mu_mimo_mon(sdata, origskb, rtap_space);
if (!monskb)
monskb = ieee80211_make_monitor_skb(local, &origskb,
rate, rtap_space,
- only_monitor &&
- last_monitor);
+ false);
+ if (!monskb)
+ continue;
- if (monskb) {
- struct sk_buff *skb;
+ skb = skb_clone(monskb, GFP_ATOMIC);
+ if (!skb)
+ continue;
- if (last_monitor) {
- skb = monskb;
- monskb = NULL;
- } else {
- skb = skb_clone(monskb, GFP_ATOMIC);
- }
+ skb->dev = prev_sdata->dev;
+ dev_sw_netstats_rx_add(skb->dev, skb->len);
+ netif_receive_skb(skb);
+ prev_sdata = sdata;
+ }
- if (skb) {
- skb->dev = sdata->dev;
- dev_sw_netstats_rx_add(skb->dev, skb->len);
- netif_receive_skb(skb);
- }
+ if (prev_sdata) {
+ if (monskb)
+ skb = monskb;
+ else
+ skb = ieee80211_make_monitor_skb(local, &origskb,
+ rate, rtap_space,
+ only_monitor);
+ if (skb) {
+ skb->dev = prev_sdata->dev;
+ dev_sw_netstats_rx_add(skb->dev, skb->len);
+ netif_receive_skb(skb);
}
-
- if (last_monitor)
- break;
}
- /* this happens if last_monitor was erroneously false */
- dev_kfree_skb(monskb);
-
- /* ditto */
if (!origskb)
return NULL;
@@ -1251,8 +1258,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- u16 sc = le16_to_cpu(hdr->seq_ctrl);
- u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ u16 mpdu_seq_num = ieee80211_get_sn(hdr);
u16 head_seq_num, buf_size;
int index;
bool ret = true;
@@ -1435,13 +1441,31 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_any_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ ieee80211_is_any_nullfunc(hdr->frame_control))
return RX_CONTINUE;
if (!rx->sta)
return RX_CONTINUE;
+ if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ u16 sn = ieee80211_get_sn(hdr);
+
+ if (!ieee80211_is_data_present(hdr->frame_control))
+ return RX_CONTINUE;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_CONTINUE;
+
+ if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
+ ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
+ return RX_DROP_U_DUP;
+
+ sdata->u.mgd.mcast_seq_last = sn;
+ return RX_CONTINUE;
+ }
+
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
@@ -2746,7 +2770,10 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int hdrlen)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_mesh_fast_tx *entry = NULL;
+ struct ieee80211_mesh_fast_tx_key key = {
+ .type = MESH_FAST_TX_TYPE_FORWARDED
+ };
+ struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *mesh_hdr;
struct tid_ampdu_tx *tid_tx;
struct sta_info *sta;
@@ -2755,9 +2782,13 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
- entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1);
+ ether_addr_copy(key.addr, mesh_hdr->eaddr1);
else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
- entry = mesh_fast_tx_get(sdata, skb->data);
+ ether_addr_copy(key.addr, skb->data);
+ else
+ return false;
+
+ entry = mesh_fast_tx_get(sdata, &key);
if (!entry)
return false;
@@ -2970,6 +3001,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
}
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+ ieee80211_set_qos_hdr(sdata, fwd_skb);
ieee80211_add_pending_skb(local, fwd_skb);
rx_accept:
@@ -3003,8 +3035,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
check_da = NULL;
break;
case NL80211_IFTYPE_STATION:
- if (!rx->sta ||
- !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+ if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
check_sa = NULL;
break;
case NL80211_IFTYPE_MESH_POINT:
@@ -3298,8 +3329,8 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
return;
}
- if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) ||
- !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
+ if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) ||
+ !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) {
/* Not from the current AP or not associated yet. */
return;
}
@@ -3315,9 +3346,9 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
skb_reserve(skb, local->hw.extra_tx_headroom);
resp = skb_put_zero(skb, 24);
- memcpy(resp->da, mgmt->sa, ETH_ALEN);
+ memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
- memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+ memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
@@ -3334,6 +3365,7 @@ static void
ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
{
struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ struct ieee80211_bss_conf *bss_conf;
const struct element *ie;
size_t baselen;
@@ -3344,7 +3376,9 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
return;
- if (rx->sdata->vif.bss_conf.csa_active)
+ bss_conf = rx->link->conf;
+ if (bss_conf->csa_active || bss_conf->color_change_active ||
+ !bss_conf->he_bss_color.enabled)
return;
baselen = mgmt->u.beacon.variable - rx->skb->data;
@@ -3356,7 +3390,6 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
rx->skb->len - baselen);
if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
- struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf;
const struct ieee80211_he_operation *he_oper;
u8 color;
@@ -3370,7 +3403,7 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
if (color == bss_conf->he_bss_color.color)
ieee80211_obss_color_collision_notify(&rx->sdata->vif,
BIT_ULL(color),
- GFP_ATOMIC);
+ bss_conf->link_id);
}
}
@@ -3542,7 +3575,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
sband = rx->local->hw.wiphy->bands[status->band];
- rate_control_rate_update(local, sband, rx->sta, 0,
+ rate_control_rate_update(local, sband, rx->link_sta,
IEEE80211_RC_SMPS_CHANGED);
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
@@ -3579,7 +3612,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
ieee80211_sta_rx_bw_to_chan_width(rx->link_sta);
sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
- rate_control_rate_update(local, sband, rx->sta, 0,
+ rate_control_rate_update(local, sband, rx->link_sta,
IEEE80211_RC_BW_CHANGED);
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
@@ -3593,6 +3626,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
case WLAN_CATEGORY_PUBLIC:
+ case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION:
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
goto invalid;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -3763,6 +3797,44 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
}
break;
+ case WLAN_CATEGORY_PROTECTED_EHT:
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req.action_code))
+ break;
+
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req))
+ goto invalid;
+ goto queue;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_res))
+ goto invalid;
+ goto queue;
+ case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ /* The reconfiguration response action frame must
+ * contain at least one 'Status Duple' entry (3 octets)
+ */
+ if (len <
+ offsetofend(typeof(*mgmt),
+ u.action.u.ml_reconf_resp) + 3)
+ goto invalid;
+ goto queue;
+ default:
+ break;
+ }
+ break;
}
return RX_CONTINUE;
@@ -3920,8 +3992,8 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1,
status->band);
}
- dev_kfree_skb(rx->skb);
- return RX_QUEUED;
+
+ return RX_DROP_U_UNKNOWN_ACTION_REJECTED;
}
static ieee80211_rx_result debug_noinline
@@ -4502,7 +4574,9 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
return ieee80211_is_public_action(hdr, skb->len) ||
ieee80211_is_probe_req(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control);
+ ieee80211_is_beacon(hdr->frame_control) ||
+ (ieee80211_is_auth(hdr->frame_control) &&
+ ether_addr_equal(sdata->vif.addr, hdr->addr1));
case NL80211_IFTYPE_NAN:
/* Currently no frames on NAN interface are allowed */
return false;
@@ -5192,7 +5266,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
*/
if (!status->link_valid && pubsta->mlo) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
struct link_sta_info *link_sta;
link_sta = link_sta_info_get_bss(rx.sdata,
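The new multicast duplicate check in rx.c compares the 12-bit sequence number of each group-addressed data frame against the last one accepted, evaluated modulo the SN space, with IEEE80211_SN_MODULO used as a "nothing seen yet" sentinel. The sketch below illustrates that circular comparison with stand-in helpers; the exact boundary behaviour of the real ieee80211_sn_less_eq() may differ slightly:

/*
 * Illustrative sketch (not the kernel helpers) of a modulo-4096
 * sequence-number comparison: a frame is treated as old if its SN is
 * not "newer" than the last accepted one in circular 12-bit arithmetic.
 */
#include <stdbool.h>
#include <stdio.h>

#define SN_MODULO 0x1000
#define SN_MASK   (SN_MODULO - 1)

static bool sn_less_eq(unsigned int sn1, unsigned int sn2)
{
	/* sn1 <= sn2 in circular 12-bit arithmetic */
	return ((sn2 - sn1) & SN_MASK) <= (SN_MODULO >> 1);
}

int main(void)
{
	unsigned int last = SN_MODULO;	/* "no frame seen yet" sentinel */
	unsigned int rx[] = { 4090, 4095, 4095, 3, 1 };

	for (unsigned int i = 0; i < sizeof(rx) / sizeof(rx[0]); i++) {
		if (last != SN_MODULO && sn_less_eq(rx[i], last)) {
			printf("sn %u: drop (duplicate/old)\n", rx[i]);
			continue;
		}
		last = rx[i];
		printf("sn %u: accept\n", rx[i]);
	}
	return 0;
}

Note how the wrap from 4095 to 3 is still accepted, while a repeat of 4095 and a stale SN 1 are dropped.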
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f9d5842601fa..cb7079071885 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -257,7 +257,6 @@ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_sub_if_data *sdata1, *sdata2;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
@@ -281,12 +280,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
if (skb->len < min_hdr_len)
return;
- sdata1 = rcu_dereference(local->scan_sdata);
- sdata2 = rcu_dereference(local->sched_scan_sdata);
-
- if (likely(!sdata1 && !sdata2))
- return;
-
if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
/*
* we were passive scanning because of radar/no-IR, but
@@ -304,10 +297,17 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
return;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct ieee80211_sub_if_data *sdata1, *sdata2;
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
u32 scan_req_flags = 0, sched_scan_req_flags = 0;
+ sdata1 = rcu_dereference(local->scan_sdata);
+ sdata2 = rcu_dereference(local->sched_scan_sdata);
+
+ if (likely(!sdata1 && !sdata2))
+ return;
+
scan_req = rcu_dereference(local->scan_req);
sched_scan_req = rcu_dereference(local->sched_scan_req);
@@ -327,8 +327,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
sched_scan_req_flags,
mgmt->da))
return;
+ } else {
+ /* Beacons are expected only with broadcast address */
+ if (!is_broadcast_ether_addr(mgmt->da))
+ return;
}
+ /* Do not update the BSS table in case of only monitor interfaces */
+ if (local->open_count == local->monitors)
+ return;
+
bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len,
channel);
@@ -350,7 +358,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
struct cfg80211_scan_request *req;
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
- int i, ielen, n_chans;
+ int i, ielen;
+ u32 *n_chans;
u32 flags = 0;
req = rcu_dereference_protected(local->scan_req,
@@ -360,34 +369,34 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
return false;
if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
for (i = 0; i < req->n_channels; i++) {
local->hw_scan_req->req.channels[i] = req->channels[i];
bands_used |= BIT(req->channels[i]->band);
}
-
- n_chans = req->n_channels;
} else {
do {
if (local->hw_scan_band == NUM_NL80211_BANDS)
return false;
- n_chans = 0;
+ n_chans = &local->hw_scan_req->req.n_channels;
+ *n_chans = 0;
for (i = 0; i < req->n_channels; i++) {
if (req->channels[i]->band !=
local->hw_scan_band)
continue;
- local->hw_scan_req->req.channels[n_chans] =
+ local->hw_scan_req->req.channels[(*n_chans)++] =
req->channels[i];
- n_chans++;
+
bands_used |= BIT(req->channels[i]->band);
}
local->hw_scan_band++;
- } while (!n_chans);
+ } while (!*n_chans);
}
- local->hw_scan_req->req.n_channels = n_chans;
ieee80211_prepare_scan_chandef(&chandef);
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
@@ -400,6 +409,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
req->ie, req->ie_len,
bands_used, req->rates, &chandef,
flags);
+ if (ielen < 0)
+ return false;
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -476,7 +487,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
}
/* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if (!hw_scan && was_scanning) {
ieee80211_configure_filter(local);
@@ -493,7 +504,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
* the scan was in progress; if there was none this will
* just be a no-op for the particular interface.
*/
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ list_for_each_entry(sdata, &local->interfaces, list) {
if (ieee80211_sdata_running(sdata))
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
@@ -523,7 +534,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
- if (local->use_chanctx)
+ if (!local->emulate_chanctx)
return -EOPNOTSUPP;
/*
@@ -553,7 +564,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
ieee80211_configure_filter(local);
/* We need to set power level at maximum rate for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
@@ -564,6 +575,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -574,8 +586,9 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
return false;
list_for_each_entry(sdata_iter, &local->interfaces, list) {
- if (sdata_iter->wdev.cac_started)
- return false;
+ for_each_valid_link(&sdata_iter->wdev, link_id)
+ if (sdata_iter->wdev.links[link_id].cac_started)
+ return false;
}
return true;
@@ -638,6 +651,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
}
IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
}
}
@@ -677,7 +691,10 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
* After sending probe requests, wait for probe responses
* on the channel.
*/
- *next_delay = IEEE80211_CHANNEL_TIME;
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) - IEEE80211_PROBE_DELAY :
+ IEEE80211_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
}
@@ -694,19 +711,11 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return -EBUSY;
/* For an MLO connection, if a link ID was specified, validate that it
- * is indeed active. If no link ID was specified, select one of the
- * active links.
+ * is indeed active.
*/
- if (ieee80211_vif_is_mld(&sdata->vif)) {
- if (req->tsf_report_link_id >= 0) {
- if (!(sdata->vif.active_links &
- BIT(req->tsf_report_link_id)))
- return -EINVAL;
- } else {
- req->tsf_report_link_id =
- __ffs(sdata->vif.active_links);
- }
- }
+ if (ieee80211_vif_is_mld(&sdata->vif) && req->tsf_report_link_id >= 0 &&
+ !(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
+ return -EINVAL;
if (!__ieee80211_can_leave_ch(sdata))
return -EBUSY;
@@ -738,15 +747,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_ies_bufsize *= n_bands;
}
- local->hw_scan_req = kmalloc(
- sizeof(*local->hw_scan_req) +
- req->n_channels * sizeof(req->channels[0]) +
- local->hw_scan_ies_bufsize, GFP_KERNEL);
+ local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
+ req.channels,
+ req->n_channels) +
+ local->hw_scan_ies_bufsize,
+ GFP_KERNEL);
if (!local->hw_scan_req)
return -ENOMEM;
local->hw_scan_req->req.ssids = req->ssids;
local->hw_scan_req->req.n_ssids = req->n_ssids;
+ /* None of the channels are actually set
+ * up but let UBSAN know the boundaries.
+ */
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
ies = (u8 *)local->hw_scan_req +
sizeof(*local->hw_scan_req) +
req->n_channels * sizeof(req->channels[0]);
@@ -787,7 +802,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
- (req->channels[0] == local->_oper_chandef.chan)) {
+ (req->channels[0] == local->hw.conf.chandef.chan)) {
/*
* If we are scanning only on the operating channel
* then we do not need to stop normal activities
@@ -805,7 +820,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local); /* accept probe-responses */
/* We need to ensure power level is at max for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR)) ||
@@ -970,13 +985,13 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
- if (chan == local->_oper_chandef.chan)
- local->scan_chandef = local->_oper_chandef;
+ if (chan == local->hw.conf.chandef.chan)
+ local->scan_chandef = local->hw.conf.chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
set_channel:
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ if (ieee80211_hw_conf_chan(local))
skip = 1;
/* advance state machine to next channel/band */
@@ -1000,7 +1015,8 @@ set_channel:
*/
if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
!scan_req->n_ssids) {
- *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ *next_delay = max(msecs_to_jiffies(scan_req->duration),
+ IEEE80211_PASSIVE_CHANNEL_TIME);
local->next_scan_state = SCAN_DECISION;
if (scan_req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
@@ -1017,7 +1033,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
{
/* switch back to the operating channel */
local->scan_chandef.chan = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_conf_chan(local);
/* disable PS */
ieee80211_offchannel_return(local);
@@ -1160,14 +1176,14 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
unsigned int n_channels)
{
struct ieee80211_local *local = sdata->local;
- int ret = -EBUSY, i, n_ch = 0;
+ int i, n_ch = 0;
enum nl80211_band band;
lockdep_assert_wiphy(local->hw.wiphy);
/* busy scanning */
if (local->scan_req)
- goto unlock;
+ return -EBUSY;
/* fill internal scan request */
if (!channels) {
@@ -1184,7 +1200,9 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
&local->hw.wiphy->bands[band]->channels[i];
if (tmp_ch->flags & (IEEE80211_CHAN_NO_IR |
- IEEE80211_CHAN_DISABLED))
+ IEEE80211_CHAN_DISABLED) ||
+ !cfg80211_wdev_channel_allowed(&sdata->wdev,
+ tmp_ch))
continue;
local->int_scan_req->channels[n_ch] = tmp_ch;
@@ -1193,21 +1211,23 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
}
if (WARN_ON_ONCE(n_ch == 0))
- goto unlock;
+ return -EINVAL;
local->int_scan_req->n_channels = n_ch;
} else {
for (i = 0; i < n_channels; i++) {
if (channels[i]->flags & (IEEE80211_CHAN_NO_IR |
- IEEE80211_CHAN_DISABLED))
+ IEEE80211_CHAN_DISABLED) ||
+ !cfg80211_wdev_channel_allowed(&sdata->wdev,
+ channels[i]))
continue;
local->int_scan_req->channels[n_ch] = channels[i];
n_ch++;
}
- if (WARN_ON_ONCE(n_ch == 0))
- goto unlock;
+ if (n_ch == 0)
+ return -EINVAL;
local->int_scan_req->n_channels = n_ch;
}
@@ -1217,9 +1237,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
- ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
- unlock:
- return ret;
+ return __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
}
void ieee80211_scan_cancel(struct ieee80211_local *local)
@@ -1316,10 +1334,12 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_prepare_scan_chandef(&chandef);
- ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
- &sched_scan_ies, req->ie,
- req->ie_len, bands_used, rate_masks, &chandef,
- flags);
+ ret = ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
+ &sched_scan_ies, req->ie,
+ req->ie_len, bands_used, rate_masks,
+ &chandef, flags);
+ if (ret < 0)
+ goto error;
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
@@ -1327,8 +1347,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(local->sched_scan_req, req);
}
+error:
kfree(ie);
-
out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
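The probe-dwell change in scan.c honours a user-requested per-channel duration: if it exceeds the probe delay plus the default listen time, the scan waits out the remainder of the requested duration after sending probes, otherwise it keeps the default. A small standalone sketch of that computation, with stand-in constants in place of the jiffies values:

/*
 * Sketch of the active-scan dwell computation after sending probe
 * requests. PROBE_DELAY and CHANNEL_TIME are stand-ins for the
 * IEEE80211_PROBE_DELAY / IEEE80211_CHANNEL_TIME jiffies constants.
 */
#include <stdio.h>

#define PROBE_DELAY	3
#define CHANNEL_TIME	8

static unsigned long probe_dwell(unsigned long duration_jiffies)
{
	if (duration_jiffies > PROBE_DELAY + CHANNEL_TIME)
		return duration_jiffies - PROBE_DELAY;
	return CHANNEL_TIME;
}

int main(void)
{
	printf("short request: %lu\n", probe_dwell(5));	/* default: 8 */
	printf("long request:  %lu\n", probe_dwell(40));	/* 40 - 3 = 37 */
	return 0;
}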
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 55959b0b24c5..c6015cd00372 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018, 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2018, 2020, 2022-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -19,21 +19,222 @@
#include "sta_info.h"
#include "wme.h"
+static bool
+wbcs_elem_to_chandef(const struct ieee80211_wide_bw_chansw_ie *wbcs_elem,
+ struct cfg80211_chan_def *chandef)
+{
+ u8 ccfs0 = wbcs_elem->new_center_freq_seg0;
+ u8 ccfs1 = wbcs_elem->new_center_freq_seg1;
+ u32 cf0 = ieee80211_channel_to_frequency(ccfs0, chandef->chan->band);
+ u32 cf1 = ieee80211_channel_to_frequency(ccfs1, chandef->chan->band);
+
+ switch (wbcs_elem->new_channel_width) {
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf0;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq1 = cf0;
+ chandef->center_freq2 = cf1;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ chandef->center_freq1 = cf0;
+
+ if (ccfs1) {
+ u8 diff = abs(ccfs0 - ccfs1);
+
+ if (diff == 8) {
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf1;
+ } else if (diff > 8) {
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq2 = cf1;
+ }
+ }
+ break;
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ default:
+ /* If the WBCS Element is present, new channel bandwidth is
+ * at least 40 MHz.
+ */
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 = cf0;
+ break;
+ }
+
+ return cfg80211_chandef_valid(chandef);
+}
+
+static void
+validate_chandef_by_ht_vht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ u32 vht_cap_info,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee80211_vht_operation vht_oper;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ ht_oper.primary_chan = ieee80211_frequency_to_channel(control_freq);
+ if (control_freq != center_freq1)
+ ht_oper.ht_param = control_freq > center_freq1 ?
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW :
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ ht_oper.ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ ieee80211_chandef_ht_oper(&ht_oper, chandef);
+
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return;
+
+ vht_oper.center_freq_seg0_idx =
+ ieee80211_frequency_to_channel(center_freq1);
+ vht_oper.center_freq_seg1_idx = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ WARN_ON(1);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ vht_oper.center_freq_seg1_idx = vht_oper.center_freq_seg0_idx;
+ vht_oper.center_freq_seg0_idx +=
+ control_freq < center_freq1 ? -8 : 8;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ ht_oper.operation_mode =
+ le16_encode_bits(vht_oper.center_freq_seg1_idx,
+ IEEE80211_HT_OP_MODE_CCFS2_MASK);
+
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
+ &vht_oper, &ht_oper, chandef))
+ chandef->chan = NULL;
+}
+
+static void
+validate_chandef_by_6ghz_he_eht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct {
+ struct ieee80211_he_operation _oper;
+ struct ieee80211_he_6ghz_oper _6ghz_oper;
+ } __packed he;
+ struct {
+ struct ieee80211_eht_operation _oper;
+ struct ieee80211_eht_operation_info _oper_info;
+ } __packed eht;
+ const struct ieee80211_eht_operation *eht_oper;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HE) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ he._oper.he_oper_params =
+ le32_encode_bits(1, IEEE80211_HE_OPERATION_6GHZ_OP_INFO);
+ he._6ghz_oper.primary =
+ ieee80211_frequency_to_channel(control_freq);
+ he._6ghz_oper.ccfs0 = ieee80211_frequency_to_channel(center_freq1);
+ he._6ghz_oper.ccfs1 = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -16 : 16;
+ he._6ghz_oper.control = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -8 : 8;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_80P80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ;
+ break;
+ default:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ;
+ break;
+ }
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT) {
+ eht_oper = NULL;
+ } else {
+ eht._oper.params = IEEE80211_EHT_OPER_INFO_PRESENT;
+ eht._oper_info.control = he._6ghz_oper.control;
+ eht._oper_info.ccfs0 = he._6ghz_oper.ccfs0;
+ eht._oper_info.ccfs1 = he._6ghz_oper.ccfs1;
+ eht_oper = &eht._oper;
+ }
+
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ eht_oper, chandef))
+ chandef->chan = NULL;
+}
+
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid, bool unprot_action,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
int new_freq;
- u8 new_chan_no;
+ u8 new_chan_no = 0, new_op_class = 0;
struct ieee80211_channel *new_chan;
- struct cfg80211_chan_def new_vht_chandef = {};
+ struct cfg80211_chan_def new_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
const struct ieee80211_bandwidth_indication *bwi;
+ const struct ieee80211_ext_chansw_ie *ext_chansw_elem;
int secondary_channel_offset = -1;
memset(csa_ie, 0, sizeof(*csa_ie));
@@ -41,36 +242,43 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
bwi = elems->bandwidth_indication;
+ ext_chansw_elem = elems->ext_chansw_ie;
- if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_40MHZ)) {
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
}
- if (conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
wide_bw_chansw_ie = NULL;
- if (elems->ext_chansw_ie) {
- if (!ieee80211_operating_class_to_band(
- elems->ext_chansw_ie->new_operating_class,
- &new_band)) {
- sdata_info(sdata,
- "cannot understand ECSA IE operating class, %d, ignoring\n",
- elems->ext_chansw_ie->new_operating_class);
+ if (ext_chansw_elem) {
+ new_op_class = ext_chansw_elem->new_operating_class;
+
+ if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) {
+ new_op_class = 0;
+ if (!unprot_action)
+ sdata_info(sdata,
+ "cannot understand ECSA IE operating class, %d, ignoring\n",
+ ext_chansw_elem->new_operating_class);
+ } else {
+ new_chan_no = ext_chansw_elem->new_ch_num;
+ csa_ie->count = ext_chansw_elem->count;
+ csa_ie->mode = ext_chansw_elem->mode;
}
- new_chan_no = elems->ext_chansw_ie->new_ch_num;
- csa_ie->count = elems->ext_chansw_ie->count;
- csa_ie->mode = elems->ext_chansw_ie->mode;
- } else if (elems->ch_switch_ie) {
+ }
+
+ if (!new_op_class && elems->ch_switch_ie) {
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
- } else {
- /* nothing here we understand */
- return 1;
}
+ /* nothing here we understand */
+ if (!new_chan_no)
+ return 1;
+
/* Mesh Channel Switch Parameters Element */
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
@@ -87,15 +295,16 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
- sdata_info(sdata,
- "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
- bssid, new_freq);
+ if (!unprot_action)
+ sdata_info(sdata,
+ "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
+ bssid, new_freq);
return -EINVAL;
}
if (sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
- } else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ } else if (conn->mode >= IEEE80211_CONN_MODE_HT) {
/* If the secondary channel offset IE is not present,
* we can't know what's the post-CSA offset, so the
* best we can do is use 20MHz.
@@ -107,26 +316,26 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
default:
/* secondary_channel_offset was present but is invalid */
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT20);
break;
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40PLUS);
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40MINUS);
break;
case -1:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_NO_HT);
/* keep width for 5/10 MHz channels */
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
- csa_ie->chandef.width =
- sdata->vif.bss_conf.chandef.width;
+ csa_ie->chanreq.oper.width =
+ sdata->vif.bss_conf.chanreq.oper.width;
break;
default:
break;
@@ -134,59 +343,53 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /* capture the AP configuration */
+ csa_ie->chanreq.ap = csa_ie->chanreq.oper;
+
+ /* parse one of the Elements to build a new chandef */
+ memset(&new_chandef, 0, sizeof(new_chandef));
+ new_chandef.chan = new_chan;
if (bwi) {
/* start with the CSA one */
- new_vht_chandef = csa_ie->chandef;
+ new_chandef = csa_ie->chanreq.oper;
/* and update the width accordingly */
- /* FIXME: support 160/320 */
- ieee80211_chandef_eht_oper(&bwi->info, true, true,
- &new_vht_chandef);
- } else if (wide_bw_chansw_ie) {
- u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
- struct ieee80211_vht_operation vht_oper = {
- .chan_width =
- wide_bw_chansw_ie->new_channel_width,
- .center_freq_seg0_idx =
- wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx = new_seg1,
- /* .basic_mcs_set doesn't matter */
- };
- struct ieee80211_ht_operation ht_oper = {
- .operation_mode =
- cpu_to_le16(new_seg1 <<
- IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
- };
-
- /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
- * to the previously parsed chandef
- */
- new_vht_chandef = csa_ie->chandef;
-
- /* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
- vht_cap_info,
- &vht_oper, &ht_oper,
- &new_vht_chandef))
- new_vht_chandef.chan = NULL;
-
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
- ieee80211_chandef_downgrade(&new_vht_chandef);
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
- ieee80211_chandef_downgrade(&new_vht_chandef);
+ ieee80211_chandef_eht_oper(&bwi->info, &new_chandef);
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT)
+ new_chandef.punctured =
+ get_unaligned_le16(bwi->info.optional);
+ } else if (!wide_bw_chansw_ie || !wbcs_elem_to_chandef(wide_bw_chansw_ie,
+ &new_chandef)) {
+ if (!ieee80211_operating_class_to_chandef(new_op_class, new_chan,
+ &new_chandef))
+ new_chandef = csa_ie->chanreq.oper;
}
- /* if VHT data is there validate & use it */
- if (new_vht_chandef.chan) {
- if (!cfg80211_chandef_compatible(&new_vht_chandef,
- &csa_ie->chandef)) {
+ /* check if the new chandef fits the capabilities */
+ if (new_band == NL80211_BAND_6GHZ)
+ validate_chandef_by_6ghz_he_eht_oper(sdata, conn, &new_chandef);
+ else
+ validate_chandef_by_ht_vht_oper(sdata, conn, vht_cap_info,
+ &new_chandef);
+
+ /* if data is there validate the bandwidth & use it */
+ if (new_chandef.chan) {
+ /* capture the AP chandef before (potential) downgrading */
+ csa_ie->chanreq.ap = new_chandef;
+
+ while (conn->bw_limit <
+ ieee80211_min_bw_limit_from_chandef(&new_chandef))
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (!cfg80211_chandef_compatible(&new_chandef,
+ &csa_ie->chanreq.oper)) {
sdata_info(sdata,
"BSS %pM: CSA has inconsistent channel data, disconnecting\n",
bssid);
return -EINVAL;
}
- csa_ie->chandef = new_vht_chandef;
+
+ csa_ie->chanreq.oper = new_chandef;
}
if (elems->max_channel_switch_time)
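The wbcs_elem_to_chandef() helper above decodes the 80 MHz encoding by looking at the distance between the two centre-frequency segment indices: a CCFS1 exactly 8 channels away from CCFS0 describes a single 160 MHz channel centred on CCFS1, anything further apart is non-contiguous 80+80 MHz. A worked example with hypothetical names:

/*
 * Standalone worked example of the CCFS0/CCFS1 interpretation used by
 * wbcs_elem_to_chandef() for the 80 MHz channel-width encoding.
 */
#include <stdio.h>
#include <stdlib.h>

static void classify(int ccfs0, int ccfs1)
{
	if (!ccfs1) {
		printf("ccfs0=%d: plain 80 MHz\n", ccfs0);
		return;
	}

	int diff = abs(ccfs0 - ccfs1);

	if (diff == 8)
		printf("ccfs0=%d ccfs1=%d: 160 MHz centred on ccfs1\n",
		       ccfs0, ccfs1);
	else if (diff > 8)
		printf("ccfs0=%d ccfs1=%d: 80+80 MHz\n", ccfs0, ccfs1);
	else
		printf("ccfs0=%d ccfs1=%d: 80 MHz (ccfs1 ignored)\n",
		       ccfs0, ccfs1);
}

int main(void)
{
	classify(42, 0);	/* 5 GHz channels 36-48 */
	classify(42, 50);	/* 160 MHz: channels 36-64 */
	classify(42, 155);	/* non-contiguous 80+80 MHz */
	return 0;
}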
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4391d8dd634b..caa3d0236b5e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
@@ -509,6 +509,24 @@ static int sta_info_alloc_link(struct ieee80211_local *local,
for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++)
ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]);
+ link_info->rx_omi_bw_rx = IEEE80211_STA_RX_BW_MAX;
+ link_info->rx_omi_bw_tx = IEEE80211_STA_RX_BW_MAX;
+ link_info->rx_omi_bw_staging = IEEE80211_STA_RX_BW_MAX;
+
+ /*
+ * Cause (a) warning(s) if IEEE80211_STA_RX_BW_MAX != 320
+ * or if new values are added to the enum.
+ */
+ switch (link_info->cur_max_bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ case IEEE80211_STA_RX_BW_40:
+ case IEEE80211_STA_RX_BW_80:
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_MAX:
+ /* intentionally nothing */
+ break;
+ }
+
return 0;
}
@@ -1317,9 +1335,13 @@ static int _sta_info_move_state(struct sta_info *sta,
sta->sta.addr, new_state);
/* notify the driver before the actual changes so it can
- * fail the transition
+ * fail the transition if the state is increasing.
+ * The driver is required not to fail when the transition
+ * is decreasing the state, so first, do all the preparation
+ * work and only then, notify the driver.
*/
- if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
+ if (new_state > sta->sta_state &&
+ test_sta_flag(sta, WLAN_STA_INSERTED)) {
int err = drv_sta_state(sta->local, sta->sdata, sta,
sta->sta_state, new_state);
if (err)
@@ -1395,6 +1417,16 @@ static int _sta_info_move_state(struct sta_info *sta,
break;
}
+ if (new_state < sta->sta_state &&
+ test_sta_flag(sta, WLAN_STA_INSERTED)) {
+ int err = drv_sta_state(sta->local, sta->sdata, sta,
+ sta->sta_state, new_state);
+
+ WARN_ONCE(err,
+ "Driver is not allowed to fail if the sta_state is transitioning down the list: %d\n",
+ err);
+ }
+
sta->sta_state = new_state;
return 0;
@@ -1566,7 +1598,8 @@ void sta_info_stop(struct ieee80211_local *local)
}
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id, struct sta_info *do_not_flush_sta)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
@@ -1580,12 +1613,21 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
WARN_ON(vlans && !sdata->bss);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (sdata == sta->sdata ||
- (vlans && sdata->bss == sta->sdata->bss)) {
- if (!WARN_ON(__sta_info_destroy_part1(sta)))
- list_add(&sta->free_list, &free_list);
- ret++;
- }
+ if (sdata != sta->sdata &&
+ (!vlans || sdata->bss != sta->sdata->bss))
+ continue;
+
+ if (sta == do_not_flush_sta)
+ continue;
+
+ if (link_id >= 0 && sta->sta.valid_links &&
+ !(sta->sta.valid_links & BIT(link_id)))
+ continue;
+
+ if (!WARN_ON(__sta_info_destroy_part1(sta)))
+ list_add(&sta->free_list, &free_list);
+
+ ret++;
}
if (!list_empty(&free_list)) {
@@ -1717,7 +1759,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
/* sync with ieee80211_tx_h_unicast_ps_buf */
- spin_lock(&sta->ps_lock);
+ spin_lock_bh(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1746,7 +1788,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
*/
clear_sta_flag(sta, WLAN_STA_PSPOLL);
clear_sta_flag(sta, WLAN_STA_UAPSD);
- spin_unlock(&sta->ps_lock);
+ spin_unlock_bh(&sta->ps_lock);
atomic_dec(&ps->num_sta_ps);
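The sta_info.c change above splits the driver notification around a station state change: upward transitions are offered to the driver first, since it may refuse them, while downward transitions are only reported after mac80211 has updated its own state, and a failure there triggers a warning. A minimal sketch of that ordering, using hypothetical types and a toy driver callback:

/*
 * Sketch of the notification ordering for station state transitions.
 * The types, names and the "driver" behaviour here are invented for
 * illustration only.
 */
#include <stdio.h>

enum sta_state { STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

static int drv_sta_state(enum sta_state old, enum sta_state new)
{
	/* pretend the driver rejects direct jumps to AUTHORIZED */
	return (new == STA_AUTHORIZED && old < STA_ASSOC) ? -1 : 0;
}

static int move_state(enum sta_state *cur, enum sta_state new)
{
	if (new > *cur && drv_sta_state(*cur, new))
		return -1;		/* driver vetoed an upward move */

	/* ... local bookkeeping for the transition would go here ... */

	if (new < *cur && drv_sta_state(*cur, new))
		fprintf(stderr, "driver must not fail a downward move\n");

	*cur = new;
	return 0;
}

int main(void)
{
	enum sta_state s = STA_NONE;

	printf("to AUTH: %d\n", move_state(&s, STA_AUTH));
	printf("to AUTHORIZED: %d\n", move_state(&s, STA_AUTHORIZED));
	printf("to ASSOC: %d\n", move_state(&s, STA_ASSOC));
	return 0;
}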
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5ef1554f991f..07b7ec39a52f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -169,7 +169,7 @@ struct sta_info;
* @buf_size: reorder buffer size at receiver
* @failed_bar_ssn: ssn of the last failed BAR tx attempt
* @bar_pending: BAR needs to be re-sent
- * @amsdu: support A-MSDU withing A-MDPU
+ * @amsdu: support A-MSDU within A-MPDU
* @ssn: starting sequence number of the session
*
* This structure's lifetime is managed by RCU, assignments to
@@ -482,6 +482,8 @@ struct ieee80211_fragment_cache {
* same for non-MLD STA. This is used as key for searching link STA
* @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
* and set to the corresponding vif LinkId for MLD STA
+ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
+ * @capa_nss: NSS limit as determined by local and peer capabilities
* @link_hash_node: hash node for rhashtable
* @sta: Points to the STA info
* @gtk: group keys negotiated with this station, if any
@@ -510,6 +512,10 @@ struct ieee80211_fragment_cache {
* @status_stats.avg_ack_signal: average ACK signal
* @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
* taken from HT/VHT capabilities or VHT operating mode notification
+ * @rx_omi_bw_rx: RX OMI bandwidth restriction to apply for RX
+ * @rx_omi_bw_tx: RX OMI bandwidth restriction to apply for TX
+ * @rx_omi_bw_staging: RX OMI bandwidth restriction to apply later
+ * during finalize
* @debugfs_dir: debug filesystem directory dentry
* @pub: public (driver visible) link STA data
* TODO Move other link params from sta_info as required for MLD operation
@@ -518,6 +524,8 @@ struct link_sta_info {
u8 addr[ETH_ALEN];
u8 link_id;
+ u8 op_mode_nss, capa_nss;
+
struct rhlist_head link_hash_node;
struct sta_info *sta;
@@ -557,6 +565,9 @@ struct link_sta_info {
} tx_stats;
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+ enum ieee80211_sta_rx_bandwidth rx_omi_bw_rx,
+ rx_omi_bw_tx,
+ rx_omi_bw_staging;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
@@ -723,6 +734,12 @@ struct sta_info {
struct ieee80211_sta sta;
};
+static inline int ieee80211_tdls_sta_link_id(struct sta_info *sta)
+{
+ /* TDLS STA can only have a single link */
+ return sta->sta.valid_links ? __ffs(sta->sta.valid_links) : 0;
+}
+
static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_MESH
@@ -882,23 +899,32 @@ void sta_info_stop(struct ieee80211_local *local);
/**
* __sta_info_flush - flush matching STA entries from the STA table
*
- * Returns the number of removed STA entries.
+ * Return: the number of removed STA entries.
*
* @sdata: sdata to remove all stations from
* @vlans: if the given interface is an AP interface, also flush VLANs
+ * @link_id: if given (>= 0), only STA entries using @link_id will be
+ * removed. If -1 is passed, all STA entries will be removed.
+ * @do_not_flush_sta: a station that shouldn't be flushed.
*/
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id, struct sta_info *do_not_flush_sta);
/**
* sta_info_flush - flush matching STA entries from the STA table
*
- * Returns the number of removed STA entries.
+ * Return: the number of removed STA entries.
*
* @sdata: sdata to remove all stations from
+ * @link_id: if given (>= 0), only STA entries using @link_id will be
+ * removed. If -1 is passed, all STA entries will be removed.
*/
-static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
+static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata,
+ int link_id)
{
- return __sta_info_flush(sdata, false);
+ return __sta_info_flush(sdata, false, link_id, NULL);
}
void sta_set_rate_info_tx(struct sta_info *sta,
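
The new ieee80211_tdls_sta_link_id() helper encapsulates the "a TDLS peer has exactly one link" rule: __ffs() returns the index of the lowest set bit of valid_links, and the ternary covers non-MLD stations where valid_links is 0 (and __ffs() would be undefined). A small usage sketch, mirroring the tx.c callers further below; the function name is illustrative:

    /* Illustrative only: resolve the single link a TDLS peer can be on and
     * fetch the corresponding link data under RCU protection.
     */
    static struct ieee80211_link_data *
    example_tdls_link(struct ieee80211_sub_if_data *sdata, struct sta_info *sta)
    {
            int tdls_link_id = ieee80211_tdls_sta_link_id(sta);

            return rcu_dereference(sdata->link[tdls_link_id]);
    }
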
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1708b33cdc5e..5f28f3633fa0 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -5,13 +5,13 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2021-2023 Intel Corporation
+ * Copyright 2021-2024 Intel Corporation
*/
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
@@ -696,6 +696,23 @@ static void ieee80211_handle_smps_status(struct ieee80211_sub_if_data *sdata,
wiphy_work_queue(sdata->local->hw.wiphy, &link->u.mgd.recalc_smps);
}
+static void
+ieee80211_handle_teardown_ttlm_status(struct ieee80211_sub_if_data *sdata,
+ bool acked)
+{
+ if (!sdata || !ieee80211_sdata_running(sdata))
+ return;
+
+ if (!acked)
+ return;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return;
+
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.teardown_ttlm_work);
+}
+
static void ieee80211_report_used_skb(struct ieee80211_local *local,
struct sk_buff *skb, bool dropped,
ktime_t ack_hwtstamp)
@@ -773,6 +790,9 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
ieee80211_handle_smps_status(sdata, acked,
info->status_data);
break;
+ case IEEE80211_STATUS_TYPE_NEG_TTLM:
+ ieee80211_handle_teardown_ttlm_status(sdata, acked);
+ break;
}
rcu_read_unlock();
}
@@ -907,6 +927,9 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
if (!ieee80211_sdata_running(sdata))
continue;
+ if (sdata->u.mntr.flags & MONITOR_FLAG_SKIP_TX)
+ continue;
+
if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
!send_to_cooked)
continue;
@@ -1079,7 +1102,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
* This is a bit racy but we can avoid a lot of work
* with this test...
*/
- if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
+ if (!local->tx_mntrs && (!send_to_cooked || !local->cooked_mntrs)) {
if (status->free_list)
list_add_tail(&skb->list, status->free_list);
else
@@ -1281,3 +1304,4 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
while ((skb = __skb_dequeue(skbs)))
ieee80211_free_txskb(hw, skb);
}
+EXPORT_SYMBOL(ieee80211_purge_tx_queue);
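
Exporting ieee80211_purge_tx_queue() lets drivers that keep private skb queues hand pending frames back through ieee80211_free_txskb() instead of dropping them with dev_kfree_skb(), which would lose TX status accounting. A hedged driver-side sketch; the driver function name is hypothetical:

    #include <net/mac80211.h>

    /* Hypothetical teardown path: drop everything still sitting in a private
     * queue; each skb is released via ieee80211_free_txskb() internally, so
     * mac80211's TX status bookkeeping stays consistent.
     */
    static void mydrv_drop_pending_tx(struct ieee80211_hw *hw,
                                      struct sk_buff_head *pending)
    {
            ieee80211_purge_tx_queue(hw, pending);
    }
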
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 49730b424141..2f92e7c7f203 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -6,7 +6,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -159,7 +159,7 @@ static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
u8 *pos;
u8 op_class;
- if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
+ if (!ieee80211_chandef_to_operating_class(&link->conf->chanreq.oper,
&op_class))
return;
@@ -347,7 +347,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
- ieee80211_chandef_downgrade(&uc);
+ ieee80211_chandef_downgrade(&uc, NULL);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
@@ -382,8 +382,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sband))
return;
- ieee80211_add_srates_ie(sdata, skb, false, sband->band);
- ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_EXT_SUPP_RATES);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
@@ -438,7 +438,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
}
ieee80211_tdls_add_oper_classes(link, skb);
@@ -548,30 +548,14 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the HE-cap from sband */
- if (he_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- __le16 he_6ghz_capa;
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ ieee80211_put_he_cap(skb, sdata, sband, NULL);
/* Build HE 6Ghz capa IE from sband */
- if (sband->band == NL80211_BAND_6GHZ) {
- cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
- pos = skb_put(skb, cap_size);
- he_6ghz_capa =
- ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
- pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
- pos + cap_size);
- }
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, link->smps_mode);
}
/* add any custom IEs that go before EHT capabilities */
@@ -591,21 +575,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the EHT-cap from sband */
- if (he_cap && eht_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem, false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
- }
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
+ ieee80211_put_eht_cap(skb, sdata, sband, NULL);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -638,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta || !ap_sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
@@ -684,7 +657,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
- &link->conf->chandef, prot,
+ &link->conf->chanreq.oper, prot,
true);
}
@@ -1369,7 +1342,8 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
bw = min(bw, ieee80211_sta_cap_rx_bw(&sta->deflink));
if (bw != sta->sta.deflink.bandwidth) {
sta->sta.deflink.bandwidth = bw;
- rate_control_rate_update(local, sband, sta, 0,
+ rate_control_rate_update(local, sband,
+ &sta->deflink,
IEEE80211_RC_BW_CHANGED);
/*
* if a TDLS peer BW was updated, we need to
@@ -1413,8 +1387,8 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
- /* Nothing to do if the BSS connection uses HT */
- if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ /* Nothing to do if the BSS connection uses (at least) HT */
+ if (sdata->deflink.u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
return;
tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
@@ -2055,8 +2029,9 @@ ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct sta_info *sta;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
@@ -2066,6 +2041,9 @@ void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
continue;
+ if (sta->deflink.link_id != link->link_id)
+ continue;
+
ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
NL80211_TDLS_TEARDOWN, reason,
GFP_ATOMIC);
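
The TDLS hunks above replace open-coded element construction (manual size math, skb_put(), then ieee80211_ie_build_*()) with the newer ieee80211_put_*() helpers, which size and append the element themselves. A hedged sketch of the general "put an element onto the skb" shape those helpers follow; example_put_element() is illustrative, not an existing API:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Illustrative only: the helper computes the element size itself and
     * reserves room with skb_put(), so callers no longer duplicate the math.
     */
    static void example_put_element(struct sk_buff *skb, u8 eid,
                                    const u8 *data, u8 len)
    {
            u8 *pos = skb_put(skb, 2 + len);

            *pos++ = eid;           /* Element ID */
            *pos++ = len;           /* Length */
            memcpy(pos, data, len); /* Element payload */
    }
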
diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile
index 4fdaf3feaca3..0f5336bc7314 100644
--- a/net/mac80211/tests/Makefile
+++ b/net/mac80211/tests/Makefile
@@ -1,3 +1,3 @@
-mac80211-tests-y += module.o elems.o mfp.o
+mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o
obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c
index 997d0cd27b2d..a53c55a879a8 100644
--- a/net/mac80211/tests/elems.c
+++ b/net/mac80211/tests/elems.c
@@ -2,18 +2,19 @@
/*
* KUnit tests for element parsing
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
#include "../ieee80211_i.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void mle_defrag(struct kunit *test)
{
struct ieee80211_elems_parse_params parse_params = {
.link_id = 12,
.from_ap = true,
+ .mode = IEEE80211_CONN_MODE_EHT,
};
struct ieee802_11_elems *parsed;
struct sk_buff *skb;
@@ -68,7 +69,7 @@ static void mle_defrag(struct kunit *test)
if (IS_ERR_OR_NULL(parsed))
goto free_skb;
- KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic_elem);
+ KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic);
KUNIT_EXPECT_EQ(test,
parsed->ml_basic_len,
2 /* control */ +
diff --git a/net/mac80211/tests/mfp.c b/net/mac80211/tests/mfp.c
index a8dc1601da60..58e675e0ed91 100644
--- a/net/mac80211/tests/mfp.c
+++ b/net/mac80211/tests/mfp.c
@@ -9,7 +9,7 @@
#include "../ieee80211_i.h"
#include "../sta_info.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static const struct mfp_test_case {
const char *desc;
diff --git a/net/mac80211/tests/tpe.c b/net/mac80211/tests/tpe.c
new file mode 100644
index 000000000000..c73b6c66bd5a
--- /dev/null
+++ b/net/mac80211/tests/tpe.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for TPE element handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "../ieee80211_i.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static struct ieee80211_channel chan6g_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan6g_33 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6115,
+};
+
+static struct ieee80211_channel chan6g_61 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6255,
+};
+
+static const struct subchan_test_case {
+ const char *desc;
+ struct cfg80211_chan_def c;
+ u8 n;
+ int expect;
+} subchan_offset_cases[] = {
+ {
+ .desc = "identical 20 MHz",
+ .c.width = NL80211_CHAN_WIDTH_20,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5955,
+ .n = 1,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c.width = NL80211_CHAN_WIDTH_40,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5965,
+ .n = 2,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 80+80 MHz",
+ /* not really a valid chandef, but that doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5985,
+ .c.center_freq2 = 6225,
+ .n = 16,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 6105,
+ .n = 16,
+ .expect = 0,
+ },
+ {
+ .desc = "lower 160 MHz of 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 6105,
+ .n = 8,
+ .expect = 0,
+ },
+ {
+ .desc = "upper 160 MHz of 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6105,
+ .n = 8,
+ .expect = 8,
+ },
+ {
+ .desc = "upper 160 MHz of 320 MHz, go to 40",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6105,
+ .n = 2,
+ .expect = 8 + 4 + 2,
+ },
+ {
+ .desc = "secondary 80 above primary in 80+80 MHz",
+ /* not really a valid chandef, but that doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5985,
+ .c.center_freq2 = 6225,
+ .n = 4,
+ .expect = 0,
+ },
+ {
+ .desc = "secondary 80 below primary in 80+80 MHz",
+ /* not really a valid chandef, but that doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6225,
+ .c.center_freq2 = 5985,
+ .n = 4,
+ .expect = 4,
+ },
+ {
+ .desc = "secondary 80 below primary in 80+80 MHz, go to 20",
+ /* not really a valid chandef, but that doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6225,
+ .c.center_freq2 = 5985,
+ .n = 1,
+ .expect = 7,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(subchan_offset, subchan_offset_cases, desc);
+
+static void subchan_offset(struct kunit *test)
+{
+ const struct subchan_test_case *params = test->param_value;
+ int offset;
+
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->c), true);
+
+ offset = ieee80211_calc_chandef_subchan_offset(&params->c, params->n);
+
+ KUNIT_EXPECT_EQ(test, params->expect, offset);
+}
+
+static const struct psd_reorder_test_case {
+ const char *desc;
+ struct cfg80211_chan_def ap, used;
+ struct ieee80211_parsed_tpe_psd psd, out;
+} psd_reorder_cases[] = {
+ {
+ .desc = "no changes, 320 MHz",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_320,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6105,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 8,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 16,
+ .out.n = 8,
+ .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ },
+ {
+ .desc = "no changes, 320 MHz, 160 MHz used, n=0",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6025,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 0,
+ .psd.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 0,
+ .out.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, all lower",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6025,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, all upper",
+ /*
+ * EHT: | | | | | | | | | | | | | | | | |
+ * HE: | | | | |
+ * used: | | | | | | | | |
+ */
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_61,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_61,
+ .used.center_freq1 = 6185,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 12, 13, 14, 15, 0, 1, 2, 3, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, split",
+ /*
+ * EHT: | | | | | | | | | | | | | | | | |
+ * HE: | | | | |
+ * used: | | | | | | | | |
+ */
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_33,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_33,
+ .used.center_freq1 = 6185,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 0, 1, 2, 3, 12, 13, 14, 15, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(psd_reorder, psd_reorder_cases, desc);
+
+static void psd_reorder(struct kunit *test)
+{
+ const struct psd_reorder_test_case *params = test->param_value;
+ struct ieee80211_parsed_tpe_psd tmp = params->psd;
+
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->ap), true);
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->used), true);
+
+ ieee80211_rearrange_tpe_psd(&tmp, &params->ap, &params->used);
+ KUNIT_EXPECT_MEMEQ(test, &tmp, &params->out, sizeof(tmp));
+}
+
+static struct kunit_case tpe_test_cases[] = {
+ KUNIT_CASE_PARAM(subchan_offset, subchan_offset_gen_params),
+ KUNIT_CASE_PARAM(psd_reorder, psd_reorder_gen_params),
+ {}
+};
+
+static struct kunit_suite tpe = {
+ .name = "mac80211-tpe",
+ .test_cases = tpe_test_cases,
+};
+
+kunit_test_suite(tpe);
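
The new test file follows the standard table-driven KUnit layout: an array of cases with a desc string, KUNIT_ARRAY_PARAM_DESC() to generate a <name>_gen_params iterator, and KUNIT_CASE_PARAM() to register the parameterized case. A self-contained sketch of the same pattern, independent of mac80211; all names are illustrative:

    #include <kunit/test.h>

    static const struct add_case {
            const char *desc;
            int a, b, sum;
    } add_cases[] = {
            { .desc = "small", .a = 1, .b = 2, .sum = 3 },
            { .desc = "negative", .a = -4, .b = 1, .sum = -3 },
    };

    /* Generates add_gen_params(), referenced by KUNIT_CASE_PARAM() below */
    KUNIT_ARRAY_PARAM_DESC(add, add_cases, desc);

    static void add_test(struct kunit *test)
    {
            const struct add_case *params = test->param_value;

            KUNIT_EXPECT_EQ(test, params->a + params->b, params->sum);
    }

    static struct kunit_case example_test_cases[] = {
            KUNIT_CASE_PARAM(add_test, add_gen_params),
            {}
    };

    static struct kunit_suite example_suite = {
            .name = "example-param",
            .test_cases = example_test_cases,
    };

    kunit_test_suite(example_suite);
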
diff --git a/net/mac80211/tests/util.c b/net/mac80211/tests/util.c
new file mode 100644
index 000000000000..0936a73e3617
--- /dev/null
+++ b/net/mac80211/tests/util.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Utilities for mac80211 unit testing
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include "util.h"
+
+#define CHAN2G(_freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
+static const struct ieee80211_channel channels_2ghz[] = {
+ CHAN2G(2412), /* Channel 1 */
+ CHAN2G(2417), /* Channel 2 */
+ CHAN2G(2422), /* Channel 3 */
+ CHAN2G(2427), /* Channel 4 */
+ CHAN2G(2432), /* Channel 5 */
+ CHAN2G(2437), /* Channel 6 */
+ CHAN2G(2442), /* Channel 7 */
+ CHAN2G(2447), /* Channel 8 */
+ CHAN2G(2452), /* Channel 9 */
+ CHAN2G(2457), /* Channel 10 */
+ CHAN2G(2462), /* Channel 11 */
+ CHAN2G(2467), /* Channel 12 */
+ CHAN2G(2472), /* Channel 13 */
+ CHAN2G(2484), /* Channel 14 */
+};
+
+#define CHAN5G(_freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
+static const struct ieee80211_channel channels_5ghz[] = {
+ CHAN5G(5180), /* Channel 36 */
+ CHAN5G(5200), /* Channel 40 */
+ CHAN5G(5220), /* Channel 44 */
+ CHAN5G(5240), /* Channel 48 */
+};
+
+static const struct ieee80211_rate bitrates[] = {
+ { .bitrate = 10 },
+ { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60 },
+ { .bitrate = 90 },
+ { .bitrate = 120 },
+ { .bitrate = 180 },
+ { .bitrate = 240 },
+ { .bitrate = 360 },
+ { .bitrate = 480 },
+ { .bitrate = 540 }
+};
+
+/* Copied from hwsim except that it only supports 4 EHT streams and STA/P2P mode */
+static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
+ {
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 4 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x44,
+ .rx_tx_mcs11_max_nss = 0x44,
+ .rx_tx_mcs13_max_nss = 0x44,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x44,
+ .rx_tx_mcs11_max_nss = 0x44,
+ .rx_tx_mcs13_max_nss = 0x44,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+};
+
+int t_sdata_init(struct kunit_resource *resource, void *ctx)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct t_sdata *t_sdata;
+
+ t_sdata = kzalloc(sizeof(*t_sdata), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, t_sdata);
+
+ resource->data = t_sdata;
+ resource->name = "sdata";
+
+ t_sdata->sdata = kzalloc(sizeof(*t_sdata->sdata), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, t_sdata->sdata);
+
+ t_sdata->wiphy = kzalloc(sizeof(*t_sdata->wiphy), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, t_sdata->wiphy);
+
+ strscpy(t_sdata->sdata->name, "kunit");
+
+ t_sdata->sdata->local = &t_sdata->local;
+ t_sdata->sdata->local->hw.wiphy = t_sdata->wiphy;
+ t_sdata->sdata->wdev.wiphy = t_sdata->wiphy;
+ t_sdata->sdata->vif.type = NL80211_IFTYPE_STATION;
+
+ t_sdata->sdata->deflink.sdata = t_sdata->sdata;
+ t_sdata->sdata->deflink.link_id = 0;
+
+ t_sdata->wiphy->bands[NL80211_BAND_2GHZ] = &t_sdata->band_2ghz;
+ t_sdata->wiphy->bands[NL80211_BAND_5GHZ] = &t_sdata->band_5ghz;
+
+ for (int band = NL80211_BAND_2GHZ; band <= NL80211_BAND_5GHZ; band++) {
+ struct ieee80211_supported_band *sband;
+
+ sband = t_sdata->wiphy->bands[band];
+ sband->band = band;
+
+ sband->bitrates =
+ kmemdup(bitrates, sizeof(bitrates), GFP_KERNEL);
+ sband->n_bitrates = ARRAY_SIZE(bitrates);
+
+ /* Initialize channels, feel free to add more channels/bands */
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ sband->channels = kmemdup(channels_2ghz,
+ sizeof(channels_2ghz),
+ GFP_KERNEL);
+ sband->n_channels = ARRAY_SIZE(channels_2ghz);
+ sband->bitrates = kmemdup(bitrates,
+ sizeof(bitrates),
+ GFP_KERNEL);
+ sband->n_bitrates = ARRAY_SIZE(bitrates);
+ break;
+ case NL80211_BAND_5GHZ:
+ sband->channels = kmemdup(channels_5ghz,
+ sizeof(channels_5ghz),
+ GFP_KERNEL);
+ sband->n_channels = ARRAY_SIZE(channels_5ghz);
+ sband->bitrates = kmemdup(bitrates,
+ sizeof(bitrates),
+ GFP_KERNEL);
+ sband->n_bitrates = ARRAY_SIZE(bitrates);
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
+ break;
+ default:
+ continue;
+ }
+
+ sband->ht_cap.ht_supported = band != NL80211_BAND_6GHZ;
+ sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+ sband->ht_cap.ampdu_factor = 0x3;
+ sband->ht_cap.ampdu_density = 0x6;
+ memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs));
+ sband->ht_cap.mcs.rx_mask[0] = 0xff;
+ sband->ht_cap.mcs.rx_mask[1] = 0xff;
+ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ }
+
+ ieee80211_set_sband_iftype_data(&t_sdata->band_5ghz, sband_capa_5ghz);
+
+ return 0;
+}
+
+void t_sdata_exit(struct kunit_resource *resource)
+{
+ struct t_sdata *t_sdata = resource->data;
+
+ kfree(t_sdata->band_2ghz.channels);
+ kfree(t_sdata->band_2ghz.bitrates);
+ kfree(t_sdata->band_5ghz.channels);
+ kfree(t_sdata->band_5ghz.bitrates);
+
+ kfree(t_sdata->sdata);
+ kfree(t_sdata->wiphy);
+
+ kfree(t_sdata);
+}
diff --git a/net/mac80211/tests/util.h b/net/mac80211/tests/util.h
new file mode 100644
index 000000000000..6615880c123f
--- /dev/null
+++ b/net/mac80211/tests/util.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Utilities for mac80211 unit testing
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __MAC80211_UTILS_H
+#define __MAC80211_UTILS_H
+
+#include "../ieee80211_i.h"
+
+struct t_sdata {
+ struct ieee80211_sub_if_data *sdata;
+ struct wiphy *wiphy;
+ struct ieee80211_local local;
+
+ void *ctx;
+
+ struct ieee80211_supported_band band_2ghz;
+ struct ieee80211_supported_band band_5ghz;
+};
+
+#define T_SDATA(test) ({ \
+ struct t_sdata *__t_sdata = \
+ kunit_alloc_resource(test, t_sdata_init, \
+ t_sdata_exit, \
+ GFP_KERNEL, NULL); \
+ \
+ KUNIT_ASSERT_NOT_NULL(test, __t_sdata); \
+ __t_sdata; \
+ })
+
+int t_sdata_init(struct kunit_resource *resource, void *data);
+void t_sdata_exit(struct kunit_resource *resource);
+
+#endif /* __MAC80211_UTILS_H */
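
T_SDATA() wraps kunit_alloc_resource() so a test can conjure a throwaway sdata/wiphy/band setup from t_sdata_init() and have t_sdata_exit() tear it down automatically when the test ends. A hedged sketch of a test body using it; the assertions are illustrative:

    #include <kunit/test.h>
    /* plus "../ieee80211_i.h" and "util.h", as the other test files include */

    static void example_uses_t_sdata(struct kunit *test)
    {
            struct t_sdata *t_sdata = T_SDATA(test);

            /* t_sdata_init() sets up a station-mode vif on a minimal wiphy */
            KUNIT_ASSERT_NOT_NULL(test, t_sdata->sdata);
            KUNIT_EXPECT_EQ(test, t_sdata->sdata->vif.type,
                            NL80211_IFTYPE_STATION);
    }
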
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index e7f57bb18f6e..94c00e71f6f8 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include "driver-ops.h"
@@ -313,7 +313,7 @@ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
* Record previously received IV, will be copied into the
* key information after MIC verification. It is possible
* that we don't catch replays of fragments but that's ok
- * because the Michael MIC verication will then fail.
+ * because the Michael MIC verification will then fail.
*/
*out_iv32 = iv32;
*out_iv16 = iv16;
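
tkip.c (like status.c and tx.c in this series) only swaps <asm/unaligned.h> for <linux/unaligned.h>; the accessor API itself is unchanged. A small illustrative example of the get/put helpers under the new header; the offset shown is the 802.11 sequence-control field and is chosen purely for illustration:

    #include <linux/types.h>
    #include <linux/unaligned.h>

    /* Read/write a little-endian u16 at an arbitrary, possibly unaligned
     * offset inside a frame buffer.
     */
    static u16 example_read_seq_ctrl(const u8 *frame)
    {
            return get_unaligned_le16(frame + 22);
    }

    static void example_write_seq_ctrl(u8 *frame, u16 seq_ctrl)
    {
            put_unaligned_le16(seq_ctrl, frame + 22);
    }
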
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 06835ed4c44f..72fad8ea8bb9 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -33,7 +33,7 @@
__string(vif_name, sdata->name)
#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
__entry->p2p = sdata->vif.p2p; \
- __assign_str(vif_name, sdata->name)
+ __assign_str(vif_name)
#define VIF_PR_FMT " vif:%s(%d%s)"
#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
@@ -50,7 +50,7 @@
__entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
__entry->freq1_offset = (c) ? (c)->freq1_offset : 0; \
__entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
-#define CHANDEF_PR_FMT " control:%d.%03d MHz width:%d center: %d.%03d/%d MHz"
+#define CHANDEF_PR_FMT " chandef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define CHANDEF_PR_ARG __entry->control_freq, __entry->freq_offset, __entry->chan_width, \
__entry->center_freq1, __entry->freq1_offset, __entry->center_freq2
@@ -69,22 +69,45 @@
__entry->min_center_freq1 = (c)->center_freq1; \
__entry->min_freq1_offset = (c)->freq1_offset; \
__entry->min_center_freq2 = (c)->center_freq2;
-#define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
+#define MIN_CHANDEF_PR_FMT " mindef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset, \
__entry->min_chan_width, \
__entry->min_center_freq1, __entry->min_freq1_offset, \
__entry->min_center_freq2
+#define AP_CHANDEF_ENTRY \
+ __field(u32, ap_control_freq) \
+ __field(u32, ap_freq_offset) \
+ __field(u32, ap_chan_width) \
+ __field(u32, ap_center_freq1) \
+ __field(u32, ap_freq1_offset) \
+ __field(u32, ap_center_freq2)
+
+#define AP_CHANDEF_ASSIGN(c) \
+ __entry->ap_control_freq = (c)->chan ? (c)->chan->center_freq : 0;\
+ __entry->ap_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;\
+ __entry->ap_chan_width = (c)->chan ? (c)->width : 0; \
+ __entry->ap_center_freq1 = (c)->chan ? (c)->center_freq1 : 0; \
+ __entry->ap_freq1_offset = (c)->chan ? (c)->freq1_offset : 0; \
+ __entry->ap_center_freq2 = (c)->chan ? (c)->center_freq2 : 0;
+#define AP_CHANDEF_PR_FMT " ap(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
+#define AP_CHANDEF_PR_ARG __entry->ap_control_freq, __entry->ap_freq_offset, \
+ __entry->ap_chan_width, \
+ __entry->ap_center_freq1, __entry->ap_freq1_offset, \
+ __entry->ap_center_freq2
+
#define CHANCTX_ENTRY CHANDEF_ENTRY \
MIN_CHANDEF_ENTRY \
+ AP_CHANDEF_ENTRY \
__field(u8, rx_chains_static) \
__field(u8, rx_chains_dynamic)
#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \
MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \
+ AP_CHANDEF_ASSIGN(&ctx->conf.ap) \
__entry->rx_chains_static = ctx->conf.rx_chains_static; \
__entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
-#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d"
-#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \
+#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT AP_CHANDEF_PR_FMT " chains:%d/%d"
+#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, AP_CHANDEF_PR_ARG, \
__entry->rx_chains_static, __entry->rx_chains_dynamic
#define KEY_ENTRY __field(u32, cipher) \
@@ -305,9 +328,18 @@ TRACE_EVENT(drv_set_wakeup,
TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
);
-DEFINE_EVENT(local_only_evt, drv_stop,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+TRACE_EVENT(drv_stop,
+ TP_PROTO(struct ieee80211_local *local, bool suspend),
+ TP_ARGS(local, suspend),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, suspend)
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->suspend = suspend;
+ ),
+ TP_printk(LOCAL_PR_FMT " suspend:%d", LOCAL_PR_ARG, __entry->suspend)
);
DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface,
@@ -503,9 +535,9 @@ TRACE_EVENT(drv_link_info_changed,
__entry->ht_operation_mode = link_conf->ht_operation_mode;
__entry->cqm_rssi_thold = link_conf->cqm_rssi_thold;
__entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst;
- __entry->channel_width = link_conf->chandef.width;
- __entry->channel_cfreq1 = link_conf->chandef.center_freq1;
- __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset;
+ __entry->channel_width = link_conf->chanreq.oper.width;
+ __entry->channel_cfreq1 = link_conf->chanreq.oper.center_freq1;
+ __entry->channel_cfreq1_offset = link_conf->chanreq.oper.freq1_offset;
__entry->qos = link_conf->qos;
__entry->hidden_ssid = link_conf->hidden_ssid;
__entry->txpower = link_conf->txpower;
@@ -907,31 +939,34 @@ TRACE_EVENT(drv_sta_set_txpwr,
)
);
-TRACE_EVENT(drv_sta_rc_update,
+TRACE_EVENT(drv_link_sta_rc_update,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
u32 changed),
- TP_ARGS(local, sdata, sta, changed),
+ TP_ARGS(local, sdata, link_sta, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
__field(u32, changed)
+ __field(u32, link_id)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- STA_ASSIGN;
+ STA_NAMED_ASSIGN(link_sta->sta);
__entry->changed = changed;
+ __entry->link_id = link_sta->link_id;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " (link %d) changed: 0x%x",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id,
+ __entry->changed
)
);
@@ -1186,7 +1221,7 @@ DEFINE_EVENT(sta_event, drv_flush_sta,
TP_ARGS(local, sdata, sta)
);
-TRACE_EVENT(drv_channel_switch,
+DECLARE_EVENT_CLASS(chanswitch_evt,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
@@ -1201,6 +1236,7 @@ TRACE_EVENT(drv_channel_switch,
__field(u32, device_timestamp)
__field(bool, block_tx)
__field(u8, count)
+ __field(u8, link_id)
),
TP_fast_assign(
@@ -1211,14 +1247,24 @@ TRACE_EVENT(drv_channel_switch,
__entry->device_timestamp = ch_switch->device_timestamp;
__entry->block_tx = ch_switch->block_tx;
__entry->count = ch_switch->count;
+ __entry->link_id = ch_switch->link_id;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count
+ LOCAL_PR_FMT VIF_PR_FMT CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu device_ts:%u link_id:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp,
+ __entry->device_timestamp, __entry->link_id
)
);
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+ TP_ARGS(local, sdata, ch_switch)
+);
+
TRACE_EVENT(drv_set_antenna,
TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
@@ -2098,39 +2144,11 @@ TRACE_EVENT(drv_channel_switch_beacon,
)
);
-TRACE_EVENT(drv_pre_channel_switch,
+DEFINE_EVENT(chanswitch_evt, drv_pre_channel_switch,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
@@ -2145,52 +2163,24 @@ DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
TP_ARGS(local, sdata)
);
-TRACE_EVENT(drv_channel_switch_rx_beacon,
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch_rx_beacon,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT
- " received a channel switch beacon to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
TRACE_EVENT(drv_get_txpower,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- int dbm, int ret),
+ unsigned int link_id, int dbm, int ret),
- TP_ARGS(local, sdata, dbm, ret),
+ TP_ARGS(local, sdata, link_id, dbm, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
+ __field(unsigned int, link_id)
__field(int, dbm)
__field(int, ret)
),
@@ -2198,13 +2188,14 @@ TRACE_EVENT(drv_get_txpower,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
+ __entry->link_id = link_id;
__entry->dbm = dbm;
__entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT " link_id:%d dbm:%d ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->dbm, __entry->ret
)
);
@@ -2599,6 +2590,45 @@ TRACE_EVENT(drv_change_sta_links,
* Tracing for API calls that drivers call.
*/
+TRACE_EVENT(api_return_bool,
+ TP_PROTO(struct ieee80211_local *local, bool result),
+
+ TP_ARGS(local, result),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, result)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->result = result;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " result=%d",
+ LOCAL_PR_ARG, __entry->result
+ )
+);
+
+TRACE_EVENT(api_return_void,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
TRACE_EVENT(api_start_tx_ba_session,
TP_PROTO(struct ieee80211_sta *sta, u16 tid),
@@ -3035,6 +3065,93 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(api_request_smps,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ enum ieee80211_smps_mode smps_mode),
+
+ TP_ARGS(local, sdata, link, smps_mode),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, link_id)
+ __field(u32, smps_mode)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->link_id = link->link_id,
+ __entry->smps_mode = smps_mode;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " link:%d, smps_mode:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->smps_mode
+ )
+);
+
+TRACE_EVENT(api_prepare_rx_omi_bw,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct link_sta_info *link_sta,
+ enum ieee80211_sta_rx_bandwidth bw),
+
+ TP_ARGS(local, sdata, link_sta, bw),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(int, link_id)
+ __field(u32, bw)
+ __field(bool, result)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_NAMED_ASSIGN(link_sta->sta);
+ __entry->link_id = link_sta->link_id;
+ __entry->bw = bw;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " " STA_PR_FMT " link:%d, bw:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
+ __entry->link_id, __entry->bw
+ )
+);
+
+TRACE_EVENT(api_finalize_rx_omi_bw,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct link_sta_info *link_sta),
+
+ TP_ARGS(local, sdata, link_sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(int, link_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_NAMED_ASSIGN(link_sta->sta);
+ __entry->link_id = link_sta->link_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " " STA_PR_FMT " link:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
@@ -3042,49 +3159,126 @@ TRACE_EVENT(api_radar_detected,
TRACE_EVENT(wake_queue,
TP_PROTO(struct ieee80211_local *local, u16 queue,
- enum queue_stop_reason reason),
+ enum queue_stop_reason reason, int refcount),
- TP_ARGS(local, queue, reason),
+ TP_ARGS(local, queue, reason, refcount),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u16, queue)
__field(u32, reason)
+ __field(int, refcount)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->queue = queue;
__entry->reason = reason;
+ __entry->refcount = refcount;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d, reason:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->reason
+ LOCAL_PR_FMT " queue:%d, reason:%d, refcount: %d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason,
+ __entry->refcount
)
);
TRACE_EVENT(stop_queue,
TP_PROTO(struct ieee80211_local *local, u16 queue,
- enum queue_stop_reason reason),
+ enum queue_stop_reason reason, int refcount),
- TP_ARGS(local, queue, reason),
+ TP_ARGS(local, queue, reason, refcount),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u16, queue)
__field(u32, reason)
+ __field(int, refcount)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->queue = queue;
__entry->reason = reason;
+ __entry->refcount = refcount;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d, reason:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->reason
+ LOCAL_PR_FMT " queue:%d, reason:%d, refcount: %d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason,
+ __entry->refcount
+ )
+);
+
+TRACE_EVENT(drv_can_neg_ttlm,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG)
+);
+
+TRACE_EVENT(drv_neg_ttlm_res,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res res,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, res, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, res)
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->res = res;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT VIF_PR_FMT " response: %d\n ",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->res
+ )
+);
+
+TRACE_EVENT(drv_prep_add_interface,
+ TP_PROTO(struct ieee80211_local *local,
+ enum nl80211_iftype type),
+
+ TP_ARGS(local, type),
+ TP_STRUCT__entry(LOCAL_ENTRY
+ __field(u32, type)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ __entry->type = type;
+ ),
+
+ TP_printk(LOCAL_PR_FMT " type: %u\n ",
+ LOCAL_PR_ARG, __entry->type
)
);
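
Several channel-switch tracepoints above collapse into one DECLARE_EVENT_CLASS (chanswitch_evt) plus DEFINE_EVENT instances, so the record layout and format string are written once and each event only contributes a name. A minimal sketch of that class/instance split with hypothetical event names; this is a fragment of a trace header, with the usual TRACE_SYSTEM/CREATE_TRACE_POINTS boilerplate omitted:

    DECLARE_EVENT_CLASS(example_evt,
            TP_PROTO(int value),
            TP_ARGS(value),
            TP_STRUCT__entry(
                    __field(int, value)
            ),
            TP_fast_assign(
                    __entry->value = value;
            ),
            TP_printk("value:%d", __entry->value)
    );

    DEFINE_EVENT(example_evt, example_event_a,
            TP_PROTO(int value),
            TP_ARGS(value)
    );

    DEFINE_EVENT(example_evt, example_event_b,
            TP_PROTO(int value),
            TP_ARGS(value)
    );
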
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index c9dbe9aab7bd..aea4ce55c5ac 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -16,8 +16,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 120
-
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6fbb15b65902..a24636bda679 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -24,7 +24,7 @@
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/fq_impl.h>
#include <net/gso.h>
@@ -133,6 +133,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
mrate = sband->bitrates[0].bitrate;
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *r = &sband->bitrates[i];
+ u32 flag;
if (r->bitrate > txrate->bitrate)
break;
@@ -145,28 +146,24 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
switch (sband->band) {
case NL80211_BAND_2GHZ:
- case NL80211_BAND_LC: {
- u32 flag;
+ case NL80211_BAND_LC:
if (tx->sdata->deflink.operating_11g_mode)
flag = IEEE80211_RATE_MANDATORY_G;
else
flag = IEEE80211_RATE_MANDATORY_B;
- if (r->flags & flag)
- mrate = r->bitrate;
break;
- }
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (r->flags & IEEE80211_RATE_MANDATORY_A)
- mrate = r->bitrate;
+ flag = IEEE80211_RATE_MANDATORY_A;
break;
- case NL80211_BAND_S1GHZ:
- case NL80211_BAND_60GHZ:
- /* TODO, for now fall through */
- case NUM_NL80211_BANDS:
+ default:
+ flag = 0;
WARN_ON(1);
break;
}
+
+ if (r->flags & flag)
+ mrate = r->bitrate;
}
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
@@ -701,11 +698,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
- txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
- if (tx->sdata->rc_has_mcs_mask[info->band])
- txrc.rate_idx_mcs_mask =
- tx->sdata->rc_rateidx_mcs_mask[info->band];
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) {
+ txrc.rate_idx_mask = ~0;
+ } else {
+ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+
+ if (tx->sdata->rc_has_mcs_mask[info->band])
+ txrc.rate_idx_mcs_mask =
+ tx->sdata->rc_rateidx_mcs_mask[info->band];
+ }
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -1607,8 +1609,8 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
local->cparams.target = MS2TIME(20);
local->cparams.ecn = true;
- local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
- GFP_KERNEL);
+ local->cvars = kvcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
+ GFP_KERNEL);
if (!local->cvars) {
spin_lock_bh(&fq->lock);
fq_reset(fq, fq_skb_free_func);
@@ -1628,7 +1630,7 @@ void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
struct fq *fq = &local->fq;
- kfree(local->cvars);
+ kvfree(local->cvars);
local->cvars = NULL;
spin_lock_bh(&fq->lock);
@@ -1761,12 +1763,13 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ if ((sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) ||
+ ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
vif = &sdata->vif;
break;
}
sdata = rcu_dereference(local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -2393,12 +2396,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (chanctx_conf)
chandef = &chanctx_conf->def;
- else if (!local->use_chanctx)
- chandef = &local->_oper_chandef;
else
goto fail_rcu;
/*
+ * If driver/HW supports IEEE80211_CHAN_CAN_MONITOR we still
+ * shouldn't transmit on disabled channels.
+ */
+ if (!cfg80211_chandef_usable(local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ goto fail_rcu;
+
+ /*
* Frame injection is not allowed if beaconing is not allowed
* or if we need radar detection. Beaconing is usually not allowed when
* the mode or operation (Adhoc, AP, Mesh) does not support DFS.
@@ -2766,8 +2775,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
if (tdls_peer) {
/* For TDLS only one link can be valid with peer STA */
- int tdls_link_id = sta->sta.valid_links ?
- __ffs(sta->sta.valid_links) : 0;
+ int tdls_link_id = ieee80211_tdls_sta_link_id(sta);
struct ieee80211_link_data *link;
/* DA SA BSSID */
@@ -3093,8 +3101,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
case NL80211_IFTYPE_STATION:
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
/* For TDLS only one link can be valid with peer STA */
- int tdls_link_id = sta->sta.valid_links ?
- __ffs(sta->sta.valid_links) : 0;
+ int tdls_link_id = ieee80211_tdls_sta_link_id(sta);
struct ieee80211_link_data *link;
/* DA SA BSSID */
@@ -3946,12 +3953,14 @@ begin:
switch (tx.sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ if ((tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) ||
+ ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
vif = &tx.sdata->vif;
break;
}
tx.sdata = rcu_dereference(local->monitor_sdata);
- if (tx.sdata) {
+ if (tx.sdata &&
+ ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &tx.sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -3959,7 +3968,8 @@ begin:
ieee80211_free_txskb(&local->hw, skb);
goto begin;
} else {
- vif = NULL;
+ info->control.vif = NULL;
+ return skb;
}
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -5032,16 +5042,24 @@ static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
return beacon->cntdwn_current_counter;
}
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 count = 0;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto unlock;
+
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -5083,9 +5101,11 @@ unlock:
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 *beacon_data;
size_t beacon_data_len;
@@ -5094,9 +5114,17 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
if (!ieee80211_sdata_running(sdata))
return false;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto out;
+
if (vif->type == NL80211_IFTYPE_AP) {
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
@@ -5282,7 +5310,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, link->link_id);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
@@ -5322,8 +5350,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->tail)
skb_put_data(skb, beacon->tail, beacon->tail_len);
- if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
+ if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
+ dev_kfree_skb(skb);
return NULL;
+ }
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, csa_off_base);
@@ -6186,7 +6216,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
goto start_xmit;
/* update QoS header to prioritize control port frames if possible,
- * priorization also happens for control port frames send over
+ * prioritization also happens for control port frames send over
* AF_PACKET
*/
rcu_read_lock();
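The tx.c hunks above change the beacon countdown helpers to take an explicit link_id. A minimal, hypothetical driver-side sketch of the new calling convention follows; only the two mac80211 prototypes are taken from the patch, while the callback name and the commented follow-up action are assumptions, not part of this series.

/*
 * Hypothetical driver callback, shown only to illustrate the per-link
 * countdown API above; "drv_beacon_sent" is an invented name.
 */
static void drv_beacon_sent(struct ieee80211_vif *vif, unsigned int link_id)
{
	/* advance the countdown for this link after a transmitted beacon */
	u8 count = ieee80211_beacon_update_cntdwn(vif, link_id);

	/* once it hits zero, the driver would complete the switch */
	if (!count || ieee80211_beacon_cntdwn_is_complete(vif, link_id))
		; /* e.g. schedule the driver's channel-switch work */
}
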
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 643c54855be6..fdda14c08e2b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*
* utilities for mac80211
*/
@@ -46,6 +46,11 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
+const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited = {
+ .mode = IEEE80211_CONN_MODE_EHT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_320,
+};
+
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type)
{
@@ -432,8 +437,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
- trace_wake_queue(local, queue, reason);
-
if (WARN_ON(queue >= hw->queues))
return;
@@ -451,6 +454,9 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (local->q_stop_reasons[queue][reason] == 0)
__clear_bit(reason, &local->queue_stop_reasons[queue]);
+ trace_wake_queue(local, queue, reason,
+ local->q_stop_reasons[queue][reason]);
+
if (local->queue_stop_reasons[queue] != 0)
/* someone still has this queue stopped */
return;
@@ -497,8 +503,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
- trace_stop_queue(local, queue, reason);
-
if (WARN_ON(queue >= hw->queues))
return;
@@ -507,6 +511,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
else
local->q_stop_reasons[queue][reason]++;
+ trace_stop_queue(local, queue, reason,
+ local->q_stop_reasons[queue][reason]);
+
set_bit(reason, &local->queue_stop_reasons[queue]);
}
@@ -652,7 +659,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_wake_queues);
-static unsigned int
+unsigned int
ieee80211_get_vif_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
@@ -664,7 +671,8 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
queues = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- queues |= BIT(sdata->vif.hw_queue[ac]);
+ if (sdata->vif.hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ queues |= BIT(sdata->vif.hw_queue[ac]);
if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE)
queues |= BIT(sdata->vif.cab_queue);
} else {
@@ -679,7 +687,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
unsigned int queues, bool drop)
{
- if (!local->ops->flush)
+ if (!local->ops->flush && !drop)
return;
/*
@@ -706,7 +714,8 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
}
}
- drv_flush(local, sdata, queues, drop);
+ if (local->ops->flush)
+ drv_flush(local, sdata, queues, drop);
ieee80211_wake_queues_by_reason(&local->hw, queues,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
@@ -719,24 +728,6 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
__ieee80211_flush_queues(local, sdata, 0, drop);
}
-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason)
-{
- ieee80211_stop_queues_by_reason(&local->hw,
- ieee80211_get_vif_queues(local, sdata),
- reason, true);
-}
-
-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason)
-{
- ieee80211_wake_queues_by_reason(&local->hw,
- ieee80211_get_vif_queues(local, sdata),
- reason, true);
-}
-
static void __iterate_interfaces(struct ieee80211_local *local,
u32 iter_flags,
void (*iterator)(void *data, u8 *mac,
@@ -746,10 +737,13 @@ static void __iterate_interfaces(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE;
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list,
+ lockdep_is_held(&local->iflist_mtx) ||
+ lockdep_is_held(&local->hw.wiphy->mtx)) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
continue;
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -771,7 +765,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
sdata = rcu_dereference_check(local->monitor_sdata,
lockdep_is_held(&local->iflist_mtx) ||
lockdep_is_held(&local->hw.wiphy->mtx));
- if (sdata &&
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
@@ -828,7 +822,8 @@ static void __iterate_stations(struct ieee80211_local *local,
{
struct sta_info *sta;
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list,
+ lockdep_is_held(&local->hw.wiphy->mtx)) {
if (!sta->uploaded)
continue;
@@ -849,6 +844,19 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic);
+void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ __iterate_stations(local, iterator, data);
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_mtx);
+
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -912,776 +920,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-static void
-ieee80211_parse_extension_element(u32 *crc,
- const struct element *elem,
- struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- const void *data = elem->data + 1;
- bool calc_crc = false;
- u8 len;
-
- if (!elem->datalen)
- return;
-
- len = elem->datalen - 1;
-
- switch (elem->data[0]) {
- case WLAN_EID_EXT_HE_MU_EDCA:
- calc_crc = true;
- if (len >= sizeof(*elems->mu_edca_param_set))
- elems->mu_edca_param_set = data;
- break;
- case WLAN_EID_EXT_HE_CAPABILITY:
- if (ieee80211_he_capa_size_ok(data, len)) {
- elems->he_cap = data;
- elems->he_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_HE_OPERATION:
- calc_crc = true;
- if (len >= sizeof(*elems->he_operation) &&
- len >= ieee80211_he_oper_size(data) - 1)
- elems->he_operation = data;
- break;
- case WLAN_EID_EXT_UORA:
- if (len >= 1)
- elems->uora_element = data;
- break;
- case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
- if (len == 3)
- elems->max_channel_switch_time = data;
- break;
- case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len >= sizeof(*elems->mbssid_config_ie))
- elems->mbssid_config_ie = data;
- break;
- case WLAN_EID_EXT_HE_SPR:
- if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
- elems->he_spr = data;
- break;
- case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len >= sizeof(*elems->he_6ghz_capa))
- elems->he_6ghz_capa = data;
- break;
- case WLAN_EID_EXT_EHT_CAPABILITY:
- if (ieee80211_eht_capa_size_ok(elems->he_cap,
- data, len,
- params->from_ap)) {
- elems->eht_cap = data;
- elems->eht_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_EHT_OPERATION:
- if (ieee80211_eht_oper_size_ok(data, len))
- elems->eht_operation = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_EHT_MULTI_LINK:
- calc_crc = true;
-
- if (ieee80211_mle_size_ok(data, len)) {
- const struct ieee80211_multi_link_elem *mle =
- (void *)data;
-
- switch (le16_get_bits(mle->control,
- IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- elems->ml_basic_elem = (void *)elem;
- elems->ml_basic = data;
- elems->ml_basic_len = len;
- break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- elems->ml_reconf_elem = (void *)elem;
- elems->ml_reconf = data;
- elems->ml_reconf_len = len;
- break;
- default:
- break;
- }
- }
- break;
- case WLAN_EID_EXT_BANDWIDTH_INDICATION:
- if (ieee80211_bandwidth_indication_size_ok(data, len))
- elems->bandwidth_indication = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
- calc_crc = true;
- if (ieee80211_tid_to_link_map_size_ok(data, len) &&
- elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
- elems->ttlm[elems->ttlm_num] = (void *)data;
- elems->ttlm_num++;
- }
- break;
- }
-
- if (crc && calc_crc)
- *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
-}
-
-static u32
-_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
- struct ieee802_11_elems *elems,
- const struct element *check_inherit)
-{
- const struct element *elem;
- bool calc_crc = params->filter != 0;
- DECLARE_BITMAP(seen_elems, 256);
- u32 crc = params->crc;
-
- bitmap_zero(seen_elems, 256);
-
- for_each_element(elem, params->start, params->len) {
- const struct element *subelem;
- bool elem_parse_failed;
- u8 id = elem->id;
- u8 elen = elem->datalen;
- const u8 *pos = elem->data;
-
- if (check_inherit &&
- !cfg80211_is_element_inherited(elem,
- check_inherit))
- continue;
-
- switch (id) {
- case WLAN_EID_SSID:
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_CHALLENGE:
- case WLAN_EID_RSN:
- case WLAN_EID_ERP_INFO:
- case WLAN_EID_EXT_SUPP_RATES:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_HT_OPERATION:
- case WLAN_EID_VHT_CAPABILITY:
- case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_MESH_ID:
- case WLAN_EID_MESH_CONFIG:
- case WLAN_EID_PEER_MGMT:
- case WLAN_EID_PREQ:
- case WLAN_EID_PREP:
- case WLAN_EID_PERR:
- case WLAN_EID_RANN:
- case WLAN_EID_CHANNEL_SWITCH:
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_PWR_CONSTRAINT:
- case WLAN_EID_TIMEOUT_INTERVAL:
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- case WLAN_EID_CHAN_SWITCH_PARAM:
- case WLAN_EID_EXT_CAPABILITY:
- case WLAN_EID_CHAN_SWITCH_TIMING:
- case WLAN_EID_LINK_ID:
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- case WLAN_EID_RSNX:
- case WLAN_EID_S1G_BCN_COMPAT:
- case WLAN_EID_S1G_CAPABILITIES:
- case WLAN_EID_S1G_OPERATION:
- case WLAN_EID_AID_RESPONSE:
- case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
- /*
- * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
- * that if the content gets bigger it might be needed more than once
- */
- if (test_bit(id, seen_elems)) {
- elems->parse_error = true;
- continue;
- }
- break;
- }
-
- if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elem_parse_failed = false;
-
- switch (id) {
- case WLAN_EID_LINK_ID:
- if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
- elem_parse_failed = true;
- break;
- }
- elems->lnk_id = (void *)(pos - 2);
- break;
- case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_sw_timing = (void *)pos;
- break;
- case WLAN_EID_EXT_CAPABILITY:
- elems->ext_capab = pos;
- elems->ext_capab_len = elen;
- break;
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- if (elen >= 1)
- elems->ds_params = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- } else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- if (elen >= 1)
- elems->erp_info = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_HT_OPERATION:
- if (elen >= sizeof(struct ieee80211_ht_operation))
- elems->ht_operation = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_vht_cap))
- elems->vht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation)) {
- elems->vht_operation = (void *)pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0) {
- elems->opmode_notif = pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_PEER_MGMT:
- elems->peering = pos;
- elems->peering_len = elen;
- break;
- case WLAN_EID_MESH_AWAKE_WINDOW:
- if (elen >= 2)
- elems->awake_window = (void *)pos;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_switch_ie = (void *)pos;
- break;
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ext_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->sec_chan_offs = (void *)pos;
- break;
- case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen <
- sizeof(*elems->mesh_chansw_params_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->mesh_chansw_params_ie = (void *)pos;
- break;
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- if (!params->action ||
- elen < sizeof(*elems->wide_bw_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->wide_bw_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
- if (params->action) {
- elem_parse_failed = true;
- break;
- }
- /*
- * This is a bit tricky, but as we only care about
- * a few elements, parse them out manually.
- */
- subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
- pos, elen);
- if (subelem) {
- if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
- elems->wide_bw_chansw_ie =
- (void *)subelem->data;
- else
- elem_parse_failed = true;
- }
-
- subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
- pos, elen);
- if (subelem) {
- const void *edata = subelem->data + 1;
- u8 edatalen = subelem->datalen - 1;
-
- if (ieee80211_bandwidth_indication_size_ok(edata,
- edatalen))
- elems->bandwidth_indication = edata;
- else
- elem_parse_failed = true;
- }
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- if (elen != 1) {
- elem_parse_failed = true;
- break;
- }
- elems->pwr_constr_elem = pos;
- break;
- case WLAN_EID_CISCO_VENDOR_SPECIFIC:
- /* Lots of different options exist, but we only care
- * about the Dynamic Transmit Power Control element.
- * First check for the Cisco OUI, then for the DTPC
- * tag (0x00).
- */
- if (elen < 4) {
- elem_parse_failed = true;
- break;
- }
-
- if (pos[0] != 0x00 || pos[1] != 0x40 ||
- pos[2] != 0x96 || pos[3] != 0x00)
- break;
-
- if (elen != 6) {
- elem_parse_failed = true;
- break;
- }
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elems->cisco_dtpc_elem = pos;
- break;
- case WLAN_EID_ADDBA_EXT:
- if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->addba_ext_ie = (void *)pos;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
- elems->timeout_int = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- if (elen >= sizeof(*elems->max_idle_period_ie))
- elems->max_idle_period_ie = (void *)pos;
- break;
- case WLAN_EID_RSNX:
- elems->rsnx = pos;
- elems->rsnx_len = elen;
- break;
- case WLAN_EID_TX_POWER_ENVELOPE:
- if (elen < 1 ||
- elen > sizeof(struct ieee80211_tx_pwr_env))
- break;
-
- if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
- break;
-
- elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
- elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
- elems->tx_pwr_env_num++;
- break;
- case WLAN_EID_EXTENSION:
- ieee80211_parse_extension_element(calc_crc ?
- &crc : NULL,
- elem, elems, params);
- break;
- case WLAN_EID_S1G_CAPABILITIES:
- if (elen >= sizeof(*elems->s1g_capab))
- elems->s1g_capab = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_OPERATION:
- if (elen == sizeof(*elems->s1g_oper))
- elems->s1g_oper = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_BCN_COMPAT:
- if (elen == sizeof(*elems->s1g_bcn_compat))
- elems->s1g_bcn_compat = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_AID_RESPONSE:
- if (elen == sizeof(struct ieee80211_aid_response_ie))
- elems->aid_resp = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- default:
- break;
- }
-
- if (elem_parse_failed)
- elems->parse_error = true;
- else
- __set_bit(id, seen_elems);
- }
-
- if (!for_each_element_completed(elem, params->start, params->len))
- elems->parse_error = true;
-
- return crc;
-}
-
-static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- struct cfg80211_bss *bss,
- u8 *nontransmitted_profile)
-{
- const struct element *elem, *sub;
- size_t profile_len = 0;
- bool found = false;
-
- if (!bss || !bss->transmitted_bss)
- return profile_len;
-
- for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
- if (elem->datalen < 2)
- continue;
- if (elem->data[0] < 1 || elem->data[0] > 8)
- continue;
-
- for_each_element(sub, elem->data + 1, elem->datalen - 1) {
- u8 new_bssid[ETH_ALEN];
- const u8 *index;
-
- if (sub->id != 0 || sub->datalen < 4) {
- /* not a valid BSS profile */
- continue;
- }
-
- if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
- sub->data[1] != 2) {
- /* The first element of the
- * Nontransmitted BSSID Profile is not
- * the Nontransmitted BSSID Capability
- * element.
- */
- continue;
- }
-
- memset(nontransmitted_profile, 0, len);
- profile_len = cfg80211_merge_profile(start, len,
- elem,
- sub,
- nontransmitted_profile,
- len);
-
- /* found a Nontransmitted BSSID Profile */
- index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
- nontransmitted_profile,
- profile_len);
- if (!index || index[1] < 1 || index[2] == 0) {
- /* Invalid MBSSID Index element */
- continue;
- }
-
- cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
- elem->data[0],
- index[2],
- new_bssid);
- if (ether_addr_equal(new_bssid, bss->bssid)) {
- found = true;
- elems->bssid_index_len = index[1];
- elems->bssid_index = (void *)&index[2];
- break;
- }
- }
- }
-
- return found ? profile_len : 0;
-}
-
-static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
- u8 link_id)
-{
- const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *sub;
-
- if (!ml || !ml_len)
- return;
-
- if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
-
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
- struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
- ssize_t sta_prof_len;
- u16 control;
-
- if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
- continue;
-
- if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
- sub->datalen))
- return;
-
- control = le16_to_cpu(prof->control);
-
- if (link_id != u16_get_bits(control,
- IEEE80211_MLE_STA_CONTROL_LINK_ID))
- continue;
-
- if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
- return;
-
- /* the sub element can be fragmented */
- sta_prof_len =
- cfg80211_defragment_element(sub,
- (u8 *)ml, ml_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- IEEE80211_MLE_SUBELEM_FRAGMENT);
-
- if (sta_prof_len < 0)
- return;
-
- elems->prof = (void *)elems->scratch_pos;
- elems->sta_prof_len = sta_prof_len;
- elems->scratch_pos += sta_prof_len;
-
- return;
- }
-}
-
-static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- struct ieee80211_mle_per_sta_profile *prof;
- struct ieee80211_elems_parse_params sub = {
- .action = params->action,
- .from_ap = params->from_ap,
- .link_id = -1,
- };
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *non_inherit = NULL;
- const u8 *end;
-
- if (params->link_id == -1)
- return;
-
- ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
-
- if (ml_len < 0)
- return;
-
- elems->ml_basic = (const void *)elems->scratch_pos;
- elems->ml_basic_len = ml_len;
-
- ieee80211_mle_get_sta_prof(elems, params->link_id);
- prof = elems->prof;
-
- if (!prof)
- return;
-
- /* check if we have the 4 bytes for the fixed part in assoc response */
- if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
- elems->prof = NULL;
- elems->sta_prof_len = 0;
- return;
- }
-
- /*
- * Skip the capability information and the status code that are expected
- * as part of the station profile in association response frames. Note
- * the -1 is because the 'sta_info_len' is accounted to as part of the
- * per-STA profile, but not part of the 'u8 variable[]' portion.
- */
- sub.start = prof->variable + prof->sta_info_len - 1 + 4;
- end = (const u8 *)prof + elems->sta_prof_len;
- sub.len = end - sub.start;
-
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub.start, sub.len);
- _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
-}
-
-struct ieee802_11_elems *
-ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
-{
- struct ieee802_11_elems *elems;
- const struct element *non_inherit = NULL;
- u8 *nontransmitted_profile;
- int nontransmitted_profile_len = 0;
- size_t scratch_len = 3 * params->len;
-
- elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
- if (!elems)
- return NULL;
- elems->ie_start = params->start;
- elems->total_len = params->len;
- elems->scratch_len = scratch_len;
- elems->scratch_pos = elems->scratch;
-
- nontransmitted_profile = elems->scratch_pos;
- nontransmitted_profile_len =
- ieee802_11_find_bssid_profile(params->start, params->len,
- elems, params->bss,
- nontransmitted_profile);
- elems->scratch_pos += nontransmitted_profile_len;
- elems->scratch_len -= nontransmitted_profile_len;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- nontransmitted_profile,
- nontransmitted_profile_len);
-
- elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);
-
- /* Override with nontransmitted profile, if found */
- if (nontransmitted_profile_len) {
- struct ieee80211_elems_parse_params sub = {
- .start = nontransmitted_profile,
- .len = nontransmitted_profile_len,
- .action = params->action,
- .link_id = params->link_id,
- };
-
- _ieee802_11_parse_elems_full(&sub, elems, NULL);
- }
-
- ieee80211_mle_parse_link(elems, params);
-
- if (elems->tim && !elems->parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems->tim;
-
- elems->dtim_period = tim_ie->dtim_period;
- elems->dtim_count = tim_ie->dtim_count;
- }
-
- /* Override DTIM period and count if needed */
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_period))
- elems->dtim_period = elems->bssid_index->dtim_period;
-
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_count))
- elems->dtim_count = elems->bssid_index->dtim_count;
-
- return elems;
-}
-EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
-
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params
*qparam, int ac)
@@ -1759,7 +997,7 @@ void ieee80211_set_wmm_default(struct ieee80211_link_data *link,
else
aCWmin = 15;
- /* Confiure old 802.11b/g medium access rules. */
+ /* Configure old 802.11b/g medium access rules. */
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
@@ -1938,37 +1176,34 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
}
}
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
+static int ieee80211_put_s1g_cap(struct sk_buff *skb,
+ struct ieee80211_sta_s1g_cap *s1g_cap)
{
- if ((end - pos) < 5)
- return pos;
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_s1g_cap))
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = 1 + sizeof(cap);
- *pos++ = WLAN_EID_EXT_HE_6GHZ_CAPA;
- memcpy(pos, &cap, sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_S1G_CAPABILITIES);
+ skb_put_u8(skb, sizeof(struct ieee80211_s1g_cap));
+
+ skb_put_data(skb, &s1g_cap->cap, sizeof(s1g_cap->cap));
+ skb_put_data(skb, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
- return pos + 2;
+ return 0;
}
-static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
- u8 *buffer, size_t buffer_len,
- const u8 *ie, size_t ie_len,
- enum nl80211_band band,
- u32 rate_mask,
- struct cfg80211_chan_def *chandef,
- size_t *offset, u32 flags)
+static int ieee80211_put_preq_ies_band(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *ie, size_t ie_len,
+ size_t *offset,
+ enum nl80211_band band,
+ u32 rate_mask,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 *pos = buffer, *end = buffer + buffer_len;
+ int i, err;
size_t noffset;
- int supp_rates_len, i;
- u8 rates[32];
- int num_rates;
- int ext_rates_len;
u32 rate_flags;
bool have_80mhz = false;
@@ -1981,32 +1216,13 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
rate_flags = ieee80211_chandef_rate_flags(chandef);
/* For direct scan add S1G IE and consider its override bits */
- if (band == NL80211_BAND_S1GHZ) {
- if (end - pos < 2 + sizeof(struct ieee80211_s1g_cap))
- goto out_err;
- pos = ieee80211_ie_build_s1g_cap(pos, &sband->s1g_cap);
- goto done;
- }
-
- num_rates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if ((BIT(i) & rate_mask) == 0)
- continue; /* skip rate */
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- rates[num_rates++] =
- (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- }
-
- supp_rates_len = min_t(int, num_rates, 8);
+ if (band == NL80211_BAND_S1GHZ)
+ return ieee80211_put_s1g_cap(skb, &sband->s1g_cap);
- if (end - pos < 2 + supp_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
- memcpy(pos, rates, supp_rates_len);
- pos += supp_rates_len;
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_SUPP_RATES);
+ if (err)
+ return err;
/* insert "request information" if in custom IEs */
if (ie && ie_len) {
@@ -2019,34 +1235,28 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
before_extrates,
ARRAY_SIZE(before_extrates),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- ext_rates_len = num_rates - supp_rates_len;
- if (ext_rates_len > 0) {
- if (end - pos < 2 + ext_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = ext_rates_len;
- memcpy(pos, rates + supp_rates_len, ext_rates_len);
- pos += ext_rates_len;
- }
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_EXT_SUPP_RATES);
+ if (err)
+ return err;
if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
- if (end - pos < 3)
- goto out_err;
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(
- chandef->chan->center_freq);
+ if (skb_tailroom(skb) < 3)
+ return -ENOBUFS;
+ skb_put_u8(skb, WLAN_EID_DS_PARAMS);
+ skb_put_u8(skb, 1);
+ skb_put_u8(skb,
+ ieee80211_frequency_to_channel(chandef->chan->center_freq));
}
if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
- goto done;
+ return 0;
/* insert custom IEs that go before HT */
if (ie && ie_len) {
@@ -2061,18 +1271,21 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_ht, ARRAY_SIZE(before_ht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- if (end - pos < 2 + sizeof(struct ieee80211_ht_cap))
- goto out_err;
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
}
/* insert custom IEs that go before VHT */
@@ -2093,10 +1306,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_vht, ARRAY_SIZE(before_vht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
@@ -2111,10 +1323,14 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
}
if (sband->vht_cap.vht_supported && have_80mhz) {
- if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
- goto out_err;
- pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
- sband->vht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+ ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
}
/* insert custom IEs that go before HE */
@@ -2131,107 +1347,128 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_he, ARRAY_SIZE(before_he),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (he_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE)) {
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_he_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- if (eht_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE |
IEEE80211_CHAN_NO_EHT)) {
- pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end,
- sdata->vif.type == NL80211_IFTYPE_AP);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_eht_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- if (cfg80211_any_usable_channels(local->hw.wiphy,
- BIT(NL80211_BAND_6GHZ),
- IEEE80211_CHAN_NO_HE)) {
- struct ieee80211_supported_band *sband6;
-
- sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
- he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif);
-
- if (he_cap) {
- enum nl80211_iftype iftype =
- ieee80211_vif_type_p2p(&sdata->vif);
- __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);
-
- pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
- }
- }
+ err = ieee80211_put_he_6ghz_cap(skb, sdata, IEEE80211_SMPS_OFF);
+ if (err)
+ return err;
/*
* If adding more here, adjust code in main.c
* that calculates local->scan_ies_len.
*/
- return pos - buffer;
- out_err:
- WARN_ONCE(1, "not enough space for preq IEs\n");
- done:
- return pos - buffer;
+ return 0;
}
-int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
- size_t buffer_len,
- struct ieee80211_scan_ies *ie_desc,
- const u8 *ie, size_t ie_len,
- u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef,
- u32 flags)
+static int ieee80211_put_preq_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
- size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
- int i;
+ size_t custom_ie_offset = 0;
+ int i, err;
memset(ie_desc, 0, sizeof(*ie_desc));
for (i = 0; i < NUM_NL80211_BANDS; i++) {
if (bands_used & BIT(i)) {
- pos += ieee80211_build_preq_ies_band(sdata,
- buffer + pos,
- buffer_len - pos,
- ie, ie_len, i,
- rate_masks[i],
- chandef,
- &custom_ie_offset,
- flags);
- ie_desc->ies[i] = buffer + old_pos;
- ie_desc->len[i] = pos - old_pos;
- old_pos = pos;
+ ie_desc->ies[i] = skb_tail_pointer(skb);
+ err = ieee80211_put_preq_ies_band(skb, sdata,
+ ie, ie_len,
+ &custom_ie_offset,
+ i, rate_masks[i],
+ chandef, flags);
+ if (err)
+ return err;
+ ie_desc->len[i] = skb_tail_pointer(skb) -
+ ie_desc->ies[i];
}
}
/* add any remaining custom IEs */
if (ie && ie_len) {
- if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset,
+ if (WARN_ONCE(skb_tailroom(skb) < ie_len - custom_ie_offset,
"not enough space for preq custom IEs\n"))
- return pos;
- memcpy(buffer + pos, ie + custom_ie_offset,
- ie_len - custom_ie_offset);
- ie_desc->common_ies = buffer + pos;
- ie_desc->common_ie_len = ie_len - custom_ie_offset;
- pos += ie_len - custom_ie_offset;
+ return -ENOBUFS;
+ ie_desc->common_ies = skb_tail_pointer(skb);
+ skb_put_data(skb, ie + custom_ie_offset,
+ ie_len - custom_ie_offset);
+ ie_desc->common_ie_len = skb_tail_pointer(skb) -
+ ie_desc->common_ies;
}
- return pos;
+ return 0;
};
+int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
+ size_t buffer_len,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
+{
+ struct sk_buff *skb = alloc_skb(buffer_len, GFP_KERNEL);
+ uintptr_t offs;
+ int ret, i;
+ u8 *start;
+
+ if (!skb)
+ return -ENOMEM;
+
+ start = skb_tail_pointer(skb);
+ memset(start, 0, skb_tailroom(skb));
+ ret = ieee80211_put_preq_ies(skb, sdata, ie_desc, ie, ie_len,
+ bands_used, rate_masks, chandef,
+ flags);
+ if (ret < 0) {
+ goto out;
+ }
+
+ if (skb->len > buffer_len) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ memcpy(buffer, start, skb->len);
+
+ /* adjust ie_desc for copy */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ offs = ie_desc->ies[i] - start;
+ ie_desc->ies[i] = buffer + offs;
+ }
+ offs = ie_desc->common_ies - start;
+ ie_desc->common_ies = buffer + offs;
+
+ ret = skb->len;
+out:
+ consume_skb(skb);
+ return ret;
+}
+
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
u32 ratemask,
@@ -2244,7 +1481,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- int ies_len;
u32 rate_masks[NUM_NL80211_BANDS] = {};
struct ieee80211_scan_ies dummy_ie_desc;
@@ -2253,7 +1489,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
- chandef.width = sdata->vif.bss_conf.chandef.width;
+ chandef.width = sdata->vif.bss_conf.chanreq.oper.width;
if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
chandef.chan = NULL;
else
@@ -2265,11 +1501,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return NULL;
rate_masks[chan->band] = ratemask;
- ies_len = ieee80211_build_preq_ies(sdata, skb_tail_pointer(skb),
- skb_tailroom(skb), &dummy_ie_desc,
- ie, ie_len, BIT(chan->band),
- rate_masks, &chandef, flags);
- skb_put(skb, ies_len);
+ ieee80211_put_preq_ies(skb, sdata, &dummy_ie_desc,
+ ie, ie_len, BIT(chan->band),
+ rate_masks, &chandef, flags);
if (dst) {
mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -2295,7 +1529,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
num_rates = sband->n_bitrates;
supp_rates = 0;
@@ -2333,8 +1568,12 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
return supp_rates;
}
-void ieee80211_stop_device(struct ieee80211_local *local)
+void ieee80211_stop_device(struct ieee80211_local *local, bool suspend)
{
+ local_bh_disable();
+ ieee80211_handle_queued_frames(local);
+ local_bh_enable();
+
ieee80211_led_radio(local, false);
ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
@@ -2342,7 +1581,7 @@ void ieee80211_stop_device(struct ieee80211_local *local)
flush_workqueue(local->workqueue);
wiphy_work_flush(local->hw.wiphy, NULL);
- drv_stop(local);
+ drv_stop(local, suspend);
}
static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
@@ -2416,9 +1655,6 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx)
- return;
-
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
if (conf) {
@@ -2593,6 +1829,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
else
WARN(1, "Hardware became unavailable during restart.\n");
+ ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+ false);
ieee80211_handle_reconfig_failure(local);
return res;
}
@@ -2612,7 +1851,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add interfaces */
sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
/* in HW restart it exists already */
WARN_ON(local->resuming);
res = drv_add_interface(local, sdata);
@@ -2624,8 +1863,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
+ continue;
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR &&
ieee80211_sdata_running(sdata)) {
res = drv_add_interface(local, sdata);
if (WARN_ON(res))
@@ -2638,30 +1879,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
*/
if (res) {
list_for_each_entry_continue_reverse(sdata, &local->interfaces,
- list)
+ list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
+ continue;
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR &&
ieee80211_sdata_running(sdata))
drv_remove_interface(local, sdata);
+ }
ieee80211_handle_reconfig_failure(local);
return res;
}
/* add channel contexts */
- if (local->use_chanctx) {
- list_for_each_entry(ctx, &local->chanctx_list, list)
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACES_OTHER)
- WARN_ON(drv_add_chanctx(local, ctx));
-
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata && ieee80211_sdata_running(sdata))
- ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
- }
+ list_for_each_entry(ctx, &local->chanctx_list, list)
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ WARN_ON(drv_add_chanctx(local, ctx));
+
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
+ if (sdata && ieee80211_sdata_running(sdata))
+ ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, ~0);
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -2703,11 +1947,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
old);
}
+ sdata->restart_active_links = active_links;
+
for (link_id = 0;
link_id < ARRAY_SIZE(sdata->vif.link_conf);
link_id++) {
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -2756,9 +2001,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
sdata->vif.bss_conf.protected_keep_alive)
changed |= BSS_CHANGED_KEEP_ALIVE;
- if (sdata->vif.bss_conf.eht_puncturing)
- changed |= BSS_CHANGED_EHT_PUNCTURING;
-
ieee80211_bss_info_change_notify(sdata,
changed);
} else if (!WARN_ON(!link)) {
@@ -2834,9 +2076,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
WARN_ON(1);
break;
}
-
- if (active_links)
- ieee80211_set_active_links(&sdata->vif, active_links);
}
ieee80211_recalc_ps(local);
@@ -2877,6 +2116,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list)
ieee80211_reenable_keys(sdata);
+ /* re-enable multi-link for client interfaces */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->restart_active_links)
+ ieee80211_set_active_links(&sdata->vif,
+ sdata->restart_active_links);
+ /*
+ * If a link switch was scheduled before the restart, and ran
+ * before reconfig, it will do nothing, so re-schedule.
+ */
+ if (sdata->desired_active_links)
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &sdata->activate_links_work);
+ }
+
/* Reconfigure sched scan if it was interrupted by FW restart */
sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
lockdep_is_held(&local->hw.wiphy->mtx));
@@ -2937,12 +2190,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
local->in_reconfig = false;
barrier();
- /* Restart deferred ROCs */
- ieee80211_start_next_roc(local);
+ ieee80211_reconfig_roc(local);
/* Requeue all works */
- list_for_each_entry(sdata, &local->interfaces, list)
- wiphy_work_queue(local->hw.wiphy, &sdata->work);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (ieee80211_sdata_running(sdata))
+ wiphy_work_queue(local->hw.wiphy, &sdata->work);
+ }
}
ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
@@ -3095,7 +2349,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
conf);
- ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
}
}
@@ -3109,21 +2363,6 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap)
-{
- *pos++ = WLAN_EID_S1G_CAPABILITIES;
- *pos++ = sizeof(struct ieee80211_s1g_cap);
- memset(pos, 0, sizeof(struct ieee80211_s1g_cap));
-
- memcpy(pos, &s1g_cap->cap, sizeof(s1g_cap->cap));
- pos += sizeof(s1g_cap->cap);
-
- memcpy(pos, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
- pos += sizeof(s1g_cap->nss_mcs);
-
- return pos;
-}
-
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap)
{
@@ -3180,7 +2419,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos;
}
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_he_6ghz_cap() will need */
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
@@ -3190,7 +2430,7 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap)
return 0;
@@ -3201,38 +2441,75 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
he_cap->he_cap_elem.phy_cap_info);
}
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
+static void
+ieee80211_get_adjusted_he_cap(const struct ieee80211_conn_settings *conn,
const struct ieee80211_sta_he_cap *he_cap,
- u8 *end)
+ struct ieee80211_he_cap_elem *elem)
{
- struct ieee80211_he_cap_elem elem;
- u8 n;
- u8 ie_len;
- u8 *orig_pos = pos;
+ u8 ru_limit, max_ru;
- /* Make sure we have place for the IE */
- /*
- * TODO: the 1 added is because this temporarily is under the EXTENSION
- * IE. Get rid of it when it moves.
- */
- if (!he_cap)
- return orig_pos;
+ *elem = he_cap->he_cap_elem;
- /* modify on stack first to calculate 'n' and 'ie_len' correctly */
- elem = he_cap->he_cap_elem;
+ switch (conn->bw_limit) {
+ case IEEE80211_CONN_BW_LIMIT_20:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_40:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_80:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ break;
+ default:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996;
+ break;
+ }
- if (disable_flags & IEEE80211_CONN_DISABLE_40MHZ)
- elem.phy_cap_info[0] &=
+ max_ru = elem->phy_cap_info[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ max_ru = min(max_ru, ru_limit);
+ elem->phy_cap_info[8] &= ~IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ elem->phy_cap_info[8] |= max_ru;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ elem->phy_cap_info[0] &=
~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
+ elem->phy_cap_info[9] &=
+ ~IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ elem->phy_cap_info[0] &=
+ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G);
+ elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[7] &=
+ ~(IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ);
+ }
+}
+
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_he_cap_elem elem;
+ u8 *len;
+ u8 n;
+ u8 ie_len;
+
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
- if (disable_flags & IEEE80211_CONN_DISABLE_160MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap)
+ return 0;
- if (disable_flags & IEEE80211_CONN_DISABLE_80P80MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &elem);
n = ieee80211_he_mcs_nss_size(&elem);
ie_len = 2 + 1 +
@@ -3240,19 +2517,17 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- pos++; /* We'll set the size later below */
- *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ len = skb_put(skb, 1); /* We'll set the size later below */
+ skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY);
/* Fixed data */
- memcpy(pos, &elem, sizeof(elem));
- pos += sizeof(elem);
+ skb_put_data(skb, &elem, sizeof(elem));
- memcpy(pos, &he_cap->he_mcs_nss_supp, n);
- pos += n;
+ skb_put_data(skb, &he_cap->he_mcs_nss_supp, n);
/* Check if PPE Threshold should be present */
if ((he_cap->he_cap_elem.phy_cap_info[6] &
@@ -3276,41 +2551,39 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
n = DIV_ROUND_UP(n, 8);
/* Copy PPE Thresholds */
- memcpy(pos, &he_cap->ppe_thres, n);
- pos += n;
+ skb_put_data(skb, &he_cap->ppe_thres, n);
end:
- orig_pos[1] = (pos - orig_pos) - 2;
- return pos;
+ *len = skb_tail_pointer(skb) - len - 1;
+ return 0;
}
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb)
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- u8 *pos;
- u16 cap;
+ __le16 cap;
if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
BIT(NL80211_BAND_6GHZ),
IEEE80211_CHAN_NO_HE))
- return;
+ return 0;
sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (!iftd)
- return;
+ return 0;
/* Check for device HE 6 GHz capability before adding element */
if (!iftd->he_6ghz_capa.capa)
- return;
+ return 0;
- cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
- cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
+ cap = iftd->he_6ghz_capa.capa;
+ cap &= cpu_to_le16(~IEEE80211_HE_6GHZ_CAP_SM_PS);
switch (smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
@@ -3318,22 +2591,27 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_STATIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_DYNAMIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
}
- pos = skb_put(skb, 2 + 1 + sizeof(cap));
- ieee80211_write_he_6ghz_cap(pos, cpu_to_le16(cap),
- pos + 2 + 1 + sizeof(cap));
+ if (skb_tailroom(skb) < 2 + 1 + sizeof(cap))
+ return -ENOBUFS;
+
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, 1 + sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_EXT_HE_6GHZ_CAPA);
+ skb_put_data(skb, &cap, sizeof(cap));
+ return 0;
}
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
@@ -3471,10 +2749,11 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos + sizeof(struct ieee80211_vht_operation);
}
-u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef)
+u8 *ieee80211_ie_build_he_oper(u8 *pos, const struct cfg80211_chan_def *chandef)
{
struct ieee80211_he_operation *he_oper;
struct ieee80211_he_6ghz_oper *he_6ghz_op;
+ struct cfg80211_chan_def he_chandef;
u32 he_oper_params;
u8 ie_len = 1 + sizeof(struct ieee80211_he_operation);
@@ -3506,27 +2785,33 @@ u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef)
if (chandef->chan->band != NL80211_BAND_6GHZ)
goto out;
+ cfg80211_chandef_create(&he_chandef, chandef->chan, NL80211_CHAN_NO_HT);
+ he_chandef.center_freq1 = chandef->center_freq1;
+ he_chandef.center_freq2 = chandef->center_freq2;
+ he_chandef.width = chandef->width;
+
/* TODO add VHT operational */
he_6ghz_op = (struct ieee80211_he_6ghz_oper *)pos;
he_6ghz_op->minrate = 6; /* 6 Mbps */
he_6ghz_op->primary =
- ieee80211_frequency_to_channel(chandef->chan->center_freq);
+ ieee80211_frequency_to_channel(he_chandef.chan->center_freq);
he_6ghz_op->ccfs0 =
- ieee80211_frequency_to_channel(chandef->center_freq1);
- if (chandef->center_freq2)
+ ieee80211_frequency_to_channel(he_chandef.center_freq1);
+ if (he_chandef.center_freq2)
he_6ghz_op->ccfs1 =
- ieee80211_frequency_to_channel(chandef->center_freq2);
+ ieee80211_frequency_to_channel(he_chandef.center_freq2);
else
he_6ghz_op->ccfs1 = 0;
- switch (chandef->width) {
+ switch (he_chandef.width) {
case NL80211_CHAN_WIDTH_320:
- /*
- * TODO: mesh operation is not defined over 6GHz 320 MHz
- * channels.
+ /* Downgrade EHT 320 MHz BW to 160 MHz for HE and set new
+ * center_freq1
*/
- WARN_ON(1);
- break;
+ ieee80211_chandef_downgrade(&he_chandef, NULL);
+ he_6ghz_op->ccfs0 =
+ ieee80211_frequency_to_channel(he_chandef.center_freq1);
+ fallthrough;
case NL80211_CHAN_WIDTH_160:
/* Convert 160 MHz channel width to new style as interop
* workaround.
@@ -3534,7 +2819,7 @@ u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef)
he_6ghz_op->control =
IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
he_6ghz_op->ccfs1 = he_6ghz_op->ccfs0;
- if (chandef->chan->center_freq < chandef->center_freq1)
+ if (he_chandef.chan->center_freq < he_chandef.center_freq1)
he_6ghz_op->ccfs0 -= 8;
else
he_6ghz_op->ccfs0 += 8;
@@ -3563,7 +2848,7 @@ out:
return pos;
}
-u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef,
+u8 *ieee80211_ie_build_eht_oper(u8 *pos, const struct cfg80211_chan_def *chandef,
const struct ieee80211_sta_eht_cap *eht_cap)
{
@@ -3785,7 +3070,6 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
}
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef)
{
chandef->center_freq1 =
@@ -3804,90 +3088,38 @@ void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
chandef->width = NL80211_CHAN_WIDTH_80;
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ:
- if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
- }
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ:
- if (support_320) {
- chandef->width = NL80211_CHAN_WIDTH_320;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
-
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chandef->center_freq1 -= 40;
- else
- chandef->center_freq1 += 40;
- }
+ chandef->width = NL80211_CHAN_WIDTH_320;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
}
}
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct cfg80211_chan_def he_chandef = *chandef;
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- bool support_80_80, support_160, support_320;
- u8 he_phy_cap, eht_phy_cap;
u32 freq;
if (chandef->chan->band != NL80211_BAND_6GHZ)
return true;
- sband = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
-
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- if (!he_cap) {
- sdata_info(sdata, "Missing iftype sband data/HE cap");
+ if (!he_oper)
return false;
- }
-
- he_phy_cap = he_cap->he_cap_elem.phy_cap_info[0];
- support_160 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- support_80_80 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- if (!he_oper) {
- sdata_info(sdata,
- "HE is not advertised on (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
- return false;
- }
-
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
- if (!eht_cap)
- eht_oper = NULL;
he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper) {
- sdata_info(sdata,
- "HE 6GHz operation missing (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_6ghz_oper)
return false;
- }
/*
* The EHT operation IE does not contain the primary channel so the
@@ -3896,20 +3128,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*/
freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary,
NL80211_BAND_6GHZ);
- he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ he_chandef.chan = ieee80211_get_channel(local->hw.wiphy, freq);
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- bss_conf->power_type = IEEE80211_REG_LPI_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- bss_conf->power_type = IEEE80211_REG_SP_AP;
- break;
- default:
- bss_conf->power_type = IEEE80211_REG_UNSET_AP;
- break;
- }
+ if (!he_chandef.chan)
+ return false;
if (!eht_oper ||
!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
@@ -3928,13 +3150,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.width = NL80211_CHAN_WIDTH_80;
if (!he_6ghz_oper->ccfs1)
break;
- if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
- if (support_160)
- he_chandef.width = NL80211_CHAN_WIDTH_160;
- } else {
- if (support_80_80)
- he_chandef.width = NL80211_CHAN_WIDTH_80P80;
- }
+ if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8)
+ he_chandef.width = NL80211_CHAN_WIDTH_160;
+ else
+ he_chandef.width = NL80211_CHAN_WIDTH_80P80;
break;
}
@@ -3946,30 +3165,19 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.center_freq1 =
ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
NL80211_BAND_6GHZ);
- if (support_80_80 || support_160)
- he_chandef.center_freq2 =
- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
- NL80211_BAND_6GHZ);
+ he_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+ NL80211_BAND_6GHZ);
}
} else {
- eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0];
- support_320 =
- eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
-
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- support_160, support_320,
&he_chandef);
+ he_chandef.punctured =
+ ieee80211_eht_oper_dis_subchan_bitmap(eht_oper);
}
- if (!cfg80211_chandef_valid(&he_chandef)) {
- sdata_info(sdata,
- "HE 6GHz operation resulted in invalid chandef: %d MHz/%d/%d MHz/%d MHz\n",
- he_chandef.chan ? he_chandef.chan->center_freq : 0,
- he_chandef.width,
- he_chandef.center_freq1,
- he_chandef.center_freq2);
+ if (!cfg80211_chandef_valid(&he_chandef))
return false;
- }
*chandef = he_chandef;
@@ -4012,121 +3220,62 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
return true;
}
-int ieee80211_parse_bitrates(enum nl80211_chan_width width,
- const struct ieee80211_supported_band *sband,
- const u8 *srates, int srates_len, u32 *rates)
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id)
{
- u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
- struct ieee80211_rate *br;
- int brate, rate, i, j, count = 0;
-
- *rates = 0;
+ u8 i, rates, skip;
- for (i = 0; i < srates_len; i++) {
- rate = srates[i] & 0x7f;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- br = &sband->bitrates[j];
- if ((rate_flags & br->flags) != rate_flags)
- continue;
-
- brate = DIV_ROUND_UP(br->bitrate, 5);
- if (brate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
- }
- return count;
-}
-
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, rates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
-
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- sband = local->hw.wiphy->bands[band];
rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
+ if (masked_rates & BIT(i))
+ continue;
rates++;
}
- if (rates > 8)
- rates = 8;
- if (skb_tailroom(skb) < rates + 2)
- return -ENOMEM;
-
- pos = skb_put(skb, rates + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
+ if (element_id == WLAN_EID_SUPP_RATES) {
+ rates = min_t(u8, rates, 8);
+ skip = 0;
+ } else {
+ skip = 8;
+ if (rates <= skip)
+ return 0;
+ rates -= skip;
}
- return 0;
-}
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOBUFS;
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, exrates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ skb_put_u8(skb, element_id);
+ skb_put_u8(skb, rates);
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates && rates; i++) {
+ int rate;
+ u8 basic;
- sband = local->hw.wiphy->bands[band];
- exrates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
- exrates++;
- }
+ if (masked_rates & BIT(i))
+ continue;
- if (exrates > 8)
- exrates -= 8;
- else
- exrates = 0;
+ if (skip > 0) {
+ skip--;
+ continue;
+ }
- if (skb_tailroom(skb) < exrates + 2)
- return -ENOMEM;
+ basic = basic_rates & BIT(i) ? 0x80 : 0;
- if (exrates) {
- pos = skb_put(skb, exrates + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = exrates;
- for (i = 8; i < sband->n_bitrates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
- }
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
+ skb_put_u8(skb, basic | (u8)rate);
+ rates--;
}
+
+ WARN(rates > 0, "rates confused: rates:%d, element:%d\n",
+ rates, element_id);
+
return 0;
}
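
For reference, the element this helper emits follows the usual 802.11 convention: each rate octet carries the rate in 500 kb/s units, bit 7 (0x80) marks a basic rate, the first eight rates go into Supported Rates and any remainder into Extended Supported Rates. A minimal standalone sketch of the per-rate encoding (illustrative only, not mac80211 code; the helper name is made up):

/* encode one rate octet; bitrate is in 100 kb/s units as in struct ieee80211_rate */
static unsigned char encode_rate_octet(unsigned int bitrate, int is_basic)
{
	/* 802.11 carries rates in 500 kb/s units; 0x80 flags a basic rate */
	unsigned char rate = (bitrate + 4) / 5;	/* DIV_ROUND_UP(bitrate, 5) */

	return (is_basic ? 0x80 : 0x00) | rate;
}

/* e.g. a basic 6 Mb/s rate (bitrate = 60) encodes as 0x8c (0x80 | 12) */
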
@@ -4322,28 +3471,44 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
return ts;
}
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+/* Cancel CAC for the interfaces under the specified @local. If @ctx is
+ * also provided, only the interfaces using that ctx will be canceled.
+ */
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- /* it might be waiting for the local->mtx, but then
- * by the time it gets it, sdata->wdev.cac_started
- * will no longer be true
- */
- wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->deflink.dfs_cac_timer_work);
-
- if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
- ieee80211_link_release_channel(&sdata->deflink);
- cfg80211_cac_event(sdata->dev,
- &chandef,
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ link = sdata_dereference(sdata->link[link_id],
+ sdata);
+ if (!link)
+ continue;
+
+ chanctx_conf = sdata_dereference(link->conf->chanctx_conf,
+ sdata);
+ if (ctx && &ctx->conf != chanctx_conf)
+ continue;
+
+ wiphy_delayed_work_cancel(local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+
+ if (!sdata->wdev.links[link_id].cac_started)
+ continue;
+
+ chandef = link->conf->chanreq.oper;
+ ieee80211_link_release_channel(link);
+ cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
}
}
@@ -4353,9 +3518,8 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, radar_detected_work);
- struct cfg80211_chan_def chandef = local->hw.conf.chandef;
+ struct cfg80211_chan_def chandef;
struct ieee80211_chanctx *ctx;
- int num_chanctx = 0;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -4363,126 +3527,136 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- num_chanctx++;
+ if (!ctx->radar_detected)
+ continue;
+
+ ctx->radar_detected = false;
+
chandef = ctx->conf.def;
+
+ ieee80211_dfs_cac_cancel(local, ctx);
+ cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
}
+}
- ieee80211_dfs_cac_cancel(local);
+static void
+ieee80211_radar_mark_chan_ctx_iterator(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data)
+{
+ struct ieee80211_chanctx *ctx =
+ container_of(chanctx_conf, struct ieee80211_chanctx,
+ conf);
- if (num_chanctx > 1)
- /* XXX: multi-channel is not supported yet */
- WARN_ON(1);
- else
- cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
+ if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
+ return;
+
+ if (data && data != chanctx_conf)
+ return;
+
+ ctx->radar_detected = true;
}
-void ieee80211_radar_detected(struct ieee80211_hw *hw)
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_radar_detected(local);
+ ieee80211_iter_chan_contexts_atomic(hw, ieee80211_radar_mark_chan_ctx_iterator,
+ chanctx_conf);
+
wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
}
EXPORT_SYMBOL(ieee80211_radar_detected);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *c,
+ struct ieee80211_conn_settings *conn)
{
- ieee80211_conn_flags_t ret;
- int tmp;
+ enum nl80211_chan_width new_primary_width;
+ struct ieee80211_conn_settings _ignored = {};
+
+ /* allow passing NULL if caller doesn't care */
+ if (!conn)
+ conn = &_ignored;
+
+again:
+ /* no-HT indicates nothing to do */
+ new_primary_width = NL80211_CHAN_WIDTH_20_NOHT;
switch (c->width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ WARN_ON_ONCE(1);
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_40:
c->width = NL80211_CHAN_WIDTH_20;
c->center_freq1 = c->chan->center_freq;
- ret = IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_80:
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
- c->width = NL80211_CHAN_WIDTH_40;
- ret = IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_40;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
case NL80211_CHAN_WIDTH_80P80:
c->center_freq2 = 0;
c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_160:
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P80 */
- tmp /= 4;
- c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ new_primary_width = NL80211_CHAN_WIDTH_80;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P160 */
- tmp /= 8;
- c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
- c->width = NL80211_CHAN_WIDTH_160;
- ret = IEEE80211_CONN_DISABLE_320MHZ;
- break;
- default:
- case NL80211_CHAN_WIDTH_20_NOHT:
- WARN_ON_ONCE(1);
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_160;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
break;
case NL80211_CHAN_WIDTH_1:
case NL80211_CHAN_WIDTH_2:
case NL80211_CHAN_WIDTH_4:
case NL80211_CHAN_WIDTH_8:
case NL80211_CHAN_WIDTH_16:
+ WARN_ON_ONCE(1);
+ /* keep c->width */
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
WARN_ON_ONCE(1);
/* keep c->width */
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
}
- WARN_ON_ONCE(!cfg80211_chandef_valid(c));
-
- return ret;
-}
-
-/*
- * Returns true if smps_mode_new is strictly more restrictive than
- * smps_mode_old.
- */
-bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
- enum ieee80211_smps_mode smps_mode_new)
-{
- if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC ||
- smps_mode_new == IEEE80211_SMPS_AUTOMATIC))
- return false;
-
- switch (smps_mode_old) {
- case IEEE80211_SMPS_STATIC:
- return false;
- case IEEE80211_SMPS_DYNAMIC:
- return smps_mode_new == IEEE80211_SMPS_STATIC;
- case IEEE80211_SMPS_OFF:
- return smps_mode_new != IEEE80211_SMPS_OFF;
- default:
- WARN_ON(1);
+ if (new_primary_width != NL80211_CHAN_WIDTH_20_NOHT) {
+ c->center_freq1 = cfg80211_chandef_primary(c, new_primary_width,
+ &c->punctured);
+ c->width = new_primary_width;
}
- return false;
+ /*
+ * With an 80 MHz channel, we might have the puncturing in the primary
+ * 40 MHz channel, but that's not valid when downgraded to 40 MHz width.
+ * In that case, downgrade again.
+ */
+ if (!cfg80211_chandef_valid(c) && c->punctured)
+ goto again;
+
+ WARN_ON_ONCE(!cfg80211_chandef_valid(c));
}
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
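
As a worked example of the 160 MHz case above (numbers are illustrative): for a chandef with center_freq1 = 5250 MHz and the primary 20 MHz channel at 5180 MHz, the removed arithmetic gives n_P20 = (70 + 5180 - 5250) / 20 = 0, n_P80 = 0 / 4 = 0, hence the new center_freq1 = 5250 - 40 + 80 * 0 = 5210 MHz, i.e. the lower 80 MHz segment containing the primary channel. cfg80211_chandef_primary() with the new primary width computes the same value and also updates the punctured bitmap through the out parameter, as used above.
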
@@ -4773,7 +3947,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
if (link->reserved_radar_required)
- radar_detect |= BIT(link->reserved_chandef.width);
+ radar_detect |= BIT(link->reserved.oper.width);
/*
* An in-place reservation context should not have any assigned vifs
@@ -4787,25 +3961,109 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
continue;
radar_detect |=
- BIT(link->conf->chandef.width);
+ BIT(link->conf->chanreq.oper.width);
}
return radar_detect;
}
+static u32
+__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chanctx_conf *conf;
+ unsigned int link_id;
+ u32 mask = 0;
+
+ for_each_vif_active_link(&sdata->vif, link_conf, link_id) {
+ conf = sdata_dereference(link_conf->chanctx_conf, sdata);
+ if (!conf || conf->radio_idx < 0)
+ continue;
+
+ mask |= BIT(conf->radio_idx);
+ }
+
+ return mask;
+}
+
+u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return __ieee80211_get_radio_mask(sdata);
+}
+
+static bool
+ieee80211_sdata_uses_radio(struct ieee80211_sub_if_data *sdata, int radio_idx)
+{
+ if (radio_idx < 0)
+ return true;
+
+ return __ieee80211_get_radio_mask(sdata) & BIT(radio_idx);
+}
+
+static int
+ieee80211_fill_ifcomb_params(struct ieee80211_local *local,
+ struct iface_combination_params *params,
+ const struct cfg80211_chan_def *chandef,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_sub_if_data *sdata_iter;
+ struct ieee80211_chanctx *ctx;
+ int total = !!sdata;
+
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
+ continue;
+
+ if (params->radio_idx >= 0 &&
+ ctx->conf.radio_idx != params->radio_idx)
+ continue;
+
+ params->radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
+
+ if (chandef && ctx->mode != IEEE80211_CHANCTX_EXCLUSIVE &&
+ cfg80211_chandef_compatible(chandef, &ctx->conf.def))
+ continue;
+
+ params->num_different_channels++;
+ }
+
+ list_for_each_entry(sdata_iter, &local->interfaces, list) {
+ struct wireless_dev *wdev_iter;
+
+ wdev_iter = &sdata_iter->wdev;
+
+ if (sdata_iter == sdata ||
+ !ieee80211_sdata_running(sdata_iter) ||
+ cfg80211_iftype_allowed(local->hw.wiphy,
+ wdev_iter->iftype, 0, 1))
+ continue;
+
+ if (!ieee80211_sdata_uses_radio(sdata_iter, params->radio_idx))
+ continue;
+
+ params->iftype_num[wdev_iter->iftype]++;
+ total++;
+ }
+
+ return total;
+}
+
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
- u8 radar_detect)
+ u8 radar_detect, int radio_idx)
{
+ bool shared = chanmode == IEEE80211_CHANCTX_SHARED;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- struct ieee80211_chanctx *ctx;
- int total = 1;
struct iface_combination_params params = {
.radar_detect = radar_detect,
+ .radio_idx = radio_idx,
};
+ int total;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -4842,37 +4100,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
params.iftype_num[iftype] = 1;
- list_for_each_entry(ctx, &local->chanctx_list, list) {
- if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
- continue;
- params.radar_detect |=
- ieee80211_chanctx_radar_detect(local, ctx);
- if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- params.num_different_channels++;
- continue;
- }
- if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
- cfg80211_chandef_compatible(chandef,
- &ctx->conf.def))
- continue;
- params.num_different_channels++;
- }
-
- list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
- struct wireless_dev *wdev_iter;
-
- wdev_iter = &sdata_iter->wdev;
-
- if (sdata_iter == sdata ||
- !ieee80211_sdata_running(sdata_iter) ||
- cfg80211_iftype_allowed(local->hw.wiphy,
- wdev_iter->iftype, 0, 1))
- continue;
-
- params.iftype_num[wdev_iter->iftype]++;
- total++;
- }
-
+ total = ieee80211_fill_ifcomb_params(local, &params,
+ shared ? chandef : NULL,
+ sdata);
if (total == 1 && !params.radar_detect)
return 0;
@@ -4889,28 +4119,17 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
c->num_different_channels);
}
-int ieee80211_max_num_channels(struct ieee80211_local *local)
+int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx)
{
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_chanctx *ctx;
u32 max_num_different_channels = 1;
int err;
- struct iface_combination_params params = {0};
+ struct iface_combination_params params = {
+ .radio_idx = radio_idx,
+ };
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(ctx, &local->chanctx_list, list) {
- if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
- continue;
-
- params.num_different_channels++;
-
- params.radar_detect |=
- ieee80211_chanctx_radar_detect(local, ctx);
- }
-
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- params.iftype_num[sdata->wdev.iftype]++;
+ ieee80211_fill_ifcomb_params(local, &params, NULL, NULL);
err = cfg80211_iter_combinations(local->hw.wiphy, &params,
ieee80211_iter_max_chans,
@@ -5037,7 +4256,8 @@ u16 ieee80211_encode_usf(int listen_interval)
return (u16) listen_interval;
}
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_eht_cap() will need */
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -5049,13 +4269,12 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap || !eht_cap)
return 0;
- is_ap = iftype == NL80211_IFTYPE_AP ||
- iftype == NL80211_IFTYPE_P2P_GO;
+ is_ap = sdata->vif.type == NL80211_IFTYPE_AP;
n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem,
@@ -5067,45 +4286,159 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
return 0;
}
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap)
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
{
+ const struct ieee80211_sta_he_cap *he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ bool for_ap = sdata->vif.type == NL80211_IFTYPE_AP;
+ struct ieee80211_eht_cap_elem_fixed fixed;
+ struct ieee80211_he_cap_elem he;
u8 mcs_nss_len, ppet_len;
+ u8 orig_mcs_nss_len;
u8 ie_len;
- u8 *orig_pos = pos;
+
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
/* Make sure we have place for the IE */
if (!he_cap || !eht_cap)
- return orig_pos;
+ return 0;
+
+ orig_mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ for_ap);
+
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &he);
+
+ fixed = eht_cap->eht_cap_elem;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_80)
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ;
+ }
- mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- for_ap);
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320) {
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ;
+ }
+
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20)
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he, &fixed, for_ap);
ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
+ fixed.phy_cap_info);
ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len;
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = ie_len - 2;
- *pos++ = WLAN_EID_EXT_EHT_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, ie_len - 2);
+ skb_put_u8(skb, WLAN_EID_EXT_EHT_CAPABILITY);
+ skb_put_data(skb, &fixed, sizeof(fixed));
- /* Fixed data */
- memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem));
- pos += sizeof(eht_cap->eht_cap_elem);
+ if (mcs_nss_len == 4 && orig_mcs_nss_len != 4) {
+ /*
+ * If the (non-AP) STA became 20 MHz only, then convert from
+ * <=80 to 20-MHz-only format, where MCSes are indicated in
+ * the groups 0-7, 8-9, 10-11, 12-13 rather than just 0-9,
+ * 10-11, 12-13. Thus, use 0-9 for 0-7 and 8-9.
+ */
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs11_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss);
+ } else {
+ skb_put_data(skb, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
+ }
- memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
- pos += mcs_nss_len;
+ if (ppet_len)
+ skb_put_data(skb, &eht_cap->eht_ppe_thres, ppet_len);
- if (ppet_len) {
- memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len);
- pos += ppet_len;
+ return 0;
+}
+
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode)
+{
+ static const char * const modes[] = {
+ [IEEE80211_CONN_MODE_S1G] = "S1G",
+ [IEEE80211_CONN_MODE_LEGACY] = "legacy",
+ [IEEE80211_CONN_MODE_HT] = "HT",
+ [IEEE80211_CONN_MODE_VHT] = "VHT",
+ [IEEE80211_CONN_MODE_HE] = "HE",
+ [IEEE80211_CONN_MODE_EHT] = "EHT",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(modes)))
+ return "<out of range>";
+
+ return modes[mode] ?: "<missing string>";
+}
+
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_CONN_BW_LIMIT_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_CONN_BW_LIMIT_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_CONN_BW_LIMIT_80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return IEEE80211_CONN_BW_LIMIT_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_CONN_BW_LIMIT_320;
+ default:
+ WARN(1, "unhandled chandef width %d\n", chandef->width);
+ return IEEE80211_CONN_BW_LIMIT_20;
}
+}
- return pos;
+void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe)
+{
+ for (int i = 0; i < 2; i++) {
+ tpe->max_local[i].valid = false;
+ memset(tpe->max_local[i].power,
+ IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT,
+ sizeof(tpe->max_local[i].power));
+
+ tpe->max_reg_client[i].valid = false;
+ memset(tpe->max_reg_client[i].power,
+ IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT,
+ sizeof(tpe->max_reg_client[i].power));
+
+ tpe->psd_local[i].valid = false;
+ memset(tpe->psd_local[i].power,
+ IEEE80211_TPE_PSD_NO_LIMIT,
+ sizeof(tpe->psd_local[i].power));
+
+ tpe->psd_reg_client[i].valid = false;
+ memset(tpe->psd_reg_client[i].power,
+ IEEE80211_TPE_PSD_NO_LIMIT,
+ sizeof(tpe->psd_reg_client[i].power));
+ }
}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index b3a5c3e96a72..c5c5d16ed6c8 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -280,10 +280,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/*
* This is a workaround for VHT-enabled STAs which break the spec
* and have the VHT-MCS Rx map filled in with value 3 for all eight
- * spacial streams, an example is AR9462.
+ * spatial streams, an example is AR9462.
*
* As per spec, in section 22.1.1 Introduction to the VHT PHY
- * A VHT STA shall support at least single spactial stream VHT-MCSs
+ * A VHT STA shall support at least single spatial stream VHT-MCSs
* 0 to 7 (transmit and receive) in all supported channel widths.
*/
if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
@@ -350,8 +350,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
}
/* FIXME: move this to some better location - parses HE/EHT now */
-enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
+static enum ieee80211_sta_rx_bandwidth
+__ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef)
{
unsigned int link_id = link_sta->link_id;
struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata;
@@ -361,44 +362,43 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
u32 cap_width;
if (he_cap->has_he) {
- struct ieee80211_bss_conf *link_conf;
- enum ieee80211_sta_rx_bandwidth ret;
+ enum nl80211_band band;
u8 info;
- rcu_read_lock();
- link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+ if (chandef) {
+ band = chandef->chan->band;
+ } else {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+ band = link_conf->chanreq.oper.chan->band;
+ rcu_read_unlock();
+ }
- if (eht_cap->has_eht &&
- link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (eht_cap->has_eht && band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
- if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
- ret = IEEE80211_STA_RX_BW_320;
- goto out;
- }
+ if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ return IEEE80211_STA_RX_BW_320;
}
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
- ret = IEEE80211_STA_RX_BW_40;
- else
- ret = IEEE80211_STA_RX_BW_20;
- goto out;
+ return IEEE80211_STA_RX_BW_40;
+ return IEEE80211_STA_RX_BW_20;
}
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G ||
info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- ret = IEEE80211_STA_RX_BW_160;
- else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
- ret = IEEE80211_STA_RX_BW_80;
- else
- ret = IEEE80211_STA_RX_BW_20;
-out:
- rcu_read_unlock();
+ return IEEE80211_STA_RX_BW_160;
- return ret;
+ if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ return IEEE80211_STA_RX_BW_80;
+
+ return IEEE80211_STA_RX_BW_20;
}
if (!vht_cap->vht_supported)
@@ -423,6 +423,28 @@ out:
return IEEE80211_STA_RX_BW_80;
}
+enum ieee80211_sta_rx_bandwidth
+_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef)
+{
+ /*
+ * With RX OMI, also pretend that the STA's capability changed.
+ * Of course this isn't really true; the capability didn't change,
+ * only our RX capability was reduced by notifying RX OMI to the STA.
+ * The purpose, however, is to save power, and that also requires
+ * changing transmissions to the AP and the chanctx. The
+ * transmissions depend on link_sta->bandwidth which is set in
+ * _ieee80211_sta_cur_vht_bw() below, but the chanctx depends
+ * on the result of this function which is also called by
+ * _ieee80211_sta_cur_vht_bw(), so we need to do that here as
+ * well. This is sufficient for the steady state, but during
+ * the transition we already need to change TX/RX separately,
+ * so _ieee80211_sta_cur_vht_bw() below applies the _tx one.
+ */
+ return min(__ieee80211_sta_cap_rx_bw(link_sta, chandef),
+ link_sta->rx_omi_bw_rx);
+}
+
enum nl80211_chan_width
ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta)
{
@@ -479,47 +501,35 @@ ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *link_sta)
}
}
-enum ieee80211_sta_rx_bandwidth
-ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- return IEEE80211_STA_RX_BW_20;
- case NL80211_CHAN_WIDTH_40:
- return IEEE80211_STA_RX_BW_40;
- case NL80211_CHAN_WIDTH_80:
- return IEEE80211_STA_RX_BW_80;
- case NL80211_CHAN_WIDTH_160:
- case NL80211_CHAN_WIDTH_80P80:
- return IEEE80211_STA_RX_BW_160;
- case NL80211_CHAN_WIDTH_320:
- return IEEE80211_STA_RX_BW_320;
- default:
- WARN_ON_ONCE(1);
- return IEEE80211_STA_RX_BW_20;
- }
-}
-
/* FIXME: rename/move - this deals with everything not just VHT */
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef)
{
struct sta_info *sta = link_sta->sta;
- struct ieee80211_bss_conf *link_conf;
enum nl80211_chan_width bss_width;
enum ieee80211_sta_rx_bandwidth bw;
- rcu_read_lock();
- link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
- if (WARN_ON(!link_conf))
- bss_width = NL80211_CHAN_WIDTH_20_NOHT;
- else
- bss_width = link_conf->chandef.width;
- rcu_read_unlock();
+ if (chandef) {
+ bss_width = chandef->width;
+ } else {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
+ if (WARN_ON_ONCE(!link_conf)) {
+ rcu_read_unlock();
+ return IEEE80211_STA_RX_BW_20;
+ }
+ bss_width = link_conf->chanreq.oper.width;
+ rcu_read_unlock();
+ }
- bw = ieee80211_sta_cap_rx_bw(link_sta);
+ /* intentionally do not take rx_omi_bw_rx into account */
+ bw = __ieee80211_sta_cap_rx_bw(link_sta, chandef);
bw = min(bw, link_sta->cur_max_bandwidth);
+ /* but do apply rx_omi_bw_tx */
+ bw = min(bw, link_sta->rx_omi_bw_tx);
/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
* IEEE80211-2016 specification makes higher bandwidth operation
@@ -541,15 +551,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
return bw;
}
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
- /* if we received a notification already don't overwrite it */
- if (link_sta->pub->rx_nss)
- return;
-
if (link_sta->pub->eht_cap.has_eht) {
int i;
const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
@@ -627,7 +633,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
+ rx_nss = max_t(u8, 1, rx_nss);
+ link_sta->capa_nss = rx_nss;
+
+ /* that shouldn't be set yet, but we can handle it anyway */
+ if (link_sta->op_mode_nss)
+ link_sta->pub->rx_nss =
+ min_t(u8, rx_nss, link_sta->op_mode_nss);
+ else
+ link_sta->pub->rx_nss = rx_nss;
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -637,7 +651,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
- u8 nss, cur_nss;
+ u8 nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
@@ -647,23 +661,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (link_sta->pub->rx_nss != nss) {
- cur_nss = link_sta->pub->rx_nss;
- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which
- * will set the same to max nss value calculated based on capability.
- */
- link_sta->pub->rx_nss = 0;
- ieee80211_sta_set_rx_nss(link_sta);
- /* Do not allow an nss change to rx_nss greater than max_nss
- * negotiated and capped to APs capability during association.
- */
- if (nss <= link_sta->pub->rx_nss) {
- link_sta->pub->rx_nss = nss;
- sta_opmode.rx_nss = nss;
- changed |= IEEE80211_RC_NSS_CHANGED;
- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ if (link_sta->op_mode_nss != nss) {
+ if (nss <= link_sta->capa_nss) {
+ link_sta->op_mode_nss = nss;
+
+ if (nss != link_sta->pub->rx_nss) {
+ link_sta->pub->rx_nss = nss;
+ changed |= IEEE80211_RC_NSS_CHANGED;
+ sta_opmode.rx_nss = link_sta->pub->rx_nss;
+ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ }
} else {
- link_sta->pub->rx_nss = cur_nss;
pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
link_sta->pub->addr, nss);
}
@@ -761,8 +769,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
if (changed > 0) {
ieee80211_recalc_min_chandef(sdata, link_sta->link_id);
- rate_control_rate_update(local, sband, link_sta->sta,
- link_sta->link_id, changed);
+ rate_control_rate_update(local, sband, link_sta, changed);
}
}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5c01e121481a..93b8668079a7 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -16,7 +16,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 94dae7cb6dbd..40d5d9e48479 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -12,7 +12,7 @@
#include <linux/compiler.h>
#include <linux/ieee80211.h>
#include <linux/gfp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
#include <crypto/utils.h>
@@ -315,7 +315,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
* Calculate AAD for CCMP/GCMP, returning qos_tid since we
* need that in CCMP also for b_0.
*/
-static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
+static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad, bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
@@ -340,7 +340,14 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
len_a += 6;
if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_tid = ieee80211_get_tid(hdr);
+ qos_tid = *ieee80211_get_qos_ctl(hdr);
+
+ if (spp_amsdu)
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK |
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ else
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK;
+
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
len_a += 2;
} else {
@@ -369,10 +376,11 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
return qos_tid;
}
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- u8 qos_tid = ccmp_gcmp_aad(skb, aad);
+ u8 qos_tid = ccmp_gcmp_aad(skb, aad, spp_amsdu);
/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
* mode authentication are not allowed to collide, yet both are derived
@@ -479,7 +487,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len));
}
@@ -557,7 +566,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
@@ -581,17 +591,15 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_CONTINUE;
}
-static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
memcpy(j_0, hdr->addr2, ETH_ALEN);
memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
- j_0[13] = 0;
- j_0[14] = 0;
- j_0[AES_BLOCK_SIZE - 1] = 0x01;
- ccmp_gcmp_aad(skb, aad);
+ ccmp_gcmp_aad(skb, aad, spp_amsdu);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
@@ -680,7 +688,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_GCMP_HDR_LEN;
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
@@ -753,7 +762,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 j_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_gcm_decrypt(
key->u.gcmp.tfm, j_0, aad,
@@ -882,7 +892,8 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
info = IEEE80211_SKB_CB(skb);
- if (info->control.hw_key)
+ if (info->control.hw_key &&
+ !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE))
return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
@@ -898,6 +909,9 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
bip_ipn_set64(mmie->sequence_number, pn64);
+ if (info->control.hw_key)
+ return TX_CONTINUE;
+
bip_aad(skb, aad);
/* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
@@ -1027,7 +1041,8 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
info = IEEE80211_SKB_CB(skb);
- if (info->control.hw_key)
+ if (info->control.hw_key &&
+ !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE))
return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
@@ -1043,6 +1058,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
bip_ipn_set64(mmie->sequence_number, pn64);
+ if (info->control.hw_key)
+ return TX_CONTINUE;
+
bip_aad(skb, aad);
hdr = (struct ieee80211_hdr *)skb->data;
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 08dd521a51a5..8f2bff268392 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -194,9 +194,6 @@ int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
void ieee802154_mlme_op_post(struct ieee802154_local *local);
-int ieee802154_mlme_tx_one(struct ieee802154_local *local,
- struct ieee802154_sub_if_data *sdata,
- struct sk_buff *skb);
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index c0e2da5072be..9e4631fade90 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -684,6 +684,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
ASSERT_RTNL();
mutex_lock(&sdata->local->iflist_mtx);
+ if (list_empty(&sdata->local->interfaces)) {
+ mutex_unlock(&sdata->local->iflist_mtx);
+ return;
+ }
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 8d2eabc71bbe..f13b07ebfb98 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -265,19 +265,27 @@ fail:
return -ENOMEM;
}
+static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
+{
+ struct ieee802154_llsec_key_entry *pos;
+ struct mac802154_llsec_key *mkey;
+
+ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
+ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+ llsec_key_put(mkey);
+ kfree_sensitive(pos);
+}
+
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
- struct mac802154_llsec_key *mkey;
-
- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
-
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
- llsec_key_put(mkey);
+ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
return 0;
}
}
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 9ab7396668d2..21b7c3b280b4 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -161,8 +161,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy,
}
phy->symbol_duration = duration;
- phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
- phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+ phy->lifs_period =
+ (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ phy->sifs_period =
+ (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
}
EXPORT_SYMBOL(ieee802154_configure_durations);
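
As a sanity check on the unit fix: symbol_duration is stored in nanoseconds while lifs_period/sifs_period are in microseconds, so the divisor must be NSEC_PER_USEC (1000), not NSEC_PER_SEC. For the common 2.4 GHz O-QPSK PHY the symbol duration is 16 us (16000 ns), so with the 40-symbol LIFS period this yields 40 * 16000 / 1000 = 640 us; dividing by NSEC_PER_SEC instead truncated the result to 0.
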
@@ -184,10 +186,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
* Should be done when all drivers sets this value.
*/
- wpan_phy->lifs_period =
- (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
- wpan_phy->sifs_period =
- (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+ wpan_phy->lifs_period = (IEEE802154_LIFS_PERIOD *
+ wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ wpan_phy->sifs_period = (IEEE802154_SIFS_PERIOD *
+ wpan_phy->symbol_duration) / NSEC_PER_USEC;
}
int ieee802154_register_hw(struct ieee802154_hw *hw)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index e40a988d6c80..aac359b5c71d 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/crc-ccitt.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c
index 1c0eeaa76560..a6dab3cc3ad8 100644
--- a/net/mac802154/scan.c
+++ b/net/mac802154/scan.c
@@ -176,6 +176,7 @@ void mac802154_scan_worker(struct work_struct *work)
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, scan_work.work);
struct cfg802154_scan_request *scan_req;
+ enum nl802154_scan_types scan_req_type;
struct ieee802154_sub_if_data *sdata;
unsigned int scan_duration = 0;
struct wpan_phy *wpan_phy;
@@ -209,6 +210,7 @@ void mac802154_scan_worker(struct work_struct *work)
}
wpan_phy = scan_req->wpan_phy;
+ scan_req_type = scan_req->type;
scan_req_duration = scan_req->duration;
/* Look for the next valid chan */
@@ -246,7 +248,7 @@ void mac802154_scan_worker(struct work_struct *work)
goto end_scan;
}
- if (scan_req->type == NL802154_SCAN_ACTIVE) {
+ if (scan_req_type == NL802154_SCAN_ACTIVE) {
ret = mac802154_transmit_beacon_req(local, sdata);
if (ret)
dev_err(&sdata->dev->dev,
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 2a6f1ed763c9..4d13f18f6f2c 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -12,7 +12,7 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
@@ -34,8 +34,8 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
if (res)
goto err_tx;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ DEV_STATS_INC(dev, tx_packets);
+ DEV_STATS_ADD(dev, tx_bytes, skb->len);
ieee802154_xmit_complete(&local->hw, skb, false);
@@ -90,8 +90,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
if (ret)
goto err_wake_netif_queue;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += len;
+ DEV_STATS_INC(dev, tx_packets);
+ DEV_STATS_ADD(dev, tx_bytes, len);
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->sync_tx_work);
@@ -178,19 +178,6 @@ void ieee802154_mlme_op_post(struct ieee802154_local *local)
ieee802154_release_queue(local);
}
-int ieee802154_mlme_tx_one(struct ieee802154_local *local,
- struct ieee802154_sub_if_data *sdata,
- struct sk_buff *skb)
-{
- int ret;
-
- ieee802154_mlme_op_pre(local);
- ret = ieee802154_mlme_tx(local, sdata, skb);
- ieee802154_mlme_op_post(local);
-
- return ret;
-}
-
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb)
diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig
index 3a5c0e70da77..d8d3413a37f7 100644
--- a/net/mctp/Kconfig
+++ b/net/mctp/Kconfig
@@ -14,6 +14,7 @@ menuconfig MCTP
config MCTP_TEST
bool "MCTP core tests" if !KUNIT_ALL_TESTS
+ select MCTP_FLOWS
depends on MCTP=y && KUNIT=y
default KUNIT_ALL_TESTS
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index f6be58b68c6f..f6de136008f6 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -350,30 +350,102 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
}
-static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
+/* helpers for reading/writing the tag ioc, handling compatibility across the
+ * two versions, and some basic API error checking
+ */
+static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ size = sizeof(*ctl);
+ ptr = ctl;
+ } else {
+ size = sizeof(ctl_compat);
+ ptr = &ctl_compat;
+ }
+
+ rc = copy_from_user(ptr, (void __user *)arg, size);
+ if (rc)
+ return -EFAULT;
+
+ if (!tagv2) {
+ /* compat, using defaults for new fields */
+ ctl->net = MCTP_INITIAL_DEFAULT_NET;
+ ctl->peer_addr = ctl_compat.peer_addr;
+ ctl->local_addr = MCTP_ADDR_ANY;
+ ctl->flags = ctl_compat.flags;
+ ctl->tag = ctl_compat.tag;
+ }
+
+ if (ctl->flags)
+ return -EINVAL;
+
+ if (ctl->local_addr != MCTP_ADDR_ANY &&
+ ctl->local_addr != MCTP_ADDR_NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ ptr = ctl;
+ size = sizeof(*ctl);
+ } else {
+ ctl_compat.peer_addr = ctl->peer_addr;
+ ctl_compat.tag = ctl->tag;
+ ctl_compat.flags = ctl->flags;
+
+ ptr = &ctl_compat;
+ size = sizeof(ctl_compat);
+ }
+
+ rc = copy_to_user((void __user *)arg, ptr, size);
+ if (rc)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *key = NULL;
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags;
u8 tag;
+ int rc;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
if (ctl.tag)
return -EINVAL;
- if (ctl.flags)
- return -EINVAL;
-
- key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
- true, &tag);
+ key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
+ ctl.peer_addr, true, &tag);
if (IS_ERR(key))
return PTR_ERR(key);
ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
- if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
+ rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
+ if (rc) {
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
@@ -385,28 +457,27 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
- return -EFAULT;
+ return rc;
}
mctp_key_unref(key);
return 0;
}
-static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
+static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
int rc;
u8 tag;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
-
- if (ctl.flags)
- return -EINVAL;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
/* Must be a local tag, TO set, preallocated */
if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
@@ -415,6 +486,9 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
tag = ctl.tag & MCTP_TAG_MASK;
rc = -EINVAL;
+ if (ctl.peer_addr == MCTP_ADDR_NULL)
+ ctl.peer_addr = MCTP_ADDR_ANY;
+
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
/* we do an irqsave here, even though we know the irq state,
@@ -422,6 +496,7 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
*/
spin_lock_irqsave(&key->lock, fl2);
if (key->manual_alloc &&
+ ctl.net == key->net &&
ctl.peer_addr == key->peer_addr &&
tag == key->tag) {
__mctp_key_remove(key, net, fl2,
@@ -439,12 +514,17 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
+ bool tagv2 = false;
switch (cmd) {
+ case SIOCMCTPALLOCTAG2:
case SIOCMCTPALLOCTAG:
- return mctp_ioctl_alloctag(msk, arg);
+ tagv2 = cmd == SIOCMCTPALLOCTAG2;
+ return mctp_ioctl_alloctag(msk, tagv2, arg);
case SIOCMCTPDROPTAG:
- return mctp_ioctl_droptag(msk, arg);
+ case SIOCMCTPDROPTAG2:
+ tagv2 = cmd == SIOCMCTPDROPTAG2;
+ return mctp_ioctl_droptag(msk, tagv2, arg);
}
return -EINVAL;
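
For orientation, a minimal user-space sketch of allocating a preallocated tag through the new v2 interface (uses the uapi names visible in this patch; error handling and cleanup are elided, and the net/EID values are made up):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mctp.h>

int alloc_tag_example(void)
{
	struct mctp_ioc_tag_ctl2 ctl = {
		.net = 1,			/* MCTP network ID (v2-only field) */
		.peer_addr = 9,			/* remote EID */
		.local_addr = MCTP_ADDR_ANY,	/* any local EID */
	};
	int sd = socket(AF_MCTP, SOCK_DGRAM, 0);

	if (sd < 0 || ioctl(sd, SIOCMCTPALLOCTAG2, &ctl) < 0)
		return -1;

	/* the kernel returns the tag with MCTP_TAG_OWNER | MCTP_TAG_PREALLOC set */
	printf("allocated tag 0x%02x\n", ctl.tag);
	return 0;
}
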
@@ -676,10 +756,14 @@ static __init int mctp_init(void)
if (rc)
goto err_unreg_routes;
- mctp_device_init();
+ rc = mctp_device_init();
+ if (rc)
+ goto err_unreg_neigh;
return 0;
+err_unreg_neigh:
+ mctp_neigh_exit();
err_unreg_routes:
mctp_routes_exit();
err_unreg_proto:
diff --git a/net/mctp/device.c b/net/mctp/device.c
index acb97b257428..8e0724c56723 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -20,8 +20,7 @@
#include <net/sock.h>
struct mctp_dump_cb {
- int h;
- int idx;
+ unsigned long ifindex;
size_t a_idx;
};
@@ -115,43 +114,29 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct mctp_dump_cb *mcb = (void *)cb->ctx;
struct net *net = sock_net(skb->sk);
- struct hlist_head *head;
struct net_device *dev;
struct ifaddrmsg *hdr;
struct mctp_dev *mdev;
- int ifindex;
- int idx = 0, rc;
+ int ifindex, rc;
hdr = nlmsg_data(cb->nlh);
// filter by ifindex if requested
ifindex = hdr->ifa_index;
rcu_read_lock();
- for (; mcb->h < NETDEV_HASHENTRIES; mcb->h++, mcb->idx = 0) {
- idx = 0;
- head = &net->dev_index_head[mcb->h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx >= mcb->idx &&
- (ifindex == 0 || ifindex == dev->ifindex)) {
- mdev = __mctp_dev_get(dev);
- if (mdev) {
- rc = mctp_dump_dev_addrinfo(mdev,
- skb, cb);
- mctp_dev_put(mdev);
- // Error indicates full buffer, this
- // callback will get retried.
- if (rc < 0)
- goto out;
- }
- }
- idx++;
- // reset for next iteration
- mcb->a_idx = 0;
- }
+ for_each_netdev_dump(net, dev, mcb->ifindex) {
+ if (ifindex && ifindex != dev->ifindex)
+ continue;
+ mdev = __mctp_dev_get(dev);
+ if (!mdev)
+ continue;
+ rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
+ mctp_dev_put(mdev);
+ if (rc < 0)
+ break;
+ mcb->a_idx = 0;
}
-out:
rcu_read_unlock();
- mcb->idx = idx;
return skb->len;
}
@@ -371,6 +356,8 @@ static int mctp_fill_link_af(struct sk_buff *skb,
return -ENODATA;
if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
return -EMSGSIZE;
+ if (nla_put_u8(skb, IFLA_MCTP_PHYS_BINDING, mdev->binding))
+ return -EMSGSIZE;
return 0;
}
@@ -385,6 +372,7 @@ static size_t mctp_get_link_af_size(const struct net_device *dev,
if (!mdev)
return 0;
ret = nla_total_size(4); /* IFLA_MCTP_NET */
+ ret += nla_total_size(1); /* IFLA_MCTP_PHYS_BINDING */
mctp_dev_put(mdev);
return ret;
}
@@ -480,7 +468,8 @@ static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
}
static int mctp_register_netdevice(struct net_device *dev,
- const struct mctp_netdev_ops *ops)
+ const struct mctp_netdev_ops *ops,
+ enum mctp_phys_binding binding)
{
struct mctp_dev *mdev;
@@ -489,17 +478,19 @@ static int mctp_register_netdevice(struct net_device *dev,
return PTR_ERR(mdev);
mdev->ops = ops;
+ mdev->binding = binding;
return register_netdevice(dev);
}
int mctp_register_netdev(struct net_device *dev,
- const struct mctp_netdev_ops *ops)
+ const struct mctp_netdev_ops *ops,
+ enum mctp_phys_binding binding)
{
int rc;
rtnl_lock();
- rc = mctp_register_netdevice(dev, ops);
+ rc = mctp_register_netdevice(dev, ops, binding);
rtnl_unlock();
return rc;
@@ -524,25 +515,40 @@ static struct notifier_block mctp_dev_nb = {
.priority = ADDRCONF_NOTIFY_PRIORITY,
};
-void __init mctp_device_init(void)
+static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
+ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR,
+ .doit = mctp_rtm_newaddr},
+ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR,
+ .doit = mctp_rtm_deladdr},
+ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR,
+ .dumpit = mctp_dump_addrinfo},
+};
+
+int __init mctp_device_init(void)
{
+ int err;
+
register_netdevice_notifier(&mctp_dev_nb);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETADDR,
- NULL, mctp_dump_addrinfo, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWADDR,
- mctp_rtm_newaddr, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELADDR,
- mctp_rtm_deladdr, NULL, 0);
- rtnl_af_register(&mctp_af_ops);
+ err = rtnl_af_register(&mctp_af_ops);
+ if (err)
+ goto err_notifier;
+
+ err = rtnl_register_many(mctp_device_rtnl_msg_handlers);
+ if (err)
+ goto err_af;
+
+ return 0;
+err_af:
+ rtnl_af_unregister(&mctp_af_ops);
+err_notifier:
+ unregister_netdevice_notifier(&mctp_dev_nb);
+ return err;
}
void __exit mctp_device_exit(void)
{
+ rtnl_unregister_many(mctp_device_rtnl_msg_handlers);
rtnl_af_unregister(&mctp_af_ops);
- rtnl_unregister(PF_MCTP, RTM_DELADDR);
- rtnl_unregister(PF_MCTP, RTM_NEWADDR);
- rtnl_unregister(PF_MCTP, RTM_GETADDR);
-
unregister_netdevice_notifier(&mctp_dev_nb);
}
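The mctp_dump_addrinfo() rewrite above is part of a wider pattern in this series: the old two-level walk over net->dev_index_head (hash bucket plus per-bucket index stored in the callback state) is replaced by for_each_netdev_dump() with a single ifindex cursor kept in cb->ctx, which keeps the dump restartable when the skb fills up. Reduced to its essentials, the shape is roughly the sketch below; my_dump_ctx and my_fill() are placeholders, not kernel APIs.

/* Sketch of the restartable netdev-dump pattern used above. */
struct my_dump_ctx {
	unsigned long ifindex;		/* resume point, persisted in cb->ctx */
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		/* my_fill() returns <0 once the skb is full; the next
		 * callback invocation resumes from ctx->ifindex.
		 */
		if (my_fill(skb, dev) < 0)
			break;
	}
	rcu_read_unlock();

	return skb->len;
}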
diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c
index ffa0f9e0983f..590f642413e4 100644
--- a/net/mctp/neigh.c
+++ b/net/mctp/neigh.c
@@ -322,22 +322,29 @@ static struct pernet_operations mctp_net_ops = {
.exit = mctp_neigh_net_exit,
};
+static const struct rtnl_msg_handler mctp_neigh_rtnl_msg_handlers[] = {
+ {THIS_MODULE, PF_MCTP, RTM_NEWNEIGH, mctp_rtm_newneigh, NULL, 0},
+ {THIS_MODULE, PF_MCTP, RTM_DELNEIGH, mctp_rtm_delneigh, NULL, 0},
+ {THIS_MODULE, PF_MCTP, RTM_GETNEIGH, NULL, mctp_rtm_getneigh, 0},
+};
+
int __init mctp_neigh_init(void)
{
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWNEIGH,
- mctp_rtm_newneigh, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELNEIGH,
- mctp_rtm_delneigh, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETNEIGH,
- NULL, mctp_rtm_getneigh, 0);
-
- return register_pernet_subsys(&mctp_net_ops);
+ int err;
+
+ err = register_pernet_subsys(&mctp_net_ops);
+ if (err)
+ return err;
+
+ err = rtnl_register_many(mctp_neigh_rtnl_msg_handlers);
+ if (err)
+ unregister_pernet_subsys(&mctp_net_ops);
+
+ return err;
}
-void __exit mctp_neigh_exit(void)
+void mctp_neigh_exit(void)
{
+ rtnl_unregister_many(mctp_neigh_rtnl_msg_handlers);
unregister_pernet_subsys(&mctp_net_ops);
- rtnl_unregister(PF_MCTP, RTM_GETNEIGH);
- rtnl_unregister(PF_MCTP, RTM_DELNEIGH);
- rtnl_unregister(PF_MCTP, RTM_NEWNEIGH);
}
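The neigh.c and device.c hunks follow the same conversion: the per-message rtnl_register_module() calls become a const rtnl_msg_handler table registered in one shot with rtnl_register_many(), and every init step now unwinds on failure. That is also why mctp_neigh_exit() loses its __exit annotation, since the new err_unreg_neigh path in mctp_init() calls it outside module unload. Stripped of the MCTP specifics, the ordering looks like the sketch below (do_a/do_b/undo_a/undo_b are placeholders):

/* Generic register/unwind ordering used by the hunks above. */
int __init my_subsys_init(void)
{
	int err;

	err = do_a();			/* e.g. register_pernet_subsys() */
	if (err)
		return err;

	err = do_b();			/* e.g. rtnl_register_many(handlers) */
	if (err)
		goto err_undo_a;

	return 0;

err_undo_a:
	undo_a();
	return err;
}

void my_subsys_exit(void)		/* no __exit: also called on init failure */
{
	undo_b();			/* tear down in reverse order */
	undo_a();
}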
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ceee44ea09d9..4c460160914f 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -73,13 +73,50 @@ static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
return NULL;
}
-static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
- mctp_eid_t peer, u8 tag)
+/* A note on the key allocations.
+ *
+ * struct net->mctp.keys contains our set of currently-allocated keys for
+ * MCTP tag management. The lookup tuple for these is the peer EID,
+ * local EID and MCTP tag.
+ *
+ * In some cases, the peer EID may be MCTP_EID_ANY: for example, when a
+ * broadcast message is sent, we may receive responses from any peer EID.
+ * Because the broadcast dest address is equivalent to ANY, we create
+ * a key with (local = local-eid, peer = ANY). This allows a match on the
+ * incoming broadcast responses from any peer.
+ *
+ * We perform lookups when packets are received, and when tags are allocated
+ * in two scenarios:
+ *
+ * - when a packet is sent, with a locally-owned tag: we need to find an
+ * unused tag value for the (local, peer) EID pair.
+ *
+ * - when a tag is manually allocated: we need to find an unused tag value
+ * for the peer EID, but don't have a specific local EID at that stage.
+ *
+ * In the latter case, on successful allocation, we end up with a tag with
+ * (local = ANY, peer = peer-eid).
+ *
+ * So, the key set allows both a local EID of ANY, as well as a peer EID of
+ * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast.
+ * The matching (in mctp_key_match()) during lookup allows the match value to
+ * be ANY in either the dest or source addresses.
+ *
+ * When allocating (+ inserting) a tag, we need to check for conflicts amongst
+ * the existing tag set. This requires matching either exactly on the local
+ * and peer addresses, or either being ANY.
+ */
+
+static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
+ mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
+ if (key->net != net)
+ return false;
+
if (!mctp_address_matches(key->local_addr, local))
return false;
- if (key->peer_addr != peer)
+ if (!mctp_address_matches(key->peer_addr, peer))
return false;
if (key->tag != tag)
@@ -92,7 +129,7 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
* key exists.
*/
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
- mctp_eid_t peer,
+ unsigned int netid, mctp_eid_t peer,
unsigned long *irqflags)
__acquires(&key->lock)
{
@@ -108,7 +145,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry(key, &net->mctp.keys, hlist) {
- if (!mctp_key_match(key, mh->dest, peer, tag))
+ if (!mctp_key_match(key, netid, mh->dest, peer, tag))
continue;
spin_lock(&key->lock);
@@ -131,6 +168,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
}
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ unsigned int net,
mctp_eid_t local, mctp_eid_t peer,
u8 tag, gfp_t gfp)
{
@@ -140,6 +178,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
if (!key)
return NULL;
+ key->net = net;
key->peer_addr = peer;
key->local_addr = local;
key->tag = tag;
@@ -185,8 +224,8 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
}
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
- if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
- key->tag)) {
+ if (mctp_key_match(tmp, key->net, key->local_addr,
+ key->peer_addr, key->tag)) {
spin_lock(&tmp->lock);
if (tmp->valid)
rc = -EEXIST;
@@ -293,8 +332,14 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
& MCTP_HDR_SEQ_MASK;
if (!key->reasm_head) {
- key->reasm_head = skb;
- key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
+ /* Since we're manipulating the shared frag_list, ensure it isn't
+ * shared with any other SKBs.
+ */
+ key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
+ if (!key->reasm_head)
+ return -ENOMEM;
+
+ key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list);
key->last_seq = this_seq;
return 0;
}
@@ -327,6 +372,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct mctp_sock *msk;
struct mctp_hdr *mh;
+ unsigned int netid;
unsigned long f;
u8 tag, flags;
int rc;
@@ -334,8 +380,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
msk = NULL;
rc = -EINVAL;
- /* we may be receiving a locally-routed packet; drop source sk
- * accounting
+ /* We may be receiving a locally-routed packet; drop source sk
+ * accounting.
+ *
+ * From here, we will either queue the skb - either to a frag_queue, or
+ * to a receiving socket. When that succeeds, we clear the skb pointer;
+ * a non-NULL skb on exit will be otherwise unowned, and hence
+ * kfree_skb()-ed.
*/
skb_orphan(skb);
@@ -345,6 +396,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* grab header, advance data ptr */
mh = mctp_hdr(skb);
+ netid = mctp_cb(skb)->net;
skb_pull(skb, sizeof(struct mctp_hdr));
if (mh->ver != 1)
@@ -358,7 +410,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* lookup socket / reasm context, exactly matching (src,dest,tag).
* we hold a ref on the key, and key->lock held.
*/
- key = mctp_lookup_key(net, skb, mh->src, &f);
+ key = mctp_lookup_key(net, skb, netid, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
@@ -368,8 +420,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key lookup to find the socket, but don't use this
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
+ *
+ * this lookup requires key->peer to be MCTP_ADDR_ANY;
+ * it doesn't match just any key->peer.
*/
- any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+ any_key = mctp_lookup_key(net, skb, netid,
+ MCTP_ADDR_ANY, &f);
if (any_key) {
msk = container_of(any_key->sk,
struct mctp_sock, sk);
@@ -389,7 +445,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* pending key.
*/
if (flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(&msk->sk, skb);
+ rc = sock_queue_rcv_skb(&msk->sk, skb);
+ if (!rc)
+ skb = NULL;
if (key) {
/* we've hit a pending reassembly; not much we
* can do but drop it
@@ -398,7 +456,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
- rc = 0;
goto out_unlock;
}
@@ -406,7 +463,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* packets for this message
*/
if (!key) {
- key = mctp_key_alloc(msk, mh->dest, mh->src,
+ key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
tag, GFP_ATOMIC);
if (!key) {
rc = -ENOMEM;
@@ -425,8 +482,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
- if (!rc)
+ if (!rc) {
trace_mctp_key_acquire(key);
+ skb = NULL;
+ }
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
@@ -444,6 +503,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
+ if (!rc)
+ skb = NULL;
}
}
@@ -458,12 +519,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
else
rc = mctp_frag_queue(key, skb);
+ if (rc)
+ goto out_unlock;
+
+ /* we've queued; the queue owns the skb now */
+ skb = NULL;
+
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
- if (!rc && flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(key->sk, key->reasm_head);
- key->reasm_head = NULL;
+ if (flags & MCTP_HDR_FLAG_EOM) {
+ rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
+ if (!rc)
+ key->reasm_head = NULL;
__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
@@ -482,8 +550,7 @@ out_unlock:
if (any_key)
mctp_key_unref(any_key);
out:
- if (rc)
- kfree_skb(skb);
+ kfree_skb(skb);
return rc;
}
@@ -596,11 +663,12 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
refcount_inc(&key->refs);
}
-/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
+/* Allocate a locally-owned tag value for (local, peer), and reserve
* it for the socket msk
*/
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
@@ -610,11 +678,11 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
u8 tagbits;
/* for NULL destination EIDs, we may get a response from any peer */
- if (daddr == MCTP_ADDR_NULL)
- daddr = MCTP_ADDR_ANY;
+ if (peer == MCTP_ADDR_NULL)
+ peer = MCTP_ADDR_ANY;
/* be optimistic, alloc now */
- key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
+ key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
@@ -631,12 +699,24 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
* lock held, they don't change over the lifetime of the key.
*/
+ /* tags are net-specific */
+ if (tmp->net != netid)
+ continue;
+
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
- if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
- mctp_address_matches(tmp->local_addr, saddr)))
+ /* Since we're avoiding conflicting entries, match peer and
+ * local addresses, including with a wildcard on ANY. See
+ * 'A note on key allocations' for background.
+ */
+ if (peer != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->peer_addr, peer))
+ continue;
+
+ if (local != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->local_addr, local))
continue;
spin_lock(&tmp->lock);
@@ -671,6 +751,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
}
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
+ unsigned int netid,
mctp_eid_t daddr,
u8 req_tag, u8 *tagp)
{
@@ -685,6 +766,9 @@ static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
spin_lock_irqsave(&mns->keys_lock, flags);
hlist_for_each_entry(tmp, &mns->keys, hlist) {
+ if (tmp->net != netid)
+ continue;
+
if (tmp->tag != req_tag)
continue;
@@ -843,6 +927,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ /* we need to copy the extensions, for MCTP flow data */
+ skb_ext_copy(skb2, skb);
+
/* do route */
rc = rt->output(rt, skb2);
if (rc)
@@ -865,6 +952,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
+ unsigned int netid;
unsigned int mtu;
mctp_eid_t saddr;
bool ext_rt;
@@ -916,16 +1004,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rc = 0;
}
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
+ netid = READ_ONCE(rt->dev->net);
if (rc)
goto out_release;
if (req_tag & MCTP_TAG_OWNER) {
if (req_tag & MCTP_TAG_PREALLOC)
- key = mctp_lookup_prealloc_tag(msk, daddr,
+ key = mctp_lookup_prealloc_tag(msk, netid, daddr,
req_tag, &tag);
else
- key = mctp_alloc_local_tag(msk, daddr, saddr,
+ key = mctp_alloc_local_tag(msk, netid, saddr, daddr,
false, &tag);
if (IS_ERR(key)) {
@@ -1407,26 +1496,39 @@ static struct pernet_operations mctp_net_ops = {
.exit = mctp_routes_net_exit,
};
+static const struct rtnl_msg_handler mctp_route_rtnl_msg_handlers[] = {
+ {THIS_MODULE, PF_MCTP, RTM_NEWROUTE, mctp_newroute, NULL, 0},
+ {THIS_MODULE, PF_MCTP, RTM_DELROUTE, mctp_delroute, NULL, 0},
+ {THIS_MODULE, PF_MCTP, RTM_GETROUTE, NULL, mctp_dump_rtinfo, 0},
+};
+
int __init mctp_routes_init(void)
{
+ int err;
+
dev_add_pack(&mctp_packet_type);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
- NULL, mctp_dump_rtinfo, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
- mctp_newroute, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
- mctp_delroute, NULL, 0);
+ err = register_pernet_subsys(&mctp_net_ops);
+ if (err)
+ goto err_pernet;
- return register_pernet_subsys(&mctp_net_ops);
+ err = rtnl_register_many(mctp_route_rtnl_msg_handlers);
+ if (err)
+ goto err_rtnl;
+
+ return 0;
+
+err_rtnl:
+ unregister_pernet_subsys(&mctp_net_ops);
+err_pernet:
+ dev_remove_pack(&mctp_packet_type);
+ return err;
}
void mctp_routes_exit(void)
{
+ rtnl_unregister_many(mctp_route_rtnl_msg_handlers);
unregister_pernet_subsys(&mctp_net_ops);
- rtnl_unregister(PF_MCTP, RTM_DELROUTE);
- rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
- rtnl_unregister(PF_MCTP, RTM_GETROUTE);
dev_remove_pack(&mctp_packet_type);
}
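A recurring change in the route.c hunks above is an ownership rule for the skb in mctp_route_input(): each successful hand-off (sock_queue_rcv_skb(), mctp_frag_queue(), or the key insertion via mctp_key_add()) clears the local skb pointer, so the single kfree_skb() at the out label only frees a buffer nothing has claimed (kfree_skb(NULL) is a no-op). The new sk_fail KUnit tests below rely on exactly this, checking refcounts after a failed delivery. Condensed to a sketch, with receive_path() standing in for the various queueing calls:

/* Condensed sketch of the skb-ownership rule in mctp_route_input(). */
static int handle_packet(struct sk_buff *skb)
{
	int rc;

	rc = receive_path(skb);		/* queue to a socket or frag list */
	if (!rc)
		skb = NULL;		/* the queue owns it now */

	/* ... further hand-off attempts, each clearing skb on success ... */

	kfree_skb(skb);			/* no-op when everything was claimed */
	return rc;
}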
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 92ea4158f7fc..06c1897b685a 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -79,6 +79,16 @@ static void mctp_test_route_destroy(struct kunit *test,
kfree_rcu(&rt->rt, rcu);
}
+static void mctp_test_skb_set_dev(struct sk_buff *skb,
+ struct mctp_test_dev *dev)
+{
+ struct mctp_skb_cb *cb;
+
+ cb = mctp_cb(skb);
+ cb->net = READ_ONCE(dev->mdev->net);
+ skb->dev = dev->ndev;
+}
+
static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
unsigned int data_len)
{
@@ -91,6 +101,7 @@ static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
buf = skb_put(skb, data_len);
@@ -111,6 +122,7 @@ static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(skb, data_len), data, data_len);
@@ -249,8 +261,6 @@ static void mctp_test_rx_input(struct kunit *test)
skb = mctp_test_create_skb(&params->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- __mctp_cb(skb);
-
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
@@ -283,7 +293,8 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
struct mctp_test_route **rtp,
- struct socket **sockp)
+ struct socket **sockp,
+ unsigned int netid)
{
struct sockaddr_mctp addr = {0};
struct mctp_test_route *rt;
@@ -293,6 +304,8 @@ static void __mctp_route_test_init(struct kunit *test,
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ if (netid != MCTP_NET_ANY)
+ WRITE_ONCE(dev->mdev->net, netid);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -301,7 +314,7 @@ static void __mctp_route_test_init(struct kunit *test,
KUNIT_ASSERT_EQ(test, rc, 0);
addr.smctp_family = AF_MCTP;
- addr.smctp_network = MCTP_NET_ANY;
+ addr.smctp_network = netid;
addr.smctp_addr.s_addr = 8;
addr.smctp_type = 0;
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
@@ -339,13 +352,12 @@ static void mctp_test_route_input_sk(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
skb = mctp_test_create_skb_data(&params->hdr, &params->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -354,7 +366,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
- KUNIT_EXPECT_EQ(test, skb->len, 1);
+ KUNIT_EXPECT_EQ(test, skb2->len, 1);
skb_free_datagram(sock->sk, skb2);
@@ -410,15 +422,14 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
}
@@ -544,6 +555,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
+ unsigned int net;
int rc;
u8 c;
@@ -551,6 +563,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ net = READ_ONCE(dev->mdev->net);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -562,8 +575,9 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
mns = &sock_net(sock->sk)->mctp;
/* set the incoming tag according to test params */
- key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr,
- params->key_tag, GFP_KERNEL);
+ key = mctp_key_alloc(msk, net, params->key_local_addr,
+ params->key_peer_addr, params->key_tag,
+ GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);
@@ -576,8 +590,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
skb = mctp_test_create_skb_data(&params->hdr, &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -665,6 +678,565 @@ static void mctp_route_input_sk_keys_to_desc(
KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
mctp_route_input_sk_keys_to_desc);
+struct test_net {
+ unsigned int netid;
+ struct mctp_test_dev *dev;
+ struct mctp_test_route *rt;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct mctp_sk_key *key;
+ struct {
+ u8 type;
+ unsigned int data;
+ } msg;
+};
+
+static void
+mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO);
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
+ struct test_net *t)
+{
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sockets' bind()
+ */
+static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ t1.msg.type = 0;
+ t2.msg.type = 0;
+
+ mctp_test_route_input_multiple_nets_bind_init(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t2);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1));
+ struct mctp_sock *msk;
+ struct netns_mctp *mns;
+ unsigned long flags;
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ msk = container_of(t->sock->sk, struct mctp_sock, sk);
+
+ t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+
+ mns = &sock_net(t->sock->sk)->mctp;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ mctp_reserve_tag(&init_net, t->key, msk);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
+ struct test_net *t)
+{
+ mctp_key_unref(t->key);
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sk_key
+ */
+static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ /* use type 1 which is not bound */
+ t1.msg.type = 1;
+ t2.msg.type = 1;
+
+ mctp_test_route_input_multiple_nets_key_init(test, &t1);
+ mctp_test_route_input_multiple_nets_key_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_key_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_key_fini(test, &t2);
+}
+
+/* Input route to socket, using a single-packet message, where sock delivery
+ * fails. Ensure we're handling the failure appropriately.
+ */
+static void mctp_test_route_input_sk_fail_single(struct kunit *test)
+{
+ const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ struct sk_buff *skb;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ /* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
+ * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
+ */
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ skb = mctp_test_create_skb(&hdr, 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+ skb_get(skb);
+
+ mctp_test_skb_set_dev(skb, dev);
+
+ /* do route input, which should fail */
+ rc = mctp_route_input(&rt->rt, skb);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to skb */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
+ kfree_skb(skb);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* Input route to socket, using a fragmented message, where sock delivery fails.
+ */
+static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
+{
+ const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skbs[2];
+ struct socket *sock;
+ unsigned int i;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ for (i = 0; i < ARRAY_SIZE(skbs); i++) {
+ skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
+ skb_get(skbs[i]);
+
+ mctp_test_skb_set_dev(skbs[i], dev);
+ }
+
+ /* first route input should succeed, we're only queueing to the
+ * frag list
+ */
+ rc = mctp_route_input(&rt->rt, skbs[0]);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ /* final route input should fail to deliver to the socket */
+ rc = mctp_route_input(&rt->rt, skbs[1]);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to both skbs */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
+ kfree_skb(skbs[0]);
+
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
+ kfree_skb(skbs[1]);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* Input route to socket, using a fragmented message created from clones.
+ */
+static void mctp_test_route_input_cloned_frag(struct kunit *test)
+{
+ /* 5 packet fragments, forming 2 complete messages */
+ const struct mctp_hdr hdrs[5] = {
+ RX_FRAG(FL_S, 0),
+ RX_FRAG(0, 1),
+ RX_FRAG(FL_E, 2),
+ RX_FRAG(FL_S, 0),
+ RX_FRAG(FL_E, 1),
+ };
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb[5];
+ struct sk_buff *rx_skb;
+ struct socket *sock;
+ size_t data_len;
+ u8 compare[100];
+ u8 flat[100];
+ size_t total;
+ void *p;
+ int rc;
+
+ /* Arbitrary length */
+ data_len = 3;
+ total = data_len + sizeof(struct mctp_hdr);
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ /* Create a single skb initially with concatenated packets */
+ skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total);
+ mctp_test_skb_set_dev(skb[0], dev);
+ memset(skb[0]->data, 0 * 0x11, skb[0]->len);
+ memcpy(skb[0]->data, &hdrs[0], sizeof(struct mctp_hdr));
+
+ /* Extract and populate packets */
+ for (int i = 1; i < 5; i++) {
+ skb[i] = skb_clone(skb[i - 1], GFP_ATOMIC);
+ KUNIT_ASSERT_TRUE(test, skb[i]);
+ p = skb_pull(skb[i], total);
+ KUNIT_ASSERT_TRUE(test, p);
+ skb_reset_network_header(skb[i]);
+ memcpy(skb[i]->data, &hdrs[i], sizeof(struct mctp_hdr));
+ memset(&skb[i]->data[sizeof(struct mctp_hdr)], i * 0x11, data_len);
+ }
+ for (int i = 0; i < 5; i++)
+ skb_trim(skb[i], total);
+
+ /* SOM packets have a type byte to match the socket */
+ skb[0]->data[4] = 0;
+ skb[3]->data[4] = 0;
+
+ skb_dump("pkt1 ", skb[0], false);
+ skb_dump("pkt2 ", skb[1], false);
+ skb_dump("pkt3 ", skb[2], false);
+ skb_dump("pkt4 ", skb[3], false);
+ skb_dump("pkt5 ", skb[4], false);
+
+ for (int i = 0; i < 5; i++) {
+ KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1);
+ /* Take a reference so we can check refcounts at the end */
+ skb_get(skb[i]);
+ }
+
+ /* Feed the fragments into MCTP core */
+ for (int i = 0; i < 5; i++) {
+ rc = mctp_route_input(&rt->rt, skb[i]);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ }
+
+ /* Receive first reassembled message */
+ rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ KUNIT_EXPECT_EQ(test, rx_skb->len, 3 * data_len);
+ rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len);
+ for (int i = 0; i < rx_skb->len; i++)
+ compare[i] = (i / data_len) * 0x11;
+ /* Set type byte */
+ compare[0] = 0;
+
+ KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len);
+ KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1);
+ kfree_skb(rx_skb);
+
+ /* Receive second reassembled message */
+ rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ KUNIT_EXPECT_EQ(test, rx_skb->len, 2 * data_len);
+ rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len);
+ for (int i = 0; i < rx_skb->len; i++)
+ compare[i] = (i / data_len + 3) * 0x11;
+ /* Set type byte */
+ compare[0] = 0;
+
+ KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len);
+ KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1);
+ kfree_skb(rx_skb);
+
+ /* Check input skb refcounts */
+ for (int i = 0; i < 5; i++) {
+ KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1);
+ kfree_skb(skb[i]);
+ }
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+
+static void mctp_test_flow_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sock,
+ struct sk_buff **skbp,
+ unsigned int len)
+{
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+
+ /* we have a slightly odd routing setup here; the test route
+ * is for EID 8, which is our local EID. We don't do a routing
+ * lookup, so that's fine - all we require is a path through
+ * mctp_local_output, which will call rt->output on whatever
+ * route we provide
+ */
+ __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);
+
+ /* Assign a single EID. ->addrs is freed on mctp netdev release */
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = 8;
+
+ skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
+ memset(skb_put(skb, len), 0, len);
+
+ /* take a ref for the route, we'll decrement in local output */
+ refcount_inc(&rt->rt.refs);
+
+ *devp = dev;
+ *rtp = rt;
+ *skbp = skb;
+}
+
+static void mctp_test_flow_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* test that an outgoing skb has the correct MCTP extension data set */
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_flow *flow;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 1);
+
+ skb2 = skb_dequeue(&rt->pkts);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
+
+ flow = skb_ext_find(skb2, SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key);
+ KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
+
+ kfree_skb(skb2);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+/* test that outgoing skbs, after fragmentation, all have the correct MCTP
+ * extension data set.
+ */
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ struct mctp_flow *flows[2];
+ struct sk_buff *tx_skbs[2];
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 2);
+
+ /* both resulting packets should have the same flow data */
+ tx_skbs[0] = skb_dequeue(&rt->pkts);
+ tx_skbs[1] = skb_dequeue(&rt->pkts);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
+
+ flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);
+ KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);
+
+ flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);
+ KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
+
+ kfree_skb(tx_skbs[0]);
+ kfree_skb(tx_skbs[1]);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+#else
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+#endif
+
+/* Test that outgoing skbs cause a suitable tag to be created */
+static void mctp_test_route_output_key_create(struct kunit *test)
+{
+ const unsigned int netid = 50;
+ const u8 dst = 26, src = 15;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_sk_key *key;
+ struct netns_mctp *mns;
+ unsigned long flags;
+ struct socket *sock;
+ struct sk_buff *skb;
+ bool empty, single;
+ const int len = 2;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ WRITE_ONCE(dev->mdev->net, netid);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = src;
+
+ skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
+ memset(skb_put(skb, len), 0, len);
+
+ refcount_inc(&rt->rt.refs);
+
+ mns = &sock_net(sock->sk)->mctp;
+
+ /* We assume we're starting from an empty keys list, which requires
+ * preceding tests to clean up correctly!
+ */
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ empty = hlist_empty(&mns->keys);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+ KUNIT_ASSERT_TRUE(test, empty);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ key = NULL;
+ single = false;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ if (!hlist_empty(&mns->keys)) {
+ key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist);
+ single = hlist_is_singular_node(&key->hlist, &mns->keys);
+ }
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_NULL(test, key);
+ KUNIT_ASSERT_TRUE(test, single);
+
+ KUNIT_EXPECT_EQ(test, key->net, netid);
+ KUNIT_EXPECT_EQ(test, key->local_addr, src);
+ KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
+ /* key has incoming tag, so inverse of what we sent */
+ KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
+
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
@@ -673,6 +1245,14 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_single),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
+ KUNIT_CASE(mctp_test_packet_flow),
+ KUNIT_CASE(mctp_test_fragment_flow),
+ KUNIT_CASE(mctp_test_route_output_key_create),
+ KUNIT_CASE(mctp_test_route_input_cloned_frag),
{}
};
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index e03ba66bbe18..565763eb0211 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -4,6 +4,7 @@
#include <linux/mctp.h>
#include <linux/if_arp.h>
+#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
@@ -54,6 +55,7 @@ struct mctp_test_dev *mctp_test_create_dev(void)
rcu_read_lock();
dev->mdev = __mctp_dev_get(ndev);
+ dev->mdev->net = mctp_default_net(dev_net(ndev));
rcu_read_unlock();
return dev;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1af29af65388..1f63b32d76d6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -594,7 +594,7 @@ static struct net_device *inet_fib_lookup_dev(struct net *net,
struct in_addr daddr;
memcpy(&daddr, addr, sizeof(struct in_addr));
- rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+ rt = ip_route_output(net, daddr.s_addr, 0, 0, 0, RT_SCOPE_UNIVERSE);
if (IS_ERR(rt))
return ERR_CAST(rt);
@@ -1154,7 +1154,7 @@ static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
if ((all || type == NETCONFA_INPUT) &&
nla_put_s32(skb, NETCONFA_INPUT,
- mdev->input_enabled) < 0)
+ READ_ONCE(mdev->input_enabled)) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -1201,8 +1201,7 @@ static void mpls_netconf_notify_devconf(struct net *net, int event,
rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
+ rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
}
static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
@@ -1303,11 +1302,12 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- struct hlist_head *head;
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct mpls_dev *mdev;
- int idx, s_idx;
- int h, s_h;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -1324,46 +1324,29 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = net->dev_base_seq;
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- mdev = mpls_dev_get(dev);
- if (!mdev)
- goto cont;
- if (mpls_netconf_fill_devconf(skb, mdev,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ continue;
+ err = mpls_netconf_fill_devconf(skb, mdev,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF,
+ NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ break;
}
-done:
- cb->args[0] = h;
- cb->args[1] = idx;
+ rcu_read_unlock();
- return skb->len;
+ return err;
}
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
(&((struct mpls_dev *)0)->field)
-static int mpls_conf_proc(struct ctl_table *ctl, int write,
+static int mpls_conf_proc(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int oval = *(int *)ctl->data;
@@ -1393,13 +1376,13 @@ static const struct ctl_table mpls_dev_table[] = {
.proc_handler = mpls_conf_proc,
.data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
},
- { }
};
static int mpls_dev_sysctl_register(struct net_device *dev,
struct mpls_dev *mdev)
{
char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
+ size_t table_size = ARRAY_SIZE(mpls_dev_table);
struct net *net = dev_net(dev);
struct ctl_table *table;
int i;
@@ -1411,7 +1394,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
/* Table data contains only offsets relative to the base of
* the mdev at this point, so make them absolute.
*/
- for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
+ for (i = 0; i < table_size; i++) {
table[i].data = (char *)mdev + (uintptr_t)table[i].data;
table[i].extra1 = mdev;
table[i].extra2 = net;
@@ -1419,8 +1402,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
- mdev->sysctl = register_net_sysctl_sz(net, path, table,
- ARRAY_SIZE(mpls_dev_table));
+ mdev->sysctl = register_net_sysctl_sz(net, path, table, table_size);
if (!mdev->sysctl)
goto free;
@@ -1438,7 +1420,7 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev,
struct mpls_dev *mdev)
{
struct net *net = dev_net(dev);
- struct ctl_table *table;
+ const struct ctl_table *table;
if (!mdev->sysctl)
return;
@@ -1682,7 +1664,7 @@ static int nla_put_via(struct sk_buff *skb,
u8 table, const void *addr, int alen)
{
static const int table_to_family[NEIGH_NR_TABLES + 1] = {
- AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
+ AF_INET, AF_INET6, AF_PACKET,
};
struct nlattr *nla;
struct rtvia *via;
@@ -2179,7 +2161,9 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
@@ -2293,8 +2277,7 @@ static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}
static int mpls_valid_getroute_req(struct sk_buff *skb,
@@ -2615,7 +2598,7 @@ nolabels:
return -ENOMEM;
}
-static int mpls_platform_labels(struct ctl_table *table, int write,
+static int mpls_platform_labels(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = table->data;
@@ -2667,11 +2650,11 @@ static const struct ctl_table mpls_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &ttl_max,
},
- { }
};
static int mpls_net_init(struct net *net)
{
+ size_t table_size = ARRAY_SIZE(mpls_table);
struct ctl_table *table;
int i;
@@ -2687,11 +2670,11 @@ static int mpls_net_init(struct net *net)
/* Table data contains only offsets relative to the base of
* the mdev at this point, so make them absolute.
*/
- for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
+ for (i = 0; i < table_size; i++)
table[i].data = (char *)net + (uintptr_t)table[i].data;
net->mpls.ctl = register_net_sysctl_sz(net, "net/mpls", table,
- ARRAY_SIZE(mpls_table));
+ table_size);
if (net->mpls.ctl == NULL) {
kfree(table);
return -ENOMEM;
@@ -2704,7 +2687,7 @@ static void mpls_net_exit(struct net *net)
{
struct mpls_route __rcu **platform_label;
size_t platform_labels;
- struct ctl_table *table;
+ const struct ctl_table *table;
unsigned int index;
table = net->mpls.ctl->ctl_table_arg;
@@ -2745,6 +2728,15 @@ static struct rtnl_af_ops mpls_af_ops __read_mostly = {
.get_stats_af_size = mpls_get_stats_af_size,
};
+static const struct rtnl_msg_handler mpls_rtnl_msg_handlers[] __initdata_or_module = {
+ {THIS_MODULE, PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0},
+ {THIS_MODULE, PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0},
+ {THIS_MODULE, PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, 0},
+ {THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
+ mpls_netconf_get_devconf, mpls_netconf_dump_devconf,
+ RTNL_FLAG_DUMP_UNLOCKED},
+};
+
static int __init mpls_init(void)
{
int err;
@@ -2761,25 +2753,30 @@ static int __init mpls_init(void)
dev_add_pack(&mpls_packet_type);
- rtnl_af_register(&mpls_af_ops);
-
- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
- mpls_rtm_newroute, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
- mpls_rtm_delroute, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
- mpls_getroute, mpls_dump_routes, 0);
- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
- mpls_netconf_get_devconf,
- mpls_netconf_dump_devconf, 0);
- err = ipgre_tunnel_encap_add_mpls_ops();
+ err = rtnl_af_register(&mpls_af_ops);
+ if (err)
+ goto out_unregister_dev_type;
+
+ err = rtnl_register_many(mpls_rtnl_msg_handlers);
if (err)
+ goto out_unregister_rtnl_af;
+
+ err = ipgre_tunnel_encap_add_mpls_ops();
+ if (err) {
pr_err("Can't add mpls over gre tunnel ops\n");
+ goto out_unregister_rtnl;
+ }
err = 0;
out:
return err;
+out_unregister_rtnl:
+ rtnl_unregister_many(mpls_rtnl_msg_handlers);
+out_unregister_rtnl_af:
+ rtnl_af_unregister(&mpls_af_ops);
+out_unregister_dev_type:
+ dev_remove_pack(&mpls_packet_type);
out_unregister_pernet:
unregister_pernet_subsys(&mpls_net_ops);
goto out;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 533d082f0701..34ab659f541e 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -27,6 +27,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
__be16 mpls_protocol;
unsigned int mpls_hlen;
+ if (!skb_inner_network_header_was_set(skb))
+ goto out;
+
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
@@ -106,5 +109,5 @@ module_init(mpls_gso_init);
module_exit(mpls_gso_exit);
MODULE_DESCRIPTION("MPLS GSO support");
-MODULE_AUTHOR("Simon Horman (horms@verge.net.au)");
+MODULE_AUTHOR("Simon Horman <horms@verge.net.au>");
MODULE_LICENSE("GPL");
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index ef59e25dc482..6e73da94af7f 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -55,8 +55,6 @@ static int mpls_xmit(struct sk_buff *skb)
out_dev = dst->dev;
net = dev_net(out_dev);
- skb_orphan(skb);
-
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
@@ -83,7 +81,7 @@ static int mpls_xmit(struct sk_buff *skb)
ttl = net->mpls.default_ttl;
else
ttl = ip_hdr(skb)->ttl;
- rt = (struct rtable *)dst;
+ rt = dst_rtable(dst);
} else if (dst->ops->family == AF_INET6) {
if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
ttl = tun_encap_info->default_ttl;
@@ -92,7 +90,7 @@ static int mpls_xmit(struct sk_buff *skb)
ttl = net->mpls.default_ttl;
else
ttl = ipv6_hdr(skb)->hop_limit;
- rt6 = (struct rt6_info *)dst;
+ rt6 = dst_rt6_info(dst);
} else {
goto drop;
}
@@ -108,7 +106,7 @@ static int mpls_xmit(struct sk_buff *skb)
hh_len = 0;
/* Ensure there is enough space for the headers in the skb */
- if (skb_cow(skb, hh_len + new_header_size))
+ if (skb_cow_head(skb, hh_len + new_header_size))
goto drop;
skb_set_inner_protocol(skb, skb->protocol);
diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c
index a8931349933c..b08ba959ac4f 100644
--- a/net/mptcp/crypto.c
+++ b/net/mptcp/crypto.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <crypto/sha2.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "protocol.h"
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 13fe0748dde8..2dd81e6c26bd 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -12,6 +12,7 @@
#include <net/netns/generic.h>
#include "protocol.h"
+#include "mib.h"
#define MPTCP_SYSCTL_PATH "net/mptcp"
@@ -27,8 +28,12 @@ struct mptcp_pernet {
#endif
unsigned int add_addr_timeout;
+ unsigned int blackhole_timeout;
unsigned int close_timeout;
unsigned int stale_loss_cnt;
+ atomic_t active_disable_times;
+ u8 syn_retrans_before_tcp_fallback;
+ unsigned long active_disable_stamp;
u8 mptcp_enabled;
u8 checksum_enabled;
u8 allow_join_initial_addr_port;
@@ -87,15 +92,88 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
pernet->add_addr_timeout = TCP_RTO_MAX;
+ pernet->blackhole_timeout = 3600;
+ pernet->syn_retrans_before_tcp_fallback = 2;
+ atomic_set(&pernet->active_disable_times, 0);
pernet->close_timeout = TCP_TIMEWAIT_LEN;
pernet->checksum_enabled = 0;
pernet->allow_join_initial_addr_port = 1;
pernet->stale_loss_cnt = 4;
pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
- strcpy(pernet->scheduler, "default");
+ strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));
}
#ifdef CONFIG_SYSCTL
+static int mptcp_set_scheduler(char *scheduler, const char *name)
+{
+ struct mptcp_sched_ops *sched;
+ int ret = 0;
+
+ rcu_read_lock();
+ sched = mptcp_sched_find(name);
+ if (sched)
+ strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
+ else
+ ret = -ENOENT;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int proc_scheduler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
+ char val[MPTCP_SCHED_NAME_MAX];
+ struct ctl_table tbl = {
+ .data = val,
+ .maxlen = MPTCP_SCHED_NAME_MAX,
+ };
+ int ret;
+
+ strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
+
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ ret = mptcp_set_scheduler(*scheduler, val);
+
+ return ret;
+}
+
+static int proc_available_schedulers(const struct ctl_table *ctl,
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table tbl = { .maxlen = MPTCP_SCHED_BUF_MAX, };
+ int ret;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+ if (!tbl.data)
+ return -ENOMEM;
+
+ mptcp_get_available_schedulers(tbl.data, MPTCP_SCHED_BUF_MAX);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ kfree(tbl.data);
+
+ return ret;
+}
+
+static int proc_blackhole_detect_timeout(const struct ctl_table *table,
+ int write, void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct mptcp_pernet *pernet = container_of(table->data,
+ struct mptcp_pernet,
+ blackhole_timeout);
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ atomic_set(&pernet->active_disable_times, 0);
+
+ return ret;
+}
+
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -148,7 +226,13 @@ static struct ctl_table mptcp_sysctl_table[] = {
.procname = "scheduler",
.maxlen = MPTCP_SCHED_NAME_MAX,
.mode = 0644,
- .proc_handler = proc_dostring,
+ .proc_handler = proc_scheduler,
+ },
+ {
+ .procname = "available_schedulers",
+ .maxlen = MPTCP_SCHED_BUF_MAX,
+ .mode = 0444,
+ .proc_handler = proc_available_schedulers,
},
{
.procname = "close_timeout",
@@ -156,7 +240,19 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {}
+ {
+ .procname = "blackhole_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_blackhole_detect_timeout,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "syn_retrans_before_tcp_fallback",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ },
};
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -178,7 +274,10 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[4].data = &pernet->stale_loss_cnt;
table[5].data = &pernet->pm_type;
table[6].data = &pernet->scheduler;
- table[7].data = &pernet->close_timeout;
+ /* table[7] is for available_schedulers which is read-only info */
+ table[8].data = &pernet->close_timeout;
+ table[9].data = &pernet->blackhole_timeout;
+ table[10].data = &pernet->syn_retrans_before_tcp_fallback;
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
ARRAY_SIZE(mptcp_sysctl_table));
@@ -198,7 +297,7 @@ err_alloc:
static void mptcp_pernet_del_table(struct mptcp_pernet *pernet)
{
- struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
+ const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
unregister_net_sysctl_table(pernet->ctl_table_hdr);
@@ -216,6 +315,115 @@ static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}
#endif /* CONFIG_SYSCTL */
+/* The following code block deals with middlebox issues with MPTCP,
+ * similar to what is done with TFO.
+ * The proposed solution is to disable active MPTCP globally when SYNs carrying
+ * MPC are dropped while SYNs without MPC are not. In this case, active-side
+ * MPTCP is disabled globally for 1h at first; if it happens again, it is
+ * disabled for 2h, then 4h, 8h, ...
+ * The timeout is reset back to 1h when an active MPTCP connection is fully
+ * established.
+ */
+
+/* Disable active MPTCP and record current jiffies and active_disable_times */
+void mptcp_active_disable(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct mptcp_pernet *pernet;
+
+ pernet = mptcp_get_pernet(net);
+
+ if (!READ_ONCE(pernet->blackhole_timeout))
+ return;
+
+ /* Paired with READ_ONCE() in mptcp_active_should_disable() */
+ WRITE_ONCE(pernet->active_disable_stamp, jiffies);
+
+ /* Paired with smp_rmb() in mptcp_active_should_disable().
+ * We want pernet->active_disable_stamp to be updated first.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&pernet->active_disable_times);
+
+ MPTCP_INC_STATS(net, MPTCP_MIB_BLACKHOLE);
+}
+
+/* Calculate timeout for MPTCP active disable
+ * Return true if we are still in the active MPTCP disable period
+ * Return false if timeout already expired and we should use active MPTCP
+ */
+bool mptcp_active_should_disable(struct sock *ssk)
+{
+ struct net *net = sock_net(ssk);
+ unsigned int blackhole_timeout;
+ struct mptcp_pernet *pernet;
+ unsigned long timeout;
+ int disable_times;
+ int multiplier;
+
+ pernet = mptcp_get_pernet(net);
+ blackhole_timeout = READ_ONCE(pernet->blackhole_timeout);
+
+ if (!blackhole_timeout)
+ return false;
+
+ disable_times = atomic_read(&pernet->active_disable_times);
+ if (!disable_times)
+ return false;
+
+ /* Paired with smp_mb__before_atomic() in mptcp_active_disable() */
+ smp_rmb();
+
+ /* Limit timeout to max: 2^6 * initial timeout */
+ multiplier = 1 << min(disable_times - 1, 6);
+
+ /* Paired with the WRITE_ONCE() in mptcp_active_disable(). */
+ timeout = READ_ONCE(pernet->active_disable_stamp) +
+ multiplier * blackhole_timeout * HZ;
+
+ return time_before(jiffies, timeout);
+}
+
+/* Enable active MPTCP and reset active_disable_times if needed */
+void mptcp_active_enable(struct sock *sk)
+{
+ struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
+
+ if (atomic_read(&pernet->active_disable_times)) {
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
+ atomic_set(&pernet->active_disable_times, 0);
+ }
+}
+
+/* Check the number of retransmissions, and fallback to TCP if needed */
+void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!sk_is_mptcp(ssk))
+ return;
+
+ subflow = mptcp_subflow_ctx(ssk);
+
+ if (subflow->request_mptcp && ssk->sk_state == TCP_SYN_SENT) {
+ struct net *net = sock_net(ssk);
+ u8 timeouts, to_max;
+
+ timeouts = inet_csk(ssk)->icsk_retransmits;
+ to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback;
+
+ if (timeouts == to_max || (timeouts < to_max && expired)) {
+ MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
+ subflow->mpc_drop = 1;
+ mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+ }
+ } else if (ssk->sk_state == TCP_SYN_SENT) {
+ subflow->mpc_drop = 0;
+ }
+}
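The fallback only triggers while the initial SYN+MPC is still being retried: either the configured retransmission budget is hit exactly, or the caller reports that the overall SYN timeout expired first. A standalone sketch of that predicate:

/* Sketch: when mptcp_active_detect_blackhole() falls back to plain TCP. */
#include <stdbool.h>
#include <stdio.h>

static bool should_fall_back(unsigned int timeouts, unsigned int to_max, bool expired)
{
	/* fall back on the to_max-th retransmit, or earlier if the SYN timer gave up */
	return timeouts == to_max || (timeouts < to_max && expired);
}

int main(void)
{
	printf("%d %d %d\n",
	       should_fall_back(2, 2, false),	/* budget reached */
	       should_fall_back(1, 2, true),	/* expired early */
	       should_fall_back(1, 2, false));	/* keep trying MPC */
	return 0;
}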
+
static int __net_init mptcp_net_init(struct net *net)
{
struct mptcp_pernet *pernet = mptcp_get_pernet(net);
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index 7017dd60659d..02205f7994d7 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
@@ -48,7 +47,7 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM;
if (sf->request_bkup)
flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC;
- if (sf->fully_established)
+ if (READ_ONCE(sf->fully_established))
flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED;
if (sf->conn_finished)
flags |= MPTCP_SUBFLOW_FLAG_CONNECTED;
@@ -95,7 +94,7 @@ static size_t subflow_get_info_size(const struct sock *sk)
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
- nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index ad28da655f8b..a29ff901df75 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -68,12 +68,12 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
skb = skb_peek_tail(&sk->sk_receive_queue);
if (skb) {
WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
+ pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
}
- pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+ pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
}
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index c30405e76833..19eb9292bd60 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -15,15 +15,26 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
+ SNMP_MIB_ITEM("MPCapableSYNTXDrop", MPTCP_MIB_MPCAPABLEACTIVEDROP),
+ SNMP_MIB_ITEM("MPCapableSYNTXDisabled", MPTCP_MIB_MPCAPABLEACTIVEDISABLED),
+ SNMP_MIB_ITEM("MPCapableEndpAttempt", MPTCP_MIB_MPCAPABLEENDPATTEMPT),
SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+ SNMP_MIB_ITEM("MPJoinSynTx", MPTCP_MIB_JOINSYNTX),
+ SNMP_MIB_ITEM("MPJoinSynTxCreatSkErr", MPTCP_MIB_JOINSYNTXCREATSKERR),
+ SNMP_MIB_ITEM("MPJoinSynTxBindErr", MPTCP_MIB_JOINSYNTXBINDERR),
+ SNMP_MIB_ITEM("MPJoinSynTxConnectErr", MPTCP_MIB_JOINSYNTXCONNECTERR),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
+ SNMP_MIB_ITEM("DSSCorruptionFallback", MPTCP_MIB_DSSCORRUPTIONFALLBACK),
+ SNMP_MIB_ITEM("DSSCorruptionReset", MPTCP_MIB_DSSCORRUPTIONRESET),
SNMP_MIB_ITEM("InfiniteMapTx", MPTCP_MIB_INFINITEMAPTX),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
@@ -67,6 +78,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("Blackhole", MPTCP_MIB_BLACKHOLE),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index dd7fd1f246b5..128282982843 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <net/inet_common.h>
+
enum linux_mptcp_mib_field {
MPTCP_MIB_NUM = 0,
MPTCP_MIB_MPCAPABLEPASSIVE, /* Received SYN with MP_CAPABLE */
@@ -8,15 +10,26 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
+ MPTCP_MIB_MPCAPABLEACTIVEDROP, /* Client-side fallback due to a MPC drop */
+ MPTCP_MIB_MPCAPABLEACTIVEDISABLED, /* Client-side disabled due to past issues */
+ MPTCP_MIB_MPCAPABLEENDPATTEMPT, /* Prohibited MPC to port-based endp */
MPTCP_MIB_TOKENFALLBACKINIT, /* Could not init/allocate token */
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNTX, /* Sending a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNTXCREATSKERR, /* Not able to create a socket when sending a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNTXBINDERR, /* Not able to bind() the address when sending a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNTXCONNECTERR, /* Not able to connect() when sending a SYN + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
+ MPTCP_MIB_DSSCORRUPTIONFALLBACK,/* DSS corruption detected, fallback */
+ MPTCP_MIB_DSSCORRUPTIONRESET, /* DSS corruption detected, MPJ subflow reset */
MPTCP_MIB_INFINITEMAPTX, /* Sent an infinite mapping */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
MPTCP_MIB_DSSTCPMISMATCH, /* DSS-mapping did not map with TCP's sequence numbers */
@@ -66,6 +79,7 @@ enum linux_mptcp_mib_field {
*/
MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
+ MPTCP_MIB_BLACKHOLE, /* A blackhole has been detected */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 5409c2ea3f57..0566dd793810 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
@@ -225,6 +224,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static const struct inet_diag_handler mptcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = mptcp_diag_dump,
.dump_one = mptcp_diag_dump_one,
.idiag_get_info = mptcp_diag_get_info,
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
index 670da7822e6c..dcffd847af33 100644
--- a/net/mptcp/mptcp_pm_gen.c
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -14,7 +14,7 @@
const struct nla_policy mptcp_pm_address_nl_policy[MPTCP_PM_ADDR_ATTR_IF_IDX + 1] = {
[MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
[MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
- [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
+ [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_BE32, },
[MPTCP_PM_ADDR_ATTR_ADDR6] = NLA_POLICY_EXACT_LEN(16),
[MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16, },
[MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32, },
@@ -32,8 +32,9 @@ const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1]
};
/* MPTCP_PM_CMD_GET_ADDR - do */
-const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
- [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
};
/* MPTCP_PM_CMD_FLUSH_ADDRS - do */
@@ -110,8 +111,7 @@ const struct genl_ops mptcp_pm_nl_ops[11] = {
.doit = mptcp_pm_nl_get_addr_doit,
.dumpit = mptcp_pm_nl_get_addr_dumpit,
.policy = mptcp_pm_get_addr_nl_policy,
- .maxattr = MPTCP_PM_ENDPOINT_ADDR,
- .flags = GENL_UNS_ADMIN_PERM,
+ .maxattr = MPTCP_PM_ATTR_TOKEN,
},
{
.cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
index ac9fc7225b6a..e24258f6f819 100644
--- a/net/mptcp/mptcp_pm_gen.h
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -18,7 +18,7 @@ extern const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADD
extern const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
-extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1];
extern const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 63fc0758c22d..23949ae2a3a8 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -108,7 +108,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_DSS;
mp_opt->use_map = 1;
mp_opt->mpc_map = 1;
- mp_opt->use_ack = 0;
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
}
@@ -117,7 +116,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
ptr += 2;
}
- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
+ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
version, flags, opsize, mp_opt->sndr_key,
mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
break;
@@ -131,7 +130,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->token, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
@@ -142,26 +141,21 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 8;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->thmac, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
ptr += 2;
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
- pr_debug("MP_JOIN hmac");
+ pr_debug("MP_JOIN hmac\n");
}
break;
case MPTCPOPT_DSS:
- pr_debug("DSS");
+ pr_debug("DSS\n");
ptr++;
- /* we must clear 'mpc_map' be able to detect MP_CAPABLE
- * map vs DSS map in mptcp_incoming_options(), and reconstruct
- * map info accordingly
- */
- mp_opt->mpc_map = 0;
flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
@@ -169,7 +163,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
- pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
+ pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
mp_opt->data_fin, mp_opt->dsn64,
mp_opt->use_map, mp_opt->ack64,
mp_opt->use_ack);
@@ -207,7 +201,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
}
- pr_debug("data_ack=%llu", mp_opt->data_ack);
+ pr_debug("data_ack=%llu\n", mp_opt->data_ack);
}
if (mp_opt->use_map) {
@@ -231,7 +225,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 2;
}
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
mp_opt->data_seq, mp_opt->subflow_seq,
mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
mp_opt->csum);
@@ -293,7 +287,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ahmac = get_unaligned_be64(ptr);
ptr += 8;
}
- pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
(mp_opt->addr.family == AF_INET6) ? "6" : "",
mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
break;
@@ -309,7 +303,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
for (i = 0; i < mp_opt->rm_list.nr; i++)
mp_opt->rm_list.ids[i] = *ptr++;
- pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
+ pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
break;
case MPTCPOPT_MP_PRIO:
@@ -318,7 +312,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_PRIO;
mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
- pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
+ pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
break;
case MPTCPOPT_MP_FASTCLOSE:
@@ -329,7 +323,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rcvr_key = get_unaligned_be64(ptr);
ptr += 8;
mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
- pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
+ pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
break;
case MPTCPOPT_RST:
@@ -343,7 +337,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
flags = *ptr++;
mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
mp_opt->reset_reason = *ptr;
- pr_debug("MP_RST: transient=%u reason=%u",
+ pr_debug("MP_RST: transient=%u reason=%u\n",
mp_opt->reset_transient, mp_opt->reset_reason);
break;
@@ -354,7 +348,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 2;
mp_opt->suboptions |= OPTION_MPTCP_FAIL;
mp_opt->fail_seq = get_unaligned_be64(ptr);
- pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
+ pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
break;
default:
@@ -369,8 +363,11 @@ void mptcp_get_options(const struct sk_buff *skb,
const unsigned char *ptr;
int length;
- /* initialize option status */
- mp_opt->suboptions = 0;
+ /* Ensure that casting the whole status to u32 is efficient and safe */
+ BUILD_BUG_ON(sizeof_field(struct mptcp_options_received, status) != sizeof(u32));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct mptcp_options_received, status),
+ sizeof(u32)));
+ *(u32 *)&mp_opt->status = 0;
length = (th->doff * 4) - sizeof(struct tcphdr);
ptr = (const unsigned char *)(th + 1);
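The BUILD_BUG_ON() pair documents the assumption that makes the single aligned 32-bit store safe for resetting the whole block of parsed-option flags. A self-contained sketch of the same guard-plus-clear pattern, on a purely illustrative struct:

/* Sketch: clearing a block of bitfields with one aligned 32-bit store,
 * guarded the same way as in mptcp_get_options(). The struct is illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct parsed_opts {
	struct {
		unsigned int use_map:1,
			     use_ack:1,
			     mpc_map:1,
			     echo:1;
	} status;
	uint64_t key;	/* untouched by the reset */
};

int main(void)
{
	struct parsed_opts opts = { .status = { .use_map = 1, .echo = 1 }, .key = 42 };

	/* same checks as the BUILD_BUG_ON() pair: size and alignment of the block */
	_Static_assert(sizeof(opts.status) == sizeof(uint32_t), "status must be 32 bits");
	_Static_assert(offsetof(struct parsed_opts, status) % sizeof(uint32_t) == 0,
		       "status must be 32-bit aligned");

	*(uint32_t *)&opts.status = 0;	/* one store clears every flag */

	printf("use_map=%u echo=%u key=%llu\n", opts.status.use_map,
	       opts.status.echo, (unsigned long long)opts.key);
	return 0;
}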
@@ -417,7 +414,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
- pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
+ pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
subflow->local_nonce);
opts->suboptions = OPTION_MPTCP_MPJ_SYN;
opts->join_id = subflow->local_id;
@@ -461,7 +458,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
return false;
/* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
- if (subflow->fully_established || snd_data_fin_enable ||
+ if (READ_ONCE(subflow->fully_established) || snd_data_fin_enable ||
subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
sk->sk_state != TCP_ESTABLISHED)
return false;
@@ -500,7 +497,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPC_ACK;
}
- pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
+ pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
subflow, subflow->local_key, subflow->remote_key,
data_len);
@@ -509,7 +506,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
opts->suboptions = OPTION_MPTCP_MPJ_ACK;
memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
*size = TCPOLEN_MPTCP_MPJ_ACK;
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* we can use the full delegate action helper only from BH context
* If we are in process context - sk is flushing the backlog at
@@ -607,7 +604,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
}
opts->ext_copy.use_ack = 1;
opts->suboptions = OPTION_MPTCP_DSS;
- WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
/* Add kind/length/subtype/flag overhead if mapping is not populated */
if (dss_size == 0)
@@ -655,6 +651,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
bool drop_other_suboptions = false;
unsigned int opt_size = *size;
+ struct mptcp_addr_info addr;
bool echo;
int len;
@@ -663,19 +660,26 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
*/
if (!mptcp_pm_should_add_signal(msk) ||
(opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
- !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
+ !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr,
&echo, &drop_other_suboptions))
return false;
+ /*
+	 * Later on, mptcp_write_options() will enforce mutual exclusion with
+	 * DSS; bail out if such an option is already set and we can't drop it.
+ */
if (drop_other_suboptions)
remaining += opt_size;
- len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
+ else if (opts->suboptions & OPTION_MPTCP_DSS)
+ return false;
+
+ len = mptcp_add_addr_len(addr.family, echo, !!addr.port);
if (remaining < len)
return false;
*size = len;
if (drop_other_suboptions) {
- pr_debug("drop other suboptions");
+ pr_debug("drop other suboptions\n");
opts->suboptions = 0;
/* note that e.g. DSS could have written into the memory
@@ -686,16 +690,17 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->ahmac = 0;
*size -= opt_size;
}
+ opts->addr = addr;
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
if (!echo) {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
+ opts->ahmac = add_addr_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
&opts->addr);
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
}
- pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
return true;
@@ -726,7 +731,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
opts->rm_list = rm_list;
for (i = 0; i < opts->rm_list.nr; i++)
- pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
+ pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
return true;
}
@@ -752,7 +757,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_PRIO;
opts->backup = subflow->request_bkup;
- pr_debug("prio=%d", opts->backup);
+ pr_debug("prio=%d\n", opts->backup);
return true;
}
@@ -792,9 +797,9 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
*size = TCPOLEN_MPTCP_FASTCLOSE;
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
- opts->rcvr_key = msk->remote_key;
+ opts->rcvr_key = READ_ONCE(msk->remote_key);
- pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
+ pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
return true;
}
@@ -816,7 +821,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_FAIL;
opts->fail_seq = subflow->map_seq;
- pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
+ pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
return true;
@@ -904,16 +909,16 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
opts->csum_reqd = subflow_req->csum_reqd;
opts->allow_join_id0 = subflow_req->allow_join_id0;
*size = TCPOLEN_MPTCP_MPC_SYNACK;
- pr_debug("subflow_req=%p, local_key=%llu",
+ pr_debug("subflow_req=%p, local_key=%llu\n",
subflow_req, subflow_req->local_key);
return true;
} else if (subflow_req->mp_join) {
opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
- opts->backup = subflow_req->backup;
+ opts->backup = subflow_req->request_bkup;
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
- pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
subflow_req, opts->backup, opts->join_id,
opts->thmac, opts->nonce);
*size = TCPOLEN_MPTCP_MPJ_SYNACK;
@@ -930,7 +935,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
/* here we can process OoO, in-window pkts, only in-sequence 4th ack
* will make the subflow fully established
*/
- if (likely(subflow->fully_established)) {
+ if (likely(READ_ONCE(subflow->fully_established))) {
/* on passive sockets, check for 3rd ack retransmission
* note that msk is always set by subflow_syn_recv_sock()
* for mp_join subflows
@@ -958,7 +963,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (subflow->remote_key_valid &&
(((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
+ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
+ (!mp_opt->echo || subflow->mp_join)))) {
/* subflows are fully established as soon as we get any
* additional ack, including ADD_ADDR.
*/
@@ -1031,7 +1037,7 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
{
msk->bytes_acked += new_snd_una - msk->snd_una;
- msk->snd_una = new_snd_una;
+ WRITE_ONCE(msk->snd_una, new_snd_una);
}
static void ack_update_msk(struct mptcp_sock *msk,
@@ -1058,21 +1064,22 @@ static void ack_update_msk(struct mptcp_sock *msk,
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
if (after64(new_wnd_end, msk->wnd_end))
- msk->wnd_end = new_wnd_end;
+ WRITE_ONCE(msk->wnd_end, new_wnd_end);
/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
- if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ if (after64(msk->wnd_end, snd_nxt))
__mptcp_check_push(sk, ssk);
if (after64(new_snd_una, old_snd_una)) {
__mptcp_snd_una_update(msk, new_snd_una);
__mptcp_data_acked(sk);
}
+ msk->last_ack_recv = tcp_jiffies32;
mptcp_data_unlock(sk);
trace_ack_update_msk(mp_opt->data_ack,
old_snd_una, new_snd_una,
- new_wnd_end, msk->wnd_end);
+ new_wnd_end, READ_ONCE(msk->wnd_end));
}
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
@@ -1100,8 +1107,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
if (mp_opt->echo)
return true;
- hmac = add_addr_generate_hmac(msk->remote_key,
- msk->local_key,
+ hmac = add_addr_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
&mp_opt->addr);
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
@@ -1148,7 +1155,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
- msk->local_key == mp_opt.rcvr_key) {
+ READ_ONCE(msk->local_key) == mp_opt.rcvr_key) {
WRITE_ONCE(msk->rcv_fastclose, true);
mptcp_schedule_work((struct sock *)msk);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
@@ -1279,7 +1286,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
}
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
}
- return;
+ goto update_wspace;
}
if (rcv_wnd_new != rcv_wnd_old) {
@@ -1304,6 +1311,9 @@ raise_win:
th->window = htons(new_win);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
}
+
+update_wspace:
+ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 4ae19113b8eb..16c336c51940 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -6,7 +6,6 @@
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
@@ -20,7 +19,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
{
u8 add_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
+ pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
lockdep_assert_held(&msk->pm.lock);
@@ -46,7 +45,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
{
u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
if (rm_addr) {
MPTCP_ADD_STATS(sock_net((struct sock *)msk),
@@ -61,23 +60,13 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
return 0;
}
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
-{
- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
-
- spin_lock_bh(&msk->pm.lock);
- mptcp_pm_nl_rm_subflow_received(msk, rm_list);
- spin_unlock_bh(&msk->pm.lock);
- return 0;
-}
-
/* path manager event handlers */
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
+ pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -101,7 +90,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
+ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
subflows_max, READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
@@ -125,7 +114,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
enum mptcp_pm_status new_status)
{
- pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
+ pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
BIT(new_status));
if (msk->pm.status & BIT(new_status))
return false;
@@ -140,7 +129,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
struct mptcp_pm_data *pm = &msk->pm;
bool announce = false;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -164,14 +153,17 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
+
+ if (msk->token)
+ mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
}
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!READ_ONCE(pm->work_pending))
return;
@@ -223,7 +215,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
+ pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
READ_ONCE(pm->accept_addr));
mptcp_event_addr_announced(ssk, addr);
@@ -237,7 +229,9 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
} else {
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
- } else if (!READ_ONCE(pm->accept_addr)) {
+ /* id0 should not have a different address */
+ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
+ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
@@ -254,7 +248,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -278,7 +272,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
struct mptcp_pm_data *pm = &msk->pm;
u8 i;
- pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
for (i = 0; i < rm_list->nr; i++)
mptcp_event_addr_removed(msk, rm_list->ids[i]);
@@ -310,19 +304,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
- pr_debug("fail_seq=%llu", fail_seq);
+ pr_debug("fail_seq=%llu\n", fail_seq);
if (!READ_ONCE(msk->allow_infinite_fallback))
return;
if (!subflow->fail_tout) {
- pr_debug("send MP_FAIL response and infinite map");
+ pr_debug("send MP_FAIL response and infinite map\n");
subflow->send_mp_fail = 1;
subflow->send_infinite_map = 1;
tcp_send_ack(sk);
} else {
- pr_debug("MP_FAIL response received");
+ pr_debug("MP_FAIL response received\n");
WRITE_ONCE(subflow->fail_tout, 0);
}
}
@@ -427,27 +421,39 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return mptcp_pm_nl_get_local_id(msk, &skc_local);
}
-int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
- u8 *flags, int *ifindex)
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
- *flags = 0;
- *ifindex = 0;
+ struct mptcp_addr_info skc_local;
- if (!id)
- return 0;
+ mptcp_local_address((struct sock_common *)skc, &skc_local);
if (mptcp_pm_is_userspace(msk))
- return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
- return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
+ return mptcp_userspace_pm_is_backup(msk, &skc_local);
+
+ return mptcp_pm_nl_is_backup(msk, &skc_local);
+}
+
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_get_addr(skb, info);
+ return mptcp_pm_nl_get_addr(skb, info);
+}
+
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_dump_addr(msg, cb);
+ return mptcp_pm_nl_dump_addr(msg, cb);
}
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
- if (token)
- return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
- return mptcp_pm_nl_set_flags(net, loc, bkup);
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_set_flags(skb, info);
+ return mptcp_pm_nl_set_flags(skb, info);
}
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 58d17d9604e7..7868207c4e9d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -8,18 +8,13 @@
#include <linux/inet.h>
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>
-#include <net/genetlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
-
-/* forward declaration */
-static struct genl_family mptcp_genl_family;
+#include "mptcp_pm_gen.h"
static int pm_nl_pernet_id;
@@ -112,8 +107,8 @@ static void remote_address(const struct sock_common *skc,
#endif
}
-static bool lookup_subflow_by_saddr(const struct list_head *list,
- const struct mptcp_addr_info *saddr)
+bool mptcp_lookup_subflow_by_saddr(const struct list_head *list,
+ const struct mptcp_addr_info *saddr)
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
@@ -135,12 +130,15 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
- struct sock_common *skc;
list_for_each_entry(subflow, list, node) {
- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (!((1 << inet_sk_state_load(ssk)) &
+ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
+ continue;
- remote_address(skc, &cur);
+ remote_address((struct sock_common *)ssk, &cur);
if (mptcp_addresses_equal(&cur, daddr, daddr->port))
return true;
}
@@ -148,11 +146,13 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
return false;
}
-static struct mptcp_pm_addr_entry *
+static bool
select_local_address(const struct pm_nl_pernet *pernet,
- const struct mptcp_sock *msk)
+ const struct mptcp_sock *msk,
+ struct mptcp_pm_local *new_local)
{
- struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ struct mptcp_pm_addr_entry *entry;
+ bool found = false;
msk_owned_by_me(msk);
@@ -164,17 +164,23 @@ select_local_address(const struct pm_nl_pernet *pernet,
if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
continue;
- ret = entry;
+ new_local->addr = entry->addr;
+ new_local->flags = entry->flags;
+ new_local->ifindex = entry->ifindex;
+ found = true;
break;
}
rcu_read_unlock();
- return ret;
+
+ return found;
}
-static struct mptcp_pm_addr_entry *
-select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
+static bool
+select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
+ struct mptcp_pm_local *new_local)
{
- struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ struct mptcp_pm_addr_entry *entry;
+ bool found = false;
rcu_read_lock();
/* do not keep any additional per socket state, just signal
@@ -189,11 +195,15 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
- ret = entry;
+ new_local->addr = entry->addr;
+ new_local->flags = entry->flags;
+ new_local->ifindex = entry->ifindex;
+ found = true;
break;
}
rcu_read_unlock();
- return ret;
+
+ return found;
}
unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
@@ -284,7 +294,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!msk)
return;
@@ -303,7 +313,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) {
- pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
+ pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
mptcp_pm_announce_addr(msk, &entry->addr, false);
mptcp_pm_add_addr_send_ack(msk);
entry->retrans_times++;
@@ -328,15 +338,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
+ struct timer_list *add_timer = NULL;
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
- if (entry && (!check_id || entry->addr.id == addr->id))
+ if (entry && (!check_id || entry->addr.id == addr->id)) {
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
+ add_timer = &entry->add_timer;
+ }
+ if (!check_id && entry)
+ list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
- if (entry && (!check_id || entry->addr.id == addr->id))
- sk_stop_timer_sync(sk, &entry->add_timer);
+	/* no lock needed: sk_stop_timer_sync() calls del_timer_sync() */
+ if (add_timer)
+ sk_stop_timer_sync(sk, add_timer);
return entry;
}
@@ -353,7 +369,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (add_entry) {
- if (mptcp_pm_is_kernel(msk))
+ if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
return false;
sk_reset_timer(sk, &add_entry->add_timer,
@@ -384,7 +400,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
struct sock *sk = (struct sock *)msk;
LIST_HEAD(free_list);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&msk->pm.lock);
list_splice_init(&msk->pm.anno_list, &free_list);
@@ -470,13 +486,12 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow;
- pr_debug("send ack for %s",
+ pr_debug("send ack for %s\n",
prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
slow = lock_sock_fast(ssk);
if (prio) {
subflow->send_mp_prio = 1;
- subflow->backup = backup;
subflow->request_bkup = backup;
}
@@ -497,7 +512,8 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
{
struct mptcp_pm_addr_entry *entry;
- list_for_each_entry(entry, &pernet->local_addr_list, list) {
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
+ lockdep_is_held(&pernet->lock)) {
if (entry->addr.id == id)
return entry;
}
@@ -505,15 +521,13 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
}
static struct mptcp_pm_addr_entry *
-__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
- bool lookup_by_id)
+__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
struct mptcp_pm_addr_entry *entry;
- list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id &&
- mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
- (lookup_by_id && entry->addr.id == info->id))
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
+ lockdep_is_held(&pernet->lock)) {
+ if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
return entry;
}
return NULL;
@@ -522,10 +536,11 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- struct mptcp_pm_addr_entry *local;
unsigned int add_addr_signal_max;
+ bool signal_and_subflow = false;
unsigned int local_addr_max;
struct pm_nl_pernet *pernet;
+ struct mptcp_pm_local local;
unsigned int subflows_max;
pernet = pm_nl_get_pernet(sock_net(sk));
@@ -543,7 +558,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
rcu_read_lock();
- entry = __lookup_addr(pernet, &mpc_addr, false);
+ entry = __lookup_addr(pernet, &mpc_addr);
if (entry) {
__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
msk->mpc_endpoint_id = entry->addr.id;
@@ -564,8 +579,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check first for announce */
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
- local = select_signal_address(pernet, msk);
-
/* due to racing events on both ends we can reach here while
* previous add address is still running: if we invoke now
* mptcp_pm_announce_addr(), that will fail and the
@@ -576,16 +589,30 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
return;
- if (local) {
- if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
- msk->pm.add_addr_signaled++;
- mptcp_pm_announce_addr(msk, &local->addr, false);
- mptcp_pm_nl_addr_send_ack(msk);
- }
- }
+ if (!select_signal_address(pernet, msk, &local))
+ goto subflow;
+
+ /* If the alloc fails, we are on memory pressure, not worth
+ * continuing, and trying to create subflows.
+ */
+ if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
+ return;
+
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+ msk->pm.add_addr_signaled++;
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+
+ mptcp_pm_announce_addr(msk, &local.addr, false);
+ mptcp_pm_nl_addr_send_ack(msk);
+
+ if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ signal_and_subflow = true;
}
+subflow:
/* check if should create a new subflow */
while (msk->pm.local_addr_used < local_addr_max &&
msk->pm.subflows < subflows_max) {
@@ -593,21 +620,28 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
bool fullmesh;
int i, nr;
- local = select_local_address(pernet, msk);
- if (!local)
+ if (signal_and_subflow)
+ signal_and_subflow = false;
+ else if (!select_local_address(pernet, msk, &local))
break;
- fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+ fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+ else /* local_addr_used is not decr for ID 0 */
+ msk->pm.local_addr_used++;
- msk->pm.local_addr_used++;
- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
- nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
+ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
if (nr == 0)
continue;
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
+ __mptcp_subflow_connect(sk, &local, &addrs[i]);
spin_lock_bh(&msk->pm.lock);
}
mptcp_pm_nl_check_work_pending(msk);
@@ -628,10 +662,11 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
*/
static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
struct mptcp_addr_info *remote,
- struct mptcp_addr_info *addrs)
+ struct mptcp_pm_local *locals)
{
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *entry;
+ struct mptcp_addr_info mpc_addr;
struct pm_nl_pernet *pernet;
unsigned int subflows_max;
int i = 0;
@@ -639,6 +674,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
pernet = pm_nl_get_pernet_from_msk(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
rcu_read_lock();
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
@@ -648,8 +685,16 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
continue;
if (msk->pm.subflows < subflows_max) {
+ locals[i].addr = entry->addr;
+ locals[i].flags = entry->flags;
+ locals[i].ifindex = entry->ifindex;
+
+ /* Special case for ID0: set the correct ID */
+ if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port))
+ locals[i].addr.id = 0;
+
msk->pm.subflows++;
- addrs[i++] = entry->addr;
+ i++;
}
}
rcu_read_unlock();
@@ -658,21 +703,19 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
* 'IPADDRANY' local address
*/
if (!i) {
- struct mptcp_addr_info local;
-
- memset(&local, 0, sizeof(local));
- local.family =
+ memset(&locals[i], 0, sizeof(locals[i]));
+ locals[i].addr.family =
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
remote->family == AF_INET6 &&
ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
#endif
remote->family;
- if (!mptcp_pm_addr_families_match(sk, &local, remote))
+ if (!mptcp_pm_addr_families_match(sk, &locals[i].addr, remote))
return 0;
msk->pm.subflows++;
- addrs[i++] = local;
+ i++;
}
return i;
@@ -680,17 +723,18 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
{
- struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
+ struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX];
struct sock *sk = (struct sock *)msk;
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool sf_created = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("accepted %d:%d remote family %d",
+ pr_debug("accepted %d:%d remote family %d\n",
msk->pm.add_addr_accepted, add_addr_accept_max,
msk->pm.remote.family);
@@ -708,24 +752,38 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
/* connect to the specified remote address, using whatever
* local address the routing configuration will pick.
*/
- nr = fill_local_addresses_vec(msk, &remote, addrs);
+ nr = fill_local_addresses_vec(msk, &remote, locals);
if (nr == 0)
return;
- msk->pm.add_addr_accepted++;
- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
- WRITE_ONCE(msk->pm.accept_addr, false);
-
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &addrs[i], &remote);
+ if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0)
+ sf_created = true;
spin_lock_bh(&msk->pm.lock);
+
+ if (sf_created) {
+ /* add_addr_accepted is not decr for ID 0 */
+ if (remote.id)
+ msk->pm.add_addr_accepted++;
+ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ msk->pm.subflows >= subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
+ }
+}
+
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote)
+{
+ struct mptcp_addr_info mpc_remote;
+
+ remote_address((struct sock_common *)msk, &mpc_remote);
+ return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
}
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
{
- struct mptcp_subflow_context *subflow;
+ struct mptcp_subflow_context *subflow, *alt = NULL;
msk_owned_by_me(msk);
lockdep_assert_held(&msk->pm.lock);
@@ -734,9 +792,20 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
!mptcp_pm_should_rm_signal(msk))
return;
- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
- if (subflow)
- mptcp_pm_send_ack(msk, subflow, false, false);
+ mptcp_for_each_subflow(msk, subflow) {
+ if (__mptcp_subflow_active(subflow)) {
+ if (!subflow->stale) {
+ mptcp_pm_send_ack(msk, subflow, false, false);
+ return;
+ }
+
+ if (!alt)
+ alt = subflow;
+ }
+ }
+
+ if (alt)
+ mptcp_pm_send_ack(msk, alt, false, false);
}
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
@@ -746,7 +815,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
{
struct mptcp_subflow_context *subflow;
- pr_debug("bkup=%d", bkup);
+ pr_debug("bkup=%d\n", bkup);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -769,11 +838,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
return -EINVAL;
}
-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
-{
- return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
-}
-
static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list,
enum linux_mptcp_mib_field rm_type)
@@ -782,7 +846,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
struct sock *sk = (struct sock *)msk;
u8 i;
- pr_debug("%s rm_list_nr %d",
+ pr_debug("%s rm_list_nr %d\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
msk_owned_by_me(msk);
@@ -806,37 +870,45 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
u8 id = subflow_get_local_id(subflow);
+ if ((1 << inet_sk_state_load(ssk)) &
+ (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE))
+ continue;
if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
continue;
- if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
continue;
- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
+ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
i, rm_id, id, remote_id, msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
+ removed |= subflow->request_join;
/* the following takes care of updating the subflows counter */
mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
- removed = true;
- __MPTCP_INC_STATS(sock_net(sk), rm_type);
+ if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
}
- if (rm_type == MPTCP_MIB_RMSUBFLOW)
- __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+
+ if (rm_type == MPTCP_MIB_RMADDR)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
+
if (!removed)
continue;
if (!mptcp_pm_is_kernel(msk))
continue;
- if (rm_type == MPTCP_MIB_RMADDR) {
- msk->pm.add_addr_accepted--;
- WRITE_ONCE(msk->pm.accept_addr, true);
- } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
- msk->pm.local_addr_used--;
+ if (rm_type == MPTCP_MIB_RMADDR && rm_id &&
+ !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ /* Note: if the subflow has been closed before, this
+ * add_addr_accepted counter will not be decremented.
+ */
+ if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
+ WRITE_ONCE(msk->pm.accept_addr, true);
}
}
}
@@ -846,8 +918,8 @@ static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
}
-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
- const struct mptcp_rm_list *rm_list)
+static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list)
{
mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
}
@@ -863,7 +935,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
spin_lock_bh(&msk->pm.lock);
- pr_debug("msk=%p status=%x", msk, pm->status);
+ pr_debug("msk=%p status=%x\n", msk, pm->status);
if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
@@ -905,7 +977,7 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
struct mptcp_pm_addr_entry *entry,
- bool needs_id)
+ bool needs_id, bool replace)
{
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
unsigned int addr_max;
@@ -945,6 +1017,17 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
if (entry->addr.id)
goto out;
+ /* allow callers that only need to look up the local
+ * addr's id to skip replacement. This allows them to
+ * avoid calling synchronize_rcu in the packet recv
+ * path.
+ */
+ if (!replace) {
+ kfree(entry);
+ ret = cur->addr.id;
+ goto out;
+ }
+
pernet->addrs--;
entry->addr.id = cur->addr.id;
list_del_rcu(&cur->list);
@@ -1059,6 +1142,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
*/
inet_sk_state_store(newsk, TCP_LISTEN);
lock_sock(ssk);
+ WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true);
err = __inet_listen_sk(ssk, backlog);
if (!err)
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
@@ -1070,17 +1154,13 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
{
struct mptcp_pm_addr_entry *entry;
struct pm_nl_pernet *pernet;
- int ret = -1;
+ int ret;
pernet = pm_nl_get_pernet_from_msk(msk);
rcu_read_lock();
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
- ret = entry->addr.id;
- break;
- }
- }
+ entry = __lookup_addr(pernet, skc);
+ ret = entry ? entry->addr.id : -1;
rcu_read_unlock();
if (ret >= 0)
return ret;
@@ -1096,13 +1176,27 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
entry->ifindex = 0;
entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
entry->lsk = NULL;
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false);
if (ret < 0)
kfree(entry);
return ret;
}
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ struct mptcp_pm_addr_entry *entry;
+ bool backup;
+
+ rcu_read_lock();
+ entry = __lookup_addr(pernet, skc);
+ backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ rcu_read_unlock();
+
+ return backup;
+}
+
#define MPTCP_PM_CMD_GRP_OFFSET 0
#define MPTCP_PM_EV_GRP_OFFSET 1
@@ -1263,20 +1357,27 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
return pm_nl_get_pernet(genl_info_net(info));
}
-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
+ struct mptcp_addr_info *addr)
{
struct mptcp_sock *msk;
long s_slot = 0, s_num = 0;
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
+ struct mptcp_addr_info mpc_addr;
if (!READ_ONCE(msk->fully_established) ||
mptcp_pm_is_userspace(msk))
goto next;
+ /* if the endp linked to the init sf is re-added with a != ID */
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
lock_sock(sk);
spin_lock_bh(&msk->pm.lock);
+ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
+ msk->mpc_endpoint_id = addr->id;
mptcp_pm_create_subflow_or_signal_addr(msk);
spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
@@ -1312,8 +1413,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
return ret;
- if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
- GENL_SET_ERR_MSG(info, "flags must have signal when using port");
+ if (addr.addr.port && !address_use_port(&addr)) {
+ GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
return -EINVAL;
}
@@ -1343,13 +1444,14 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
}
}
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
- !mptcp_pm_has_addr_attr_id(attr, info));
+ !mptcp_pm_has_addr_attr_id(attr, info),
+ true);
if (ret < 0) {
GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
goto out_free;
}
- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
+ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
return 0;
out_free:
@@ -1357,32 +1459,13 @@ out_free:
return ret;
}
-int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
- u8 *flags, int *ifindex)
-{
- struct mptcp_pm_addr_entry *entry;
- struct sock *sk = (struct sock *)msk;
- struct net *net = sock_net(sk);
-
- rcu_read_lock();
- entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
- if (entry) {
- *flags = entry->flags;
- *ifindex = entry->ifindex;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr)
+bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
{
struct mptcp_pm_add_entry *entry;
entry = mptcp_pm_del_add_timer(msk, addr, false);
if (entry) {
- list_del(&entry->list);
kfree(entry);
return true;
}
@@ -1390,6 +1473,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
return false;
}
+static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
+}
+
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool force)
@@ -1397,28 +1486,38 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
struct mptcp_rm_list list = { .nr = 0 };
bool ret;
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
- ret = remove_anno_list_by_saddr(msk, addr);
+ ret = mptcp_remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
spin_lock_bh(&msk->pm.lock);
+ if (ret) {
+ __set_bit(addr->id, msk->pm.id_avail_bitmap);
+ msk->pm.add_addr_signaled--;
+ }
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
return ret;
}
+static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
+{
+ /* If it was marked as used, and not ID 0, decrement local_addr_used */
+ if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
+ id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
+ msk->pm.local_addr_used--;
+}
+
static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
const struct mptcp_pm_addr_entry *entry)
{
const struct mptcp_addr_info *addr = &entry->addr;
- struct mptcp_rm_list list = { .nr = 0 };
+ struct mptcp_rm_list list = { .nr = 1 };
long s_slot = 0, s_num = 0;
struct mptcp_sock *msk;
- pr_debug("remove_id=%d", addr->id);
-
- list.ids[list.nr++] = addr->id;
+ pr_debug("remove_id=%d\n", addr->id);
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
@@ -1427,17 +1526,26 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
if (mptcp_pm_is_userspace(msk))
goto next;
- if (list_empty(&msk->conn_list)) {
- mptcp_pm_remove_anno_addr(msk, addr, false);
- goto next;
- }
-
lock_sock(sk);
- remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr);
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
- if (remove_subflow)
- mptcp_pm_remove_subflow(msk, &list);
+
+ list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+ if (remove_subflow) {
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_nl_rm_subflow_received(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+ spin_lock_bh(&msk->pm.lock);
+ __mark_subflow_endp_available(msk, list.ids[0]);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
+ if (msk->mpc_endpoint_id == entry->addr.id)
+ msk->mpc_endpoint_id = 0;
release_sock(sk);
next:
@@ -1472,6 +1580,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
spin_lock_bh(&msk->pm.lock);
mptcp_pm_remove_addr(msk, &list);
mptcp_pm_nl_rm_subflow_received(msk, &list);
+ __mark_subflow_endp_available(msk, 0);
spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
@@ -1531,52 +1640,37 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
return ret;
}
-void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
-{
- struct mptcp_rm_list alist = { .nr = 0 };
- struct mptcp_pm_addr_entry *entry;
-
- list_for_each_entry(entry, rm_list, list) {
- if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
- }
-
- if (alist.nr) {
- spin_lock_bh(&msk->pm.lock);
- mptcp_pm_remove_addr(msk, &alist);
- spin_unlock_bh(&msk->pm.lock);
- }
-}
-
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list)
+static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, rm_list, list) {
- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
- slist.nr < MPTCP_RM_IDS_MAX)
- slist.ids[slist.nr++] = entry->addr.id;
+ if (slist.nr < MPTCP_RM_IDS_MAX &&
+ mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+ slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
+ if (alist.nr < MPTCP_RM_IDS_MAX &&
+ mptcp_remove_anno_list_by_saddr(msk, &entry->addr))
+ alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
}
+ spin_lock_bh(&msk->pm.lock);
if (alist.nr) {
- spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= alist.nr;
mptcp_pm_remove_addr(msk, &alist);
- spin_unlock_bh(&msk->pm.lock);
}
if (slist.nr)
- mptcp_pm_remove_subflow(msk, &slist);
+ mptcp_pm_nl_rm_subflow_received(msk, &slist);
+ /* Reset counters: maybe some subflows have been removed before */
+ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ msk->pm.local_addr_used = 0;
+ spin_unlock_bh(&msk->pm.lock);
}
-static void mptcp_nl_remove_addrs_list(struct net *net,
- struct list_head *rm_list)
+static void mptcp_nl_flush_addrs_list(struct net *net,
+ struct list_head *rm_list)
{
long s_slot = 0, s_num = 0;
struct mptcp_sock *msk;
@@ -1589,7 +1683,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
if (!mptcp_pm_is_userspace(msk)) {
lock_sock(sk);
- mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
+ mptcp_pm_flush_addrs_and_subflows(msk, rm_list);
release_sock(sk);
}
@@ -1630,14 +1724,14 @@ int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
pernet->next_id = 1;
bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
spin_unlock_bh(&pernet->lock);
- mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+ mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list);
synchronize_rcu();
__flush_addrs(&free_list);
return 0;
}
-static int mptcp_nl_fill_addr(struct sk_buff *skb,
- struct mptcp_pm_addr_entry *entry)
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry)
{
struct mptcp_addr_info *addr = &entry->addr;
struct nlattr *attr;
@@ -1675,7 +1769,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
@@ -1700,7 +1794,7 @@ int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- spin_lock_bh(&pernet->lock);
+ rcu_read_lock();
entry = __lookup_addr_by_id(pernet, addr.addr.id);
if (!entry) {
GENL_SET_ERR_MSG(info, "address not found");
@@ -1714,19 +1808,24 @@ int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
genlmsg_end(msg, reply);
ret = genlmsg_reply(msg, info);
- spin_unlock_bh(&pernet->lock);
+ rcu_read_unlock();
return ret;
unlock_fail:
- spin_unlock_bh(&pernet->lock);
+ rcu_read_unlock();
fail:
nlmsg_free(msg);
return ret;
}
-int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ return mptcp_pm_get_addr(skb, info);
+}
+
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
struct net *net = sock_net(msg->sk);
struct mptcp_pm_addr_entry *entry;
@@ -1737,7 +1836,7 @@ int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
pernet = pm_nl_get_pernet(net);
- spin_lock_bh(&pernet->lock);
+ rcu_read_lock();
for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
if (test_bit(i, pernet->id_bitmap)) {
entry = __lookup_addr_by_id(pernet, i);
@@ -1762,12 +1861,18 @@ int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
genlmsg_end(msg, hdr);
}
}
- spin_unlock_bh(&pernet->lock);
+ rcu_read_unlock();
cb->args[0] = id;
return msg->len;
}
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return mptcp_pm_dump_addr(msg, cb);
+}
+
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
struct nlattr *attr = info->attrs[id];
@@ -1845,10 +1950,11 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
{
struct mptcp_rm_list list = { .nr = 0 };
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, &list);
+ __mark_subflow_endp_available(msk, list.ids[0]);
mptcp_pm_create_subflow_or_signal_addr(msk);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1882,66 +1988,64 @@ next:
return ret;
}
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup)
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
- struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
+ struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
MPTCP_PM_ADDR_FLAG_FULLMESH;
+ struct net *net = sock_net(skb->sk);
struct mptcp_pm_addr_entry *entry;
+ struct pm_nl_pernet *pernet;
u8 lookup_by_id = 0;
+ u8 bkup = 0;
+ int ret;
+
+ pernet = pm_nl_get_pernet(net);
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
- if (addr->addr.family == AF_UNSPEC) {
+ if (addr.addr.family == AF_UNSPEC) {
lookup_by_id = 1;
- if (!addr->addr.id)
+ if (!addr.addr.id) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
return -EOPNOTSUPP;
+ }
}
+ if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
spin_lock_bh(&pernet->lock);
- entry = __lookup_addr(pernet, &addr->addr, lookup_by_id);
+ entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
+ __lookup_addr(pernet, &addr.addr);
if (!entry) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "address not found");
return -EINVAL;
}
- if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
- (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+ if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+ (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL |
+ MPTCP_PM_ADDR_FLAG_IMPLICIT))) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
return -EINVAL;
}
- changed = (addr->flags ^ entry->flags) & mask;
- entry->flags = (entry->flags & ~mask) | (addr->flags & mask);
- *addr = *entry;
+ changed = (addr.flags ^ entry->flags) & mask;
+ entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
+ addr = *entry;
spin_unlock_bh(&pernet->lock);
- mptcp_nl_set_flags(net, &addr->addr, bkup, changed);
+ mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
return 0;
}
int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
- struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
- struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct net *net = sock_net(skb->sk);
- u8 bkup = 0;
- int ret;
-
- ret = mptcp_pm_parse_entry(attr, info, false, &addr);
- if (ret < 0)
- return ret;
-
- if (attr_rem) {
- ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
- if (ret < 0)
- return ret;
- }
-
- if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
- bkup = 1;
-
- return mptcp_pm_set_flags(net, token, &addr, &remote, bkup);
+ return mptcp_pm_set_flags(skb, info);
}
static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
@@ -2014,7 +2118,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
const struct mptcp_subflow_context *sf;
u8 sk_err;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
return -EMSGSIZE;
if (mptcp_event_add_subflow(skb, ssk))
@@ -2072,7 +2176,7 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct mptcp_sock *msk,
const struct sock *ssk)
{
- int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
if (err)
return err;
@@ -2100,7 +2204,7 @@ void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
@@ -2135,7 +2239,7 @@ void mptcp_event_addr_announced(const struct sock *ssk,
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
@@ -2251,7 +2355,7 @@ void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
goto nla_put_failure;
break;
case MPTCP_EVENT_CLOSED:
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
goto nla_put_failure;
break;
case MPTCP_EVENT_ANNOUNCED:
@@ -2281,7 +2385,7 @@ nla_put_failure:
nlmsg_free(skb);
}
-static struct genl_family mptcp_genl_family __ro_after_init = {
+struct genl_family mptcp_genl_family __ro_after_init = {
.name = MPTCP_PM_NAME,
.version = MPTCP_PM_VER,
.netnsok = true,
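
Several pm_netlink.c hunks above replace the pernet spinlock with rcu_read_lock() on the endpoint get/dump paths, while writers keep the spinlock and switch to the _rcu list primitives. A minimal, self-contained sketch of that reader/writer split, using a hypothetical endp list rather than the real pernet structures:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/* Hypothetical endpoint entry, standing in for the pernet address list. */
struct endp {
        struct list_head list;
        struct rcu_head rcu;
        unsigned int id;
};

static LIST_HEAD(endp_list);
static DEFINE_SPINLOCK(endp_lock);      /* taken by writers only */

/* Read side (get/dump): callers only hold rcu_read_lock(). */
static struct endp *endp_lookup_rcu(unsigned int id)
{
        struct endp *e;

        list_for_each_entry_rcu(e, &endp_list, list)
                if (e->id == id)
                        return e;
        return NULL;
}

/* Write side: serialize on the spinlock, free only after a grace period. */
static void endp_del(unsigned int id)
{
        struct endp *e, *found = NULL;

        spin_lock_bh(&endp_lock);
        list_for_each_entry(e, &endp_list, list) {
                if (e->id == id) {
                        found = e;
                        break;
                }
        }
        if (found)
                list_del_rcu(&found->list);
        spin_unlock_bh(&endp_lock);

        if (found)
                kfree_rcu(found, rcu);
}

Readers wrap lookups in rcu_read_lock()/rcu_read_unlock(), which is why the get and dump handlers above can drop the pernet spinlock entirely while deletions stay serialized on it.
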
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index bc97cc30f013..a3d477059b11 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -6,6 +6,11 @@
#include "protocol.h"
#include "mib.h"
+#include "mptcp_pm_gen.h"
+
+#define mptcp_for_each_userspace_pm_addr(__msk, __entry) \
+ list_for_each_entry(__entry, \
+ &((__msk)->pm.userspace_pm_local_addr_list), list)
void mptcp_free_local_addr_list(struct mptcp_sock *msk)
{
@@ -25,6 +30,19 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
}
}
+static struct mptcp_pm_addr_entry *
+mptcp_userspace_pm_lookup_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ mptcp_for_each_userspace_pm_addr(msk, entry) {
+ if (mptcp_addresses_equal(&entry->addr, addr, false))
+ return entry;
+ }
+ return NULL;
+}
+
static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *entry,
bool needs_id)
@@ -40,7 +58,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
bitmap_zero(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ mptcp_for_each_userspace_pm_addr(msk, e) {
addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
if (addr_match && entry->addr.id == 0 && needs_id)
entry->addr.id = e->addr.id;
@@ -89,59 +107,43 @@ append_err:
static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *addr)
{
- struct mptcp_pm_addr_entry *entry, *tmp;
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_addr_entry *entry;
- list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
- if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
- /* TODO: a refcount is needed because the entry can
- * be used multiple times (e.g. fullmesh mode).
- */
- list_del_rcu(&entry->list);
- kfree(entry);
- msk->pm.local_addr_used--;
- return 0;
- }
- }
+ entry = mptcp_userspace_pm_lookup_addr(msk, &addr->addr);
+ if (!entry)
+ return -EINVAL;
- return -EINVAL;
+ /* TODO: a refcount is needed because the entry can
+ * be used multiple times (e.g. fullmesh mode).
+ */
+ list_del_rcu(&entry->list);
+ sock_kfree_s(sk, entry, sizeof(*entry));
+ msk->pm.local_addr_used--;
+ return 0;
}
-int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
- unsigned int id,
- u8 *flags, int *ifindex)
+static struct mptcp_pm_addr_entry *
+mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id)
{
- struct mptcp_pm_addr_entry *entry, *match = NULL;
+ struct mptcp_pm_addr_entry *entry;
- spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (id == entry->addr.id) {
- match = entry;
- break;
- }
- }
- spin_unlock_bh(&msk->pm.lock);
- if (match) {
- *flags = match->flags;
- *ifindex = match->ifindex;
+ mptcp_for_each_userspace_pm_addr(msk, entry) {
+ if (entry->addr.id == id)
+ return entry;
}
-
- return 0;
+ return NULL;
}
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
struct mptcp_addr_info *skc)
{
- struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry;
+ struct mptcp_pm_addr_entry *entry = NULL, new_entry;
__be16 msk_sport = ((struct inet_sock *)
inet_sk((struct sock *)msk))->inet_sport;
spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
- if (mptcp_addresses_equal(&e->addr, skc, false)) {
- entry = e;
- break;
- }
- }
+ entry = mptcp_userspace_pm_lookup_addr(msk, skc);
spin_unlock_bh(&msk->pm.lock);
if (entry)
return entry->addr.id;
@@ -157,36 +159,64 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}
-int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+{
+ struct mptcp_pm_addr_entry *entry;
+ bool backup;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr(msk, skc);
+ backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ spin_unlock_bh(&msk->pm.lock);
+
+ return backup;
+}
+
+static struct mptcp_sock *mptcp_userspace_pm_get_sock(const struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct mptcp_sock *msk;
+
+ if (!token) {
+ GENL_SET_ERR_MSG(info, "missing required token");
+ return NULL;
+ }
+
+ msk = mptcp_token_get_sock(genl_info_net(info), nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return NULL;
+ }
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ sock_put((struct sock *)msk);
+ return NULL;
+ }
+
+ return msk;
+}
+
+int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
+{
struct nlattr *addr = info->attrs[MPTCP_PM_ATTR_ADDR];
struct mptcp_pm_addr_entry addr_val;
struct mptcp_sock *msk;
int err = -EINVAL;
struct sock *sk;
- u32 token_val;
- if (!addr || !token) {
- GENL_SET_ERR_MSG(info, "missing required inputs");
+ if (!addr) {
+ GENL_SET_ERR_MSG(info, "missing required address");
return err;
}
- token_val = nla_get_u32(token);
-
- msk = mptcp_token_get_sock(sock_net(skb->sk), token_val);
- if (!msk) {
- NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
return err;
- }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk)) {
- GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
- goto announce_err;
- }
-
err = mptcp_pm_parse_entry(addr, info, true, &addr_val);
if (err < 0) {
GENL_SET_ERR_MSG(info, "error parsing local address");
@@ -257,40 +287,48 @@ remove_err:
return err;
}
+void mptcp_pm_remove_addr_entry(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct mptcp_rm_list alist = { .nr = 0 };
+ int anno_nr = 0;
+
+ /* only delete if either announced or matching a subflow */
+ if (mptcp_remove_anno_list_by_saddr(msk, &entry->addr))
+ anno_nr++;
+ else if (!mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+ return;
+
+ alist.ids[alist.nr++] = entry->addr.id;
+
+ spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= anno_nr;
+ mptcp_pm_remove_addr(msk, &alist);
+ spin_unlock_bh(&msk->pm.lock);
+}
+
int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
- struct mptcp_pm_addr_entry *match = NULL;
- struct mptcp_pm_addr_entry *entry;
+ struct mptcp_pm_addr_entry *match;
struct mptcp_sock *msk;
- LIST_HEAD(free_list);
int err = -EINVAL;
struct sock *sk;
- u32 token_val;
u8 id_val;
- if (!id || !token) {
- GENL_SET_ERR_MSG(info, "missing required inputs");
+ if (!id) {
+ GENL_SET_ERR_MSG(info, "missing required ID");
return err;
}
id_val = nla_get_u8(id);
- token_val = nla_get_u32(token);
- msk = mptcp_token_get_sock(sock_net(skb->sk), token_val);
- if (!msk) {
- NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
return err;
- }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk)) {
- GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
- goto out;
- }
-
if (id_val == 0) {
err = mptcp_userspace_pm_remove_id_zero_address(msk, info);
goto out;
@@ -298,28 +336,23 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (entry->addr.id == id_val) {
- match = entry;
- break;
- }
- }
-
+ spin_lock_bh(&msk->pm.lock);
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
if (!match) {
GENL_SET_ERR_MSG(info, "address with specified id not found");
+ spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
goto out;
}
- list_move(&match->list, &free_list);
+ list_del_rcu(&match->list);
+ spin_unlock_bh(&msk->pm.lock);
- mptcp_pm_remove_addrs(msk, &free_list);
+ mptcp_pm_remove_addr_entry(msk, match);
release_sock(sk);
- list_for_each_entry_safe(match, entry, &free_list, list) {
- sock_kfree_s(sk, match, sizeof(*match));
- }
+ sock_kfree_s(sk, match, sizeof(*match));
err = 0;
out:
@@ -330,41 +363,37 @@ out:
int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct mptcp_pm_addr_entry local = { 0 };
+ struct mptcp_pm_addr_entry entry = { 0 };
struct mptcp_addr_info addr_r;
- struct mptcp_addr_info addr_l;
+ struct mptcp_pm_local local;
struct mptcp_sock *msk;
int err = -EINVAL;
struct sock *sk;
- u32 token_val;
- if (!laddr || !raddr || !token) {
- GENL_SET_ERR_MSG(info, "missing required inputs");
+ if (!laddr || !raddr) {
+ GENL_SET_ERR_MSG(info, "missing required address(es)");
return err;
}
- token_val = nla_get_u32(token);
-
- msk = mptcp_token_get_sock(genl_info_net(info), token_val);
- if (!msk) {
- NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
return err;
- }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk)) {
- GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ err = mptcp_pm_parse_entry(laddr, info, true, &entry);
+ if (err < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
goto create_err;
}
- err = mptcp_pm_parse_addr(laddr, info, &addr_l);
- if (err < 0) {
- NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
+ if (entry.flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
+ err = -EINVAL;
goto create_err;
}
+ entry.flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
err = mptcp_pm_parse_addr(raddr, info, &addr_r);
if (err < 0) {
@@ -372,28 +401,29 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
goto create_err;
}
- if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+ if (!mptcp_pm_addr_families_match(sk, &entry.addr, &addr_r)) {
GENL_SET_ERR_MSG(info, "families mismatch");
err = -EINVAL;
goto create_err;
}
- local.addr = addr_l;
- err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &entry, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
goto create_err;
}
- lock_sock(sk);
-
- err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+ local.addr = entry.addr;
+ local.flags = entry.flags;
+ local.ifindex = entry.ifindex;
+ lock_sock(sk);
+ err = __mptcp_subflow_connect(sk, &local, &addr_r);
release_sock(sk);
spin_lock_bh(&msk->pm.lock);
if (err)
- mptcp_userspace_pm_delete_local_addr(msk, &local);
+ mptcp_userspace_pm_delete_local_addr(msk, &entry);
else
msk->pm.subflows++;
spin_unlock_bh(&msk->pm.lock);
@@ -454,36 +484,25 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk,
int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct mptcp_addr_info addr_l;
+ struct mptcp_pm_addr_entry addr_l;
struct mptcp_addr_info addr_r;
struct mptcp_sock *msk;
struct sock *sk, *ssk;
int err = -EINVAL;
- u32 token_val;
- if (!laddr || !raddr || !token) {
- GENL_SET_ERR_MSG(info, "missing required inputs");
+ if (!laddr || !raddr) {
+ GENL_SET_ERR_MSG(info, "missing required address(es)");
return err;
}
- token_val = nla_get_u32(token);
-
- msk = mptcp_token_get_sock(genl_info_net(info), token_val);
- if (!msk) {
- NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
return err;
- }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk)) {
- GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
- goto destroy_err;
- }
-
- err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ err = mptcp_pm_parse_entry(laddr, info, true, &addr_l);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
goto destroy_err;
@@ -496,43 +515,41 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if (addr_l.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
- ipv6_addr_set_v4mapped(addr_l.addr.s_addr, &addr_l.addr6);
- addr_l.family = AF_INET6;
+ if (addr_l.addr.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
+ ipv6_addr_set_v4mapped(addr_l.addr.addr.s_addr, &addr_l.addr.addr6);
+ addr_l.addr.family = AF_INET6;
}
- if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr6)) {
+ if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr.addr6)) {
ipv6_addr_set_v4mapped(addr_r.addr.s_addr, &addr_r.addr6);
addr_r.family = AF_INET6;
}
#endif
- if (addr_l.family != addr_r.family) {
+ if (addr_l.addr.family != addr_r.family) {
GENL_SET_ERR_MSG(info, "address families do not match");
err = -EINVAL;
goto destroy_err;
}
- if (!addr_l.port || !addr_r.port) {
+ if (!addr_l.addr.port || !addr_r.port) {
GENL_SET_ERR_MSG(info, "missing local or remote port");
err = -EINVAL;
goto destroy_err;
}
lock_sock(sk);
- ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
- if (ssk) {
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- struct mptcp_pm_addr_entry entry = { .addr = addr_l };
-
- spin_lock_bh(&msk->pm.lock);
- mptcp_userspace_pm_delete_local_addr(msk, &entry);
- spin_unlock_bh(&msk->pm.lock);
- mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
- mptcp_close_ssk(sk, ssk, subflow);
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
- err = 0;
- } else {
+ ssk = mptcp_nl_find_ssk(msk, &addr_l.addr, &addr_r);
+ if (!ssk) {
err = -ESRCH;
+ goto release_sock;
}
+
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_userspace_pm_delete_local_addr(msk, &addr_l);
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+ mptcp_close_ssk(sk, ssk, mptcp_subflow_ctx(ssk));
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
+release_sock:
release_sock(sk);
destroy_err:
@@ -540,35 +557,173 @@ destroy_err:
return err;
}
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
+ struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, };
+ struct mptcp_pm_addr_entry rem = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct mptcp_pm_addr_entry *entry;
struct mptcp_sock *msk;
int ret = -EINVAL;
struct sock *sk;
- u32 token_val;
+ u8 bkup = 0;
- token_val = nla_get_u32(token);
-
- msk = mptcp_token_get_sock(net, token_val);
+ msk = mptcp_userspace_pm_get_sock(info);
if (!msk)
return ret;
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk))
+ ret = mptcp_pm_parse_entry(attr, info, false, &loc);
+ if (ret < 0)
goto set_flags_err;
- if (loc->addr.family == AF_UNSPEC ||
- rem->addr.family == AF_UNSPEC)
+ if (attr_rem) {
+ ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
+ if (ret < 0)
+ goto set_flags_err;
+ }
+
+ if (loc.addr.family == AF_UNSPEC ||
+ rem.addr.family == AF_UNSPEC) {
+ GENL_SET_ERR_MSG(info, "invalid address families");
+ ret = -EINVAL;
goto set_flags_err;
+ }
+
+ if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr(msk, &loc.addr);
+ if (entry) {
+ if (bkup)
+ entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
+ else
+ entry->flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
+ }
+ spin_unlock_bh(&msk->pm.lock);
lock_sock(sk);
- ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+ ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup);
release_sock(sk);
set_flags_err:
sock_put(sk);
return ret;
}
+
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct id_bitmap {
+ DECLARE_BITMAP(map, MPTCP_PM_MAX_ADDR_ID + 1);
+ } *bitmap;
+ const struct genl_info *info = genl_info_dump(cb);
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *hdr;
+
+ bitmap = (struct id_bitmap *)cb->ctx;
+
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
+ return ret;
+
+ sk = (struct sock *)msk;
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_for_each_userspace_pm_addr(msk, entry) {
+ if (test_bit(entry->addr.id, bitmap->map))
+ continue;
+
+ hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &mptcp_genl_family,
+ NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
+ if (!hdr)
+ break;
+
+ if (mptcp_nl_fill_addr(msg, entry) < 0) {
+ genlmsg_cancel(msg, hdr);
+ break;
+ }
+
+ __set_bit(entry->addr.id, bitmap->map);
+ genlmsg_end(msg, hdr);
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ ret = msg->len;
+
+ sock_put(sk);
+ return ret;
+}
+
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
+ struct mptcp_pm_addr_entry addr, *entry;
+ struct mptcp_sock *msk;
+ struct sk_buff *msg;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *reply;
+
+ msk = mptcp_userspace_pm_get_sock(info);
+ if (!msk)
+ return ret;
+
+ sk = (struct sock *)msk;
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ goto out;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ info->genlhdr->cmd);
+ if (!reply) {
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ ret = -EMSGSIZE;
+ goto fail;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr_by_id(msk, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto unlock_fail;
+ }
+
+ ret = mptcp_nl_fill_addr(msg, entry);
+ if (ret)
+ goto unlock_fail;
+
+ genlmsg_end(msg, reply);
+ ret = genlmsg_reply(msg, info);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ sock_put(sk);
+ return ret;
+
+unlock_fail:
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+fail:
+ nlmsg_free(msg);
+out:
+ sock_put(sk);
+ return ret;
+}
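
The pm_userspace.c changes above funnel the token lookup, the userspace-PM check, and the extack error reporting into mptcp_userspace_pm_get_sock(). A hedged sketch of the shape each *_doit() handler now takes (the handler name and the elided body are illustrative, not part of the patch):

static int demo_pm_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct mptcp_sock *msk;
        int err = -EINVAL;

        /* Validates MPTCP_PM_ATTR_TOKEN and that the userspace PM is in
         * use; on failure the extack message is already set.
         */
        msk = mptcp_userspace_pm_get_sock(info);
        if (!msk)
                return err;

        /* ... parse the address attributes and act on msk ... */
        err = 0;

        sock_put((struct sock *)msk);   /* drop the ref taken by the helper */
        return err;
}
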
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7833a49f6214..6bd819047470 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -15,12 +15,12 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
+#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
@@ -47,7 +47,7 @@ static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);
DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
-static struct net_device mptcp_napi_dev;
+static struct net_device *mptcp_napi_dev;
/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
@@ -136,10 +136,11 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
int delta;
if (MPTCP_SKB_CB(from)->offset ||
+ ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
- pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
@@ -217,7 +218,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
end_seq = MPTCP_SKB_CB(skb)->end_seq;
max_seq = atomic64_read(&msk->rcv_wnd_sent);
- pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+ pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
RB_EMPTY_ROOT(&msk->out_of_order_queue));
if (after64(end_seq, max_seq)) {
/* out of window */
@@ -350,8 +351,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
skb_orphan(skb);
/* try to fetch required memory from subflow */
- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
+ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
goto drop;
+ }
has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
@@ -410,6 +413,7 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -441,16 +445,17 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
}
}
+/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
struct mptcp_sock *msk = mptcp_sk(sk);
if (READ_ONCE(msk->rcv_data_fin) &&
- ((1 << sk->sk_state) &
+ ((1 << inet_sk_state_load(sk)) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
- if (msk->ack_seq == rcv_data_fin_seq) {
+ if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
if (seq)
*seq = rcv_data_fin_seq;
@@ -524,13 +529,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
bool slow;
slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk))
- tcp_cleanup_rbuf(ssk, 1);
+ tcp_cleanup_rbuf(ssk, copied);
unlock_sock_fast(ssk, slow);
}
@@ -547,7 +552,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}
-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
@@ -555,14 +560,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
int space = __mptcp_space(sk);
bool cleanup, rx_empty;
- cleanup = (space > 0) && (space >= (old_space << 1));
- rx_empty = !__mptcp_rmem(sk);
+ cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
+ rx_empty = !__mptcp_rmem(sk) && copied;
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
- mptcp_subflow_cleanup_rbuf(ssk);
+ mptcp_subflow_cleanup_rbuf(ssk, copied);
}
}
@@ -616,6 +621,18 @@ static bool mptcp_check_data_fin(struct sock *sk)
return ret;
}
+static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
+{
+ if (READ_ONCE(msk->allow_infinite_fallback)) {
+ MPTCP_INC_STATS(sock_net(ssk),
+ MPTCP_MIB_DSSCORRUPTIONFALLBACK);
+ mptcp_do_fallback(ssk);
+ } else {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
+ mptcp_subflow_reset(ssk);
+ }
+}
+
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct sock *ssk,
unsigned int *bytes)
@@ -639,7 +656,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
}
- pr_debug("msk=%p ssk=%p", msk, ssk);
+ pr_debug("msk=%p ssk=%p\n", msk, ssk);
tp = tcp_sk(ssk);
do {
u32 map_remaining, offset;
@@ -688,10 +705,16 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
moved += len;
seq += len;
- if (WARN_ON_ONCE(map_remaining < len))
- break;
+ if (unlikely(map_remaining < len)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ mptcp_dss_corruption(msk, ssk);
+ }
} else {
- WARN_ON_ONCE(!fin);
+ if (unlikely(!fin)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ mptcp_dss_corruption(msk, ssk);
+ }
+
sk_eat_skb(ssk, skb);
done = true;
}
@@ -705,6 +728,8 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
} while (more_data_avail);
+ if (moved > 0)
+ msk->last_data_recv = tcp_jiffies32;
*bytes += moved;
return done;
}
@@ -718,7 +743,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
u64 end_seq;
p = rb_first(&msk->out_of_order_queue);
- pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
while (p) {
skb = rb_to_skb(p);
if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
@@ -740,7 +765,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
/* skip overlapping data, if any */
- pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
delta);
MPTCP_SKB_CB(skb)->offset += delta;
@@ -748,7 +773,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
msk->bytes_received += end_seq - msk->ack_seq;
- msk->ack_seq = end_seq;
+ WRITE_ONCE(msk->ack_seq, end_seq);
moved = true;
}
return moved;
@@ -840,10 +865,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
- if (__mptcp_rmem(sk) > sk_rbuf) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ if (__mptcp_rmem(sk) > sk_rbuf)
return;
- }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -985,6 +1008,7 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1033,13 +1057,15 @@ static void __mptcp_clean_una(struct sock *sk)
msk->recovery = false;
out:
- if (snd_una == READ_ONCE(msk->snd_nxt) &&
- snd_una == READ_ONCE(msk->write_seq)) {
+ if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
mptcp_stop_rtx_timer(sk);
} else {
mptcp_reset_rtx_timer(sk);
}
+
+ if (mptcp_pending_data_fin_ack(sk))
+ mptcp_schedule_work(sk);
}
static void __mptcp_clean_una_wakeup(struct sock *sk)
@@ -1233,7 +1259,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
size_t copy;
int i;
- pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+ pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
if (WARN_ON_ONCE(info->sent > info->limit ||
@@ -1266,7 +1292,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
- if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
+ if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
tcp_mark_push(tcp_sk(ssk), skb);
goto alloc_skb;
}
@@ -1334,7 +1360,7 @@ alloc_skb:
mpext->use_map = 1;
mpext->dsn64 = 1;
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
mpext->data_seq, mpext->subflow_seq, mpext->data_len,
mpext->dsn64);
@@ -1415,13 +1441,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
mptcp_for_each_subflow(msk, subflow) {
+ bool backup = subflow->backup || subflow->request_bkup;
+
trace_mptcp_subflow_get_send(subflow);
ssk = mptcp_subflow_tcp_sock(subflow);
if (!mptcp_subflow_active(subflow))
continue;
tout = max(tout, mptcp_timeout_from_subflow(subflow));
- nr_active += !subflow->backup;
+ nr_active += !backup;
pace = subflow->avg_pacing_rate;
if (unlikely(!pace)) {
/* init pacing rate from socket */
@@ -1432,9 +1460,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
- if (linger_time < send_info[subflow->backup].linger_time) {
- send_info[subflow->backup].ssk = ssk;
- send_info[subflow->backup].linger_time = linger_time;
+ if (linger_time < send_info[backup].linger_time) {
+ send_info[backup].ssk = ssk;
+ send_info[backup].linger_time = linger_time;
}
}
__mptcp_set_timeout(sk, tout);
@@ -1500,7 +1528,7 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
*/
if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
- msk->snd_nxt = snd_nxt_new;
+ WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
}
}
@@ -1552,6 +1580,8 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
err = copied;
out:
+ if (err > 0)
+ msk->last_data_sent = tcp_jiffies32;
return err;
}
@@ -1687,15 +1717,6 @@ out:
}
}
-static void mptcp_set_nospace(struct sock *sk)
-{
- /* enable autotune */
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-
- /* will be cleared on avail space */
- set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
-}
-
static int mptcp_disconnect(struct sock *sk, int flags);
static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
@@ -1746,8 +1767,10 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
* see mptcp_disconnect().
* Attempt it again outside the problematic scope.
*/
- if (!mptcp_disconnect(sk, 0))
+ if (!mptcp_disconnect(sk, 0)) {
+ sk->sk_disconnects++;
sk->sk_socket->state = SS_UNCONNECTED;
+ }
}
inet_clear_bit(DEFER_CONNECT, sk);
@@ -1766,6 +1789,30 @@ static int do_copy_data_nocache(struct sock *sk, int copy,
return 0;
}
+/* open-code sk_stream_memory_free() plus sent limit computation to
+ * avoid indirect calls in fast-path.
+ * Called under the msk socket lock, so we can avoid a bunch of ONCE
+ * annotations.
+ */
+static u32 mptcp_send_limit(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 limit, not_sent;
+
+ if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
+ return 0;
+
+ limit = mptcp_notsent_lowat(sk);
+ if (limit == UINT_MAX)
+ return UINT_MAX;
+
+ not_sent = msk->write_seq - msk->snd_nxt;
+ if (not_sent >= limit)
+ return 0;
+
+ return limit - not_sent;
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
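
To make the new notsent_lowat bound concrete, a worked example with assumed values (not taken from the patch): if sk_sndbuf still has room, mptcp_notsent_lowat() returns 128 KiB and write_seq - snd_nxt shows 96 KiB already queued but unsent, then mptcp_send_limit() returns 32 KiB and the sendmsg() loop below copies at most that much in the current iteration; once the unsent backlog reaches the lowat value the helper returns 0 and the sender takes the wait_for_memory path.
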
@@ -1810,6 +1857,12 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct mptcp_data_frag *dfrag;
bool dfrag_collapsed;
size_t psize, offset;
+ u32 copy_limit;
+
+ /* ensure fitting the notsent_lowat() constraint */
+ copy_limit = mptcp_send_limit(sk);
+ if (!copy_limit)
+ goto wait_for_memory;
/* reuse tail pfrag, if possible, or carve a new one from the
* page allocator
@@ -1817,9 +1870,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dfrag = mptcp_pending_tail(sk);
dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
if (!dfrag_collapsed) {
- if (!sk_stream_memory_free(sk))
- goto wait_for_memory;
-
if (!mptcp_page_frag_refill(sk, pfrag))
goto wait_for_memory;
@@ -1834,6 +1884,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
offset = dfrag->offset + dfrag->data_len;
psize = pfrag->size - offset;
psize = min_t(size_t, psize, msg_data_left(msg));
+ psize = min_t(size_t, psize, copy_limit);
total_ts = psize + frag_truesize;
if (!sk_wmem_schedule(sk, total_ts))
@@ -1862,14 +1913,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!msk->first_pending)
WRITE_ONCE(msk->first_pending, dfrag);
}
- pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+ pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
!dfrag_collapsed);
continue;
wait_for_memory:
- mptcp_set_nospace(sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
__mptcp_push_pending(sk, msg->msg_flags);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
@@ -1891,6 +1942,8 @@ do_error:
goto out;
}
+static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
+
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
@@ -1944,6 +1997,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
break;
}
+ mptcp_rcv_space_adjust(msk, copied);
return copied;
}
@@ -2012,13 +2066,13 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
do_div(grow, msk->rcvq_space.space);
rcvwin += (grow << 1);
- rcvbuf = min_t(u64, __tcp_space_from_win(scaling_ratio, rcvwin),
+ rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin),
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (rcvbuf > sk->sk_rcvbuf) {
u32 window_clamp;
- window_clamp = __tcp_win_from_space(scaling_ratio, rcvbuf);
+ window_clamp = mptcp_win_from_space(sk, rcvbuf);
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make subflows follow along. If we do not do this, we
@@ -2033,8 +2087,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
ssk = mptcp_subflow_tcp_sock(subflow);
slow = lock_sock_fast(ssk);
WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
- tcp_sk(ssk)->window_clamp = window_clamp;
- tcp_cleanup_rbuf(ssk, 1);
+ WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
+ if (tcp_can_send_ack(ssk))
+ tcp_cleanup_rbuf(ssk, 1);
unlock_sock_fast(ssk, slow);
}
}
@@ -2115,7 +2170,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
skb = skb_peek(&msk->receive_queue);
if (skb) {
- u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
if (hint_val >= INT_MAX)
return INT_MAX;
@@ -2157,7 +2212,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
cmsg_flags = MPTCP_CMSG_INQ;
while (copied < len) {
- int bytes_read;
+ int err, bytes_read;
bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
if (unlikely(bytes_read < 0)) {
@@ -2168,13 +2223,10 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- /* be sure to advertise window change */
- mptcp_cleanup_rbuf(msk);
-
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
continue;
- /* only the master socket status is relevant here. The exit
+ /* only the MPTCP socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
*/
if (copied >= target)
@@ -2218,10 +2270,17 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- pr_debug("block timeout %ld", timeo);
- sk_wait_data(sk, &timeo, NULL);
+ pr_debug("block timeout %ld\n", timeo);
+ mptcp_cleanup_rbuf(msk, copied);
+ err = sk_wait_data(sk, &timeo, NULL);
+ if (err < 0) {
+ err = copied ? : err;
+ goto out_err;
+ }
}
+ mptcp_cleanup_rbuf(msk, copied);
+
out_err:
if (cmsg_flags && copied >= 0) {
if (cmsg_flags & MPTCP_CMSG_TS)
@@ -2234,11 +2293,9 @@ out_err:
}
}
- pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+ pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
skb_queue_empty(&msk->receive_queue), copied);
- if (!(flags & MSG_PEEK))
- mptcp_rcv_space_adjust(msk, copied);
release_sock(sk);
return copied;
@@ -2296,7 +2353,7 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
continue;
}
- if (subflow->backup) {
+ if (subflow->backup || subflow->request_bkup) {
if (!backup)
backup = ssk;
continue;
@@ -2478,6 +2535,12 @@ out:
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow)
{
+ /* The first subflow can already be closed and still in the list */
+ if (subflow->close_event_done)
+ return;
+
+ subflow->close_event_done = true;
+
if (sk->sk_state == TCP_ESTABLISHED)
mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
@@ -2503,8 +2566,11 @@ static void __mptcp_close_subflow(struct sock *sk)
mptcp_for_each_subflow_safe(msk, subflow, tmp) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int ssk_state = inet_sk_state_load(ssk);
- if (inet_sk_state_load(ssk) != TCP_CLOSE)
+ if (ssk_state != TCP_CLOSE &&
+ (ssk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2542,7 +2608,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
slow = lock_sock_fast(tcp_sk);
if (tcp_sk->sk_state != TCP_CLOSE) {
- tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
+ mptcp_send_active_reset_reason(tcp_sk);
tcp_set_state(tcp_sk, TCP_CLOSE);
}
unlock_sock_fast(tcp_sk, slow);
@@ -2665,8 +2731,8 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
return;
- close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
- mptcp_close_timeout(sk);
+ close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
+ tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
/* the close timeout takes precedence on the fail one, and here at least one of
* them is active
@@ -2684,7 +2750,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
if (!ssk)
return;
- pr_debug("MP_FAIL doesn't respond, reset the subflow");
+ pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
slow = lock_sock_fast(ssk);
mptcp_subflow_reset(ssk);
@@ -2759,7 +2825,7 @@ static void __mptcp_init_sock(struct sock *sk)
__skb_queue_head_init(&msk->receive_queue);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
- msk->rmem_fwd_alloc = 0;
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
WRITE_ONCE(msk->rmem_released, 0);
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
@@ -2770,6 +2836,9 @@ static void __mptcp_init_sock(struct sock *sk)
WRITE_ONCE(msk->allow_infinite_fallback, true);
msk->recovery = false;
msk->subflow_id = 1;
+ msk->last_data_sent = tcp_jiffies32;
+ msk->last_data_recv = tcp_jiffies32;
+ msk->last_ack_recv = tcp_jiffies32;
mptcp_pm_data_init(msk);
@@ -2783,7 +2852,8 @@ static void mptcp_ca_reset(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_assign_congestion_control(sk);
- strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+ strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
+ sizeof(mptcp_sk(sk)->ca_name));
/* no need to keep a reference to the ops, the name will suffice */
tcp_cleanup_congestion_control(sk);
@@ -2803,8 +2873,10 @@ static int mptcp_init_sock(struct sock *sk)
if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
return -ENOMEM;
+ rcu_read_lock();
ret = mptcp_init_sched(mptcp_sk(sk),
mptcp_sched_find(mptcp_get_scheduler(net)));
+ rcu_read_unlock();
if (ret)
return ret;
@@ -2854,7 +2926,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
break;
default:
if (__mptcp_check_fallback(mptcp_sk(sk))) {
- pr_debug("Fallback");
+ pr_debug("Fallback\n");
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
@@ -2864,7 +2936,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
mptcp_schedule_work(sk);
} else {
- pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
tcp_send_ack(ssk);
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
@@ -2884,9 +2956,14 @@ void mptcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
break;
-
+ case TCP_CLOSE_WAIT:
+ /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+ * MPTCP "accepted" sockets will be created later on. So no
+ * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+ */
+ break;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
}
@@ -2925,7 +3002,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
+ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
msk->snd_nxt, msk->write_seq);
@@ -2949,7 +3026,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
+ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
!!mptcp_send_head(sk));
@@ -2964,7 +3041,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
might_sleep();
@@ -2975,7 +3052,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk->sk_prot->destroy(sk);
- WARN_ON_ONCE(msk->rmem_fwd_alloc);
+ WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
WARN_ON_ONCE(msk->rmem_released);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
@@ -3072,9 +3149,8 @@ cleanup:
mptcp_set_state(sk, TCP_CLOSE);
sock_hold(sk);
- pr_debug("msk=%p state=%d", sk, sk->sk_state);
- if (msk->token)
- mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+ pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+ mptcp_pm_connection_closed(msk);
if (sk->sk_state == TCP_CLOSE) {
__mptcp_destroy_sock(sk);
@@ -3140,8 +3216,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
mptcp_stop_rtx_timer(sk);
mptcp_stop_tout_timer(sk);
- if (msk->token)
- mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+ mptcp_pm_connection_closed(msk);
/* msk->subflow is still intact, the following will not free the first
* subflow
@@ -3150,16 +3225,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
WRITE_ONCE(msk->flags, 0);
msk->cb_flags = 0;
msk->recovery = false;
- msk->can_ack = false;
- msk->fully_established = false;
- msk->rcv_data_fin = false;
- msk->snd_data_fin_enable = false;
- msk->rcv_fastclose = false;
- msk->use_64bit_ack = false;
- msk->bytes_consumed = 0;
+ WRITE_ONCE(msk->can_ack, false);
+ WRITE_ONCE(msk->fully_established, false);
+ WRITE_ONCE(msk->rcv_data_fin, false);
+ WRITE_ONCE(msk->snd_data_fin_enable, false);
+ WRITE_ONCE(msk->rcv_fastclose, false);
+ WRITE_ONCE(msk->use_64bit_ack, false);
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_reset(msk);
mptcp_ca_reset(sk);
+ msk->bytes_consumed = 0;
msk->bytes_acked = 0;
msk->bytes_received = 0;
msk->bytes_sent = 0;
@@ -3250,17 +3325,17 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
mptcp_copy_ip_options(nsk, sk);
msk = mptcp_sk(nsk);
- msk->local_key = subflow_req->local_key;
- msk->token = subflow_req->token;
+ WRITE_ONCE(msk->local_key, subflow_req->local_key);
+ WRITE_ONCE(msk->token, subflow_req->token);
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
- msk->write_seq = subflow_req->idsn + 1;
- msk->snd_nxt = msk->write_seq;
- msk->snd_una = msk->write_seq;
- msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
+ WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
+ WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
mptcp_init_sched(msk, mptcp_sk(sk)->sched);
@@ -3363,9 +3438,6 @@ void __mptcp_data_acked(struct sock *sk)
__mptcp_clean_una(sk);
else
__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
-
- if (mptcp_pending_data_fin_ack(sk))
- mptcp_schedule_work(sk);
}
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
@@ -3448,7 +3520,7 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
struct tcp_sock *tp = tcp_sk(ssk);
unsigned long timeout;
- if (mptcp_subflow_ctx(ssk)->fully_established)
+ if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
return;
/* reschedule with a timeout above RTT, as we must look only for drop */
@@ -3459,7 +3531,8 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
timeout += jiffies;
WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ smp_store_release(&icsk->icsk_ack.pending,
+ icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
icsk->icsk_ack.timeout = timeout;
sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
}
@@ -3492,7 +3565,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status)
static int mptcp_hash(struct sock *sk)
{
/* should never be called,
- * we hash the TCP subflows not the master socket
+ * we hash the TCP subflows not the MPTCP socket
*/
WARN_ON_ONCE(1);
return 0;
@@ -3507,7 +3580,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p, ssk=%p", msk, msk->first);
+ pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
if (WARN_ON_ONCE(!msk->first))
return -EINVAL;
@@ -3524,7 +3597,7 @@ void mptcp_finish_connect(struct sock *ssk)
sk = subflow->conn;
msk = mptcp_sk(sk);
- pr_debug("msk=%p, token=%u", sk, subflow->token);
+ pr_debug("msk=%p, token=%u\n", sk, subflow->token);
subflow->map_seq = subflow->iasn;
subflow->map_subflow_seq = 1;
@@ -3553,7 +3626,7 @@ bool mptcp_finish_join(struct sock *ssk)
struct sock *parent = (void *)msk;
bool ret = true;
- pr_debug("msk=%p, subflow=%p", msk, subflow);
+ pr_debug("msk=%p, subflow=%p\n", msk, subflow);
/* mptcp socket already closing? */
if (!mptcp_is_fully_established(parent)) {
@@ -3599,7 +3672,7 @@ err_prohibited:
static void mptcp_shutdown(struct sock *sk, int how)
{
- pr_debug("sk=%p, how=%d", sk, how);
+ pr_debug("sk=%p, how=%d\n", sk, how);
if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
__mptcp_wr_shutdown(sk);
@@ -3672,13 +3745,6 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
return 0;
}
-static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
-{
- subflow->request_mptcp = 0;
- __mptcp_do_fallback(msk);
-}
-
static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct mptcp_subflow_context *subflow;
@@ -3699,10 +3765,19 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
mptcp_subflow_early_fallback(msk, subflow);
#endif
- if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
- mptcp_subflow_early_fallback(msk, subflow);
+ if (subflow->request_mptcp) {
+ if (mptcp_active_should_disable(sk)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
+ mptcp_subflow_early_fallback(msk, subflow);
+ } else if (mptcp_token_new_connect(ssk) < 0) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
+ mptcp_subflow_early_fallback(msk, subflow);
+ }
}
+
+ WRITE_ONCE(msk->write_seq, subflow->idsn);
+ WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+ WRITE_ONCE(msk->snd_una, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
@@ -3767,6 +3842,7 @@ static struct proto mptcp_prot = {
.unhash = mptcp_unhash,
.get_port = mptcp_get_port,
.forward_alloc_get = mptcp_forward_alloc_get,
+ .stream_memory_free = mptcp_stream_memory_free,
.sockets_allocated = &mptcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
@@ -3815,7 +3891,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
struct sock *ssk;
int err;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
lock_sock(sk);
@@ -3849,13 +3925,12 @@ unlock:
}
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct sock *ssk, *newsk;
- int err;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* Buggy applications can call accept on socket states other than LISTEN
* but no need to allocate the first subflow just to error out.
@@ -3864,12 +3939,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
if (!ssk)
return -EINVAL;
- pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
- newsk = inet_csk_accept(ssk, flags, &err, kern);
+ pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
+ newsk = inet_csk_accept(ssk, arg);
if (!newsk)
- return err;
+ return arg->err;
- pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
+ pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
@@ -3888,7 +3963,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
newsk = new_mptcp_sock;
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
- newsk->sk_kern_sock = kern;
+ newsk->sk_kern_sock = arg->kern;
lock_sock(newsk);
__inet_accept(sock, newsock, newsk);
@@ -3916,10 +3991,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
mptcp_set_state(newsk, TCP_CLOSE);
}
} else {
- MPTCP_INC_STATS(sock_net(ssk),
- MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
tcpfallback:
- newsk->sk_kern_sock = kern;
+ newsk->sk_kern_sock = arg->kern;
lock_sock(newsk);
__inet_accept(sock, newsock, newsk);
/* we are being invoked after accepting a non-mp-capable
@@ -3940,12 +4013,12 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- if (sk_stream_is_writeable(sk))
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
- mptcp_set_nospace(sk);
- smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- if (sk_stream_is_writeable(sk))
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
return 0;
@@ -3964,7 +4037,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
- pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
+ pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
if (state == TCP_LISTEN) {
struct sock *ssk = READ_ONCE(msk->first);
@@ -4076,11 +4149,13 @@ void __init mptcp_proto_init(void)
if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
panic("Failed to allocate MPTCP pcpu counter\n");
- init_dummy_netdev(&mptcp_napi_dev);
+ mptcp_napi_dev = alloc_netdev_dummy(0);
+ if (!mptcp_napi_dev)
+ panic("Failed to allocate MPTCP dummy netdev\n");
for_each_possible_cpu(cpu) {
delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
INIT_LIST_HEAD(&delegated->head);
- netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi,
+ netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
mptcp_napi_poll);
napi_enable(&delegated->napi);
}
@@ -4139,7 +4214,7 @@ int __init mptcp_proto_v6_init(void)
int err;
mptcp_v6_prot = mptcp_prot;
- strcpy(mptcp_v6_prot.name, "MPTCPv6");
+ strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
mptcp_v6_prot.slab = NULL;
mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 07f6242afc1a..ad21925af061 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -12,8 +12,7 @@
#include <net/inet_connection_sock.h>
#include <uapi/linux/mptcp.h>
#include <net/genetlink.h>
-
-#include "mptcp_pm_gen.h"
+#include <net/rstreason.h>
#define MPTCP_SUPPORTED_VERSION 1
@@ -113,10 +112,9 @@
#define MPTCP_RST_TRANSIENT BIT(0)
/* MPTCP socket atomic flags */
-#define MPTCP_NOSPACE 1
-#define MPTCP_WORK_RTX 2
-#define MPTCP_FALLBACK_DONE 4
-#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_WORK_RTX 1
+#define MPTCP_FALLBACK_DONE 2
+#define MPTCP_WORK_CLOSE_SUBFLOW 3
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -151,22 +149,24 @@ struct mptcp_options_received {
u32 subflow_seq;
u16 data_len;
__sum16 csum;
- u16 suboptions;
+ struct_group(status,
+ u16 suboptions;
+ u16 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ reset_reason:4,
+ reset_transient:1,
+ echo:1,
+ backup:1,
+ deny_join_id0:1,
+ __unused:2;
+ );
+ u8 join_id;
u32 token;
u32 nonce;
- u16 use_map:1,
- dsn64:1,
- data_fin:1,
- use_ack:1,
- ack64:1,
- mpc_map:1,
- reset_reason:4,
- reset_transient:1,
- echo:1,
- backup:1,
- deny_join_id0:1,
- __unused:2;
- u8 join_id;
u64 thmac;
u8 hmac[MPTCPOPT_HMAC_LEN];
struct mptcp_addr_info addr;
@@ -238,6 +238,12 @@ struct mptcp_pm_data {
struct mptcp_rm_list rm_list_rx;
};
+struct mptcp_pm_local {
+ struct mptcp_addr_info addr;
+ u8 flags;
+ int ifindex;
+};
+
struct mptcp_pm_addr_entry {
struct list_head list;
struct mptcp_addr_info addr;
@@ -260,8 +266,10 @@ struct mptcp_data_frag {
struct mptcp_sock {
/* inet_connection_sock must be the first member */
struct inet_connection_sock sk;
- u64 local_key;
- u64 remote_key;
+ u64 local_key; /* protected by the first subflow socket lock
+ * lockless access read
+ */
+ u64 remote_key; /* same as above */
u64 write_seq;
u64 bytes_sent;
u64 snd_nxt;
@@ -281,6 +289,9 @@ struct mptcp_sock {
u64 bytes_acked;
u64 snd_una;
u64 wnd_end;
+ u32 last_data_sent;
+ u32 last_data_recv;
+ u32 last_ack_recv;
unsigned long timer_ival;
u32 token;
int rmem_released;
@@ -306,6 +317,10 @@ struct mptcp_sock {
in_accept_queue:1,
free_first:1,
rcvspace_init:1;
+ u32 notsent_lowat;
+ int keepalive_cnt;
+ int keepalive_idle;
+ int keepalive_intvl;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -341,12 +356,30 @@ struct mptcp_sock {
#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \
list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
+extern struct genl_family mptcp_genl_family;
+
static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
sock_owned_by_me((const struct sock *)msk);
}
+#ifdef CONFIG_DEBUG_NET
+/* MPTCP-specific: we might (indirectly) call this helper with the wrong sk */
+#undef tcp_sk
+#define tcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_TCP); \
+ container_of_const(_ptr, struct tcp_sock, inet_conn.icsk_inet.sk); \
+})
+#define mptcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_MPTCP); \
+ container_of_const(_ptr, struct mptcp_sock, sk.icsk_inet.sk); \
+})
+
+#else /* !CONFIG_DEBUG_NET */
#define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk)
+#endif
/* the msk socket doesn't use the backlog, also account for the bulk
* free memory
@@ -361,6 +394,11 @@ static inline int mptcp_win_from_space(const struct sock *sk, int space)
return __tcp_win_from_space(mptcp_sk(sk)->scaling_ratio, space);
}
+static inline int mptcp_space_from_win(const struct sock *sk, int win)
+{
+ return __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, win);
+}
+
static inline int __mptcp_space(const struct sock *sk)
{
return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
@@ -400,7 +438,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (msk->snd_una == READ_ONCE(msk->snd_nxt))
+ if (msk->snd_una == msk->snd_nxt)
return NULL;
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
@@ -418,6 +456,7 @@ struct mptcp_subflow_request_sock {
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
+ request_bkup : 1,
csum_reqd : 1,
allow_join_id0 : 1;
u8 local_id;
@@ -476,7 +515,6 @@ struct mptcp_subflow_context {
request_bkup : 1,
mp_capable : 1, /* remote is MPTCP capable */
mp_join : 1, /* remote is JOINing */
- fully_established : 1, /* path validated */
pm_notified : 1, /* PM hook called for established status */
conn_finished : 1,
map_valid : 1,
@@ -493,9 +531,13 @@ struct mptcp_subflow_context {
stale : 1, /* unable to snd/rcv data, do not use for xmit */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
- __unused : 10;
+ close_event_done : 1, /* has done the post-closed part */
+ mpc_drop : 1, /* the MPC option has been dropped in a rtx */
+ __unused : 9;
bool data_avail;
bool scheduled;
+ bool pm_listener; /* a listener managed by the kernel PM? */
+ bool fully_established; /* path validated */
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -538,7 +580,7 @@ struct mptcp_subflow_context {
static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
/* Use RCU on icsk_ulp_data only for sock diag code */
return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
@@ -558,6 +600,43 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
WRITE_ONCE(subflow->local_id, -1);
}
+/* Convert reset reasons in MPTCP to enum sk_rst_reason type */
+static inline enum sk_rst_reason
+sk_rst_convert_mptcp_reason(u32 reason)
+{
+ switch (reason) {
+ case MPTCP_RST_EUNSPEC:
+ return SK_RST_REASON_MPTCP_RST_EUNSPEC;
+ case MPTCP_RST_EMPTCP:
+ return SK_RST_REASON_MPTCP_RST_EMPTCP;
+ case MPTCP_RST_ERESOURCE:
+ return SK_RST_REASON_MPTCP_RST_ERESOURCE;
+ case MPTCP_RST_EPROHIBIT:
+ return SK_RST_REASON_MPTCP_RST_EPROHIBIT;
+ case MPTCP_RST_EWQ2BIG:
+ return SK_RST_REASON_MPTCP_RST_EWQ2BIG;
+ case MPTCP_RST_EBADPERF:
+ return SK_RST_REASON_MPTCP_RST_EBADPERF;
+ case MPTCP_RST_EMIDDLEBOX:
+ return SK_RST_REASON_MPTCP_RST_EMIDDLEBOX;
+ default:
+ /* It should not happen, or else errors may occur
+ * in the MPTCP layer
+ */
+ return SK_RST_REASON_ERROR;
+ }
+}
+
+static inline void
+mptcp_send_active_reset_reason(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ enum sk_rst_reason reason;
+
+ reason = sk_rst_convert_mptcp_reason(subflow->reset_reason);
+ tcp_send_active_reset(sk, GFP_ATOMIC, reason);
+}
+
static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
@@ -622,6 +701,12 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
+
+void mptcp_active_disable(struct sock *sk);
+bool mptcp_active_should_disable(struct sock *ssk);
+void mptcp_active_enable(struct sock *sk);
+
+void mptcp_get_available_schedulers(char *buf, size_t maxlen);
void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
struct mptcp_subflow_context *subflow,
const struct mptcp_options_received *mp_opt);
@@ -649,7 +734,7 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr);
/* called with sk socket lock held */
-int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
struct socket **new_sock);
@@ -677,10 +762,15 @@ static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
static inline bool mptcp_epollin_ready(const struct sock *sk)
{
+ u64 data_avail = mptcp_data_avail(mptcp_sk(sk));
+
+ if (!data_avail)
+ return false;
+
/* mptcp doesn't have to deal with small skbs in the receive queue,
- * at it can always coalesce them
+ * as it can always coalesce them
*/
- return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
+ return (data_avail >= sk->sk_rcvlowat) ||
(mem_cgroup_sockets_enabled && sk->sk_memcg &&
mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
READ_ONCE(tcp_memory_pressure);
@@ -697,7 +787,7 @@ static inline bool __tcp_can_send(const struct sock *ssk)
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
/* can't send if JOIN hasn't completed yet (i.e. is usable for mptcp) */
- if (subflow->request_join && !subflow->fully_established)
+ if (subflow->request_join && !READ_ONCE(subflow->fully_established))
return false;
return __tcp_can_send(mptcp_subflow_tcp_sock(subflow));
@@ -790,14 +880,36 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}
+static inline u32 mptcp_notsent_lowat(const struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ u32 val;
+
+ val = READ_ONCE(mptcp_sk(sk)->notsent_lowat);
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+}
+
+static inline bool mptcp_stream_memory_free(const struct sock *sk, int wake)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 notsent_bytes;
+
+ notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
+ return (notsent_bytes << wake) < mptcp_notsent_lowat(sk);
+}
+
+static inline bool __mptcp_stream_is_writeable(const struct sock *sk, int wake)
+{
+ return mptcp_stream_memory_free(sk, wake) &&
+ __sk_stream_is_writeable(sk, wake);
+}
+
static inline void mptcp_write_space(struct sock *sk)
{
- if (sk_stream_is_writeable(sk)) {
- /* pairs with memory barrier in mptcp_poll */
- smp_mb();
- if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
- sk_stream_write_space(sk);
- }
+ /* pairs with memory barrier in mptcp_poll */
+ smp_mb();
+ if (mptcp_stream_memory_free(sk, 1))
+ sk_stream_write_space(sk);
}
static inline void __mptcp_sync_sndbuf(struct sock *sk)
@@ -808,7 +920,7 @@ static inline void __mptcp_sync_sndbuf(struct sock *sk)
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
return;
- new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
+ new_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]);
mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
@@ -901,6 +1013,8 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
@@ -920,29 +1034,19 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
-int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
- unsigned int id,
- u8 *flags, int *ifindex);
-int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
- u8 *flags, int *ifindex);
-int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
- unsigned int id,
- u8 *flags, int *ifindex);
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup);
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
+bool mptcp_lookup_subflow_by_saddr(const struct list_head *list,
+ const struct mptcp_addr_info *saddr);
+bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
-void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list);
+void mptcp_pm_remove_addr_entry(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
@@ -958,6 +1062,8 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
const struct mptcp_options_received *mp_opt);
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
struct request_sock *req);
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
@@ -1022,6 +1128,18 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info);
static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
{
@@ -1034,8 +1152,6 @@ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflo
void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
- const struct mptcp_rm_list *rm_list);
unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
@@ -1055,7 +1171,6 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
spin_unlock_bh(&msk->pm.lock);
}
-void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
@@ -1081,9 +1196,11 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
if (__mptcp_check_fallback(msk)) {
- pr_debug("TCP fallback already done (msk=%p)", msk);
+ pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
+ if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
+ return;
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
@@ -1117,7 +1234,15 @@ static inline void mptcp_do_fallback(struct sock *ssk)
}
}
-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+
+static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ pr_fallback(msk);
+ subflow->request_mptcp = 0;
+ __mptcp_do_fallback(msk);
+}
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
{
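The protocol.h hunks above drop the MPTCP_NOSPACE flag and gate writeability on a TCP_NOTSENT_LOWAT-style budget: the socket stays writeable while the not-yet-sent bytes (write_seq - snd_nxt), shifted left by the wake argument, remain below the configured low-water mark. A minimal stand-alone sketch of that arithmetic, with made-up sequence numbers; it only mirrors the check in mptcp_stream_memory_free() and is not kernel code:

	/* Sketch of the mptcp_stream_memory_free() test with sample values. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool stream_memory_free(uint64_t write_seq, uint64_t snd_nxt,
				       uint32_t notsent_lowat, int wake)
	{
		/* Bytes queued by the application but not yet pushed to any subflow. */
		uint32_t notsent_bytes = (uint32_t)(write_seq - snd_nxt);

		/* wake == 1 halves the effective budget, as in the kernel helper. */
		return ((uint64_t)notsent_bytes << wake) < notsent_lowat;
	}

	int main(void)
	{
		/* Hypothetical values: 96 KiB queued, 64 KiB already sent, 128 KiB lowat. */
		uint64_t write_seq = 98304, snd_nxt = 65536;
		uint32_t lowat = 131072;

		printf("poll writeable: %d\n", stream_memory_free(write_seq, snd_nxt, lowat, 1));
		printf("sendmsg budget: %d\n", stream_memory_free(write_seq, snd_nxt, lowat, 0));
		return 0;
	}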
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 4ab0693c069c..df7dbcfa3b71 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -51,6 +51,26 @@ struct mptcp_sched_ops *mptcp_sched_find(const char *name)
return ret;
}
+/* Build string with list of available scheduler values.
+ * Similar to tcp_get_available_congestion_control()
+ */
+void mptcp_get_available_schedulers(char *buf, size_t maxlen)
+{
+ struct mptcp_sched_ops *sched;
+ size_t offs = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
+ offs += snprintf(buf + offs, maxlen - offs,
+ "%s%s",
+ offs == 0 ? "" : " ", sched->name);
+
+ if (WARN_ON_ONCE(offs >= maxlen))
+ break;
+ }
+ rcu_read_unlock();
+}
+
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
{
if (!sched->get_subflow)
@@ -64,7 +84,7 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
spin_unlock(&mptcp_sched_list_lock);
- pr_debug("%s registered", sched->name);
+ pr_debug("%s registered\n", sched->name);
return 0;
}
@@ -96,7 +116,7 @@ int mptcp_init_sched(struct mptcp_sock *msk,
if (msk->sched->init)
msk->sched->init(msk);
- pr_debug("sched=%s", msk->sched->name);
+ pr_debug("sched=%s\n", msk->sched->name);
return 0;
}
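mptcp_get_available_schedulers() above assembles a space-separated list of the registered schedulers, following tcp_get_available_congestion_control(). Assuming it backs a read-only net.mptcp.available_schedulers sysctl, the list can be read from userspace with a few lines of C; the /proc path below is an assumption based on that sysctl name and may differ on a given kernel:

	/* Hedged sketch: read the scheduler list built by the helper above. */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/proc/sys/net/mptcp/available_schedulers", "r");

		if (!f) {
			perror("available_schedulers");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("schedulers: %s", buf);
		fclose(f);
		return 0;
	}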
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index c40f1428e602..505445a9598f 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -181,8 +181,6 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
switch (optname) {
case SO_KEEPALIVE:
- mptcp_sol_socket_sync_intval(msk, optname, val);
- return 0;
case SO_DEBUG:
case SO_MARK:
case SO_PRIORITY:
@@ -618,26 +616,42 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
}
if (ret == 0)
- strcpy(msk->ca_name, name);
+ strscpy(msk->ca_name, name, sizeof(msk->ca_name));
release_sock(sk);
return ret;
}
-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max,
+ int (*set_val)(struct sock *, int),
+ int *msk_val, int val)
{
struct mptcp_subflow_context *subflow;
- struct sock *sk = (struct sock *)msk;
- int val;
+ int err = 0;
- if (optlen < sizeof(int))
- return -EINVAL;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int ret;
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
+ lock_sock(ssk);
+ ret = set_val(ssk, val);
+ err = err ? : ret;
+ release_sock(ssk);
+ }
+
+ if (!err) {
+ *msk_val = val;
+ sockopt_seq_inc(msk);
+ }
+
+ return err;
+}
+
+static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->cork = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -649,25 +663,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
}
if (!val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
return 0;
}
-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
-
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->nodelay = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -679,8 +683,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
}
if (val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
-
return 0;
}
@@ -803,25 +805,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
int ret, val;
switch (optname) {
- case TCP_INQ:
- ret = mptcp_get_int_option(msk, optval, optlen, &val);
- if (ret)
- return ret;
- if (val < 0 || val > 1)
- return -EINVAL;
-
- lock_sock(sk);
- msk->recvmsg_inq = !!val;
- release_sock(sk);
- return 0;
case TCP_ULP:
return -EOPNOTSUPP;
case TCP_CONGESTION:
return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
- case TCP_CORK:
- return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
- case TCP_NODELAY:
- return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
case TCP_DEFER_ACCEPT:
/* See tcp.c: TCP_DEFER_ACCEPT does not fail */
mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
@@ -834,7 +821,50 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
optval, optlen);
}
- return -EOPNOTSUPP;
+ ret = mptcp_get_int_option(msk, optval, optlen, &val);
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+ switch (optname) {
+ case TCP_INQ:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ msk->recvmsg_inq = !!val;
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(msk->notsent_lowat, val);
+ mptcp_write_space(sk);
+ break;
+ case TCP_CORK:
+ ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
+ break;
+ case TCP_NODELAY:
+ ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
+ break;
+ case TCP_KEEPIDLE:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE,
+ &tcp_sock_set_keepidle_locked,
+ &msk->keepalive_idle, val);
+ break;
+ case TCP_KEEPINTVL:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL,
+ &tcp_sock_set_keepintvl,
+ &msk->keepalive_intvl, val);
+ break;
+ case TCP_KEEPCNT:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT,
+ &tcp_sock_set_keepcnt,
+ &msk->keepalive_cnt,
+ val);
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ }
+
+ release_sock(sk);
+ return ret;
}
int mptcp_setsockopt(struct sock *sk, int level, int optname,
@@ -843,7 +873,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (level == SOL_SOCKET)
return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
@@ -907,6 +937,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
struct sock *sk = (struct sock *)msk;
u32 flags = 0;
bool slow;
+ u32 now;
memset(info, 0, sizeof(*info));
@@ -935,14 +966,9 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
if (READ_ONCE(msk->can_ack))
flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED;
info->mptcpi_flags = flags;
- mptcp_data_lock(sk);
- info->mptcpi_snd_una = msk->snd_una;
- info->mptcpi_rcv_nxt = msk->ack_seq;
- info->mptcpi_bytes_acked = msk->bytes_acked;
- mptcp_data_unlock(sk);
slow = lock_sock_fast(sk);
- info->mptcpi_csum_enabled = msk->csum_enabled;
+ info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
info->mptcpi_token = msk->token;
info->mptcpi_write_seq = msk->write_seq;
info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits;
@@ -951,7 +977,17 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
info->mptcpi_bytes_retrans = msk->bytes_retrans;
info->mptcpi_subflows_total = info->mptcpi_subflows +
__mptcp_has_initial_subflow(msk);
+ now = tcp_jiffies32;
+ info->mptcpi_last_data_sent = jiffies_to_msecs(now - msk->last_data_sent);
+ info->mptcpi_last_data_recv = jiffies_to_msecs(now - msk->last_data_recv);
unlock_sock_fast(sk, slow);
+
+ mptcp_data_lock(sk);
+ info->mptcpi_last_ack_recv = jiffies_to_msecs(now - msk->last_ack_recv);
+ info->mptcpi_snd_una = msk->snd_una;
+ info->mptcpi_rcv_nxt = msk->ack_seq;
+ info->mptcpi_bytes_acked = msk->bytes_acked;
+ mptcp_data_unlock(sk);
}
EXPORT_SYMBOL_GPL(mptcp_diag_fill_info);
@@ -963,6 +999,10 @@ static int mptcp_getsockopt_info(struct mptcp_sock *msk, char __user *optval, in
if (get_user(len, optlen))
return -EFAULT;
+ /* When used only to check if a fallback to TCP happened. */
+ if (len == 0)
+ return 0;
+
len = min_t(unsigned int, len, sizeof(struct mptcp_info));
mptcp_diag_fill_info(msk, &m_info);
@@ -1331,6 +1371,8 @@ static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
char __user *optval, int __user *optlen)
{
+ struct sock *sk = (void *)msk;
+
switch (optname) {
case TCP_ULP:
case TCP_CONGESTION:
@@ -1349,6 +1391,22 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->cork);
case TCP_NODELAY:
return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ case TCP_KEEPIDLE:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_idle ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ);
+ case TCP_KEEPINTVL:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_intvl ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ);
+ case TCP_KEEPCNT:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_cnt ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
+ case TCP_NOTSENT_LOWAT:
+ return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
+ case TCP_IS_MPTCP:
+ return mptcp_put_int_option(msk, optval, optlen, 1);
}
return -EOPNOTSUPP;
}
@@ -1395,7 +1453,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* @@ the meaning of setsockopt() when the socket is connected and
* there are multiple subflows is not yet defined. It is up to the
@@ -1464,6 +1522,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
tcp_set_congestion_control(ssk, msk->ca_name, false, true);
__tcp_sock_set_cork(ssk, !!msk->cork);
__tcp_sock_set_nodelay(ssk, !!msk->nodelay);
+ tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
+ tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
+ tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
@@ -1500,6 +1561,10 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
struct mptcp_subflow_context *subflow;
int space, cap;
+ /* bpf can land here with a wrong sk type */
+ if (sk->sk_protocol == IPPROTO_TCP)
+ return -EINVAL;
+
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
cap = sk->sk_rcvbuf >> 1;
else
@@ -1514,7 +1579,7 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
return 0;
- space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val);
+ space = mptcp_space_from_win(sk, val);
if (space <= sk->sk_rcvbuf)
return 0;
@@ -1526,7 +1591,7 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
slow = lock_sock_fast(ssk);
WRITE_ONCE(ssk->sk_rcvbuf, space);
- tcp_sk(ssk)->window_clamp = val;
+ WRITE_ONCE(tcp_sk(ssk)->window_clamp, val);
unlock_sock_fast(ssk, slow);
}
return 0;
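With the sockopt.c changes above, plain SOL_TCP knobs such as TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT and TCP_NOTSENT_LOWAT are accepted on an MPTCP socket and mirrored to every subflow, and getsockopt(TCP_IS_MPTCP) reports whether the connection is using MPTCP. A hedged userspace sketch follows; the IPPROTO_MPTCP and TCP_IS_MPTCP fallback values are taken from the uapi headers and are assumptions if the local libc does not define them:

	/* Illustration only: exercise the SOL_TCP options handled by the hunks above. */
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef IPPROTO_MPTCP
	#define IPPROTO_MPTCP	262	/* uapi value, assumed if libc lacks it */
	#endif
	#ifndef TCP_IS_MPTCP
	#define TCP_IS_MPTCP	43	/* uapi value, assumed if libc lacks it */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
		int idle = 60, intvl = 10, cnt = 5, lowat = 128 * 1024;
		int is_mptcp = 0;
		socklen_t len = sizeof(is_mptcp);

		if (fd < 0) {
			perror("socket");	/* kernel without CONFIG_MPTCP */
			return 1;
		}

		/* Keepalive and lowat values are propagated to every subflow by the msk. */
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
		setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));

		if (getsockopt(fd, IPPROTO_TCP, TCP_IS_MPTCP, &is_mptcp, &len) == 0)
			printf("TCP_IS_MPTCP: %d\n", is_mptcp);

		close(fd);
		return 0;
	}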
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 71ba86246ff8..9f18217dddc8 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -15,13 +15,12 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
-#include <uapi/linux/mptcp.h>
+
#include "protocol.h"
#include "mib.h"
@@ -40,7 +39,7 @@ static void subflow_req_destructor(struct request_sock *req)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
- pr_debug("subflow_req=%p", subflow_req);
+ pr_debug("subflow_req=%p\n", subflow_req);
if (subflow_req->msk)
sock_put((struct sock *)subflow_req->msk);
@@ -75,7 +74,8 @@ static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_
get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
- subflow_generate_hmac(msk->local_key, msk->remote_key,
+ subflow_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
subflow_req->local_nonce,
subflow_req->remote_nonce, hmac);
@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
return NULL;
}
subflow_req->local_id = local_id;
+ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
return msk;
}
@@ -131,6 +132,13 @@ static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
}
}
+static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb)
+{
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+ return -EPERM;
+}
+
/* Init mptcp request socket.
*
* Returns an error code if a JOIN has failed and a TCP reset
@@ -145,14 +153,16 @@ static int subflow_check_req(struct request_sock *req,
struct mptcp_options_received mp_opt;
bool opt_mp_capable, opt_mp_join;
- pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
+ pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
* TCP option space.
*/
- if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
+ if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) {
+ subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
return -EINVAL;
+ }
#endif
mptcp_get_options(skb, &mp_opt);
@@ -162,10 +172,17 @@ static int subflow_check_req(struct request_sock *req,
if (opt_mp_capable) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+ if (unlikely(listener->pm_listener))
+ return subflow_reset_req_endp(req, skb);
if (opt_mp_join)
return 0;
} else if (opt_mp_join) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+
+ if (mp_opt.backup)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
+ } else if (unlikely(listener->pm_listener)) {
+ return subflow_reset_req_endp(req, skb);
}
if (opt_mp_capable && listener->request_mptcp) {
@@ -215,11 +232,12 @@ again:
}
if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
- pr_debug("syn inet_sport=%d %d",
+ pr_debug("syn inet_sport=%d %d\n",
ntohs(inet_sk(sk_listener)->inet_sport),
ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
return -EPERM;
}
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
@@ -228,13 +246,15 @@ again:
subflow_req_create_thmac(subflow_req);
if (unlikely(req->syncookie)) {
- if (mptcp_can_accept_new_subflow(subflow_req->msk))
- subflow_init_req_cookie_join_save(subflow_req, skb);
- else
+ if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
return -EPERM;
+ }
+
+ subflow_init_req_cookie_join_save(subflow_req, skb);
}
- pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+ pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
subflow_req->remote_nonce, subflow_req->msk);
}
@@ -282,10 +302,21 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
+static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb)
+{
+ const struct mptcp_ext *mpext = mptcp_get_ext(skb);
+
+ if (!mpext)
+ return SK_RST_REASON_NOT_SPECIFIED;
+
+ return sk_rst_convert_mptcp_reason(mpext->reset_reason);
+}
+
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct request_sock *req)
+ struct request_sock *req,
+ u32 tw_isn)
{
struct dst_entry *dst;
int err;
@@ -293,7 +324,7 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
tcp_rsk(req)->is_mptcp = 1;
subflow_init_req(req, sk);
- dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+ dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn);
if (!dst)
return NULL;
@@ -303,7 +334,8 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
dst_release(dst);
if (!req->syncookie)
- tcp_request_sock_ops.send_reset(sk, skb);
+ tcp_request_sock_ops.send_reset(sk, skb,
+ mptcp_get_rst_reason(skb));
return NULL;
}
@@ -352,7 +384,8 @@ static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct request_sock *req)
+ struct request_sock *req,
+ u32 tw_isn)
{
struct dst_entry *dst;
int err;
@@ -360,7 +393,7 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
tcp_rsk(req)->is_mptcp = 1;
subflow_init_req(req, sk);
- dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+ dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn);
if (!dst)
return NULL;
@@ -370,7 +403,8 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
dst_release(dst);
if (!req->syncookie)
- tcp6_request_sock_ops.send_reset(sk, skb);
+ tcp6_request_sock_ops.send_reset(sk, skb,
+ mptcp_get_rst_reason(skb));
return NULL;
}
#endif
@@ -406,7 +440,7 @@ void mptcp_subflow_reset(struct sock *ssk)
/* must hold: tcp_done() could drop last reference on parent */
sock_hold(sk);
- tcp_send_active_reset(ssk, GFP_ATOMIC);
+ mptcp_send_active_reset_reason(ssk);
tcp_done(ssk);
if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
mptcp_schedule_work(sk);
@@ -504,7 +538,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
- pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
+ pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
@@ -523,6 +557,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_capable = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
+ mptcp_active_enable(parent);
mptcp_propagate_state(parent, sk, subflow, &mp_opt);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -536,7 +571,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
- pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
subflow, subflow->thmac, subflow->remote_nonce,
subflow->backup);
@@ -558,13 +593,19 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_join = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ if (subflow->backup)
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+
if (subflow_use_different_dport(msk, sk)) {
- pr_debug("synack inet_dport=%d %d",
+ pr_debug("synack inet_dport=%d %d\n",
ntohs(inet_sk(sk)->inet_dport),
ntohs(inet_sk(parent)->inet_dport));
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
}
} else if (mptcp_check_fallback(sk)) {
+ /* It looks like MPTCP is blocked, while TCP is not */
+ if (subflow->mpc_drop)
+ mptcp_active_disable(parent);
fallback:
mptcp_propagate_state(parent, sk, subflow, NULL);
}
@@ -595,6 +636,8 @@ static int subflow_chk_local_id(struct sock *sk)
return err;
subflow_set_local_id(subflow, err);
+ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
+
return 0;
}
@@ -627,7 +670,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* Never answer to SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -658,7 +701,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
if (skb->protocol == htons(ETH_P_IP))
return subflow_v4_conn_request(sk, skb);
@@ -714,7 +757,8 @@ static bool subflow_hmac_valid(const struct request_sock *req,
if (!msk)
return false;
- subflow_generate_hmac(msk->remote_key, msk->local_key,
+ subflow_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
@@ -756,7 +800,7 @@ void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
const struct mptcp_options_received *mp_opt)
{
subflow_set_remote_key(msk, subflow, mp_opt);
- subflow->fully_established = 1;
+ WRITE_ONCE(subflow->fully_established, true);
WRITE_ONCE(msk->fully_established, true);
if (subflow->is_mptfo)
@@ -774,10 +818,11 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_subflow_request_sock *subflow_req;
struct mptcp_options_received mp_opt;
bool fallback, fallback_is_fatal;
+ enum sk_rst_reason reason;
struct mptcp_sock *owner;
struct sock *child;
- pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+ pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
/* After child creation we must look for MPC even when options
* are not parsed
@@ -868,18 +913,23 @@ create_child:
ctx->conn = (struct sock *)owner;
if (subflow_use_different_sport(owner, sk)) {
- pr_debug("ack inet_sport=%d %d",
+ pr_debug("ack inet_sport=%d %d\n",
ntohs(inet_sk(sk)->inet_sport),
ntohs(inet_sk((struct sock *)owner)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
goto dispose_child;
}
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
}
- if (!mptcp_finish_join(child))
+ if (!mptcp_finish_join(child)) {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child);
+
+ subflow_add_reset_reason(skb, subflow->reset_reason);
goto dispose_child;
+ }
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
tcp_rsk(req)->drop_req = true;
@@ -887,7 +937,7 @@ create_child:
}
/* check for expected invariant - should never trigger, just help
- * catching eariler subtle bugs
+ * catching earlier subtle bugs
*/
WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
(!mptcp_subflow_ctx(child) ||
@@ -899,12 +949,15 @@ dispose_child:
tcp_rsk(req)->drop_req = true;
inet_csk_prepare_for_destroy_sock(child);
tcp_done(child);
- req->rsk_ops->send_reset(sk, skb);
+ reason = mptcp_get_rst_reason(skb);
+ req->rsk_ops->send_reset(sk, skb, reason);
/* The last child reference will be released by the caller */
return child;
fallback:
+ if (fallback)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
mptcp_subflow_drop_ctx(child);
return child;
}
@@ -918,12 +971,13 @@ enum mapping_status {
MAPPING_EMPTY,
MAPPING_DATA_FIN,
MAPPING_DUMMY,
- MAPPING_BAD_CSUM
+ MAPPING_BAD_CSUM,
+ MAPPING_NODSS
};
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
@@ -933,8 +987,10 @@ static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
unsigned int skb_consumed;
skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
- if (WARN_ON_ONCE(skb_consumed >= skb->len))
+ if (unlikely(skb_consumed >= skb->len)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
return true;
+ }
return skb->len - skb_consumed <= subflow->map_data_len -
mptcp_subflow_get_map_offset(subflow);
@@ -1073,8 +1129,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
return MAPPING_EMPTY;
}
+ /* If the required DSS has likely been dropped by a middlebox */
if (!subflow->map_valid)
- return MAPPING_INVALID;
+ return MAPPING_NODSS;
goto validate_seq;
}
@@ -1083,17 +1140,18 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len = mpext->data_len;
if (data_len == 0) {
- pr_debug("infinite mapping received");
+ pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
- subflow->map_data_len = 0;
return MAPPING_INVALID;
}
if (mpext->data_fin == 1) {
+ u64 data_fin_seq;
+
if (data_len == 1) {
bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
mpext->dsn64);
- pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
+ pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
if (subflow->map_valid) {
/* A DATA_FIN might arrive in a DSS
* option before the previous mapping
@@ -1102,26 +1160,26 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
*/
skb_ext_del(skb, SKB_EXT_MPTCP);
return MAPPING_OK;
- } else {
- if (updated)
- mptcp_schedule_work((struct sock *)msk);
-
- return MAPPING_DATA_FIN;
}
- } else {
- u64 data_fin_seq = mpext->data_seq + data_len - 1;
- /* If mpext->data_seq is a 32-bit value, data_fin_seq
- * must also be limited to 32 bits.
- */
- if (!mpext->dsn64)
- data_fin_seq &= GENMASK_ULL(31, 0);
+ if (updated)
+ mptcp_schedule_work((struct sock *)msk);
- mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
- pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
- data_fin_seq, mpext->dsn64);
+ return MAPPING_DATA_FIN;
}
+ data_fin_seq = mpext->data_seq + data_len - 1;
+
+ /* If mpext->data_seq is a 32-bit value, data_fin_seq must also
+ * be limited to 32 bits.
+ */
+ if (!mpext->dsn64)
+ data_fin_seq &= GENMASK_ULL(31, 0);
+
+ mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+ pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+ data_fin_seq, mpext->dsn64);
+
/* Adjust for DATA_FIN using 1 byte of sequence space */
data_len--;
}
@@ -1165,7 +1223,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (unlikely(subflow->map_csum_reqd != csum_reqd))
return MAPPING_INVALID;
- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
subflow->map_seq, subflow->map_subflow_seq,
subflow->map_data_len, subflow->map_csum_reqd,
subflow->map_data_csum);
@@ -1190,14 +1248,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
- u32 incr;
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u32 offset, incr, avail_len;
- incr = limit >= skb->len ? skb->len + fin : limit;
+ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+ if (WARN_ON_ONCE(offset > skb->len))
+ goto out;
- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
- subflow->map_subflow_seq);
+ avail_len = skb->len - offset;
+ incr = limit >= avail_len ? avail_len + fin : limit;
+
+ pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+ offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
+
+out:
if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
sk_eat_skb(ssk, skb);
if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
@@ -1207,24 +1273,16 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
- if (likely(ssk->sk_state != TCP_CLOSE))
+ struct sock *sk = (struct sock *)msk;
+
+ if (likely(ssk->sk_state != TCP_CLOSE &&
+ (ssk->sk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED)))
return;
if (skb_queue_empty(&ssk->sk_receive_queue) &&
!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
- mptcp_schedule_work((struct sock *)msk);
-}
-
-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
-{
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
- if (subflow->mp_join)
- return false;
- else if (READ_ONCE(msk->csum_enabled))
- return !subflow->valid_csum_seen;
- else
- return !subflow->fully_established;
+ mptcp_schedule_work(sk);
}
static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
@@ -1232,7 +1290,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
unsigned long fail_tout;
- /* greceful failure can happen only on the MPC subflow */
+ /* graceful failure can happen only on the MPC subflow */
if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
return;
@@ -1274,7 +1332,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
status = get_mapping_status(ssk, msk);
trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
- status == MAPPING_BAD_CSUM))
+ status == MAPPING_BAD_CSUM || status == MAPPING_NODSS))
goto fallback;
if (status != MAPPING_OK)
@@ -1289,7 +1347,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
old_ack = READ_ONCE(msk->ack_seq);
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
- pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
+ pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
ack_seq);
if (unlikely(before64(ack_seq, old_ack))) {
mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
@@ -1322,19 +1380,21 @@ fallback:
return true;
}
- if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
+ if (!READ_ONCE(msk->allow_infinite_fallback)) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMPTCP;
+ subflow->reset_reason = status == MAPPING_NODSS ?
+ MPTCP_RST_EMIDDLEBOX :
+ MPTCP_RST_EMPTCP;
reset:
WRITE_ONCE(ssk->sk_err, EBADMSG);
tcp_set_state(ssk, TCP_CLOSE);
while ((skb = skb_peek(&ssk->sk_receive_queue)))
sk_eat_skb(ssk, skb);
- tcp_send_active_reset(ssk, GFP_ATOMIC);
+ mptcp_send_active_reset_reason(ssk);
WRITE_ONCE(subflow->data_avail, false);
return false;
}
@@ -1361,7 +1421,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
subflow->map_valid = 0;
WRITE_ONCE(subflow->data_avail, false);
- pr_debug("Done with mapping: seq=%u data_len=%u",
+ pr_debug("Done with mapping: seq=%u data_len=%u\n",
subflow->map_subflow_seq,
subflow->map_data_len);
}
@@ -1471,7 +1531,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
- pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
+ pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
if (likely(icsk->icsk_af_ops == target))
@@ -1513,28 +1573,31 @@ void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
#endif
}
-int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
const struct mptcp_addr_info *remote)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_subflow_context *subflow;
+ int local_id = local->addr.id;
struct sockaddr_storage addr;
int remote_id = remote->id;
- int local_id = loc->id;
int err = -ENOTCONN;
struct socket *sf;
struct sock *ssk;
u32 remote_token;
int addrlen;
- int ifindex;
- u8 flags;
+ /* The userspace PM sent the request too early? */
if (!mptcp_is_fully_established(sk))
goto err_out;
- err = mptcp_subflow_create_socket(sk, loc->family, &sf);
- if (err)
+ err = mptcp_subflow_create_socket(sk, local->addr.family, &sf);
+ if (err) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCREATSKERR);
+ pr_debug("msk=%p local=%d remote=%d create sock error: %d\n",
+ msk, local_id, remote_id, err);
goto err_out;
+ }
ssk = sf->sk;
subflow = mptcp_subflow_ctx(ssk);
@@ -1542,42 +1605,61 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
get_random_bytes(&subflow->local_nonce, sizeof(u32));
} while (!subflow->local_nonce);
- if (local_id)
+ /* if 'IPADDRANY', the ID will be set later, after the routing */
+ if (local->addr.family == AF_INET) {
+ if (!local->addr.addr.s_addr)
+ local_id = -1;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ } else if (sk->sk_family == AF_INET6) {
+ if (ipv6_addr_any(&local->addr.addr6))
+ local_id = -1;
+#endif
+ }
+
+ if (local_id >= 0)
subflow_set_local_id(subflow, local_id);
- mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
- &flags, &ifindex);
subflow->remote_key_valid = 1;
- subflow->remote_key = msk->remote_key;
- subflow->local_key = msk->local_key;
+ subflow->remote_key = READ_ONCE(msk->remote_key);
+ subflow->local_key = READ_ONCE(msk->local_key);
subflow->token = msk->token;
- mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
+ mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family);
addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (addr.ss_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
- ssk->sk_bound_dev_if = ifindex;
+ ssk->sk_bound_dev_if = local->ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
- if (err)
+ if (err) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR);
+ pr_debug("msk=%p local=%d remote=%d bind error: %d\n",
+ msk, local_id, remote_id, err);
goto failed;
+ }
mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
- pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
WRITE_ONCE(subflow->remote_id, remote_id);
subflow->request_join = 1;
- subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
subflow->subflow_id = msk->subflow_id++;
mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
sock_hold(ssk);
list_add_tail(&subflow->node, &msk->conn_list);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
- if (err && err != -EINPROGRESS)
+ if (err && err != -EINPROGRESS) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR);
+ pr_debug("msk=%p local=%d remote=%d connect error: %d\n",
+ msk, local_id, remote_id, err);
goto failed_unlink;
+ }
+
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTX);
/* discard the subflow socket */
mptcp_sock_graft(ssk, sk->sk_socket);
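For illustration, the wildcard-address handling added above (defer the local ID when the PM passed 0.0.0.0 or ::) can be sketched in plain userspace C; the helper below is only an analogue using standard socket headers, not the kernel's mptcp_pm_local type, and the helper name is hypothetical.

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Hypothetical helper: return 1 when the configured local address is the
 * wildcard ("any") address, in which case the subflow's local ID has to be
 * chosen only after the route (and thus the source address) is known. */
static int local_addr_is_any(int family, const char *addr_str)
{
	if (family == AF_INET) {
		struct in_addr a;

		if (inet_pton(AF_INET, addr_str, &a) != 1)
			return -1;
		return a.s_addr == INADDR_ANY;		/* 0.0.0.0 */
	}
	if (family == AF_INET6) {
		struct in6_addr a6;

		if (inet_pton(AF_INET6, addr_str, &a6) != 1)
			return -1;
		return IN6_IS_ADDR_UNSPECIFIED(&a6);	/* :: */
	}
	return -1;
}

int main(void)
{
	printf("0.0.0.0  -> %d\n", local_addr_is_any(AF_INET, "0.0.0.0"));
	printf("::       -> %d\n", local_addr_is_any(AF_INET6, "::"));
	printf("10.0.0.1 -> %d\n", local_addr_is_any(AF_INET, "10.0.0.1"));
	return 0;
}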
@@ -1677,10 +1759,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
* needs it.
* Update ns_tracker to current stack trace and refcounted tracker.
*/
- __netns_tracker_free(net, &sf->sk->ns_tracker, false);
- sf->sk->sk_net_refcnt = 1;
- get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sf->sk);
err = tcp_set_ulp(sf->sk, "mptcp");
if (err)
goto err_free;
@@ -1688,7 +1767,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
release_sock(sf->sk);
- /* the newly created socket really belongs to the owning MPTCP master
+ /* the newly created socket really belongs to the owning MPTCP
* socket, even if for additional subflows the allocation is performed
* by a kernel workqueue. Adjust inode references, so that the
* procfs/diag interfaces really show this one belonging to the correct
@@ -1699,7 +1778,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
subflow = mptcp_subflow_ctx(sf->sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
*new_sock = sf;
sock_hold(sk);
@@ -1728,7 +1807,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
INIT_LIST_HEAD(&ctx->node);
INIT_LIST_HEAD(&ctx->delegated_node);
- pr_debug("subflow=%p", ctx);
+ pr_debug("subflow=%p\n", ctx);
ctx->tcp_sock = sk;
WRITE_ONCE(ctx->local_id, -1);
@@ -1879,7 +1958,7 @@ static int subflow_ulp_init(struct sock *sk)
goto out;
}
- pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
+ pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
tp->is_mptcp = 1;
ctx->icsk_af_ops = icsk->icsk_af_ops;
@@ -1954,7 +2033,6 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->tcp_state_change = old_ctx->tcp_state_change;
new_ctx->tcp_error_report = old_ctx->tcp_error_report;
new_ctx->rel_write_seq = 1;
- new_ctx->tcp_sock = newsk;
if (subflow_req->mp_capable) {
/* see comments in subflow_syn_recv_sock(), MPTCP connection
@@ -1971,9 +2049,10 @@ static void subflow_ulp_clone(const struct request_sock *req,
} else if (subflow_req->mp_join) {
new_ctx->ssn_offset = subflow_req->ssn_offset;
new_ctx->mp_join = 1;
- new_ctx->fully_established = 1;
+ WRITE_ONCE(new_ctx->fully_established, true);
new_ctx->remote_key_valid = 1;
new_ctx->backup = subflow_req->backup;
+ new_ctx->request_bkup = subflow_req->request_bkup;
WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index bfff53e668da..4fc39fa2e262 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -52,14 +52,19 @@ static struct mptcp_subflow_context *build_ctx(struct kunit *test)
static struct mptcp_sock *build_msk(struct kunit *test)
{
struct mptcp_sock *msk;
+ struct sock *sk;
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
sock_net_set((struct sock *)msk, &init_net);
+ sk = (struct sock *)msk;
+
/* be sure the token helpers can dereference sk->sk_prot */
- ((struct sock *)msk)->sk_prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
+ sk->sk_protocol = IPPROTO_MPTCP;
+
return msk;
}
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 374412ed780b..4e0842df5234 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -289,6 +289,7 @@ enum {
ncsi_dev_state_config_sp = 0x0301,
ncsi_dev_state_config_cis,
ncsi_dev_state_config_oem_gma,
+ ncsi_dev_state_config_apply_mac,
ncsi_dev_state_config_clear_vids,
ncsi_dev_state_config_svf,
ncsi_dev_state_config_ev,
@@ -322,9 +323,11 @@ struct ncsi_dev_priv {
#define NCSI_DEV_RESHUFFLE 4
#define NCSI_DEV_RESET 8 /* Reset state of NC */
unsigned int gma_flag; /* OEM GMA flag */
+ struct sockaddr pending_mac; /* MAC address received from GMA */
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
+ unsigned int channel_probe_id;/* Current channel ID during probe */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
@@ -343,6 +346,7 @@ struct ncsi_dev_priv {
bool multi_package; /* Enable multiple packages */
bool mlx_multi_host; /* Enable multi host Mellanox */
u32 package_whitelist; /* Packages to configure */
+ unsigned char channel_count; /* Num of channels to probe */
};
struct ncsi_cmd_arg {
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 745c788f1d1d..7891a537bddd 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_suspend_gls:
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
+ nca.channel = ndp->channel_probe_id;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+ ndp->channel_probe_id++;
- nd->state = ncsi_dev_state_suspend_dcnt;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_suspend_dcnt;
}
break;
@@ -1036,7 +1038,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
: ncsi_dev_state_config_clear_vids;
break;
case ncsi_dev_state_config_oem_gma:
- nd->state = ncsi_dev_state_config_clear_vids;
+ nd->state = ncsi_dev_state_config_apply_mac;
nca.package = np->id;
nca.channel = nc->id;
@@ -1048,10 +1050,22 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_OEM;
ret = ncsi_gma_handler(&nca, nc->version.mf_id);
}
- if (ret < 0)
+ if (ret < 0) {
+ nd->state = ncsi_dev_state_config_clear_vids;
schedule_work(&ndp->work);
+ }
break;
+ case ncsi_dev_state_config_apply_mac:
+ rtnl_lock();
+ ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
+ rtnl_unlock();
+ if (ret < 0)
+ netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");
+
+ nd->state = ncsi_dev_state_config_clear_vids;
+
+ fallthrough;
case ncsi_dev_state_config_clear_vids:
case ncsi_dev_state_config_svf:
case ncsi_dev_state_config_ev:
@@ -1345,7 +1359,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
- struct ncsi_channel *nc;
struct ncsi_cmd_arg nca;
unsigned char index;
int ret;
@@ -1372,6 +1385,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_package;
break;
case ncsi_dev_state_probe_package:
+ if (ndp->package_probe_id >= 8) {
+ /* Last package probed, finishing */
+ ndp->flags |= NCSI_DEV_PROBED;
+ break;
+ }
+
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_SP;
@@ -1423,23 +1442,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_cis;
break;
- case ncsi_dev_state_probe_cis:
- ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
- /* Clear initial state */
- nca.type = NCSI_PKT_CMD_CIS;
- nca.package = ndp->active_package->id;
- for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
- nca.channel = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
- nd->state = ncsi_dev_state_probe_gvi;
- if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
- nd->state = ncsi_dev_state_probe_keep_phy;
- break;
case ncsi_dev_state_probe_keep_phy:
ndp->pending_req_num = 1;
@@ -1452,14 +1454,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_gvi;
break;
+ case ncsi_dev_state_probe_cis:
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
np = ndp->active_package;
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
- /* Retrieve version, capability or link status */
- if (nd->state == ncsi_dev_state_probe_gvi)
+ /* Clear initial state, or retrieve version, capability or link status */
+ if (nd->state == ncsi_dev_state_probe_cis)
+ nca.type = NCSI_PKT_CMD_CIS;
+ else if (nd->state == ncsi_dev_state_probe_gvi)
nca.type = NCSI_PKT_CMD_GVI;
else if (nd->state == ncsi_dev_state_probe_gc)
nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1472,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
+ nca.channel = ndp->channel_probe_id;
- if (nd->state == ncsi_dev_state_probe_gvi)
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ if (nd->state == ncsi_dev_state_probe_cis) {
+ nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ } else if (nd->state == ncsi_dev_state_probe_gvi) {
nd->state = ncsi_dev_state_probe_gc;
- else if (nd->state == ncsi_dev_state_probe_gc)
+ } else if (nd->state == ncsi_dev_state_probe_gc) {
nd->state = ncsi_dev_state_probe_gls;
- else
+ } else {
+ nd->state = ncsi_dev_state_probe_cis;
+ ndp->channel_probe_id++;
+ }
+
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe_dp;
+ }
break;
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
@@ -1492,13 +1507,8 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
if (ret)
goto error;
- /* Probe next package */
+ /* Probe next package after receiving response */
ndp->package_probe_id++;
- if (ndp->package_probe_id >= 8) {
- /* Probe finished */
- ndp->flags |= NCSI_DEV_PROBED;
- break;
- }
nd->state = ncsi_dev_state_probe_package;
ndp->active_package = NULL;
break;
@@ -1780,6 +1790,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
ndp->requests[i].ndp = ndp;
timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
+ ndp->channel_count = NCSI_RESERVED_CHANNEL;
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1824,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
if (!(ndp->flags & NCSI_DEV_PROBED)) {
ndp->package_probe_id = 0;
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
@@ -1955,6 +1967,8 @@ void ncsi_unregister_dev(struct ncsi_dev *nd)
list_del_rcu(&ndp->node);
spin_unlock_irqrestore(&ncsi_dev_lock, flags);
+ disable_work_sync(&ndp->work);
+
kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
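A minimal userspace sketch of the reworked probe sequencing above, assuming channel_count has already been learned from the Get Capabilities response (it defaults to NCSI_RESERVED_CHANNEL until then) and omitting the optional KEEP_PHY OEM hop; the state names mirror the driver, but the values and loop are illustrative only.

#include <stdio.h>

enum probe_state { ST_CIS, ST_GVI, ST_GC, ST_GLS, ST_DP };

static const char *cmd_name[] = { "CIS", "GVI", "GC", "GLS" };

/* One command in flight at a time: the CIS->GVI->GC->GLS cycle repeats per
 * channel until channel_probe_id reaches channel_count, then the package is
 * deselected and probing moves on. */
int main(void)
{
	unsigned int channel_count = 2;	/* reported by the GC response */
	unsigned int channel_probe_id = 0;
	enum probe_state state = ST_CIS;

	while (state != ST_DP) {
		printf("send %s to channel %u\n",
		       cmd_name[state], channel_probe_id);

		if (state == ST_GLS) {
			state = ST_CIS;
			channel_probe_id++;
		} else {
			state++;
		}

		if (channel_probe_id == channel_count) {
			channel_probe_id = 0;
			state = ST_DP;	/* deselect package, move on */
		}
	}
	printf("send DP, probe next package\n");
	return 0;
}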
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index bee290d0f48b..4a8ce2949fae 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -628,16 +628,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
{
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct sockaddr *saddr = &ndp->pending_mac;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_oem_pkt *rsp;
- struct sockaddr saddr;
u32 mac_addr_off = 0;
- int ret = 0;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
- saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
if (mfr_id == NCSI_OEM_MFR_BCM_ID)
mac_addr_off = BCM_MAC_ADDR_OFFSET;
@@ -646,22 +644,17 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
mac_addr_off = INTEL_MAC_ADDR_OFFSET;
- memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
+ saddr->sa_family = ndev->type;
+ memcpy(saddr->sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
- eth_addr_inc((u8 *)saddr.sa_data);
- if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ eth_addr_inc((u8 *)saddr->sa_data);
+ if (!is_valid_ether_addr((const u8 *)saddr->sa_data))
return -ENXIO;
/* Set the flag for GMA command which should only be called once */
ndp->gma_flag = 1;
- rtnl_lock();
- ret = dev_set_mac_address(ndev, &saddr, NULL);
- rtnl_unlock();
- if (ret < 0)
- netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
-
- return ret;
+ return 0;
}
/* Response handler for Mellanox card */
@@ -795,12 +788,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
struct ncsi_rsp_gc_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
+ struct ncsi_package *np;
size_t size;
/* Find the channel */
rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
- NULL, &nc);
+ &np, &nc);
if (!nc)
return -ENODEV;
@@ -835,6 +829,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
*/
nc->vlan_filter.bitmap = U64_MAX;
nc->vlan_filter.n_vids = rsp->vlan_cnt;
+ np->ndp->channel_count = rsp->channel_cnt;
return 0;
}
@@ -1094,14 +1089,12 @@ static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
{
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct sockaddr *saddr = &ndp->pending_mac;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_gmcma_pkt *rsp;
- struct sockaddr saddr;
- int ret = -1;
int i;
rsp = (struct ncsi_rsp_gmcma_pkt *)skb_network_header(nr->rsp);
- saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev_info(ndev, "NCSI: Received %d provisioned MAC addresses\n",
@@ -1113,20 +1106,20 @@ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
rsp->addresses[i][4], rsp->addresses[i][5]);
}
+ saddr->sa_family = ndev->type;
for (i = 0; i < rsp->address_count; i++) {
- memcpy(saddr.sa_data, &rsp->addresses[i], ETH_ALEN);
- ret = ndev->netdev_ops->ndo_set_mac_address(ndev, &saddr);
- if (ret < 0) {
+ if (!is_valid_ether_addr(rsp->addresses[i])) {
netdev_warn(ndev, "NCSI: Unable to assign %pM to device\n",
- saddr.sa_data);
+ rsp->addresses[i]);
continue;
}
- netdev_warn(ndev, "NCSI: Set MAC address to %pM\n", saddr.sa_data);
+ memcpy(saddr->sa_data, rsp->addresses[i], ETH_ALEN);
+ netdev_warn(ndev, "NCSI: Will set MAC address to %pM\n", saddr->sa_data);
break;
}
- ndp->gma_flag = ret == 0;
- return ret;
+ ndp->gma_flag = 1;
+ return 0;
}
static struct ncsi_rsp_handler {
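The GMA/GMCMA handlers above now only stage the address in ndp->pending_mac and leave the actual dev_set_mac_address() call to the config state machine under rtnl_lock. A small userspace sketch of the staging step follows, with eth_addr_inc() and is_valid_ether_addr() re-implemented here purely for illustration (the kernel versions live in etherdevice.h); the increment-the-BMC-MAC convention applies to the Broadcom and Intel offsets shown in the diff.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void eth_addr_inc(uint8_t *addr)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; i--)
		if (++addr[i] != 0)
			break;	/* stop once there is no carry */
}

static int is_valid_ether_addr(const uint8_t *addr)
{
	int i;

	if (addr[0] & 1)	/* multicast bit set: not a unicast MAC */
		return 0;
	for (i = 0; i < ETH_ALEN; i++)
		if (addr[i])
			return 1;
	return 0;		/* all-zero address */
}

int main(void)
{
	/* pretend this is rsp->data at the vendor-specific offset */
	uint8_t rsp_data[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };
	uint8_t pending_mac[ETH_ALEN];

	memcpy(pending_mac, rsp_data, ETH_ALEN);
	eth_addr_inc(pending_mac);	/* BMC MAC + 1 = host MAC */

	if (!is_valid_ether_addr(pending_mac))
		return 1;

	printf("pending MAC %02x:%02x:%02x:%02x:%02x:%02x (applied later under rtnl)\n",
	       pending_mac[0], pending_mac[1], pending_mac[2],
	       pending_mac[3], pending_mac[4], pending_mac[5]);
	return 0;
}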
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 441d1f134110..df2dc21304ef 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -818,7 +818,7 @@ config NETFILTER_XT_TARGET_AUDIT
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
config NETFILTER_XT_TARGET_CT
tristate '"CT" target support'
depends on NF_CONNTRACK
- depends on IP_NF_RAW || IP6_NF_RAW
+ depends on IP_NF_RAW || IP6_NF_RAW || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CT' target, which allows you to specify initial
@@ -880,7 +880,7 @@ config NETFILTER_XT_TARGET_CT
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `DSCP' target, which allows you to manipulate
@@ -896,7 +896,7 @@ config NETFILTER_XT_TARGET_DSCP
config NETFILTER_XT_TARGET_HL
tristate '"HL" hoplimit target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds the "HL" (for IPv6) and "TTL" (for IPv4)
@@ -1080,7 +1080,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
select NF_TPROXY_IPV4
@@ -1147,7 +1147,7 @@ config NETFILTER_XT_TARGET_TCPMSS
config NETFILTER_XT_TARGET_TCPOPTSTRIP
tristate '"TCPOPTSTRIP" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a "TCPOPTSTRIP" target, which allows you to strip
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index d4958e7e7631..f0aa4d7ef499 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -101,7 +101,7 @@ endif
endif
ifdef CONFIG_NFT_CT
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
nf_tables-objs += nft_ct_fast.o
endif
endif
@@ -142,8 +142,13 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
# flow table infrastructure
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
- nf_flow_table_offload.o
+ nf_flow_table_offload.o nf_flow_table_xdp.o
nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o
+ifeq ($(CONFIG_NF_FLOW_TABLE),m)
+nf_flow_table-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_flow_table_bpf.o
+else ifeq ($(CONFIG_NF_FLOW_TABLE),y)
+nf_flow_table-$(CONFIG_DEBUG_INFO_BTF) += nf_flow_table_bpf.o
+endif
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 3126911f5042..b9f551f02c81 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -655,11 +655,9 @@ void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
const struct nf_hook_entries *e)
{
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
int ret;
- INIT_LIST_HEAD(&sublist);
-
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
ret = nf_hook_slow(skb, state, e, 0);
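The LIST_HEAD() conversion above simply initialises the list head at its definition; a standalone sketch with simplified stand-ins for the kernel macros (the real INIT_LIST_HEAD additionally uses WRITE_ONCE) shows the equivalence.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	LIST_HEAD(a);			/* initialised at definition */
	struct list_head b;

	INIT_LIST_HEAD(&b);		/* two-step form replaced in the patch */

	printf("a empty: %d, b empty: %d\n", a.next == &a, b.next == &b);
	return 0;
}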
@@ -815,12 +813,21 @@ int __init netfilter_init(void)
if (ret < 0)
goto err;
+#ifdef CONFIG_LWTUNNEL
+ ret = netfilter_lwtunnel_init();
+ if (ret < 0)
+ goto err_lwtunnel_pernet;
+#endif
ret = netfilter_log_init();
if (ret < 0)
- goto err_pernet;
+ goto err_log_pernet;
return 0;
-err_pernet:
+err_log_pernet:
+#ifdef CONFIG_LWTUNNEL
+ netfilter_lwtunnel_fini();
+err_lwtunnel_pernet:
+#endif
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e4fa00abde6a..5988b9bb9029 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -163,11 +163,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
- if (ip > ip_to) {
+ if (ip > ip_to)
swap(ip, ip_to);
- if (ip < map->first_ip)
- return -IPSET_ERR_BITMAP_RANGE;
- }
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
@@ -178,7 +175,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
ip_to = ip;
}
- if (ip_to > map->last_ip)
+ if (ip < map->first_ip || ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
for (; !before(ip_to, ip); ip += map->hosts) {
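The bitmap_ip change above moves the lower-bound check so it applies after both the swap and the CIDR-expansion paths; a tiny sketch of the unified validation (host-order values, hypothetical helper name) is shown below.

#include <stdio.h>
#include <stdint.h>

/* After the ip..ip_to pair has been normalised (swapped if reversed) or
 * expanded from a CIDR prefix, both ends must lie inside the bitmap's
 * configured first_ip..last_ip window. */
static int range_ok(uint32_t ip, uint32_t ip_to,
		    uint32_t first_ip, uint32_t last_ip)
{
	if (ip > ip_to) {		/* normalise a reversed pair */
		uint32_t tmp = ip;

		ip = ip_to;
		ip_to = tmp;
	}
	return ip >= first_ip && ip_to <= last_ip;
}

int main(void)
{
	/* set created for 10.0.0.0..10.0.0.255 (host-order values) */
	uint32_t first = 0x0a000000, last = 0x0a0000ff;

	printf("%d\n", range_ok(0x0a000010, 0x0a000020, first, last)); /* 1 */
	printf("%d\n", range_ok(0x0a000020, 0x09ffffff, first, last)); /* 0: low end escapes */
	return 0;
}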
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 3184cc6be4c9..cc20e6d56807 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
-#define ip_set_dereference(p) \
- rcu_dereference_protected(p, \
+#define ip_set_dereference(inst) \
+ rcu_dereference_protected((inst)->ip_set_list, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
- lockdep_is_held(&ip_set_ref_lock))
+ lockdep_is_held(&ip_set_ref_lock) || \
+ (inst)->is_deleted)
#define ip_set(inst, id) \
- ip_set_dereference((inst)->ip_set_list)[id]
+ ip_set_dereference(inst)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
#define ip_set_dereference_nfnl(p) \
@@ -103,14 +104,19 @@ find_set_type(const char *name, u8 family, u8 revision)
static bool
load_settype(const char *name)
{
+ if (!try_module_get(THIS_MODULE))
+ return false;
+
nfnl_unlock(NFNL_SUBSYS_IPSET);
pr_debug("try to load ip_set_%s\n", name);
if (request_module("ip_set_%s", name) < 0) {
pr_warn("Can't find ip_set type %s\n", name);
nfnl_lock(NFNL_SUBSYS_IPSET);
+ module_put(THIS_MODULE);
return false;
}
nfnl_lock(NFNL_SUBSYS_IPSET);
+ module_put(THIS_MODULE);
return true;
}
@@ -1133,7 +1139,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
- tmp = ip_set_dereference(inst->ip_set_list);
+ tmp = ip_set_dereference(inst);
memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
@@ -1172,23 +1178,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
.len = IPSET_MAXNAMELEN - 1 },
};
+/* In order to return quickly when destroying a single set, it is split
+ * into two stages:
+ * - Cancel garbage collector
+ * - Destroy the set itself via call_rcu()
+ */
+
static void
-ip_set_destroy_set(struct ip_set *set)
+ip_set_destroy_set_rcu(struct rcu_head *head)
{
- pr_debug("set: %s\n", set->name);
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
- /* Must call it without holding any lock */
set->variant->destroy(set);
module_put(set->type->me);
kfree(set);
}
static void
-ip_set_destroy_set_rcu(struct rcu_head *head)
+_destroy_all_sets(struct ip_set_net *inst)
{
- struct ip_set *set = container_of(head, struct ip_set, rcu);
+ struct ip_set *set;
+ ip_set_id_t i;
+ bool need_wait = false;
- ip_set_destroy_set(set);
+ /* First cancel gc's: set:list sets are flushed as well */
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ set->variant->cancel_gc(set);
+ if (set->type->features & IPSET_TYPE_NAME)
+ need_wait = true;
+ }
+ }
+ /* Must wait for flush to be really finished */
+ if (need_wait)
+ rcu_barrier();
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ ip_set(inst, i) = NULL;
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+ }
+ }
}
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
@@ -1202,11 +1235,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
-
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
* External systems (i.e. xt_set) must call
- * ip_set_put|get_nfnl_* functions, that way we
+ * ip_set_nfnl_get_* functions, that way we
* can safely check references here.
*
* list:set timer can only decrement the reference
@@ -1214,8 +1246,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
* without holding the lock.
*/
if (!attr[IPSET_ATTR_SETNAME]) {
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
@@ -1226,15 +1256,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
}
inst->is_destroyed = true;
read_unlock_bh(&ip_set_ref_lock);
- for (i = 0; i < inst->ip_set_max; i++) {
- s = ip_set(inst, i);
- if (s) {
- ip_set(inst, i) = NULL;
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
- ip_set_destroy_set(s);
- }
- }
+ _destroy_all_sets(inst);
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
@@ -1255,12 +1277,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
if (features & IPSET_TYPE_NAME) {
/* Must wait for flush to be really finished */
rcu_barrier();
}
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
@@ -2365,30 +2387,25 @@ ip_set_net_init(struct net *net)
}
static void __net_exit
-ip_set_net_exit(struct net *net)
+ip_set_net_pre_exit(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
- struct ip_set *set = NULL;
- ip_set_id_t i;
-
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+}
- nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < inst->ip_set_max; i++) {
- set = ip_set(inst, i);
- if (set) {
- ip_set(inst, i) = NULL;
- set->variant->cancel_gc(set);
- ip_set_destroy_set(set);
- }
- }
- nfnl_unlock(NFNL_SUBSYS_IPSET);
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ _destroy_all_sets(inst);
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
.init = ip_set_net_init,
+ .pre_exit = ip_set_net_pre_exit,
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 6c3f28bc59b3..db794fe1300e 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
- int ret;
+ int ret = 0;
- list_for_each_entry(e, &map->members, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (d->before == 0) {
ret = 1;
+ goto out;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
} else {
ret = prev && prev->id == d->refid;
}
- return ret;
+ goto out;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
static void
@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* Find where to add the new entry */
n = prev = next = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
- struct set_elem *e, *next, *prev = NULL;
+ struct set_elem *e, *n, *next, *prev = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_safe(e, n, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -424,14 +428,8 @@ static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
- struct set_elem *e, *n;
- list_for_each_entry_safe(e, n, &map->members, list) {
- list_del(&e->list);
- ip_set_put_byindex(map->net, e->id);
- ip_set_ext_destroy(set, e);
- kfree(e);
- }
+ WARN_ON_ONCE(!list_empty(&map->members));
kfree(map);
set->data = NULL;
@@ -549,6 +547,9 @@ list_set_cancel_gc(struct ip_set *set)
if (SET_WITH_TIMEOUT(set))
timer_shutdown_sync(&map->gc);
+
+ /* Flush list to drop references to other ipsets */
+ list_set_flush(set);
}
static const struct ip_set_type_variant set_variant = {
@@ -610,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
return true;
}
+static struct lock_class_key list_set_lockdep_key;
+
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
@@ -626,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
+ lockdep_set_class(&set->lock, &list_set_lockdep_key);
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a743db073887..20a1727e2457 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1046,28 +1046,35 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
struct seq_net_private p;
- struct hlist_head *l;
+ unsigned int bucket;
+ unsigned int skip_elems;
};
-static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
+static void *ip_vs_conn_array(struct ip_vs_iter_state *iter)
{
int idx;
struct ip_vs_conn *cp;
- struct ip_vs_iter_state *iter = seq->private;
- for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+ for (idx = iter->bucket; idx < ip_vs_conn_tab_size; idx++) {
+ unsigned int skip = 0;
+
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
/* __ip_vs_conn_get() is not needed by
* ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
*/
- if (pos-- == 0) {
- iter->l = &ip_vs_conn_tab[idx];
+ if (skip >= iter->skip_elems) {
+ iter->bucket = idx;
return cp;
}
+
+ ++skip;
}
+
+ iter->skip_elems = 0;
cond_resched_rcu();
}
+ iter->bucket = idx;
return NULL;
}
@@ -1076,9 +1083,14 @@ static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ip_vs_iter_state *iter = seq->private;
- iter->l = NULL;
rcu_read_lock();
- return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
+ if (*pos == 0) {
+ iter->skip_elems = 0;
+ iter->bucket = 0;
+ return SEQ_START_TOKEN;
+ }
+
+ return ip_vs_conn_array(iter);
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -1086,28 +1098,22 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *e;
- struct hlist_head *l = iter->l;
- int idx;
++*pos;
if (v == SEQ_START_TOKEN)
- return ip_vs_conn_array(seq, 0);
+ return ip_vs_conn_array(iter);
/* more on same hash chain? */
e = rcu_dereference(hlist_next_rcu(&cp->c_list));
- if (e)
+ if (e) {
+ iter->skip_elems++;
return hlist_entry(e, struct ip_vs_conn, c_list);
-
- idx = l - ip_vs_conn_tab;
- while (++idx < ip_vs_conn_tab_size) {
- hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
- iter->l = &ip_vs_conn_tab[idx];
- return cp;
- }
- cond_resched_rcu();
}
- iter->l = NULL;
- return NULL;
+
+ iter->skip_elems = 0;
+ iter->bucket++;
+
+ return ip_vs_conn_array(iter);
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
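The /proc iterator now resumes from a bucket index plus a skip count instead of a cached chain pointer, so the saved state stays meaningful even if entries disappear between reads. A self-contained userspace sketch of the same resumable walk over an array of singly linked lists (simplified: plain pointers, no RCU):

#include <stdio.h>

#define TAB_SIZE 4

struct node {
	int val;
	struct node *next;
};

struct iter_state {
	unsigned int bucket;
	unsigned int skip_elems;
};

/* Restart the scan at the remembered bucket and skip the entries already
 * emitted from it, mirroring the new ip_vs_conn_array() logic. */
static struct node *iter_array(struct node *tab[], struct iter_state *it)
{
	unsigned int idx;

	for (idx = it->bucket; idx < TAB_SIZE; idx++) {
		unsigned int skip = 0;
		struct node *n;

		for (n = tab[idx]; n; n = n->next) {
			if (skip >= it->skip_elems) {
				it->bucket = idx;
				return n;
			}
			++skip;
		}
		it->skip_elems = 0;
	}
	it->bucket = idx;
	return NULL;
}

int main(void)
{
	struct node b1b = { 12, NULL }, b1a = { 11, &b1b }, b3a = { 31, NULL };
	struct node *tab[TAB_SIZE] = { NULL, &b1a, NULL, &b3a };
	struct iter_state it = { 0, 0 };
	struct node *n;

	while ((n = iter_array(tab, &it))) {
		printf("%d\n", n->val);
		it.skip_elems++;	/* next call: skip one more in this bucket */
	}
	return 0;
}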
@@ -1495,8 +1501,8 @@ int __init ip_vs_conn_init(void)
max_avail -= 2; /* ~4 in hash row */
max_avail -= 1; /* IPVS up to 1/2 of mem */
max_avail -= order_base_2(sizeof(struct ip_vs_conn));
- max = clamp(max, min, max_avail);
- ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
+ max = clamp(max_avail, min, max);
+ ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
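clamp(val, lo, hi) bounds its first argument to [lo, hi], so the reordering above makes max_avail the value being clamped rather than the upper bound; a plain-C equivalent (a sketch, not the kernel macro) shows why that matters on very small systems where the memory-derived estimate drops below the static minimum.

#include <stdio.h>

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	int min = 8, max = 27, max_avail = 5;	/* tiny-memory estimate below the floor */

	/* new code: bound the memory-derived estimate by the static limits */
	printf("clamp(max_avail, min, max) = %d\n",
	       clamp_int(max_avail, min, max));	/* 8: the floor is respected */

	/* old code: bounded the static limit by the estimate instead */
	printf("clamp(max, min, max_avail) = %d\n",
	       clamp_int(max, min, max_avail));	/* 5: drops below min */
	return 0;
}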
@@ -1511,9 +1517,7 @@ int __init ip_vs_conn_init(void)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
- ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
- sizeof(struct ip_vs_conn), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ ip_vs_conn_cachep = KMEM_CACHE(ip_vs_conn, SLAB_HWCACHE_ALIGN);
if (!ip_vs_conn_cachep) {
kvfree(ip_vs_conn_tab);
return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a2c16b501087..c7a8a08b7308 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1550,6 +1550,7 @@ static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
if (!dest)
goto unk;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
+ IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 type;
/* Only support version 0 and C (csum) */
@@ -1560,7 +1561,10 @@ static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
if (type != htons(ETH_P_IP))
goto unk;
*proto = IPPROTO_IPIP;
- return gre_calc_hlen(gre_flags_to_tnl_flags(greh->flags));
+
+ gre_flags_to_tnl_flags(flags, greh->flags);
+
+ return gre_calc_hlen(flags);
}
unk:
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 143a341bbc0a..0633276d96bf 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -94,6 +94,7 @@ static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
int availmem;
+ int amemthresh;
int nomem;
int to_change = -1;
@@ -105,7 +106,8 @@ static void update_defense_level(struct netns_ipvs *ipvs)
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
- nomem = (availmem < ipvs->sysctl_amemthresh);
+ amemthresh = max(READ_ONCE(ipvs->sysctl_amemthresh), 0);
+ nomem = (availmem < amemthresh);
local_bh_disable();
@@ -145,9 +147,8 @@ static void update_defense_level(struct netns_ipvs *ipvs)
break;
case 1:
if (nomem) {
- ipvs->drop_rate = ipvs->drop_counter
- = ipvs->sysctl_amemthresh /
- (ipvs->sysctl_amemthresh-availmem);
+ ipvs->drop_counter = amemthresh / (amemthresh - availmem);
+ ipvs->drop_rate = ipvs->drop_counter;
ipvs->sysctl_drop_packet = 2;
} else {
ipvs->drop_rate = 0;
@@ -155,9 +156,8 @@ static void update_defense_level(struct netns_ipvs *ipvs)
break;
case 2:
if (nomem) {
- ipvs->drop_rate = ipvs->drop_counter
- = ipvs->sysctl_amemthresh /
- (ipvs->sysctl_amemthresh-availmem);
+ ipvs->drop_counter = amemthresh / (amemthresh - availmem);
+ ipvs->drop_rate = ipvs->drop_counter;
} else {
ipvs->drop_rate = 0;
ipvs->sysctl_drop_packet = 1;
@@ -1459,18 +1459,18 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
if (ret < 0)
goto out_err;
- /* Bind the ct retriever */
- RCU_INIT_POINTER(svc->pe, pe);
- pe = NULL;
-
/* Update the virtual service counters */
if (svc->port == FTPPORT)
atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
atomic_inc(&ipvs->nullsvc_counter);
- if (svc->pe && svc->pe->conn_out)
+ if (pe && pe->conn_out)
atomic_inc(&ipvs->conn_out_counter);
+ /* Bind the ct retriever */
+ RCU_INIT_POINTER(svc->pe, pe);
+ pe = NULL;
+
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
ipvs->num_services++;
@@ -1846,7 +1846,7 @@ static int ip_vs_zero_all(struct netns_ipvs *ipvs)
#ifdef CONFIG_SYSCTL
static int
-proc_do_defense_mode(struct ctl_table *table, int write,
+proc_do_defense_mode(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -1873,7 +1873,7 @@ proc_do_defense_mode(struct ctl_table *table, int write,
}
static int
-proc_do_sync_threshold(struct ctl_table *table, int write,
+proc_do_sync_threshold(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -1901,7 +1901,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
}
static int
-proc_do_sync_ports(struct ctl_table *table, int write,
+proc_do_sync_ports(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
@@ -1924,7 +1924,8 @@ proc_do_sync_ports(struct ctl_table *table, int write,
return rc;
}
-static int ipvs_proc_est_cpumask_set(struct ctl_table *table, void *buffer)
+static int ipvs_proc_est_cpumask_set(const struct ctl_table *table,
+ void *buffer)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
@@ -1962,8 +1963,8 @@ out:
return ret;
}
-static int ipvs_proc_est_cpumask_get(struct ctl_table *table, void *buffer,
- size_t size)
+static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
+ void *buffer, size_t size)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
@@ -1983,7 +1984,7 @@ static int ipvs_proc_est_cpumask_get(struct ctl_table *table, void *buffer,
return ret;
}
-static int ipvs_proc_est_cpulist(struct ctl_table *table, int write,
+static int ipvs_proc_est_cpulist(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -2010,7 +2011,7 @@ static int ipvs_proc_est_cpulist(struct ctl_table *table, int write,
return ret;
}
-static int ipvs_proc_est_nice(struct ctl_table *table, int write,
+static int ipvs_proc_est_nice(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -2040,7 +2041,7 @@ static int ipvs_proc_est_nice(struct ctl_table *table, int write,
return ret;
}
-static int ipvs_proc_run_estimation(struct ctl_table *table, int write,
+static int ipvs_proc_run_estimation(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct netns_ipvs *ipvs = table->extra2;
@@ -2263,7 +2264,6 @@ static struct ctl_table vs_vars[] = {
.proc_handler = proc_dointvec,
},
#endif
- { }
};
#endif
@@ -3091,12 +3091,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
- int size;
+ size_t size;
get = (struct ip_vs_get_services *)arg;
size = struct_size(get, entrytable, get->num_services);
if (*len != size) {
- pr_err("length: %u != %u\n", *len, size);
+ pr_err("length: %u != %zu\n", *len, size);
ret = -EINVAL;
goto out;
}
@@ -3132,12 +3132,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
- int size;
+ size_t size;
get = (struct ip_vs_get_dests *)arg;
size = struct_size(get, entrytable, get->num_dests);
if (*len != size) {
- pr_err("length: %u != %u\n", *len, size);
+ pr_err("length: %u != %zu\n", *len, size);
ret = -EINVAL;
goto out;
}
@@ -3662,10 +3662,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_be16(nla_port);
- if (nla_addr_family)
- udest->af = nla_get_u16(nla_addr_family);
- else
- udest->af = 0;
+ udest->af = nla_get_u16_default(nla_addr_family, 0);
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
@@ -4270,6 +4267,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
struct ctl_table *tbl;
int idx, ret;
size_t ctl_table_size = ARRAY_SIZE(vs_vars);
+ bool unpriv = net->user_ns != &init_user_ns;
atomic_set(&ipvs->dropentry, 0);
spin_lock_init(&ipvs->dropentry_lock);
@@ -4284,12 +4282,6 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- tbl[0].procname = NULL;
- ctl_table_size = 0;
- }
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
@@ -4315,10 +4307,17 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
ipvs->sysctl_sync_ports = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ports;
tbl[idx++].data = &ipvs->sysctl_sync_persist_mode;
+
ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
+ if (unpriv)
+ tbl[idx].mode = 0444;
tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
+
ipvs->sysctl_sync_sock_size = 0;
+ if (unpriv)
+ tbl[idx].mode = 0444;
tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
+
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_sloppy_tcp;
@@ -4341,15 +4340,22 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+
ipvs->sysctl_run_estimation = 1;
+ if (unpriv)
+ tbl[idx].mode = 0444;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_run_estimation;
ipvs->est_cpulist_valid = 0;
+ if (unpriv)
+ tbl[idx].mode = 0444;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_est_cpulist;
ipvs->sysctl_est_nice = IPVS_EST_NICE;
+ if (unpriv)
+ tbl[idx].mode = 0444;
tbl[idx].extra2 = ipvs;
tbl[idx++].data = &ipvs->sysctl_est_nice;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index f53899d12416..d8a284999544 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -35,7 +35,7 @@
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/tcp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/ip_vs.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 8ceec7a2fa8f..2423513d701d 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -123,7 +123,6 @@ static struct ctl_table vs_vars_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
#endif
@@ -563,10 +562,8 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- ipvs->lblc_ctl_table[0].procname = NULL;
+ if (net->user_ns != &init_user_ns)
vars_table_size = 0;
- }
} else
ipvs->lblc_ctl_table = vs_vars_table;
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 0fb64707213f..cdb1d4bf6761 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -294,7 +294,6 @@ static struct ctl_table vs_vars_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { }
};
#endif
@@ -749,10 +748,8 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
return -ENOMEM;
/* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- ipvs->lblcr_ctl_table[0].procname = NULL;
+ if (net->user_ns != &init_user_ns)
vars_table_size = 0;
- }
} else
ipvs->lblcr_ctl_table = vs_vars_table;
ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f100da4ba3bc..a9fd1d3fc2cb 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -340,7 +340,7 @@ void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
int __init ip_vs_protocol_init(void)
{
- char protocols[64];
+ char protocols[64] = { 0 };
#define REGISTER_PROTOCOL(p) \
do { \
register_ip_vs_protocol(p); \
@@ -348,8 +348,6 @@ int __init ip_vs_protocol_init(void)
strcat(protocols, (p)->name); \
} while (0)
- protocols[0] = '\0';
- protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index a0921adc31a9..83e452916403 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (sctph->source != cp->vport || payload_csum ||
skb->ip_summed == CHECKSUM_PARTIAL) {
sctph->source = cp->vport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
(skb->ip_summed == CHECKSUM_PARTIAL &&
!(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
sctph->dest = cp->dport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index be74c0906dda..3402675bf521 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -51,7 +51,7 @@
#include <linux/kernel.h>
#include <linux/sched/signal.h>
-#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
+#include <linux/unaligned.h> /* Used for ntoh_seq and hton_seq */
#include <net/ip.h>
#include <net/sock.h>
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 65e0259178da..3313bceb6cc9 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -180,7 +180,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
(addr_type & IPV6_ADDR_LOOPBACK);
old_rt_is_local = __ip_vs_is_local_route6(
- (struct rt6_info *)skb_dst(skb));
+ dst_rt6_info(skb_dst(skb)));
} else
#endif
{
@@ -318,7 +318,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
- rt = (struct rtable *) dest_dst->dst_cache;
+ rt = dst_rtable(dest_dst->dst_cache);
else {
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
@@ -390,10 +390,10 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
- __be16 tflags = 0;
+ IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
- tflags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tflags);
mtu -= gre_calc_hlen(tflags);
}
if (mtu < 68) {
@@ -481,7 +481,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
- rt = (struct rt6_info *) dest_dst->dst_cache;
+ rt = dst_rt6_info(dest_dst->dst_cache);
else {
u32 cookie;
@@ -501,7 +501,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
cookie = rt6_get_cookie(rt);
__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
spin_unlock_bh(&dest->dst_lock);
@@ -517,7 +517,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
rt_mode);
if (!dst)
goto err_unreach;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
}
local = __ip_vs_is_local_route6(rt);
@@ -553,10 +553,10 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
- __be16 tflags = 0;
+ IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
- tflags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tflags);
mtu -= gre_calc_hlen(tflags);
}
if (mtu < IPV6_MIN_MTU) {
@@ -862,7 +862,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_RT_MODE_RDR);
if (local < 0)
goto tx_error;
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
@@ -1082,11 +1082,11 @@ ipvs_gre_encap(struct net *net, struct sk_buff *skb,
{
__be16 proto = *next_protocol == IPPROTO_IPIP ?
htons(ETH_P_IP) : htons(ETH_P_IPV6);
- __be16 tflags = 0;
+ IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t hdrlen;
if (cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
- tflags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tflags);
hdrlen = gre_calc_hlen(tflags);
gre_build_header(skb, hdrlen, tflags, proto, 0, 0);
@@ -1165,11 +1165,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
+ IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t gre_hdrlen;
- __be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
- tflags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tflags);
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;
@@ -1288,7 +1288,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
tdev = rt->dst.dev;
/*
@@ -1310,11 +1310,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
+ IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t gre_hdrlen;
- __be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
- tflags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tflags);
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;
@@ -1590,7 +1590,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
&cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
if (local < 0)
goto tx_error;
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 0e4beae421f8..06b084844700 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -23,6 +23,7 @@ static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
struct bpf_nf_link {
struct bpf_link link;
struct nf_hook_ops hook_ops;
+ netns_tracker ns_tracker;
struct net *net;
u32 dead;
const struct nf_defrag_hook *defrag_hook;
@@ -42,7 +43,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
hook = rcu_dereference(*ptr_global_hook);
if (!hook) {
rcu_read_unlock();
- err = request_module(mod);
+ err = request_module("%s", mod);
if (err)
return ERR_PTR(err < 0 ? err : -EINVAL);
@@ -120,6 +121,7 @@ static void bpf_nf_link_release(struct bpf_link *link)
if (!cmpxchg(&nf_link->dead, 0, 1)) {
nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops);
bpf_nf_disable_defrag(nf_link);
+ put_net_track(nf_link->net, &nf_link->ns_tracker);
}
}
@@ -150,11 +152,12 @@ static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
+ const struct nf_defrag_hook *hook = nf_link->defrag_hook;
info->netfilter.pf = nf_link->hook_ops.pf;
info->netfilter.hooknum = nf_link->hook_ops.hooknum;
info->netfilter.priority = nf_link->hook_ops.priority;
- info->netfilter.flags = 0;
+ info->netfilter.flags = hook ? BPF_F_NETFILTER_IP_DEFRAG : 0;
return 0;
}
@@ -257,6 +260,8 @@ int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
return err;
}
+ get_net_track(net, &link->ns_tracker, GFP_KERNEL);
+
return bpf_link_settle(&link_primer);
}
@@ -314,7 +319,7 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
const struct bpf_verifier_ops netfilter_verifier_ops = {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 5d8ed6c90b7e..913ede2f57f9 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -132,7 +132,7 @@ static int __nf_conncount_add(struct net *net,
struct nf_conn *found_ct;
unsigned int collect = 0;
- if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
+ if ((u32)jiffies == list->last_gc)
goto add_new_node;
/* check the saved connections */
@@ -234,7 +234,7 @@ bool nf_conncount_gc_list(struct net *net,
bool ret = false;
/* don't bother if we just did GC */
- if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
+ if ((u32)jiffies == READ_ONCE(list->last_gc))
return false;
/* don't bother if other cpu is already doing GC */
@@ -321,7 +321,6 @@ insert_tree(struct net *net,
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
- u8 keylen = data->keylen;
bool do_gc = true;
spin_lock_bh(&nf_conncount_locks[hash]);
@@ -333,7 +332,7 @@ restart:
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
parent = *rbnode;
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {
@@ -378,7 +377,9 @@ restart:
conn->tuple = *tuple;
conn->zone = *zone;
- memcpy(rbconn->key, key, sizeof(u32) * keylen);
+ conn->cpu = raw_smp_processor_id();
+ conn->jiffies32 = (u32)jiffies;
+ memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
@@ -403,7 +404,6 @@ count_tree(struct net *net,
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
unsigned int hash;
- u8 keylen = data->keylen;
hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];
@@ -414,7 +414,7 @@ count_tree(struct net *net,
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
parent = rcu_dereference_raw(parent->rb_left);
} else if (diff > 0) {
@@ -524,11 +524,10 @@ unsigned int nf_conncount_count(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
-struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
- unsigned int keylen)
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen)
{
struct nf_conncount_data *data;
- int ret, i;
+ int i;
if (keylen % sizeof(u32) ||
keylen / sizeof(u32) > MAX_KEYLEN ||
@@ -541,12 +540,6 @@ struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family
if (!data)
return ERR_PTR(-ENOMEM);
- ret = nf_ct_netns_get(net, family);
- if (ret < 0) {
- kfree(data);
- return ERR_PTR(ret);
- }
-
for (i = 0; i < ARRAY_SIZE(data->root); ++i)
data->root[i] = RB_ROOT;
@@ -583,13 +576,11 @@ static void destroy_tree(struct rb_root *r)
}
}
-void nf_conncount_destroy(struct net *net, unsigned int family,
- struct nf_conncount_data *data)
+void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data)
{
unsigned int i;
cancel_work_sync(&data->gc_work);
- nf_ct_netns_put(net, family);
for (i = 0; i < ARRAY_SIZE(data->root); ++i)
destroy_tree(&data->root[i]);
@@ -605,15 +596,11 @@ static int __init nf_conncount_modinit(void)
for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
- conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
- sizeof(struct nf_conncount_tuple),
- 0, 0, NULL);
+ conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
if (!conncount_conn_cachep)
return -ENOMEM;
- conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
- sizeof(struct nf_conncount_rb),
- 0, 0, NULL);
+ conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
if (!conncount_rb_cachep) {
kmem_cache_destroy(conncount_conn_cachep);
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index d011d2eb0848..7be4c35e4795 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -106,7 +106,7 @@ static int amanda_help(struct sk_buff *skb,
/* increase the UDP timeout of the master connection as replies from
* Amanda clients to the server can be quite delayed */
- nf_ct_refresh(ct, skb, master_timeout * HZ);
+ nf_ct_refresh(ct, master_timeout * HZ);
/* No data? */
dataoff = protoff + sizeof(struct udphdr);
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 475358ec8212..4a136fc3a9c0 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -32,7 +32,9 @@
* -EINVAL - Passed NULL for bpf_tuple pointer
* -EINVAL - opts->reserved is not 0
* -EINVAL - netns_id is less than -1
- * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (12)
+ * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (16) or 12
+ * -EINVAL - opts->ct_zone_id set when
+ *           opts__sz isn't NF_BPF_CT_OPTS_SZ (16)
* -EPROTO - l4proto isn't one of IPPROTO_TCP or IPPROTO_UDP
* -ENONET - No network namespace found for netns_id
* -ENOENT - Conntrack lookup could not find entry for tuple
@@ -42,6 +44,8 @@
* Values:
* IPPROTO_TCP, IPPROTO_UDP
* @dir: - connection tracking tuple direction.
+ * @ct_zone_id - connection tracking zone id.
+ * @ct_zone_dir - connection tracking zone direction.
* @reserved - Reserved member, will be reused for more options in future
* Values:
* 0
@@ -51,11 +55,13 @@ struct bpf_ct_opts {
s32 error;
u8 l4proto;
u8 dir;
- u8 reserved[2];
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
+ u8 reserved[3];
};
enum {
- NF_BPF_CT_OPTS_SZ = 12,
+ NF_BPF_CT_OPTS_SZ = 16,
};
static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple,
@@ -104,12 +110,21 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
u32 timeout)
{
struct nf_conntrack_tuple otuple, rtuple;
+ struct nf_conntrack_zone ct_zone;
struct nf_conn *ct;
int err;
- if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts_len != NF_BPF_CT_OPTS_SZ)
+ if (!opts || !bpf_tuple)
return ERR_PTR(-EINVAL);
+ if (!(opts_len == NF_BPF_CT_OPTS_SZ || opts_len == 12))
+ return ERR_PTR(-EINVAL);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2])
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (opts->ct_zone_id)
+ return ERR_PTR(-EINVAL);
+ }
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
return ERR_PTR(-EINVAL);
@@ -130,7 +145,16 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
return ERR_PTR(-ENONET);
}
- ct = nf_conntrack_alloc(net, &nf_ct_zone_dflt, &otuple, &rtuple,
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->ct_zone_dir == 0)
+ opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR;
+ nf_ct_zone_init(&ct_zone,
+ opts->ct_zone_id, opts->ct_zone_dir, 0);
+ } else {
+ ct_zone = nf_ct_zone_dflt;
+ }
+
+ ct = nf_conntrack_alloc(net, &ct_zone, &otuple, &rtuple,
GFP_ATOMIC);
if (IS_ERR(ct))
goto out;
@@ -152,12 +176,21 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
{
struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_zone ct_zone;
struct nf_conn *ct;
int err;
- if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts_len != NF_BPF_CT_OPTS_SZ)
+ if (!opts || !bpf_tuple)
return ERR_PTR(-EINVAL);
+ if (!(opts_len == NF_BPF_CT_OPTS_SZ || opts_len == 12))
+ return ERR_PTR(-EINVAL);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2])
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (opts->ct_zone_id)
+ return ERR_PTR(-EINVAL);
+ }
if (unlikely(opts->l4proto != IPPROTO_TCP && opts->l4proto != IPPROTO_UDP))
return ERR_PTR(-EPROTO);
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
@@ -174,7 +207,16 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
return ERR_PTR(-ENONET);
}
- hash = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->ct_zone_dir == 0)
+ opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR;
+ nf_ct_zone_init(&ct_zone,
+ opts->ct_zone_id, opts->ct_zone_dir, 0);
+ } else {
+ ct_zone = nf_ct_zone_dflt;
+ }
+
+ hash = nf_conntrack_find_get(net, &ct_zone, &tuple);
if (opts->netns_id >= 0)
put_net(net);
if (!hash)
@@ -245,7 +287,7 @@ __bpf_kfunc_start_defs();
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -279,7 +321,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -312,7 +354,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -347,7 +389,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -467,7 +509,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_ct_kfunc_set)
+BTF_KFUNCS_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
@@ -478,7 +520,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_ct_kfunc_set)
+BTF_KFUNCS_END(nf_ct_kfunc_set)
static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.owner = THIS_MODULE,
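A hedged sketch of how a BPF program might use the widened bpf_ct_opts (opts__sz now 16) to look up a conntrack entry in a specific zone; struct layouts and kfunc prototypes are assumed to come from vmlinux.h and local __ksym declarations, and the zone id, tuple and section names are illustrative only:

/* Illustrative XDP program using the new ct_zone_id/ct_zone_dir fields. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
		  u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz) __ksym;
extern void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("xdp")
int ct_zone_lookup(struct xdp_md *ctx)
{
	struct bpf_ct_opts opts = {
		.netns_id   = -1,		/* BPF_F_CURRENT_NETNS */
		.l4proto    = IPPROTO_TCP,
		.ct_zone_id = 42,		/* illustrative zone */
	};
	struct bpf_sock_tuple tup = {};	/* a real program fills this from the packet */
	struct nf_conn *ct;

	ct = bpf_xdp_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (ct)
		bpf_ct_release(ct);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";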
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index cfa0fe0356de..a7552a46d6ac 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -75,7 +75,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
nf_ct_expect_related(exp, 0);
nf_ct_expect_put(exp);
- nf_ct_refresh(ct, skb, timeout * HZ);
+ nf_ct_refresh(ct, timeout * HZ);
out:
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5b876fa7f9af..7f8b245e287a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -988,6 +988,56 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
tstamp->start = ktime_get_real_ns();
}
+/**
+ * nf_ct_match_reverse - check if ct1 and ct2 refer to identical flow
+ * @ct1: conntrack in hash table to check against
+ * @ct2: merge candidate
+ *
+ * returns true if ct1 and ct2 happen to refer to the same flow, but
+ * in opposing directions, i.e.
+ * ct1: a:b -> c:d
+ * ct2: c:d -> a:b
+ * for both directions. If so, @ct2 should not have been created
+ * as the skb should have been picked up as an ESTABLISHED flow.
+ * But ct1 was not yet committed to the hash table when the skb that
+ * created ct2 arrived.
+ *
+ * Note we don't compare netns because ct entries in different net
+ * namespaces cannot clash to begin with.
+ *
+ * @return: true if ct1 and ct2 are identical when swapping origin/reply.
+ */
+static bool
+nf_ct_match_reverse(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+ u16 id1, id2;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct2->tuplehash[IP_CT_DIR_REPLY].tuple))
+ return false;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_ORIGINAL);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_REPLY);
+ if (id1 != id2)
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_REPLY);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL);
+
+ return id1 == id2;
+}
+
+static int nf_ct_can_merge(const struct nf_conn *ct,
+ const struct nf_conn *loser_ct)
+{
+ return nf_ct_match(ct, loser_ct) ||
+ nf_ct_match_reverse(ct, loser_ct);
+}
+
/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
@@ -999,11 +1049,7 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
loser_ct = nf_ct_get(skb, &ctinfo);
- if (nf_ct_is_dying(ct))
- return NF_DROP;
-
- if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
- nf_ct_match(ct, loser_ct)) {
+ if (nf_ct_can_merge(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
nf_conntrack_get(&ct->ct_general);
@@ -1090,7 +1136,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
* A conntrack entry can be inserted to the connection tracking table
* if there is no existing entry with an identical tuple.
*
- * If there is one, @skb (and the assocated, unconfirmed conntrack) has
+ * If there is one, @skb (and the associated, unconfirmed conntrack) has
* to be dropped. In case @skb is retransmitted, next conntrack lookup
* will find the already-existing entry.
*
@@ -1440,8 +1486,6 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
const struct nf_conntrack_l4proto *l4proto;
u8 protonum = nf_ct_protonum(ct);
- if (test_bit(IPS_OFFLOAD_BIT, &ct->status) && protonum != IPPROTO_UDP)
- return false;
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
@@ -1500,12 +1544,6 @@ static void gc_worker(struct work_struct *work)
tmp = nf_ct_tuplehash_to_ctrack(h);
- if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
- nf_ct_offload_timeout(tmp);
- if (!nf_conntrack_max95)
- continue;
- }
-
if (expired_count > GC_SCAN_EXPIRED_MAX) {
rcu_read_unlock();
@@ -1724,7 +1762,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
- return (struct nf_conntrack_tuple_hash *)ct;
+ return ERR_CAST(ct);
if (!nf_ct_add_synproxy(ct, tmpl)) {
nf_conntrack_free(ct);
@@ -2024,7 +2062,7 @@ repeat:
goto repeat;
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
- if (ret == -NF_DROP)
+ if (ret == NF_DROP)
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = -ret;
@@ -2045,9 +2083,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_in);
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
u32 extra_jiffies,
- bool do_acct)
+ unsigned int bytes)
{
/* Only update if this is not a fixed timeout */
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
@@ -2060,8 +2097,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
if (READ_ONCE(ct->timeout) != extra_jiffies)
WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
- if (do_acct)
- nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+ if (bytes)
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
@@ -2153,80 +2190,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
nf_conntrack_get(skb_nfct(nskb));
}
-static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- const struct nf_nat_hook *nat_hook;
- struct nf_conntrack_tuple_hash *h;
- struct nf_conntrack_tuple tuple;
- unsigned int status;
- int dataoff;
- u16 l3num;
- u8 l4num;
-
- l3num = nf_ct_l3num(ct);
-
- dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
- if (dataoff <= 0)
- return NF_DROP;
-
- if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple))
- return NF_DROP;
-
- if (ct->status & IPS_SRC_NAT) {
- memcpy(tuple.src.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
- sizeof(tuple.src.u3.all));
- tuple.src.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
- }
-
- if (ct->status & IPS_DST_NAT) {
- memcpy(tuple.dst.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
- sizeof(tuple.dst.u3.all));
- tuple.dst.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
- }
-
- h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
- if (!h)
- return NF_ACCEPT;
-
- /* Store status bits of the conntrack that is clashing to re-do NAT
- * mangling according to what it has been done already to this packet.
- */
- status = ct->status;
-
- nf_ct_put(ct);
- ct = nf_ct_tuplehash_to_ctrack(h);
- nf_ct_set(skb, ct, ctinfo);
-
- nat_hook = rcu_dereference(nf_nat_hook);
- if (!nat_hook)
- return NF_ACCEPT;
-
- if (status & IPS_SRC_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_SRC,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- if (status & IPS_DST_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_DST,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- return NF_ACCEPT;
-}
-
/* This packet is coming from userspace via nf_queue, complete the packet
* processing after the helper invocation in nf_confirm().
*/
@@ -2290,17 +2253,6 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (!ct)
return NF_ACCEPT;
- if (!nf_ct_is_confirmed(ct)) {
- int ret = __nf_conntrack_update(net, skb, ct, ctinfo);
-
- if (ret != NF_ACCEPT)
- return ret;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct)
- return NF_ACCEPT;
- }
-
return nf_confirm_cthelper(skb, ct, ctinfo);
}
@@ -2530,7 +2482,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
* netfilter framework. Roll on, two-stage module
* delete...
*/
- synchronize_net();
+ synchronize_rcu_expedited();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
@@ -2558,12 +2510,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
- if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+ if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
if (hash && nulls)
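As a usage note, a hedged sketch of how a conntrack helper refreshes timeouts with the reworked accounting API: nf_ct_refresh() no longer takes the skb, and callers that want accounting pass the byte count to __nf_ct_refresh_acct() directly. The helper name and timeout value below are illustrative only:

/* Illustrative helper callback, assuming <net/netfilter/nf_conntrack.h>. */
#include <net/netfilter/nf_conntrack.h>

static int example_help(struct sk_buff *skb, unsigned int protoff,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* plain timeout refresh, no accounting */
	nf_ct_refresh(ct, 180 * HZ);

	/* timeout refresh plus byte accounting for this packet */
	__nf_ct_refresh_acct(ct, ctinfo, 180 * HZ, skb->len);

	return NF_ACCEPT;
}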
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 69948e1d6974..af68c64acaab 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -162,6 +162,14 @@ static int __nf_conntrack_eventmask_report(struct nf_conntrack_ecache *e,
return ret;
}
+static void nf_ct_ecache_tstamp_refresh(struct nf_conntrack_ecache *e)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ if (local64_read(&e->timestamp))
+ local64_set(&e->timestamp, ktime_get_real_ns());
+#endif
+}
+
int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
u32 portid, int report)
{
@@ -186,6 +194,8 @@ int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
/* This is a resent of a destroy event? If so, skip missed */
missed = e->portid ? 0 : e->missed;
+ nf_ct_ecache_tstamp_refresh(e);
+
ret = __nf_conntrack_eventmask_report(e, events, missed, &item);
if (unlikely(ret < 0 && (events & (1 << IPCT_DESTROY)))) {
/* This is a destroy event that has been triggered by a process,
@@ -297,6 +307,18 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
}
}
+static void nf_ct_ecache_tstamp_new(const struct nf_conn *ct, struct nf_conntrack_ecache *e)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ u64 ts = 0;
+
+ if (nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+ ts = ktime_get_real_ns();
+
+ local64_set(&e->timestamp, ts);
+#endif
+}
+
bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
struct net *net = nf_ct_net(ct);
@@ -326,6 +348,7 @@ bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp
e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
if (e) {
+ nf_ct_ecache_tstamp_new(ct, e);
e->ctmask = ctmask;
e->expmask = expmask;
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 81ca348915c9..21fa550966f0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -722,9 +722,7 @@ int nf_conntrack_expect_init(void)
nf_ct_expect_hsize = 1;
}
nf_ct_expect_max = nf_ct_expect_hsize * 4;
- nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
- sizeof(struct nf_conntrack_expect),
- 0, 0, NULL);
+ nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
if (!nf_ct_expect_cachep)
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 5a9bce24f3c3..14f73872f647 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1385,7 +1385,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
if (info->timeout > 0) {
pr_debug("nf_ct_ras: set RAS connection timeout to "
"%u seconds\n", info->timeout);
- nf_ct_refresh(ct, skb, info->timeout * HZ);
+ nf_ct_refresh(ct, info->timeout * HZ);
/* Set expect timeout */
spin_lock_bh(&nf_conntrack_expect_lock);
@@ -1433,7 +1433,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
info->sig_port[!dir] = 0;
/* Give it 30 seconds for UCF or URJ */
- nf_ct_refresh(ct, skb, 30 * HZ);
+ nf_ct_refresh(ct, 30 * HZ);
return 0;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3b846cbdc050..db23876a6016 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -357,11 +357,11 @@ nla_put_failure:
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nlattr *nest_secctx;
- int len, ret;
- char *secctx;
+ struct lsm_context ctx;
+ int ret;
- ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
- if (ret)
+ ret = security_secid_to_secctx(ct->secmark, &ctx);
+ if (ret < 0)
return 0;
ret = -1;
@@ -369,20 +369,37 @@ static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
if (!nest_secctx)
goto nla_put_failure;
- if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
+ if (nla_put_string(skb, CTA_SECCTX_NAME, ctx.context))
goto nla_put_failure;
nla_nest_end(skb, nest_secctx);
ret = 0;
nla_put_failure:
- security_release_secctx(secctx, len);
+ security_release_secctx(&ctx);
return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
-#ifdef CONFIG_NF_CONNTRACK_LABELS
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+static int
+ctnetlink_dump_event_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ const struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);
+
+ if (e) {
+ u64 ts = local64_read(&e->timestamp);
+
+ if (ts)
+ return nla_put_be64(skb, CTA_TIMESTAMP_EVENT,
+ cpu_to_be64(ts), CTA_TIMESTAMP_PAD);
+ }
+#endif
+ return 0;
+}
+
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
@@ -391,6 +408,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct)
return 0;
return nla_total_size(sizeof(labels->bits));
}
+#endif
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
@@ -411,10 +429,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
return 0;
}
-#else
-#define ctnetlink_dump_labels(a, b) (0)
-#define ctnetlink_label_size(a) (0)
-#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -652,7 +666,6 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
return len + len4;
}
-#endif
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
@@ -667,14 +680,14 @@ static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
- int len, ret;
+ int ret;
- ret = security_secid_to_secctx(ct->secmark, NULL, &len);
- if (ret)
+ ret = security_secid_to_secctx(ct->secmark, NULL);
+ if (ret < 0)
return 0;
return nla_total_size(0) /* CTA_SECCTX */
- + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
+ + nla_total_size(sizeof(char) * ret); /* CTA_SECCTX_NAME */
#else
return 0;
#endif
@@ -690,6 +703,7 @@ static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
return 0;
#endif
}
+#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
@@ -720,6 +734,9 @@ static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
#endif
+ ctnetlink_proto_size(ct)
+ ctnetlink_label_size(ct)
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ + nla_total_size(sizeof(u64)) /* CTA_TIMESTAMP_EVENT */
+#endif
;
}
@@ -841,6 +858,10 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
goto nla_put_failure;
#endif
+
+ if (ctnetlink_dump_event_timestamp(skb, ct))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
err = nfnetlink_send(skb, net, item->portid, group, item->report,
GFP_ATOMIC);
@@ -1560,6 +1581,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
.len = NF_CT_LABELS_MAX_SIZE },
[CTA_FILTER] = { .type = NLA_NESTED },
[CTA_STATUS_MASK] = { .type = NLA_U32 },
+ [CTA_TIMESTAMP_EVENT] = { .type = NLA_REJECT },
};
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
@@ -1579,9 +1601,6 @@ static int ctnetlink_flush_conntrack(struct net *net,
};
if (ctnetlink_needs_filter(family, cda)) {
- if (cda[CTA_FILTER])
- return -EOPNOTSUPP;
-
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
@@ -1610,14 +1629,14 @@ static int ctnetlink_del_conntrack(struct sk_buff *skb,
if (err < 0)
return err;
- if (cda[CTA_TUPLE_ORIG])
+ if (cda[CTA_TUPLE_ORIG] && !cda[CTA_FILTER])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
family, &zone);
- else if (cda[CTA_TUPLE_REPLY])
+ else if (cda[CTA_TUPLE_REPLY] && !cda[CTA_FILTER])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
family, &zone);
else {
- u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC;
+ u8 u3 = info->nfmsg->version || cda[CTA_FILTER] ? family : AF_UNSPEC;
return ctnetlink_flush_conntrack(info->net, cda,
NETLINK_CB(skb).portid,
@@ -3420,7 +3439,8 @@ static int ctnetlink_del_expect(struct sk_buff *skb,
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
- if (ntohl(id) != (u32)(unsigned long)exp) {
+
+ if (id != nf_expect_get_id(exp)) {
nf_ct_expect_put(exp);
return -ENOENT;
}
@@ -3875,7 +3895,7 @@ static int __init ctnetlink_init(void)
{
int ret;
- NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx);
+ NL_ASSERT_CTX_FITS(struct ctnetlink_list_dump_ctx);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
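For the new CTA_TIMESTAMP_EVENT attribute added to conntrack event messages, a hedged userspace sketch of parsing it with libmnl; this assumes the attribute is emitted at the top level of the conntrack message (as ctnetlink_dump_event_timestamp() above does) and omits all surrounding netlink plumbing:

/* Illustrative libmnl attribute callback fragment. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static void print_event_timestamp(const struct nlattr *attr)
{
	if (mnl_attr_get_type(attr) == CTA_TIMESTAMP_EVENT &&
	    mnl_attr_validate(attr, MNL_TYPE_U64) >= 0) {
		uint64_t ns = be64toh(mnl_attr_get_u64(attr));

		printf("event timestamp: %llu ns since the epoch\n",
		       (unsigned long long)ns);
	}
}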
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index e2db1f4ec2df..ebc4f733bb2e 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -525,7 +525,7 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
if (!dh)
- return NF_DROP;
+ return -NF_ACCEPT;
if (dccp_error(dh, skb, dataoff, state))
return -NF_ACCEPT;
@@ -533,7 +533,7 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
/* pull again, including possible 48 bit sequences and subtype header */
dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
if (!dh)
- return NF_DROP;
+ return -NF_ACCEPT;
type = dh->dccph_type;
if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 1020d67600a9..327b8059025d 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -62,7 +62,9 @@ static const u_int8_t noct_valid_new[] = {
[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
- [ICMPV6_MLD2_REPORT - 130] = 1
+ [ICMPV6_MLD2_REPORT - 130] = 1,
+ [ICMPV6_MRDISC_ADV - 130] = 1,
+ [ICMPV6_MRDISC_SOL - 130] = 1
};
bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 4cc97f971264..7c6f7c9f7332 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -39,20 +39,15 @@ static const char *const sctp_conntrack_names[] = {
[SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
};
-#define SECS * HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS * 24 HOURS
-
static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
- [SCTP_CONNTRACK_CLOSED] = 10 SECS,
- [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
- [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
- [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
- [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ [SCTP_CONNTRACK_CLOSED] = secs_to_jiffies(10),
+ [SCTP_CONNTRACK_COOKIE_WAIT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_ESTABLISHED] = secs_to_jiffies(210),
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = secs_to_jiffies(30),
};
#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
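The open-coded "N SECS" token-pasting macros are replaced by the generic secs_to_jiffies() helper; a short sketch of the equivalence (array name illustrative):

#include <linux/jiffies.h>

static const unsigned int example_timeouts[] = {
	/* old style:  210 SECS   with   #define SECS * HZ */
	secs_to_jiffies(210),	/* == 210 * HZ */
	secs_to_jiffies(3),	/* ==   3 * HZ */
};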
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index ae493599a3ef..0c1d086e96cb 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -14,7 +14,7 @@
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/tcp.h>
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index d0eac27f6ba0..ca748f8dbff1 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1553,7 +1553,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
if (dataoff >= skb->len)
return NF_ACCEPT;
- nf_ct_refresh(ct, skb, sip_timeout * HZ);
+ nf_ct_refresh(ct, sip_timeout * HZ);
if (unlikely(skb_linearize(skb)))
return NF_DROP;
@@ -1624,7 +1624,7 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
if (dataoff >= skb->len)
return NF_ACCEPT;
- nf_ct_refresh(ct, skb, sip_timeout * HZ);
+ nf_ct_refresh(ct, sip_timeout * HZ);
if (unlikely(skb_linearize(skb)))
return NF_DROP;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ee98ce5b816..502cf10aab41 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -22,9 +22,6 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
-#ifdef CONFIG_LWTUNNEL
-#include <net/netfilter/nf_hooks_lwtunnel.h>
-#endif
#include <linux/rculist_nulls.h>
static bool enable_hooks __read_mostly;
@@ -175,17 +172,16 @@ static void ct_seq_stop(struct seq_file *s, void *v)
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
+ struct lsm_context ctx;
int ret;
- u32 len;
- char *secctx;
- ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
- if (ret)
+ ret = security_secid_to_secctx(ct->secmark, &ctx);
+ if (ret < 0)
return;
- seq_printf(s, "secctx=%s ", secctx);
+ seq_printf(s, "secctx=%s ", ctx.context);
- security_release_secctx(secctx, len);
+ security_release_secctx(&ctx);
}
#else
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
@@ -527,7 +523,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_count);
static unsigned int nf_conntrack_htable_size_user __read_mostly;
static int
-nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
+nf_conntrack_hash_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -612,15 +608,10 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
-#ifdef CONFIG_LWTUNNEL
- NF_SYSCTL_CT_LWTUNNEL,
-#endif
- __NF_SYSCTL_CT_LAST_SYSCTL,
+ NF_SYSCTL_CT_LAST_SYSCTL,
};
-#define NF_SYSCTL_CT_LAST_SYSCTL (__NF_SYSCTL_CT_LAST_SYSCTL + 1)
-
static struct ctl_table nf_ct_sysctl_table[] = {
[NF_SYSCTL_CT_MAX] = {
.procname = "nf_conntrack_max",
@@ -948,16 +939,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
-#ifdef CONFIG_LWTUNNEL
- [NF_SYSCTL_CT_LWTUNNEL] = {
- .procname = "nf_hooks_lwtunnel",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
- },
-#endif
- {}
};
static struct ctl_table nf_ct_netfilter_table[] = {
@@ -968,7 +949,6 @@ static struct ctl_table nf_ct_netfilter_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
@@ -1122,7 +1102,7 @@ out_unregister_netfilter:
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
- struct ctl_table *table;
+ const struct ctl_table *table;
table = cnet->sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(cnet->sysctl_header);
diff --git a/net/netfilter/nf_flow_table_bpf.c b/net/netfilter/nf_flow_table_bpf.c
new file mode 100644
index 000000000000..4a5f5195f2d2
--- /dev/null
+++ b/net/netfilter/nf_flow_table_bpf.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unstable Flow Table Helpers for XDP hook
+ *
+ * These are called from the XDP programs.
+ * Note that it is allowed to break compatibility for these functions since
+ * the interface they are exposed through to BPF programs is explicitly
+ * unstable.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <net/xdp.h>
+
+/* bpf_flowtable_opts - options for bpf flowtable helpers
+ * @error: out parameter, set for any encountered error
+ */
+struct bpf_flowtable_opts {
+ s32 error;
+};
+
+enum {
+ NF_BPF_FLOWTABLE_OPTS_SZ = 4,
+};
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global functions as their definitions will be in nf_flow_table BTF");
+
+__bpf_kfunc_start_defs();
+
+static struct flow_offload_tuple_rhash *
+bpf_xdp_flow_tuple_lookup(struct net_device *dev,
+ struct flow_offload_tuple *tuple, __be16 proto)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct nf_flowtable *nf_flow_table;
+ struct flow_offload *nf_flow;
+
+ nf_flow_table = nf_flowtable_by_dev(dev);
+ if (!nf_flow_table)
+ return ERR_PTR(-ENOENT);
+
+ tuplehash = flow_offload_lookup(nf_flow_table, tuple);
+ if (!tuplehash)
+ return ERR_PTR(-ENOENT);
+
+ nf_flow = container_of(tuplehash, struct flow_offload,
+ tuplehash[tuplehash->tuple.dir]);
+ flow_offload_refresh(nf_flow_table, nf_flow, false);
+
+ return tuplehash;
+}
+
+__bpf_kfunc struct flow_offload_tuple_rhash *
+bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple,
+ struct bpf_flowtable_opts *opts, u32 opts_len)
+{
+ struct xdp_buff *xdp = (struct xdp_buff *)ctx;
+ struct flow_offload_tuple tuple = {
+ .iifidx = fib_tuple->ifindex,
+ .l3proto = fib_tuple->family,
+ .l4proto = fib_tuple->l4_protocol,
+ .src_port = fib_tuple->sport,
+ .dst_port = fib_tuple->dport,
+ };
+ struct flow_offload_tuple_rhash *tuplehash;
+ __be16 proto;
+
+ if (opts_len != NF_BPF_FLOWTABLE_OPTS_SZ) {
+ opts->error = -EINVAL;
+ return NULL;
+ }
+
+ switch (fib_tuple->family) {
+ case AF_INET:
+ tuple.src_v4.s_addr = fib_tuple->ipv4_src;
+ tuple.dst_v4.s_addr = fib_tuple->ipv4_dst;
+ proto = htons(ETH_P_IP);
+ break;
+ case AF_INET6:
+ tuple.src_v6 = *(struct in6_addr *)&fib_tuple->ipv6_src;
+ tuple.dst_v6 = *(struct in6_addr *)&fib_tuple->ipv6_dst;
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ opts->error = -EAFNOSUPPORT;
+ return NULL;
+ }
+
+ tuplehash = bpf_xdp_flow_tuple_lookup(xdp->rxq->dev, &tuple, proto);
+ if (IS_ERR(tuplehash)) {
+ opts->error = PTR_ERR(tuplehash);
+ return NULL;
+ }
+
+ return tuplehash;
+}
+
+__diag_pop()
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(nf_ft_kfunc_set)
+BTF_ID_FLAGS(func, bpf_xdp_flow_lookup, KF_TRUSTED_ARGS | KF_RET_NULL)
+BTF_KFUNCS_END(nf_ft_kfunc_set)
+
+static const struct btf_kfunc_id_set nf_flow_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &nf_ft_kfunc_set,
+};
+
+int nf_flow_register_bpf(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
+ &nf_flow_kfunc_set);
+}
+EXPORT_SYMBOL_GPL(nf_flow_register_bpf);
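A hedged sketch of an XDP program driving the new bpf_xdp_flow_lookup() kfunc; the bpf_fib_lookup/bpf_flowtable_opts layouts are assumed to come from vmlinux.h, header parsing is elided, and the field values are illustrative:

/* Illustrative XDP consumer of the flowtable lookup kfunc. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern struct flow_offload_tuple_rhash *
bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple,
		    struct bpf_flowtable_opts *opts, __u32 opts_len) __ksym;

SEC("xdp")
int xdp_flowtable_probe(struct xdp_md *ctx)
{
	struct bpf_flowtable_opts opts = {};
	struct bpf_fib_lookup tuple = {
		.family      = 2,			/* AF_INET */
		.l4_protocol = IPPROTO_TCP,
		.ifindex     = ctx->ingress_ifindex,
		/* a real program fills addresses and ports from the packet */
	};
	struct flow_offload_tuple_rhash *th;

	th = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
	if (!th)
		return XDP_PASS;	/* no offloaded flow, opts.error says why */

	/* flow is offloaded; a real program would forward or update counters */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";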
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index a0571339239c..9d8361526f82 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -77,12 +77,8 @@ EXPORT_SYMBOL_GPL(flow_offload_alloc);
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
- const struct rt6_info *rt;
-
- if (flow_tuple->l3proto == NFPROTO_IPV6) {
- rt = (const struct rt6_info *)flow_tuple->dst_cache;
- return rt6_get_cookie(rt);
- }
+ if (flow_tuple->l3proto == NFPROTO_IPV6)
+ return rt6_get_cookie(dst_rt6_info(flow_tuple->dst_cache));
return 0;
}
@@ -165,42 +161,86 @@ void flow_offload_route_init(struct flow_offload *flow,
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
-static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
+ return nf_flow_timeout_delta(flow->timeout) <= 0;
+}
+
+static void flow_offload_fixup_tcp(struct nf_conn *ct, u8 tcp_state)
+{
+ struct ip_ct_tcp *tcp = &ct->proto.tcp;
+
+ spin_lock_bh(&ct->lock);
+ if (tcp->state != tcp_state)
+ tcp->state = tcp_state;
+
+ /* syn packet triggers the TCP reopen case from conntrack. */
+ if (tcp->state == TCP_CONNTRACK_CLOSE)
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
+
+ /* Conntrack state is outdated due to offload bypass.
+ * Clear IP_CT_TCP_FLAG_MAXACK_SET, otherwise conntracks
+ * TCP reset validation will fail.
+ */
tcp->seen[0].td_maxwin = 0;
+ tcp->seen[0].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET;
tcp->seen[1].td_maxwin = 0;
+ tcp->seen[1].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET;
+ spin_unlock_bh(&ct->lock);
}
-static void flow_offload_fixup_ct(struct nf_conn *ct)
+static void flow_offload_fixup_ct(struct flow_offload *flow)
{
+ struct nf_conn *ct = flow->ct;
struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
+ bool expired, closing = false;
+ u32 offload_timeout = 0;
s32 timeout;
if (l4num == IPPROTO_TCP) {
- struct nf_tcp_net *tn = nf_tcp_pernet(net);
-
- flow_offload_fixup_tcp(&ct->proto.tcp);
+ const struct nf_tcp_net *tn = nf_tcp_pernet(net);
+ u8 tcp_state;
+
+ /* Enter CLOSE state if fin/rst packet has been seen, this
+ * allows TCP reopen from conntrack. Otherwise, pick up from
+ * the last seen TCP state.
+ */
+ closing = test_bit(NF_FLOW_CLOSING, &flow->flags);
+ if (closing) {
+ flow_offload_fixup_tcp(ct, TCP_CONNTRACK_CLOSE);
+ timeout = READ_ONCE(tn->timeouts[TCP_CONNTRACK_CLOSE]);
+ expired = false;
+ } else {
+ tcp_state = READ_ONCE(ct->proto.tcp.state);
+ flow_offload_fixup_tcp(ct, tcp_state);
+ timeout = READ_ONCE(tn->timeouts[tcp_state]);
+ expired = nf_flow_has_expired(flow);
+ }
+ offload_timeout = READ_ONCE(tn->offload_timeout);
- timeout = tn->timeouts[ct->proto.tcp.state];
- timeout -= tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
- struct nf_udp_net *tn = nf_udp_pernet(net);
+ const struct nf_udp_net *tn = nf_udp_pernet(net);
enum udp_conntrack state =
test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
UDP_CT_REPLIED : UDP_CT_UNREPLIED;
- timeout = tn->timeouts[state];
- timeout -= tn->offload_timeout;
+ timeout = READ_ONCE(tn->timeouts[state]);
+ expired = nf_flow_has_expired(flow);
+ offload_timeout = READ_ONCE(tn->offload_timeout);
} else {
return;
}
+ if (expired)
+ timeout -= offload_timeout;
+
if (timeout < 0)
timeout = 0;
- if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
- WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
+ if (closing ||
+ nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+ nf_ct_refresh(ct, timeout);
}
static void flow_offload_route_release(struct flow_offload *flow)
@@ -298,7 +338,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
- nf_ct_offload_timeout(flow->ct);
+ nf_ct_refresh(flow->ct, NF_CT_DAY);
if (nf_flowtable_hw_offload(flow_table)) {
__set_bit(NF_FLOW_HW, &flow->flags);
@@ -320,18 +360,14 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
else
return;
- if (likely(!nf_flowtable_hw_offload(flow_table)))
+ if (likely(!nf_flowtable_hw_offload(flow_table)) ||
+ test_bit(NF_FLOW_CLOSING, &flow->flags))
return;
nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
- return nf_flow_timeout_delta(flow->timeout) <= 0;
-}
-
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
@@ -348,7 +384,7 @@ void flow_offload_teardown(struct flow_offload *flow)
{
clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
set_bit(NF_FLOW_TEARDOWN, &flow->flags);
- flow_offload_fixup_ct(flow->ct);
+ flow_offload_fixup_ct(flow);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -418,15 +454,116 @@ static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
return flow_table->type->gc && flow_table->type->gc(flow);
}
+/**
+ * nf_flow_table_tcp_timeout() - new timeout of offloaded tcp entry
+ * @ct: Flowtable offloaded tcp ct
+ *
+ * Return: number of seconds when ct entry should expire.
+ */
+static u32 nf_flow_table_tcp_timeout(const struct nf_conn *ct)
+{
+ u8 state = READ_ONCE(ct->proto.tcp.state);
+
+ switch (state) {
+ case TCP_CONNTRACK_SYN_SENT:
+ case TCP_CONNTRACK_SYN_RECV:
+ return 0;
+ case TCP_CONNTRACK_ESTABLISHED:
+ return NF_CT_DAY;
+ case TCP_CONNTRACK_FIN_WAIT:
+ case TCP_CONNTRACK_CLOSE_WAIT:
+ case TCP_CONNTRACK_LAST_ACK:
+ case TCP_CONNTRACK_TIME_WAIT:
+ return 5 * 60 * HZ;
+ case TCP_CONNTRACK_CLOSE:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * nf_flow_table_extend_ct_timeout() - Extend ct timeout of offloaded conntrack entry
+ * @ct: Flowtable offloaded ct
+ *
+ * Datapath lookups in the conntrack table will evict nf_conn entries
+ * if they have expired.
+ *
+ * Once nf_conn entries have been offloaded, nf_conntrack might not see any
+ * packets anymore. Thus ct->timeout is no longer refreshed and ct can
+ * be evicted.
+ *
+ * To avoid the need for an additional check on the offload bit for every
+ * packet processed via nf_conntrack_in(), set an arbitrary timeout large
+ * enough not to ever expire; this saves us a check for the IPS_OFFLOAD_BIT
+ * from the packet path via nf_ct_is_expired().
+ */
+static void nf_flow_table_extend_ct_timeout(struct nf_conn *ct)
+{
+ static const u32 min_timeout = 5 * 60 * HZ;
+ u32 expires = nf_ct_expires(ct);
+
+ /* normal case: large enough timeout, nothing to do. */
+ if (likely(expires >= min_timeout))
+ return;
+
+ /* must check offload bit after this, we do not hold any locks.
+ * flowtable and ct entries could have been removed on another CPU.
+ */
+ if (!refcount_inc_not_zero(&ct->ct_general.use))
+ return;
+
+ /* load ct->status after refcount increase */
+ smp_acquire__after_ctrl_dep();
+
+ if (nf_ct_is_confirmed(ct) &&
+ test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+ u8 l4proto = nf_ct_protonum(ct);
+ u32 new_timeout = true;
+
+ switch (l4proto) {
+ case IPPROTO_UDP:
+ new_timeout = NF_CT_DAY;
+ break;
+ case IPPROTO_TCP:
+ new_timeout = nf_flow_table_tcp_timeout(ct);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* Update to ct->timeout from nf_conntrack happens
+ * without holding ct->lock.
+ *
+ * Use cmpxchg to ensure timeout extension doesn't
+ * happen when we race with conntrack datapath.
+ *
+ * The inverse -- datapath updating ->timeout right
+ * after this -- is fine, datapath is authoritative.
+ */
+ if (new_timeout) {
+ new_timeout += nfct_time_stamp;
+ cmpxchg(&ct->timeout, expires, new_timeout);
+ }
+ }
+
+ nf_ct_put(ct);
+}
+
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
+ bool teardown = test_bit(NF_FLOW_TEARDOWN, &flow->flags);
+
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
+ else if (!teardown)
+ nf_flow_table_extend_ct_timeout(flow->ct);
- if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (teardown) {
if (test_bit(NF_FLOW_HW, &flow->flags)) {
if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
nf_flow_offload_del(flow_table, flow);
@@ -435,6 +572,10 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
} else {
flow_offload_del(flow_table, flow);
}
+ } else if (test_bit(NF_FLOW_CLOSING, &flow->flags) &&
+ test_bit(NF_FLOW_HW, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_DYING, &flow->flags)) {
+ nf_flow_offload_del(flow_table, flow);
} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
nf_flow_offload_stats(flow_table, flow);
}
@@ -674,8 +815,14 @@ static int __init nf_flow_table_module_init(void)
if (ret)
goto out_offload;
+ ret = nf_flow_register_bpf();
+ if (ret)
+ goto out_bpf;
+
return 0;
+out_bpf:
+ nf_flow_table_offload_exit();
out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
return ret;
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 9505f9d188ff..b0f199171932 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -17,11 +17,15 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
switch (skb->protocol) {
case htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
+ return NF_ACCEPT;
+
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
proto = veth->h_vlan_encapsulated_proto;
break;
case htons(ETH_P_PPP_SES):
- proto = nf_flow_pppoe_proto(skb);
+ if (!nf_flow_pppoe_proto(skb, &proto))
+ return NF_ACCEPT;
break;
default:
proto = skb->protocol;
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index e45fade76409..8cd4cf7ae211 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -28,11 +28,15 @@ static int nf_flow_state_check(struct flow_offload *flow, int proto,
return 0;
tcph = (void *)(skb_network_header(skb) + thoff);
- if (unlikely(tcph->fin || tcph->rst)) {
+ if (tcph->syn && test_bit(NF_FLOW_CLOSING, &flow->flags)) {
flow_offload_teardown(flow);
return -1;
}
+ if ((tcph->fin || tcph->rst) &&
+ !test_bit(NF_FLOW_CLOSING, &flow->flags))
+ set_bit(NF_FLOW_CLOSING, &flow->flags);
+
return 0;
}
@@ -157,7 +161,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
tuple->encap[i].proto = skb->protocol;
break;
case htons(ETH_P_PPP_SES):
- phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+ phdr = (struct pppoe_hdr *)skb_network_header(skb);
tuple->encap[i].id = ntohs(phdr->sid);
tuple->encap[i].proto = skb->protocol;
break;
@@ -273,13 +277,17 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
u32 *offset)
{
struct vlan_ethhdr *veth;
+ __be16 inner_proto;
switch (skb->protocol) {
case htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
+ return false;
+
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
if (veth->h_vlan_encapsulated_proto == proto) {
*offset += VLAN_HLEN;
@@ -287,7 +295,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
}
break;
case htons(ETH_P_PPP_SES):
- if (nf_flow_pppoe_proto(skb) == proto) {
+ if (nf_flow_pppoe_proto(skb, &inner_proto) &&
+ inner_proto == proto) {
*offset += PPPOE_SES_HLEN;
return true;
}
@@ -316,7 +325,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
skb_reset_network_header(skb);
break;
case htons(ETH_P_PPP_SES):
- skb->protocol = nf_flow_pppoe_proto(skb);
+ skb->protocol = __nf_flow_pppoe_proto(skb);
skb_pull(skb, PPPOE_SES_HLEN);
skb_reset_network_header(skb);
break;
@@ -432,7 +441,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
+ rt = dst_rtable(tuplehash->tuple.dst_cache);
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -444,7 +453,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
+ rt = dst_rtable(tuplehash->tuple.dst_cache);
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -727,7 +736,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+ rt = dst_rt6_info(tuplehash->tuple.dst_cache);
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
@@ -739,7 +748,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+ rt = dst_rt6_info(tuplehash->tuple.dst_cache);
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index a010b25076ca..e06bc36f49fe 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
struct list_head *block_cb_list)
{
struct flow_cls_offload cls_flow = {};
+ struct netlink_ext_ack extack = {};
struct flow_block_cb *block_cb;
- struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
int err, i = 0;
@@ -1192,7 +1192,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
int err;
if (!nf_flowtable_hw_offload(flowtable))
- return 0;
+ return nf_flow_offload_xdp_setup(flowtable, dev, cmd);
if (dev->netdev_ops->ndo_setup_tc)
err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
diff --git a/net/netfilter/nf_flow_table_xdp.c b/net/netfilter/nf_flow_table_xdp.c
new file mode 100644
index 000000000000..e1252d042699
--- /dev/null
+++ b/net/netfilter/nf_flow_table_xdp.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct flow_offload_xdp_ft {
+ struct list_head head;
+ struct nf_flowtable *ft;
+ struct rcu_head rcuhead;
+};
+
+struct flow_offload_xdp {
+ struct hlist_node hnode;
+ unsigned long net_device_addr;
+ struct list_head head;
+};
+
+#define NF_XDP_HT_BITS 4
+static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS);
+static DEFINE_MUTEX(nf_xdp_hashtable_lock);
+
+/* caller must hold rcu read lock */
+struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev)
+{
+ unsigned long key = (unsigned long)dev;
+ struct flow_offload_xdp *iter;
+
+ hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ struct flow_offload_xdp_ft *ft_elem;
+
+ /* The user is supposed to insert a given net_device
+ * just into a single nf_flowtable so we always return
+ * the first element here.
+ */
+ ft_elem = list_first_or_null_rcu(&iter->head,
+ struct flow_offload_xdp_ft,
+ head);
+ return ft_elem ? ft_elem->ft : NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft,
+ const struct net_device *dev)
+{
+ struct flow_offload_xdp *iter, *elem = NULL;
+ unsigned long key = (unsigned long)dev;
+ struct flow_offload_xdp_ft *ft_elem;
+
+ ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT);
+ if (!ft_elem)
+ return -ENOMEM;
+
+ ft_elem->ft = ft;
+
+ mutex_lock(&nf_xdp_hashtable_lock);
+
+ hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (!elem) {
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT);
+ if (!elem)
+ goto err_unlock;
+
+ elem->net_device_addr = key;
+ INIT_LIST_HEAD(&elem->head);
+ hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key);
+ }
+ list_add_tail_rcu(&ft_elem->head, &elem->head);
+
+ mutex_unlock(&nf_xdp_hashtable_lock);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&nf_xdp_hashtable_lock);
+ kfree(ft_elem);
+
+ return -ENOMEM;
+}
+
+static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft,
+ const struct net_device *dev)
+{
+ struct flow_offload_xdp *iter, *elem = NULL;
+ unsigned long key = (unsigned long)dev;
+
+ mutex_lock(&nf_xdp_hashtable_lock);
+
+ hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (elem) {
+ struct flow_offload_xdp_ft *ft_elem, *ft_next;
+
+ list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) {
+ if (ft_elem->ft == ft) {
+ list_del_rcu(&ft_elem->head);
+ kfree_rcu(ft_elem, rcuhead);
+ }
+ }
+
+ if (list_empty(&elem->head))
+ hash_del_rcu(&elem->hnode);
+ else
+ elem = NULL;
+ }
+
+ mutex_unlock(&nf_xdp_hashtable_lock);
+
+ if (elem) {
+ synchronize_rcu();
+ kfree(elem);
+ }
+}
+
+int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd)
+{
+ switch (cmd) {
+ case FLOW_BLOCK_BIND:
+ return nf_flowtable_by_dev_insert(flowtable, dev);
+ case FLOW_BLOCK_UNBIND:
+ nf_flowtable_by_dev_remove(flowtable, dev);
+ return 0;
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
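Since nf_flowtable_by_dev() must run under the RCU read lock (as the comment above notes), a brief hedged sketch of a kernel-side caller; the function name is illustrative and mirrors what bpf_xdp_flow_tuple_lookup() does:

/* Illustrative RCU-protected lookup of the flowtable bound to a device. */
static bool example_dev_has_flowtable(const struct net_device *dev)
{
	bool bound;

	rcu_read_lock();
	bound = nf_flowtable_by_dev(dev) != NULL;
	rcu_read_unlock();

	return bound;
}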
diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
index 00e89ffd78f6..2d890dd04ff8 100644
--- a/net/netfilter/nf_hooks_lwtunnel.c
+++ b/net/netfilter/nf_hooks_lwtunnel.c
@@ -3,6 +3,9 @@
#include <linux/sysctl.h>
#include <net/lwtunnel.h>
#include <net/netfilter/nf_hooks_lwtunnel.h>
+#include <linux/netfilter.h>
+
+#include "nf_internals.h"
static inline int nf_hooks_lwtunnel_get(void)
{
@@ -25,7 +28,7 @@ static inline int nf_hooks_lwtunnel_set(int enable)
}
#ifdef CONFIG_SYSCTL
-int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int proc_nf_hooks_lwtunnel_enabled = 0;
@@ -50,4 +53,71 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
return ret;
}
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
+
+static struct ctl_table nf_lwtunnel_sysctl_table[] = {
+ {
+ .procname = "nf_hooks_lwtunnel",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
+ },
+};
+
+static int __net_init nf_lwtunnel_net_init(struct net *net)
+{
+ struct ctl_table_header *hdr;
+ struct ctl_table *table;
+
+ table = nf_lwtunnel_sysctl_table;
+ if (!net_eq(net, &init_net)) {
+ table = kmemdup(nf_lwtunnel_sysctl_table,
+ sizeof(nf_lwtunnel_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto err_alloc;
+ }
+
+ hdr = register_net_sysctl_sz(net, "net/netfilter", table,
+ ARRAY_SIZE(nf_lwtunnel_sysctl_table));
+ if (!hdr)
+ goto err_reg;
+
+ net->nf.nf_lwtnl_dir_header = hdr;
+
+ return 0;
+err_reg:
+ if (!net_eq(net, &init_net))
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void __net_exit nf_lwtunnel_net_exit(struct net *net)
+{
+ const struct ctl_table *table;
+
+ table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+}
+
+static struct pernet_operations nf_lwtunnel_net_ops = {
+ .init = nf_lwtunnel_net_init,
+ .exit = nf_lwtunnel_net_exit,
+};
+
+int __init netfilter_lwtunnel_init(void)
+{
+ return register_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+
+void netfilter_lwtunnel_fini(void)
+{
+ unregister_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+#else
+int __init netfilter_lwtunnel_init(void) { return 0; }
+void netfilter_lwtunnel_fini(void) {}
#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 832ae64179f0..25403023060b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
/* nf_log.c */
int __init netfilter_log_init(void);
+#ifdef CONFIG_LWTUNNEL
+/* nf_hooks_lwtunnel.c */
+int __init netfilter_lwtunnel_init(void);
+void netfilter_lwtunnel_fini(void);
+#endif
+
/* core.c */
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index e16f158388bb..6dd0de33eebd 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -31,10 +31,10 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
int i;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
- if (loggers[pf][i] == NULL)
+ log = nft_log_dereference(loggers[pf][i]);
+ if (!log)
continue;
- log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
@@ -156,6 +156,11 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
struct nf_logger *logger;
int ret = -ENOENT;
+ if (pf >= ARRAY_SIZE(loggers))
+ return -EINVAL;
+ if (type >= NF_LOG_TYPE_MAX)
+ return -EINVAL;
+
if (pf == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, type);
if (ret < 0)
@@ -390,7 +395,7 @@ static const struct seq_operations nflog_seq_ops = {
#ifdef CONFIG_SYSCTL
static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO];
static struct ctl_table_header *nf_log_sysctl_fhdr;
static struct ctl_table nf_log_sysctl_ftable[] = {
@@ -401,10 +406,9 @@ static struct ctl_table nf_log_sysctl_ftable[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
-static int nf_log_proc_dostring(struct ctl_table *table, int write,
+static int nf_log_proc_dostring(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
@@ -509,7 +513,7 @@ err_alloc:
static void netfilter_log_sysctl_exit(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->nf.nf_log_dir_header->ctl_table_arg;
unregister_net_sysctl_table(net->nf.nf_log_dir_header);
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 6e3b2f58855f..481be15609b1 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -54,9 +54,9 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_nat_kfunc_set)
+BTF_KFUNCS_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_nat_kfunc_set)
+BTF_KFUNCS_END(nf_nat_kfunc_set)
static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 016c816d91cb..aad84aabd7f1 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -183,7 +183,35 @@ hash_by_src(const struct net *net,
return reciprocal_scale(hash, nf_nat_htable_size);
}
-/* Is this tuple already taken? (not by us) */
+/**
+ * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_conntrack: our (unconfirmed) conntrack entry
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple in either direction.
+ *
+ * Example:
+ * INITIATOR -> NAT/PAT -> RESPONDER
+ *
+ * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite).
+ * Then, later, NAT/PAT itself also connects to RESPONDER.
+ *
+ * This will not work if the SNAT done earlier uses the same IP:PORT source pair.
+ *
+ * Conntrack table has:
+ * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * and new locally originating connection wants:
+ * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * ... which would mean incoming reply packets could not be attributed
+ * unambiguously to either the existing or the new entry (identical IP_CT_DIR_REPLY tuple).
+ *
+ * Return: true if the proposed NAT mapping collides with an existing entry.
+ */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
@@ -200,6 +228,94 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
+static bool nf_nat_allow_clash(const struct nf_conn *ct)
+{
+ return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash;
+}
+
+/**
+ * nf_nat_used_tuple_new - check if to-be-inserted conntrack collides with existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_ct: our (unconfirmed) conntrack entry
+ *
+ * Same as nf_nat_used_tuple(), but also checks for a rare clash in the
+ * reverse direction. Should be called only when @tuple has not been
+ * altered, i.e. @ignored_ct will not be subject to NAT.
+ *
+ * Return: true if the proposed NAT mapping collides with an existing entry.
+ */
+static noinline bool
+nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_ct)
+{
+ static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
+ const struct nf_conntrack_tuple_hash *thash;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conn *ct;
+ bool taken = true;
+ struct net *net;
+
+ if (!nf_nat_used_tuple(tuple, ignored_ct))
+ return false;
+
+ if (!nf_nat_allow_clash(ignored_ct))
+ return true;
+
+ /* Initial choice clashes with existing conntrack.
+ * Check for (rare) reverse collision.
+ *
+ * This can happen when new packets are received in both directions
+ * at the exact same time on different CPUs.
+ *
+ * Without SMP, the first packet creates a new conntrack entry and the
+ * second packet is resolved as an established reply packet.
+ *
+ * With parallel processing, both packets could be picked up as
+ * new and both get their own ct entry allocated.
+ *
+ * If @ignored_ct and the colliding ct are not subject to NAT then
+ * pretend the tuple is available and let later clash resolution
+ * handle this at insertion time.
+ *
+ * Without it, the 'reply' packet would have its source port rewritten
+ * by the NAT engine.
+ */
+ if (READ_ONCE(ignored_ct->status) & uses_nat)
+ return true;
+
+ net = nf_ct_net(ignored_ct);
+ zone = nf_ct_zone(ignored_ct);
+
+ thash = nf_conntrack_find_get(net, zone, tuple);
+ if (unlikely(!thash)) /* clashing entry went away */
+ return false;
+
+ ct = nf_ct_tuplehash_to_ctrack(thash);
+
+ /* NB: IP_CT_DIR_ORIGINAL should be impossible because
+ * nf_nat_used_tuple() handles origin collisions.
+ *
+ * Handle the remote chance that another CPU confirmed its ct right after.
+ */
+ if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
+ goto out;
+
+ /* clashing connection subject to NAT? Retry with new tuple. */
+ if (READ_ONCE(ct->status) & uses_nat)
+ goto out;
+
+ if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+ nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
+ taken = false;
+ goto out;
+ }
+out:
+ nf_ct_put(ct);
+ return taken;
+}
+
static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{
static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
@@ -611,7 +727,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (nf_in_range(orig_tuple, range)) {
- if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ if (!nf_nat_used_tuple_new(orig_tuple, ct)) {
*tuple = *orig_tuple;
return;
}
@@ -974,10 +1090,8 @@ static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
range->flags |= NF_NAT_RANGE_MAP_IPS;
}
- if (tb[CTA_NAT_V4_MAXIP])
- range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
- else
- range->max_addr.ip = range->min_addr.ip;
+ range->max_addr.ip = nla_get_be32_default(tb[CTA_NAT_V4_MAXIP],
+ range->min_addr.ip);
return 0;
}
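nla_get_be32_default() folds the open-coded "attribute present? use it : fall back" pattern into one call. A sketch of the assumed helper semantics (the real definition lives in include/net/netlink.h):

static inline __be32 example_nla_get_be32_default(const struct nlattr *nla,
						  __be32 defvalue)
{
	if (nla)
		return nla_get_be32(nla);

	return defvalue;
}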
@@ -1104,7 +1218,7 @@ int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
if (!nat_proto_net->nat_hook_ops) {
WARN_ON(nat_proto_net->users != 0);
- nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
+ nat_ops = kmemdup_array(orig_nat_ops, ops_count, sizeof(*orig_nat_ops), GFP_KERNEL);
if (!nat_ops) {
mutex_unlock(&nf_nat_proto_mutex);
return -ENOMEM;
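kmemdup_array() takes the element count and element size as separate arguments so the byte-size multiplication can be checked for overflow, instead of the caller open-coding sizeof(*ptr) * count. A sketch of the assumed equivalent, using size_mul() from <linux/overflow.h> (which saturates to SIZE_MAX on overflow, making the duplication fail safely):

static inline void *example_kmemdup_array(const void *src, size_t count,
					  size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}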
@@ -1208,7 +1322,6 @@ static const struct nf_nat_hook nat_hook = {
#ifdef CONFIG_XFRM
.decode_session = __nf_nat_decode_session,
#endif
- .manip_pkt = nf_nat_manip_pkt,
.remove_nat_bysrc = nf_nat_cleanup_conntrack,
};
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e2f334f70281..7f12e56e6e52 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -248,109 +248,3 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
-
-static unsigned int nf_iterate(struct sk_buff *skb,
- struct nf_hook_state *state,
- const struct nf_hook_entries *hooks,
- unsigned int *index)
-{
- const struct nf_hook_entry *hook;
- unsigned int verdict, i = *index;
-
- while (i < hooks->num_hook_entries) {
- hook = &hooks->hooks[i];
-repeat:
- verdict = nf_hook_entry_hookfn(hook, skb, state);
- if (verdict != NF_ACCEPT) {
- *index = i;
- if (verdict != NF_REPEAT)
- return verdict;
- goto repeat;
- }
- i++;
- }
-
- *index = i;
- return NF_ACCEPT;
-}
-
-static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
-{
- switch (pf) {
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
- case NFPROTO_BRIDGE:
- return rcu_dereference(net->nf.hooks_bridge[hooknum]);
-#endif
- case NFPROTO_IPV4:
- return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
- case NFPROTO_IPV6:
- return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- return NULL;
-}
-
-/* Caller must hold rcu read-side lock */
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
-{
- const struct nf_hook_entry *hook_entry;
- const struct nf_hook_entries *hooks;
- struct sk_buff *skb = entry->skb;
- const struct net *net;
- unsigned int i;
- int err;
- u8 pf;
-
- net = entry->state.net;
- pf = entry->state.pf;
-
- hooks = nf_hook_entries_head(net, pf, entry->state.hook);
-
- i = entry->hook_index;
- if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
- kfree_skb(skb);
- nf_queue_entry_free(entry);
- return;
- }
-
- hook_entry = &hooks->hooks[i];
-
- /* Continue traversal iff userspace said ok... */
- if (verdict == NF_REPEAT)
- verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
-
- if (verdict == NF_ACCEPT) {
- if (nf_reroute(skb, entry) < 0)
- verdict = NF_DROP;
- }
-
- if (verdict == NF_ACCEPT) {
-next_hook:
- ++i;
- verdict = nf_iterate(skb, &entry->state, hooks, &i);
- }
-
- switch (verdict & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_STOP:
- local_bh_disable();
- entry->state.okfn(entry->state.net, entry->state.sk, skb);
- local_bh_enable();
- break;
- case NF_QUEUE:
- err = nf_queue(skb, &entry->state, i, verdict);
- if (err == 1)
- goto next_hook;
- break;
- case NF_STOLEN:
- break;
- default:
- kfree_skb(skb);
- }
-
- nf_queue_entry_free(entry);
-}
-EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index fbbc4fd37349..3fa3f5dfb264 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/tcp.h>
#include <net/netns/generic.h>
#include <linux/proc_fs.h>
@@ -800,7 +800,7 @@ synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
- iph->hop_limit = net->ipv6.devconf_all->hop_limit;
+ iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit);
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
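devconf_all->hop_limit can be changed at any time from sysctl context without a lock, so the lockless read in synproxy is annotated with READ_ONCE() to pair with the writer's marked store and document the intentional data race. A minimal sketch of the pairing, with illustrative names:

static int example_limit;			/* updated from sysctl, read locklessly */

static void example_store(int val)
{
	WRITE_ONCE(example_limit, val);		/* marked store, no tearing */
}

static int example_load(void)
{
	return READ_ONCE(example_limit);	/* marked load, no tearing, no refetch */
}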
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1683dc196b59..c2df81b7e950 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -26,12 +26,14 @@
#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
#define NFT_SET_MAX_ANONLEN 16
+/* limit compaction to avoid huge kmalloc/krealloc sizes. */
+#define NFT_MAX_SET_NELEMS ((2048 - sizeof(struct nft_trans_elem)) / sizeof(struct nft_trans_one_elem))
+
unsigned int nf_tables_net_id __read_mostly;
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
-static LIST_HEAD(nf_tables_destroy_list);
static LIST_HEAD(nf_tables_gc_list);
static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
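NFT_MAX_SET_NELEMS caps how many element records a single (collapsed) element transaction may carry, so the krealloc() in the collapse path further below never grows past a small slab object. A worked example of the arithmetic; the structure sizes are assumptions for illustration only:

/*
 * Assuming, purely for illustration:
 *   sizeof(struct nft_trans_elem)     == 96   (transaction header)
 *   sizeof(struct nft_trans_one_elem) == 16   (priv + update pointer)
 *
 * NFT_MAX_SET_NELEMS = (2048 - 96) / 16 = 122
 *
 * i.e. one collapsed transaction never exceeds a 2 KiB allocation.
 */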
@@ -122,7 +124,6 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s
table->validate_state = new_validate_state;
}
static void nf_tables_trans_destroy_work(struct work_struct *w);
-static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
static void nft_trans_gc_work(struct work_struct *work);
static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
@@ -146,6 +147,8 @@ static void nft_ctx_init(struct nft_ctx *ctx,
ctx->report = nlmsg_report(nlh);
ctx->flags = nlh->nlmsg_flags;
ctx->seq = nlh->nlmsg_seq;
+
+ bitmap_zero(ctx->reg_inited, NFT_REG32_NUM);
}
static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
@@ -153,14 +156,18 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
{
struct nft_trans *trans;
- trans = kzalloc(sizeof(struct nft_trans) + size, gfp);
+ trans = kzalloc(size, gfp);
if (trans == NULL)
return NULL;
INIT_LIST_HEAD(&trans->list);
- INIT_LIST_HEAD(&trans->binding_list);
trans->msg_type = msg_type;
- trans->ctx = *ctx;
+
+ trans->net = ctx->net;
+ trans->table = ctx->table;
+ trans->seq = ctx->seq;
+ trans->flags = ctx->flags;
+ trans->report = ctx->report;
return trans;
}
@@ -171,10 +178,26 @@ static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
}
+static struct nft_trans_binding *nft_trans_get_binding(struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWCHAIN:
+ case NFT_MSG_NEWSET:
+ return container_of(trans, struct nft_trans_binding, nft_trans);
+ }
+
+ return NULL;
+}
+
static void nft_trans_list_del(struct nft_trans *trans)
{
+ struct nft_trans_binding *trans_binding;
+
list_del(&trans->list);
- list_del(&trans->binding_list);
+
+ trans_binding = nft_trans_get_binding(trans);
+ if (trans_binding)
+ list_del(&trans_binding->binding_list);
}
static void nft_trans_destroy(struct nft_trans *trans)
@@ -236,7 +259,7 @@ static void __nft_chain_trans_bind(const struct nft_ctx *ctx,
nft_trans_chain_bound(trans) = bind;
break;
case NFT_MSG_NEWRULE:
- if (trans->ctx.chain == chain)
+ if (nft_trans_rule_chain(trans) == chain)
nft_trans_rule_bound(trans) = bind;
break;
}
@@ -369,24 +392,132 @@ static void nf_tables_unregister_hook(struct net *net,
return __nf_tables_unregister_hook(net, table, chain, false);
}
+static bool nft_trans_collapse_set_elem_allowed(const struct nft_trans_elem *a, const struct nft_trans_elem *b)
+{
+ /* NB: the ->bound equality check is defensive; at this time we only merge
+ * a new nft_trans_elem transaction request with the transaction tail
+ * element, but a->bound != b->bound would imply a NEWRULE transaction
+ * is queued in-between.
+ *
+ * The set check is mandatory; the NFT_MAX_SET_NELEMS check prevents
+ * huge krealloc() requests.
+ */
+ return a->set == b->set && a->bound == b->bound && a->nelems < NFT_MAX_SET_NELEMS;
+}
+
+static bool nft_trans_collapse_set_elem(struct nftables_pernet *nft_net,
+ struct nft_trans_elem *tail,
+ struct nft_trans_elem *trans,
+ gfp_t gfp)
+{
+ unsigned int nelems, old_nelems = tail->nelems;
+ struct nft_trans_elem *new_trans;
+
+ if (!nft_trans_collapse_set_elem_allowed(tail, trans))
+ return false;
+
+ /* "cannot happen", at this time userspace element add
+ * requests always allocate a new transaction element.
+ *
+ * This serves as a reminder to adjust the list_add_tail
+ * logic below in case this ever changes.
+ */
+ if (WARN_ON_ONCE(trans->nelems != 1))
+ return false;
+
+ if (check_add_overflow(old_nelems, trans->nelems, &nelems))
+ return false;
+
+ /* krealloc might free tail, which would invalidate its list pointers */
+ list_del_init(&tail->nft_trans.list);
+
+ new_trans = krealloc(tail, struct_size(tail, elems, nelems), gfp);
+ if (!new_trans) {
+ list_add_tail(&tail->nft_trans.list, &nft_net->commit_list);
+ return false;
+ }
+
+ /*
+ * new_trans->nft_trans.list contains garbage, but
+ * list_add_tail() doesn't care.
+ */
+ new_trans->nelems = nelems;
+ new_trans->elems[old_nelems] = trans->elems[0];
+ list_add_tail(&new_trans->nft_trans.list, &nft_net->commit_list);
+
+ return true;
+}
+
+static bool nft_trans_try_collapse(struct nftables_pernet *nft_net,
+ struct nft_trans *trans, gfp_t gfp)
+{
+ struct nft_trans *tail;
+
+ if (list_empty(&nft_net->commit_list))
+ return false;
+
+ tail = list_last_entry(&nft_net->commit_list, struct nft_trans, list);
+
+ if (tail->msg_type != trans->msg_type)
+ return false;
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSETELEM:
+ case NFT_MSG_DELSETELEM:
+ return nft_trans_collapse_set_elem(nft_net,
+ nft_trans_container_elem(tail),
+ nft_trans_container_elem(trans), gfp);
+ }
+
+ return false;
+}
+
static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nft_trans_binding *binding;
+ struct nft_trans_set *trans_set;
+
+ list_add_tail(&trans->list, &nft_net->commit_list);
+
+ binding = nft_trans_get_binding(trans);
+ if (!binding)
+ return;
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
+ trans_set = nft_trans_container_set(trans);
+
if (!nft_trans_set_update(trans) &&
nft_set_is_anonymous(nft_trans_set(trans)))
- list_add_tail(&trans->binding_list, &nft_net->binding_list);
+ list_add_tail(&binding->binding_list, &nft_net->binding_list);
+
+ list_add_tail(&trans_set->list_trans_newset, &nft_net->commit_set_list);
break;
case NFT_MSG_NEWCHAIN:
if (!nft_trans_chain_update(trans) &&
nft_chain_binding(nft_trans_chain(trans)))
- list_add_tail(&trans->binding_list, &nft_net->binding_list);
+ list_add_tail(&binding->binding_list, &nft_net->binding_list);
break;
}
+}
- list_add_tail(&trans->list, &nft_net->commit_list);
+static void nft_trans_commit_list_add_elem(struct net *net, struct nft_trans *trans,
+ gfp_t gfp)
+{
+ struct nftables_pernet *nft_net = nft_pernet(net);
+
+ WARN_ON_ONCE(trans->msg_type != NFT_MSG_NEWSETELEM &&
+ trans->msg_type != NFT_MSG_DELSETELEM);
+
+ might_alloc(gfp);
+
+ if (nft_trans_try_collapse(nft_net, trans, gfp)) {
+ kfree(trans);
+ return;
+ }
+
+ nft_trans_commit_list_add_tail(net, trans);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
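Note the ordering in nft_trans_collapse_set_elem() above: the tail transaction is unlinked with list_del_init() before krealloc(), because a successful krealloc() may move (and free) the old object, which would leave dangling list_head pointers on the commit list. A stripped-down sketch of that unlink-grow-relink pattern with a hypothetical node type:

struct example_node {
	struct list_head list;
	unsigned int nelems;
	void *elems[];				/* grown via krealloc() */
};

static bool example_grow(struct list_head *head, struct example_node *tail,
			 void *new_elem, gfp_t gfp)
{
	struct example_node *n;

	list_del_init(&tail->list);		/* old object may be freed below */

	n = krealloc(tail, struct_size(tail, elems, tail->nelems + 1), gfp);
	if (!n) {
		list_add_tail(&tail->list, head);	/* restore on failure */
		return false;
	}

	n->elems[n->nelems++] = new_elem;
	list_add_tail(&n->list, head);		/* relink the possibly-moved object */
	return true;
}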
@@ -416,11 +547,28 @@ static int nft_deltable(struct nft_ctx *ctx)
return err;
}
-static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+static struct nft_trans *
+nft_trans_alloc_chain(const struct nft_ctx *ctx, int msg_type)
{
+ struct nft_trans_chain *trans_chain;
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+ if (!trans)
+ return NULL;
+
+ trans_chain = nft_trans_container_chain(trans);
+ INIT_LIST_HEAD(&trans_chain->nft_trans_binding.binding_list);
+ trans_chain->chain = ctx->chain;
+
+ return trans;
+}
+
+static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc_chain(ctx, msg_type);
if (trans == NULL)
return ERR_PTR(-ENOMEM);
@@ -432,7 +580,6 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID]));
}
}
- nft_trans_chain(trans) = ctx->chain;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
@@ -505,6 +652,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID]));
}
nft_trans_rule(trans) = rule;
+ nft_trans_rule_chain(trans) = ctx->chain;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
@@ -560,12 +708,17 @@ static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
struct nft_set *set,
const struct nft_set_desc *desc)
{
+ struct nft_trans_set *trans_set;
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
if (trans == NULL)
return -ENOMEM;
+ trans_set = nft_trans_container_set(trans);
+ INIT_LIST_HEAD(&trans_set->nft_trans_binding.binding_list);
+ INIT_LIST_HEAD(&trans_set->list_trans_newset);
+
if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
nft_trans_set_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
@@ -594,6 +747,12 @@ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
+ nft_set_elem_change_active(ctx->net, set, ext);
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
return 0;
@@ -617,6 +776,7 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
if (!nft_set_elem_active(ext, genmask))
continue;
+ nft_set_elem_change_active(ctx->net, set, ext);
nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
break;
}
@@ -626,6 +786,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
+ .type = NFT_ITER_UPDATE,
.fn = nft_mapelem_deactivate,
};
@@ -1195,8 +1356,30 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_WAS_ORPHAN (__NFT_TABLE_F_INTERNAL << 2)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
- __NFT_TABLE_F_WAS_AWAKEN)
+ __NFT_TABLE_F_WAS_AWAKEN | \
+ __NFT_TABLE_F_WAS_ORPHAN)
+
+static bool nft_table_pending_update(const struct nft_ctx *ctx)
+{
+ struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+ struct nft_trans *trans;
+
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return true;
+
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->table == ctx->table &&
+ ((trans->msg_type == NFT_MSG_NEWCHAIN &&
+ nft_trans_chain_update(trans)) ||
+ (trans->msg_type == NFT_MSG_DELCHAIN &&
+ nft_is_base_chain(nft_trans_chain(trans)))))
+ return true;
+ }
+
+ return false;
+}
static int nf_tables_updtable(struct nft_ctx *ctx)
{
@@ -1211,17 +1394,20 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if (flags & ~NFT_TABLE_F_MASK)
return -EOPNOTSUPP;
- if (flags == ctx->table->flags)
+ if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
return 0;
if ((nft_table_has_owner(ctx->table) &&
!(flags & NFT_TABLE_F_OWNER)) ||
- (!nft_table_has_owner(ctx->table) &&
- flags & NFT_TABLE_F_OWNER))
+ (flags & NFT_TABLE_F_OWNER &&
+ !nft_table_is_orphan(ctx->table)))
+ return -EOPNOTSUPP;
+
+ if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
- if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ if (nft_table_pending_update(ctx))
return -EINVAL;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
@@ -1246,6 +1432,13 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
}
}
+ if ((flags & NFT_TABLE_F_OWNER) &&
+ !nft_table_has_owner(ctx->table)) {
+ ctx->table->nlpid = ctx->portid;
+ ctx->table->flags |= NFT_TABLE_F_OWNER |
+ __NFT_TABLE_F_WAS_ORPHAN;
+ }
+
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
@@ -1575,15 +1768,15 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
return nft_flush_table(&ctx);
}
-static void nf_tables_table_destroy(struct nft_ctx *ctx)
+static void nf_tables_table_destroy(struct nft_table *table)
{
- if (WARN_ON(ctx->table->use > 0))
+ if (WARN_ON(table->use > 0))
return;
- rhltable_destroy(&ctx->table->chains_ht);
- kfree(ctx->table->name);
- kfree(ctx->table->udata);
- kfree(ctx->table);
+ rhltable_destroy(&table->chains_ht);
+ kfree(table->name);
+ kfree(table->udata);
+ kfree(table);
}
void nft_register_chain_type(const struct nft_chain_type *ctype)
@@ -1730,7 +1923,8 @@ nla_put_failure:
return -ENOSPC;
}
-static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
+static int nft_dump_basechain_hook(struct sk_buff *skb,
+ const struct net *net, int family,
const struct nft_base_chain *basechain,
const struct list_head *hook_list)
{
@@ -1755,19 +1949,21 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
if (!hook_list)
hook_list = &basechain->hook_list;
- list_for_each_entry(hook, hook_list, list) {
+ list_for_each_entry_rcu(hook, hook_list, list,
+ lockdep_commit_lock_is_held(net)) {
if (!first)
first = hook;
- if (nla_put_string(skb, NFTA_DEVICE_NAME,
- hook->ops.dev->name))
+ if (nla_put(skb, NFTA_DEVICE_NAME,
+ hook->ifnamelen, hook->ifname))
goto nla_put_failure;
n++;
}
nla_nest_end(skb, nest_devs);
if (n == 1 &&
- nla_put_string(skb, NFTA_HOOK_DEV, first->ops.dev->name))
+ nla_put(skb, NFTA_HOOK_DEV,
+ first->ifnamelen, first->ifname))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
@@ -1806,7 +2002,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
const struct nft_base_chain *basechain = nft_base_chain(chain);
struct nft_stats __percpu *stats;
- if (nft_dump_basechain_hook(skb, family, basechain, hook_list))
+ if (nft_dump_basechain_hook(skb, net, family, basechain, hook_list))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
@@ -1988,14 +2184,14 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
err = nla_parse_nested_deprecated(tb, NFTA_COUNTER_MAX, attr,
nft_counter_policy, NULL);
if (err < 0)
- return ERR_PTR(err);
+ return ERR_PTR_PCPU(err);
if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
- return ERR_PTR(-EINVAL);
+ return ERR_PTR_PCPU(-EINVAL);
newstats = netdev_alloc_pcpu_stats(struct nft_stats);
if (newstats == NULL)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR_PCPU(-ENOMEM);
/* Restore old counters on this cpu, no problem. Per-cpu statistics
* are not exposed to userspace.
@@ -2009,18 +2205,19 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
return newstats;
}
-static void nft_chain_stats_replace(struct nft_trans *trans)
+static void nft_chain_stats_replace(struct nft_trans_chain *trans)
{
- struct nft_base_chain *chain = nft_base_chain(trans->ctx.chain);
+ const struct nft_trans *t = &trans->nft_trans_binding.nft_trans;
+ struct nft_base_chain *chain = nft_base_chain(trans->chain);
- if (!nft_trans_chain_stats(trans))
+ if (!trans->stats)
return;
- nft_trans_chain_stats(trans) =
- rcu_replace_pointer(chain->stats, nft_trans_chain_stats(trans),
- lockdep_commit_lock_is_held(trans->ctx.net));
+ trans->stats =
+ rcu_replace_pointer(chain->stats, trans->stats,
+ lockdep_commit_lock_is_held(t->net));
- if (!nft_trans_chain_stats(trans))
+ if (!trans->stats)
static_branch_inc(&nft_counters_enabled);
}
@@ -2038,9 +2235,9 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
kvfree(chain->blob_next);
}
-void nf_tables_chain_destroy(struct nft_ctx *ctx)
+void nf_tables_chain_destroy(struct nft_chain *chain)
{
- struct nft_chain *chain = ctx->chain;
+ const struct nft_table *table = chain->table;
struct nft_hook *hook, *next;
if (WARN_ON(chain->use > 0))
@@ -2052,7 +2249,7 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx)
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
- if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) {
+ if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
list_for_each_entry_safe(hook, next,
&basechain->hook_list, list) {
list_del_rcu(&hook->list);
@@ -2078,7 +2275,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
const struct nlattr *attr)
{
struct net_device *dev;
- char ifname[IFNAMSIZ];
struct nft_hook *hook;
int err;
@@ -2088,12 +2284,17 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
goto err_hook_alloc;
}
- nla_strscpy(ifname, attr, IFNAMSIZ);
+ err = nla_strscpy(hook->ifname, attr, IFNAMSIZ);
+ if (err < 0)
+ goto err_hook_dev;
+
+ hook->ifnamelen = nla_len(attr);
+
/* nf_tables_netdev_event() is called under rtnl_mutex, this is
* indirectly serializing all the other holders of the commit_mutex with
* the rtnl_mutex.
*/
- dev = __dev_get_by_name(net, ifname);
+ dev = __dev_get_by_name(net, hook->ifname);
if (!dev) {
err = -ENOENT;
goto err_hook_dev;
@@ -2114,7 +2315,7 @@ static struct nft_hook *nft_hook_list_find(struct list_head *hook_list,
struct nft_hook *hook;
list_for_each_entry(hook, hook_list, list) {
- if (this->ops.dev == hook->ops.dev)
+ if (!strcmp(hook->ifname, this->ifname))
return hook;
}
@@ -2400,9 +2601,8 @@ int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
static u64 chain_id;
-static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
- u8 policy, u32 flags,
- struct netlink_ext_ack *extack)
+static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 policy,
+ u32 flags, struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -2418,6 +2618,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct nft_stats __percpu *stats = NULL;
struct nft_chain_hook hook = {};
+ if (table->flags & __NFT_TABLE_F_UPDATE)
+ return -EINVAL;
+
if (flags & NFT_CHAIN_BINDING)
return -EOPNOTSUPP;
@@ -2435,10 +2638,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (nla[NFTA_CHAIN_COUNTERS]) {
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
- if (IS_ERR(stats)) {
+ if (IS_ERR_PCPU(stats)) {
nft_chain_release_hook(&hook);
kfree(basechain);
- return PTR_ERR(stats);
+ return PTR_ERR_PCPU(stats);
}
rcu_assign_pointer(basechain->stats, stats);
}
@@ -2538,7 +2741,7 @@ err_chain_add:
err_trans:
nft_use_dec_restore(&table->use);
err_destroy_chain:
- nf_tables_chain_destroy(ctx);
+ nf_tables_chain_destroy(chain);
return err;
}
@@ -2552,7 +2755,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
struct nft_table *table = ctx->table;
struct nft_chain *chain = ctx->chain;
struct nft_chain_hook hook = {};
- struct nft_stats *stats = NULL;
+ struct nft_stats __percpu *stats = NULL;
struct nft_hook *h, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
@@ -2619,17 +2822,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
}
- if (nla[NFTA_CHAIN_COUNTERS]) {
- if (!nft_is_base_chain(chain)) {
- err = -EOPNOTSUPP;
- goto err_hooks;
- }
-
- stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
- if (IS_ERR(stats)) {
- err = PTR_ERR(stats);
- goto err_hooks;
- }
+ if (table->flags & __NFT_TABLE_F_UPDATE &&
+ !list_empty(&hook.list)) {
+ NL_SET_BAD_ATTR(extack, attr);
+ err = -EOPNOTSUPP;
+ goto err_hooks;
}
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -2646,9 +2843,22 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
unregister = true;
+
+ if (nla[NFTA_CHAIN_COUNTERS]) {
+ if (!nft_is_base_chain(chain)) {
+ err = -EOPNOTSUPP;
+ goto err_hooks;
+ }
+
+ stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+ if (IS_ERR_PCPU(stats)) {
+ err = PTR_ERR_PCPU(stats);
+ goto err_hooks;
+ }
+ }
+
err = -ENOMEM;
- trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
- sizeof(struct nft_trans_chain));
+ trans = nft_trans_alloc_chain(ctx, NFT_MSG_NEWCHAIN);
if (trans == NULL)
goto err_trans;
@@ -2674,7 +2884,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
err = -EEXIST;
list_for_each_entry(tmp, &nft_net->commit_list, list) {
if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
- tmp->ctx.table == table &&
+ tmp->table == table &&
nft_trans_chain_update(tmp) &&
nft_trans_chain_name(tmp) &&
strcmp(name, nft_trans_chain_name(tmp)) == 0) {
@@ -2723,13 +2933,11 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
struct nft_trans *trans;
list_for_each_entry(trans, &nft_net->commit_list, list) {
- struct nft_chain *chain = trans->ctx.chain;
-
if (trans->msg_type == NFT_MSG_NEWCHAIN &&
- chain->table == table &&
+ nft_trans_chain(trans)->table == table &&
id == nft_trans_chain_id(trans) &&
- nft_active_genmask(chain, genmask))
- return chain;
+ nft_active_genmask(nft_trans_chain(trans), genmask))
+ return nft_trans_chain(trans);
}
return ERR_PTR(-ENOENT);
}
@@ -2832,7 +3040,7 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
extack);
}
- return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
+ return nf_tables_addchain(&ctx, family, policy, flags, extack);
}
static int nft_delchain_hook(struct nft_ctx *ctx,
@@ -2847,6 +3055,9 @@ static int nft_delchain_hook(struct nft_ctx *ctx,
struct nft_trans *trans;
int err;
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return -EOPNOTSUPP;
+
err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook,
ctx->family, chain->flags, extack);
if (err < 0)
@@ -2861,8 +3072,7 @@ static int nft_delchain_hook(struct nft_ctx *ctx,
list_move(&hook->list, &chain_del_list);
}
- trans = nft_trans_alloc(ctx, NFT_MSG_DELCHAIN,
- sizeof(struct nft_trans_chain));
+ trans = nft_trans_alloc_chain(ctx, NFT_MSG_DELCHAIN);
if (!trans) {
err = -ENOMEM;
goto err_chain_del_hook;
@@ -2931,7 +3141,8 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (nla[NFTA_CHAIN_HOOK]) {
- if (chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN ||
+ chain->flags & NFT_CHAIN_HW_OFFLOAD)
return -EOPNOTSUPP;
if (nft_is_base_chain(chain)) {
@@ -3013,7 +3224,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
{
const struct nft_expr_type *type, *candidate = NULL;
- list_for_each_entry(type, &nf_tables_expressions, list) {
+ list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
if (!nla_strcmp(nla, type->name)) {
if (!type->family && !candidate)
candidate = type;
@@ -3045,9 +3256,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
if (nla == NULL)
return ERR_PTR(-EINVAL);
+ rcu_read_lock();
type = __nft_expr_type_get(family, nla);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -3184,25 +3399,37 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
return -EINVAL;
+ rcu_read_lock();
+
type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
- if (!type)
- return -ENOENT;
+ if (!type) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
- if (!type->inner_ops)
- return -EOPNOTSUPP;
+ if (!type->inner_ops) {
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ }
err = nla_parse_nested_deprecated(info->tb, type->maxattr,
tb[NFTA_EXPR_DATA],
type->policy, NULL);
if (err < 0)
- goto err_nla_parse;
+ goto out_unlock;
info->attr = nla;
info->ops = type->inner_ops;
+ /* No module reference will be taken on type->owner.
+ * Presence of type->inner_ops implies that the expression
+ * is builtin, so it cannot go away.
+ */
+ rcu_read_unlock();
return 0;
-err_nla_parse:
+out_unlock:
+ rcu_read_unlock();
return err;
}
@@ -3274,7 +3501,7 @@ err_expr_parse:
return ERR_PTR(err);
}
-int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp)
{
int err;
@@ -3282,7 +3509,7 @@ int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
return -EINVAL;
dst->ops = src->ops;
- err = src->ops->clone(dst, src);
+ err = src->ops->clone(dst, src, gfp);
if (err < 0)
return err;
@@ -3301,13 +3528,15 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
* Rules
*/
-static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
+static struct nft_rule *__nft_rule_lookup(const struct net *net,
+ const struct nft_chain *chain,
u64 handle)
{
struct nft_rule *rule;
// FIXME: this sucks
- list_for_each_entry_rcu(rule, &chain->rules, list) {
+ list_for_each_entry_rcu(rule, &chain->rules, list,
+ lockdep_commit_lock_is_held(net)) {
if (handle == rule->handle)
return rule;
}
@@ -3315,13 +3544,14 @@ static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
return ERR_PTR(-ENOENT);
}
-static struct nft_rule *nft_rule_lookup(const struct nft_chain *chain,
+static struct nft_rule *nft_rule_lookup(const struct net *net,
+ const struct nft_chain *chain,
const struct nlattr *nla)
{
if (nla == NULL)
return ERR_PTR(-EINVAL);
- return __nft_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+ return __nft_rule_lookup(net, chain, be64_to_cpu(nla_get_be64(nla)));
}
static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
@@ -3622,7 +3852,7 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)
return 0;
}
-/* called with rcu_read_lock held */
+/* Caller must hold rcu read lock or transaction mutex */
static struct sk_buff *
nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
const struct nlattr * const nla[], bool reset)
@@ -3649,7 +3879,7 @@ nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
return ERR_CAST(chain);
}
- rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+ rule = nft_rule_lookup(net, chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return ERR_CAST(rule);
@@ -3758,16 +3988,27 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
kfree(rule);
}
+/* can only be used if the rule is no longer visible to dumps */
static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
{
+ lockdep_commit_lock_is_held(ctx->net);
+
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
}
+/** nft_chain_validate - loop detection and hook validation
+ *
+ * @ctx: context containing call depth and base chain
+ * @chain: chain to validate
+ *
+ * Walk through the rules of the given chain and chase all jumps/gotos
+ * and set lookups until either the jump limit is hit or all reachable
+ * chains have been validated.
+ */
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
{
struct nft_expr *expr, *last;
- const struct nft_data *data;
struct nft_rule *rule;
int err;
@@ -3785,7 +4026,10 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
if (!expr->ops->validate)
continue;
- err = expr->ops->validate(ctx, expr, &data);
+ /* This may call nft_chain_validate() recursively;
+ * callers that do so must increment ctx->level.
+ */
+ err = expr->ops->validate(ctx, expr);
if (err < 0)
return err;
}
@@ -3828,6 +4072,9 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
const struct nft_data *data;
int err;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
@@ -3851,17 +4098,21 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
{
- u8 genmask = nft_genmask_next(ctx->net);
+ struct nft_set_iter dummy_iter = {
+ .genmask = nft_genmask_next(ctx->net),
+ };
struct nft_set_elem_catchall *catchall;
+
struct nft_set_ext *ext;
int ret = 0;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+ lockdep_commit_lock_is_held(ctx->net)) {
ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_active(ext, genmask))
+ if (!nft_set_elem_active(ext, dummy_iter.genmask))
continue;
- ret = nft_setelem_validate(ctx, set, NULL, catchall->elem);
+ ret = nft_setelem_validate(ctx, set, &dummy_iter, catchall->elem);
if (ret < 0)
return ret;
}
@@ -3930,7 +4181,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_RULE_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
- rule = __nft_rule_lookup(chain, handle);
+ rule = __nft_rule_lookup(net, chain, handle);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
@@ -3952,7 +4203,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_RULE_POSITION]) {
pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
- old_rule = __nft_rule_lookup(chain, pos_handle);
+ old_rule = __nft_rule_lookup(net, chain, pos_handle);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
return PTR_ERR(old_rule);
@@ -4123,7 +4374,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWRULE &&
- trans->ctx.chain == chain &&
+ nft_trans_rule_chain(trans) == chain &&
id == nft_trans_rule_id(trans))
return nft_trans_rule(trans);
}
@@ -4169,7 +4420,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
if (chain) {
if (nla[NFTA_RULE_HANDLE]) {
- rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+ rule = nft_rule_lookup(info->net, chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
if (PTR_ERR(rule) == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
@@ -4238,23 +4489,18 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
* given, in that case the amount of memory per element is used.
*/
static const struct nft_set_ops *
-nft_select_set_ops(const struct nft_ctx *ctx,
- const struct nlattr * const nla[],
+nft_select_set_ops(const struct nft_ctx *ctx, u32 flags,
const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
- u32 flags = 0;
int i;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
- if (nla[NFTA_SET_FLAGS] != NULL)
- flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-
bops = NULL;
best.size = ~0;
best.lookup = ~0;
@@ -4334,7 +4580,8 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_CONCAT] = NLA_POLICY_NESTED_ARRAY(nft_concat_policy),
};
-static struct nft_set *nft_set_lookup(const struct nft_table *table,
+static struct nft_set *nft_set_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nft_set *set;
@@ -4342,7 +4589,8 @@ static struct nft_set *nft_set_lookup(const struct nft_table *table,
if (nla == NULL)
return ERR_PTR(-EINVAL);
- list_for_each_entry_rcu(set, &table->sets, list) {
+ list_for_each_entry_rcu(set, &table->sets, list,
+ lockdep_commit_lock_is_held(net)) {
if (!nla_strcmp(nla, set->name) &&
nft_active_genmask(set, genmask))
return set;
@@ -4370,17 +4618,16 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
{
struct nftables_pernet *nft_net = nft_pernet(net);
u32 id = ntohl(nla_get_be32(nla));
- struct nft_trans *trans;
+ struct nft_trans_set *trans;
- list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->msg_type == NFT_MSG_NEWSET) {
- struct nft_set *set = nft_trans_set(trans);
+ /* it's likely the id we need is at the tail, not at the start */
+ list_for_each_entry_reverse(trans, &nft_net->commit_set_list, list_trans_newset) {
+ struct nft_set *set = trans->set;
- if (id == nft_trans_set_id(trans) &&
- set->table == table &&
- nft_active_genmask(set, genmask))
- return set;
- }
+ if (id == trans->set_id &&
+ set->table == table &&
+ nft_active_genmask(set, genmask))
+ return set;
}
return ERR_PTR(-ENOENT);
}
@@ -4393,7 +4640,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
{
struct nft_set *set;
- set = nft_set_lookup(table, nla_set_name, genmask);
+ set = nft_set_lookup(net, table, nla_set_name, genmask);
if (IS_ERR(set)) {
if (!nla_set_id)
return set;
@@ -4472,7 +4719,7 @@ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
return -ERANGE;
ms *= NSEC_PER_MSEC;
- *result = nsecs_to_jiffies64(ms);
+ *result = nsecs_to_jiffies64(ms) ? : !!ms;
return 0;
}
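The `?: !!ms` in nf_msecs_to_jiffies64() rounds a non-zero timeout that is shorter than one jiffy up to one jiffy instead of truncating it to 0, which nftables would otherwise treat as "no timeout". A worked example, assuming HZ == 100:

/*
 * Worked example, assuming HZ == 100 (one jiffy == 10 ms); ms has already
 * been converted to nanoseconds above:
 *
 *   request  3 ms -> nsecs_to_jiffies64(3 000 000)  == 0 -> result = !!ms = 1 jiffy
 *   request 30 ms -> nsecs_to_jiffies64(30 000 000) == 3 -> result = 3 jiffies
 *   request  0 ms -> result stays 0 ("no timeout")
 */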
@@ -4508,6 +4755,14 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
return 0;
}
+static u32 nft_set_userspace_size(const struct nft_set_ops *ops, u32 size)
+{
+ if (ops->usize)
+ return ops->usize(size);
+
+ return size;
+}
+
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
const struct nft_set *set, u16 event, u16 flags)
{
@@ -4578,7 +4833,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
if (!nest)
goto nla_put_failure;
if (set->size &&
- nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
+ nla_put_be32(skb, NFTA_SET_DESC_SIZE,
+ htonl(nft_set_userspace_size(set->ops, set->size))))
goto nla_put_failure;
if (set->field_count > 1 &&
@@ -4769,7 +5025,7 @@ static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
if (!nla[NFTA_SET_TABLE])
return -EINVAL;
- set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
+ set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
return PTR_ERR(set);
@@ -4820,7 +5076,7 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
static int nft_set_desc_concat(struct nft_set_desc *desc,
const struct nlattr *nla)
{
- u32 num_regs = 0, key_num_regs = 0;
+ u32 len = 0, num_regs;
struct nlattr *attr;
int rem, err, i;
@@ -4834,12 +5090,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
}
for (i = 0; i < desc->field_count; i++)
- num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+ len += round_up(desc->field_len[i], sizeof(u32));
- key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
- if (key_num_regs != num_regs)
+ if (len != desc->klen)
return -EINVAL;
+ num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
if (num_regs > NFT_REG32_COUNT)
return -E2BIG;
@@ -4946,6 +5202,15 @@ static bool nft_set_is_same(const struct nft_set *set,
return true;
}
+static u32 nft_set_kernel_size(const struct nft_set_ops *ops,
+ const struct nft_set_desc *desc)
+{
+ if (ops->ksize)
+ return ops->ksize(desc->size);
+
+ return desc->size;
+}
+
static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
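The new ksize()/usize() set-ops callbacks let a backend translate between the element count visible to userspace and the count it actually reserves internally, and back again when dumping, with the identity mapping as the default. A hedged sketch of a backend that reserves two internal slots per user-visible element (the factor is purely illustrative):

static u32 example_ksize(u32 user_size)
{
	return user_size * 2;		/* userspace count -> kernel count */
}

static u32 example_usize(u32 kernel_size)
{
	return kernel_size / 2;		/* kernel count -> userspace count */
}

/*
 * nf_tables_newset() converts desc.size with ->ksize() before storing it,
 * and nf_tables_fill_set() converts back with ->usize() when dumping, so
 * userspace always sees the size it originally requested.
 */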
@@ -5105,7 +5370,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
- set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
+ set = nft_set_lookup(net, table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set)) {
if (PTR_ERR(set) != -ENOENT) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
@@ -5128,6 +5393,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (err < 0)
return err;
+ if (desc.size)
+ desc.size = nft_set_kernel_size(set->ops, &desc);
+
err = 0;
if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
@@ -5146,10 +5414,13 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc);
+ ops = nft_select_set_ops(&ctx, flags, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
+ if (desc.size)
+ desc.size = nft_set_kernel_size(ops, &desc);
+
udlen = 0;
if (nla[NFTA_SET_USERDATA])
udlen = nla_len(nla[NFTA_SET_USERDATA]);
@@ -5307,7 +5578,7 @@ static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
set = nft_set_lookup_byhandle(table, attr, genmask);
} else {
attr = nla[NFTA_SET_NAME];
- set = nft_set_lookup(table, attr, genmask);
+ set = nft_set_lookup(net, table, attr, genmask);
}
if (IS_ERR(set)) {
@@ -5355,6 +5626,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
return nft_setelem_data_validate(ctx, set, elem_priv);
}
@@ -5366,7 +5642,8 @@ static int nft_set_catchall_bind_check(const struct nft_ctx *ctx,
struct nft_set_ext *ext;
int ret = 0;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+ lockdep_commit_lock_is_held(ctx->net)) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
@@ -5399,6 +5676,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
iter.genmask = nft_genmask_next(ctx->net);
+ iter.type = NFT_ITER_UPDATE;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
@@ -5446,6 +5724,13 @@ static int nft_mapelem_activate(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ /* Called from the abort path; the check is reversed to undo the changes. */
+ if (nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
+ nft_clear(ctx->net, ext);
nft_setelem_data_activate(ctx->net, set, elem_priv);
return 0;
@@ -5463,6 +5748,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
if (!nft_set_elem_active(ext, genmask))
continue;
+ nft_clear(ctx->net, ext);
nft_setelem_data_activate(ctx->net, set, catchall->elem);
break;
}
@@ -5472,6 +5758,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
+ .type = NFT_ITER_UPDATE,
.fn = nft_mapelem_activate,
};
@@ -5498,6 +5785,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
+ lockdep_commit_lock_is_held(ctx->net);
+
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
nft_set_trans_unbind(ctx, set);
@@ -5558,12 +5847,8 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
.align = __alignof__(u8),
},
[NFT_SET_EXT_TIMEOUT] = {
- .len = sizeof(u64),
- .align = __alignof__(u64),
- },
- [NFT_SET_EXT_EXPIRATION] = {
- .len = sizeof(u64),
- .align = __alignof__(u64),
+ .len = sizeof(struct nft_timeout),
+ .align = __alignof__(struct nft_timeout),
},
[NFT_SET_EXT_USERDATA] = {
.len = sizeof(struct nft_userdata),
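NFT_SET_EXT_TIMEOUT and NFT_SET_EXT_EXPIRATION used to be two separate u64 extensions; they are now folded into one extension carrying a struct nft_timeout. A sketch of the assumed layout (the real definition lives in include/net/netfilter/nf_tables.h):

/* assumed layout of the combined extension */
struct example_nft_timeout {
	u64 timeout;		/* per-element timeout, in jiffies (0 = never) */
	u64 expiration;		/* absolute expiry time, in jiffies64 */
};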
@@ -5665,8 +5950,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
- set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
- set->dlen) < 0)
+ nft_set_datatype(set), set->dlen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
@@ -5683,25 +5967,32 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
htonl(*nft_set_ext_flags(ext))))
goto nla_put_failure;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
- nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
- nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
- NFTA_SET_ELEM_PAD))
- goto nla_put_failure;
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
+ u64 timeout = READ_ONCE(nft_set_ext_timeout(ext)->timeout);
+ u64 set_timeout = READ_ONCE(set->timeout);
+ __be64 msecs = 0;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
- u64 expires, now = get_jiffies_64();
+ if (set_timeout != timeout) {
+ msecs = nf_jiffies64_to_msecs(timeout);
+ if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, msecs,
+ NFTA_SET_ELEM_PAD))
+ goto nla_put_failure;
+ }
- expires = *nft_set_ext_expiration(ext);
- if (time_before64(now, expires))
- expires -= now;
- else
- expires = 0;
+ if (timeout > 0) {
+ u64 expires, now = get_jiffies_64();
- if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
- nf_jiffies64_to_msecs(expires),
- NFTA_SET_ELEM_PAD))
- goto nla_put_failure;
+ expires = READ_ONCE(nft_set_ext_timeout(ext)->expiration);
+ if (time_before64(now, expires))
+ expires -= now;
+ else
+ expires = 0;
+
+ if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
+ nf_jiffies64_to_msecs(expires),
+ NFTA_SET_ELEM_PAD))
+ goto nla_put_failure;
+ }
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
@@ -5736,6 +6027,9 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
struct nft_set_dump_args *args;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
return 0;
@@ -5846,6 +6140,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
args.skb = skb;
args.reset = dump_ctx->reset;
args.iter.genmask = nft_genmask_cur(net);
+ args.iter.type = NFT_ITER_READ;
args.iter.skip = cb->args[0];
args.iter.count = 0;
args.iter.err = 0;
@@ -6116,7 +6411,7 @@ static int nft_set_dump_ctx_init(struct nft_set_dump_ctx *dump_ctx,
return PTR_ERR(table);
}
- set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
return PTR_ERR(set);
@@ -6267,17 +6562,21 @@ err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
-static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
+static struct nft_trans *nft_trans_elem_alloc(const struct nft_ctx *ctx,
int msg_type,
struct nft_set *set)
{
+ struct nft_trans_elem *te;
struct nft_trans *trans;
- trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
+ trans = nft_trans_alloc(ctx, msg_type, struct_size(te, elems, 1));
if (trans == NULL)
return NULL;
- nft_trans_elem_set(trans) = set;
+ te = nft_trans_container_elem(trans);
+ te->nelems = 1;
+ te->set = set;
+
return trans;
}
@@ -6360,13 +6659,14 @@ struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set,
nft_set_ext_data(ext), data, set->dlen) < 0)
goto err_ext_check;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
- *nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
+ nft_set_ext_timeout(ext)->timeout = timeout;
+
if (expiration == 0)
- *nft_set_ext_expiration(ext) += timeout;
+ expiration = timeout;
+
+ nft_set_ext_timeout(ext)->expiration = get_jiffies_64() + expiration;
}
- if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
- *nft_set_ext_timeout(ext) = timeout;
return elem;
@@ -6398,28 +6698,52 @@ static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
}
/* Drop references and destroy. Called from gc, dynset and abort path. */
-void nft_set_elem_destroy(const struct nft_set *set,
- const struct nft_elem_priv *elem_priv,
- bool destroy_expr)
+static void __nft_set_elem_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv,
+ bool destroy_expr)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
- struct nft_ctx ctx = {
- .net = read_pnet(&set->net),
- .family = set->table->family,
- };
nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS))
- nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
+ nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext));
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
nft_use_dec(&(*nft_set_ext_obj(ext))->use);
kfree(elem_priv);
}
+
+/* Drop references and destroy. Called from gc and dynset. */
+void nft_set_elem_destroy(const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv,
+ bool destroy_expr)
+{
+ struct nft_ctx ctx = {
+ .net = read_pnet(&set->net),
+ .family = set->table->family,
+ };
+
+ __nft_set_elem_destroy(&ctx, set, elem_priv, destroy_expr);
+}
EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+/* Drop references and destroy. Called from abort path. */
+static void nft_trans_set_elem_destroy(const struct nft_ctx *ctx, struct nft_trans_elem *te)
+{
+ int i;
+
+ for (i = 0; i < te->nelems; i++) {
+ /* skip update request, see nft_trans_elems_new_abort() */
+ if (!te->elems[i].priv)
+ continue;
+
+ __nft_set_elem_destroy(ctx, te->set, te->elems[i].priv, true);
+ }
+}
+
/* Destroy element. References have been already dropped in the preparation
* path via nft_setelem_data_deactivate().
*/
@@ -6435,6 +6759,15 @@ void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
kfree(elem_priv);
}
+static void nft_trans_elems_destroy(const struct nft_ctx *ctx,
+ const struct nft_trans_elem *te)
+{
+ int i;
+
+ for (i = 0; i < te->nelems; i++)
+ nf_tables_set_elem_destroy(ctx, te->set, te->elems[i].priv);
+}
+
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[])
{
@@ -6446,7 +6779,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
if (!expr)
goto err_expr;
- err = nft_expr_clone(expr, set->exprs[i]);
+ err = nft_expr_clone(expr, set->exprs[i], GFP_KERNEL_ACCOUNT);
if (err < 0) {
kfree(expr);
goto err_expr;
@@ -6485,7 +6818,7 @@ static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
for (i = 0; i < num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
- err = nft_expr_clone(expr, expr_array[i]);
+ err = nft_expr_clone(expr, expr_array[i], GFP_KERNEL_ACCOUNT);
if (err < 0)
goto err_elem_expr_setup;
@@ -6541,7 +6874,7 @@ static int nft_setelem_catchall_insert(const struct net *net,
}
}
- catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
+ catchall = kmalloc(sizeof(*catchall), GFP_KERNEL_ACCOUNT);
if (!catchall)
return -ENOMEM;
@@ -6585,12 +6918,44 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
if (nft_setelem_is_catchall(set, elem_priv)) {
- nft_set_elem_change_active(net, set, ext);
+ nft_clear(net, ext);
} else {
set->ops->activate(net, set, elem_priv);
}
}
+static void nft_trans_elem_update(const struct nft_set *set,
+ const struct nft_trans_one_elem *elem)
+{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ const struct nft_elem_update *update = elem->update;
+
+ if (update->flags & NFT_TRANS_UPD_TIMEOUT)
+ WRITE_ONCE(nft_set_ext_timeout(ext)->timeout, update->timeout);
+
+ if (update->flags & NFT_TRANS_UPD_EXPIRATION)
+ WRITE_ONCE(nft_set_ext_timeout(ext)->expiration, get_jiffies_64() + update->expiration);
+}
+
+static void nft_trans_elems_add(const struct nft_ctx *ctx,
+ struct nft_trans_elem *te)
+{
+ int i;
+
+ for (i = 0; i < te->nelems; i++) {
+ struct nft_trans_one_elem *elem = &te->elems[i];
+
+ if (elem->update)
+ nft_trans_elem_update(te->set, elem);
+ else
+ nft_setelem_activate(ctx->net, te->set, elem->priv);
+
+ nf_tables_setelem_notify(ctx, te->set, elem->priv,
+ NFT_MSG_NEWSETELEM);
+ kfree(elem->update);
+ }
+}
+
static int nft_setelem_catchall_deactivate(const struct net *net,
struct nft_set *set,
struct nft_set_elem *elem)
@@ -6673,6 +7038,26 @@ static void nft_setelem_remove(const struct net *net,
set->ops->remove(net, set, elem_priv);
}
+static void nft_trans_elems_remove(const struct nft_ctx *ctx,
+ const struct nft_trans_elem *te)
+{
+ int i;
+
+ for (i = 0; i < te->nelems; i++) {
+ WARN_ON_ONCE(te->elems[i].update);
+
+ nf_tables_setelem_notify(ctx, te->set,
+ te->elems[i].priv,
+ te->nft_trans.msg_type);
+
+ nft_setelem_remove(ctx->net, te->set, te->elems[i].priv);
+ if (!nft_setelem_is_catchall(te->set, te->elems[i].priv)) {
+ atomic_dec(&te->set->nelems);
+ te->set->ndeact--;
+ }
+ }
+}
+
static bool nft_setelem_valid_key_end(const struct nft_set *set,
struct nlattr **nla, u32 flags)
{
@@ -6692,6 +7077,27 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
return true;
}
+static u32 nft_set_maxsize(const struct nft_set *set)
+{
+ u32 maxsize, delta;
+
+ if (!set->size)
+ return UINT_MAX;
+
+ if (set->ops->adjust_maxsize)
+ delta = set->ops->adjust_maxsize(set);
+ else
+ delta = 0;
+
+ if (check_add_overflow(set->size, set->ndeact, &maxsize))
+ return UINT_MAX;
+
+ if (check_add_overflow(maxsize, delta, &maxsize))
+ return UINT_MAX;
+
+ return maxsize;
+}
+
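
nft_set_maxsize() above computes the effective element limit with overflow checks: an unbounded set (size == 0) gets UINT_MAX, otherwise size + ndeact plus an optional backend-supplied delta is clamped to UINT_MAX if either addition overflows. The kernel's check_add_overflow() is built on the same compiler builtin used in this plain-C sketch:

        #include <stdint.h>
        #include <stdio.h>

        /* Userspace sketch of the clamp-on-overflow maximum computation. */
        static uint32_t clamped_max(uint32_t size, uint32_t ndeact, uint32_t delta)
        {
                uint32_t max;

                if (!size)                      /* no explicit limit */
                        return UINT32_MAX;

                if (__builtin_add_overflow(size, ndeact, &max))
                        return UINT32_MAX;
                if (__builtin_add_overflow(max, delta, &max))
                        return UINT32_MAX;

                return max;
        }

        int main(void)
        {
                printf("%u\n", clamped_max(100, 3, 2));         /* 105 */
                printf("%u\n", clamped_max(UINT32_MAX, 1, 0));  /* clamped */
                return 0;
        }
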
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
@@ -6777,17 +7183,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
} else if (set->flags & NFT_SET_TIMEOUT &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
- timeout = READ_ONCE(set->timeout);
+ timeout = set->timeout;
}
expiration = 0;
if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
+ if (timeout == 0)
+ return -EOPNOTSUPP;
+
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION],
&expiration);
if (err)
return err;
+
+ if (expiration > timeout)
+ return -ERANGE;
}
if (nla[NFTA_SET_ELEM_EXPR]) {
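
The checks added in the hunk above tighten NFTA_SET_ELEM_EXPIRATION handling: an explicit expiration is rejected when the element has no timeout (-EOPNOTSUPP) and when it exceeds the timeout (-ERANGE). A trivial sketch of that validation order, using the same errno values:

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Expiration needs a timeout and may not exceed it. */
        static int check_expiration(uint64_t timeout, uint64_t expiration)
        {
                if (!expiration)
                        return 0;               /* nothing requested */
                if (!timeout)
                        return -EOPNOTSUPP;     /* expiration without a timeout */
                if (expiration > timeout)
                        return -ERANGE;
                return 0;
        }

        int main(void)
        {
                printf("%d\n", check_expiration(60, 30));       /* 0 */
                printf("%d\n", check_expiration(0, 30));        /* -EOPNOTSUPP */
                printf("%d\n", check_expiration(60, 90));       /* -ERANGE */
                return 0;
        }
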
@@ -6873,16 +7285,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err_parse_key_end;
}
- if (timeout > 0) {
- err = nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+ if (set->flags & NFT_SET_TIMEOUT) {
+ err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
if (err < 0)
goto err_parse_key_end;
-
- if (timeout != READ_ONCE(set->timeout)) {
- err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
- if (err < 0)
- goto err_parse_key_end;
- }
}
if (num_exprs) {
@@ -7020,8 +7426,40 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF) &&
*nft_set_ext_obj(ext) != *nft_set_ext_obj(ext2)))
goto err_element_clash;
- else if (!(nlmsg_flags & NLM_F_EXCL))
+ else if (!(nlmsg_flags & NLM_F_EXCL)) {
err = 0;
+ if (nft_set_ext_exists(ext2, NFT_SET_EXT_TIMEOUT)) {
+ struct nft_elem_update update = { };
+
+ if (timeout != nft_set_ext_timeout(ext2)->timeout) {
+ update.timeout = timeout;
+ if (expiration == 0)
+ expiration = timeout;
+
+ update.flags |= NFT_TRANS_UPD_TIMEOUT;
+ }
+ if (expiration) {
+ update.expiration = expiration;
+ update.flags |= NFT_TRANS_UPD_EXPIRATION;
+ }
+
+ if (update.flags) {
+ struct nft_trans_one_elem *ue;
+
+ ue = &nft_trans_container_elem(trans)->elems[0];
+
+ ue->update = kmemdup(&update, sizeof(update), GFP_KERNEL);
+ if (!ue->update) {
+ err = -ENOMEM;
+ goto err_element_clash;
+ }
+
+ ue->priv = elem_priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
+ goto err_elem_free;
+ }
+ }
+ }
} else if (err == -ENOTEMPTY) {
/* ENOTEMPTY reports overlapping between this element
* and an existing one.
@@ -7032,7 +7470,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
if (!(flags & NFT_SET_ELEM_CATCHALL)) {
- unsigned int max = set->size ? set->size + set->ndeact : UINT_MAX;
+ unsigned int max = nft_set_maxsize(set);
if (!atomic_add_unless(&set->nelems, 1, max)) {
err = -ENFILE;
@@ -7040,8 +7478,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
}
- nft_trans_elem_priv(trans) = elem.priv;
- nft_trans_commit_list_add_tail(ctx->net, trans);
+ nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
return 0;
err_set_full:
@@ -7144,6 +7582,16 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
}
}
+static int nft_setelem_active_next(const struct net *net,
+ const struct nft_set *set,
+ struct nft_elem_priv *elem_priv)
+{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+ u8 genmask = nft_genmask_next(net);
+
+ return nft_set_elem_active(ext, genmask);
+}
+
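
nft_setelem_active_next() asks whether an element will still be active in the *next* generation, which the abort helpers further down use to decide whether a deactivated element has to be re-activated. A toy model of the current/next generation split; the kernel's genmask encoding differs in detail, this only illustrates the idea:

        #include <stdbool.h>
        #include <stdio.h>

        /* bit 0 = visible in generation A, bit 1 = visible in generation B;
         * "cur" flips on every commit, pending changes only touch "next". */
        static unsigned int cur;        /* 0 or 1 */

        static unsigned int genbit_cur(void)  { return 1u << cur; }
        static unsigned int genbit_next(void) { return 1u << !cur; }

        static bool active(unsigned int mask, unsigned int genbit)
        {
                return mask & genbit;
        }

        int main(void)
        {
                unsigned int elem = genbit_cur() | genbit_next();       /* fully active */

                elem &= ~genbit_next();         /* deactivated in the pending transaction */
                printf("cur=%d next=%d\n",
                       active(elem, genbit_cur()), active(elem, genbit_next())); /* 1 0 */

                elem |= genbit_next();          /* abort: restore next-gen visibility */
                printf("cur=%d next=%d\n",
                       active(elem, genbit_cur()), active(elem, genbit_next())); /* 1 1 */
                return 0;
        }
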
static void nft_setelem_data_activate(const struct net *net,
const struct nft_set *set,
struct nft_elem_priv *elem_priv)
@@ -7168,6 +7616,55 @@ void nft_setelem_data_deactivate(const struct net *net,
nft_use_dec(&(*nft_set_ext_obj(ext))->use);
}
+/* similar to nft_trans_elems_remove, but called from abort path to undo newsetelem.
+ * No notifications and no ndeact changes.
+ *
+ * Returns true if set had been added to (i.e., elements need to be removed again).
+ */
+static bool nft_trans_elems_new_abort(const struct nft_ctx *ctx,
+ struct nft_trans_elem *te)
+{
+ bool removed = false;
+ int i;
+
+ for (i = 0; i < te->nelems; i++) {
+ if (te->elems[i].update) {
+ kfree(te->elems[i].update);
+ te->elems[i].update = NULL;
+ /* Update request, so do not release this element */
+ te->elems[i].priv = NULL;
+ continue;
+ }
+
+ if (!te->set->ops->abort || nft_setelem_is_catchall(te->set, te->elems[i].priv))
+ nft_setelem_remove(ctx->net, te->set, te->elems[i].priv);
+
+ if (!nft_setelem_is_catchall(te->set, te->elems[i].priv))
+ atomic_dec(&te->set->nelems);
+
+ removed = true;
+ }
+
+ return removed;
+}
+
+/* Called from abort path to undo DELSETELEM/DESTROYSETELEM. */
+static void nft_trans_elems_destroy_abort(const struct nft_ctx *ctx,
+ const struct nft_trans_elem *te)
+{
+ int i;
+
+ for (i = 0; i < te->nelems; i++) {
+ if (!nft_setelem_active_next(ctx->net, te->set, te->elems[i].priv)) {
+ nft_setelem_data_activate(ctx->net, te->set, te->elems[i].priv);
+ nft_setelem_activate(ctx->net, te->set, te->elems[i].priv);
+ }
+
+ if (!nft_setelem_is_catchall(te->set, te->elems[i].priv))
+ te->set->ndeact--;
+ }
+}
+
static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
@@ -7247,8 +7744,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
nft_setelem_data_deactivate(ctx->net, set, elem.priv);
- nft_trans_elem_priv(trans) = elem.priv;
- nft_trans_commit_list_add_tail(ctx->net, trans);
+ nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
return 0;
fail_ops:
@@ -7267,10 +7764,15 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
struct nft_trans *trans;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
- sizeof(struct nft_trans_elem), GFP_ATOMIC);
+ struct_size_t(struct nft_trans_elem, elems, 1),
+ GFP_ATOMIC);
if (!trans)
return -ENOMEM;
@@ -7279,8 +7781,9 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
nft_trans_elem_set(trans) = set;
- nft_trans_elem_priv(trans) = elem_priv;
- nft_trans_commit_list_add_tail(ctx->net, trans);
+ nft_trans_container_elem(trans)->nelems = 1;
+ nft_trans_container_elem(trans)->elems[0].priv = elem_priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans, GFP_ATOMIC);
return 0;
}
@@ -7291,15 +7794,13 @@ static int __nft_set_catchall_flush(const struct nft_ctx *ctx,
{
struct nft_trans *trans;
- trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
- sizeof(struct nft_trans_elem), GFP_KERNEL);
+ trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
if (!trans)
return -ENOMEM;
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
- nft_trans_elem_set(trans) = set;
- nft_trans_elem_priv(trans) = elem_priv;
- nft_trans_commit_list_add_tail(ctx->net, trans);
+ nft_trans_container_elem(trans)->elems[0].priv = elem_priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
return 0;
}
@@ -7312,7 +7813,8 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
struct nft_set_ext *ext;
int ret = 0;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_rcu(catchall, &set->catchall_list, list,
+ lockdep_commit_lock_is_held(ctx->net)) {
ext = nft_set_elem_ext(set, catchall->elem);
if (!nft_set_elem_active(ext, genmask))
continue;
@@ -7330,6 +7832,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
{
struct nft_set_iter iter = {
.genmask = genmask,
+ .type = NFT_ITER_UPDATE,
.fn = nft_setelem_flush,
};
@@ -7361,7 +7864,7 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
return PTR_ERR(table);
}
- set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ set = nft_set_lookup(net, table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
return PTR_ERR(set);
@@ -7565,7 +8068,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
{
const struct nft_object_type *type;
- list_for_each_entry(type, &nf_tables_objects, list) {
+ list_for_each_entry_rcu(type, &nf_tables_objects, list) {
if (type->family != NFPROTO_UNSPEC &&
type->family != family)
continue;
@@ -7581,9 +8084,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
{
const struct nft_object_type *type;
+ rcu_read_lock();
type = __nft_obj_type_get(objtype, family);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
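
The hunk above makes the object-type lookup RCU-safe: the type list is walked under rcu_read_lock() and a module reference is taken with try_module_get() before the read side is left, so the type cannot go away once the caller holds it. A rough userspace analogue of that "look up under a lock, pin a reference, then drop the lock" pattern; a plain mutex stands in for RCU and the refcount scheme is illustrative only:

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct obj_type {
                int family;
                atomic_int refs;                /* 0 means the type is being torn down */
                struct obj_type *next;
        };

        static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct obj_type *registry;

        /* Take a reference only if the type is still live (refs > 0). */
        static bool type_tryget(struct obj_type *t)
        {
                int old = atomic_load(&t->refs);

                while (old > 0)
                        if (atomic_compare_exchange_weak(&t->refs, &old, old + 1))
                                return true;
                return false;
        }

        static struct obj_type *type_get(int family)
        {
                struct obj_type *t, *found = NULL;

                pthread_mutex_lock(&registry_lock);
                for (t = registry; t; t = t->next) {
                        if (t->family == family && type_tryget(t)) {
                                found = t;      /* pinned, safe to use after unlock */
                                break;
                        }
                }
                pthread_mutex_unlock(&registry_lock);
                return found;
        }

        int main(void)
        {
                struct obj_type counter = { .family = 1, .refs = 1 };

                registry = &counter;
                printf("%s\n", type_get(1) ? "got reference" : "type gone");
                return 0;
        }
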
@@ -7604,9 +8111,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
struct nft_trans *trans;
int err = -ENOMEM;
- if (!try_module_get(type->owner))
- return -ENOENT;
-
+ /* caller must have obtained type->owner reference. */
trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
sizeof(struct nft_trans_obj));
if (!trans)
@@ -7674,12 +8179,16 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- type = __nft_obj_type_get(objtype, family);
- if (WARN_ON_ONCE(!type))
- return -ENOENT;
+ if (!obj->ops->update)
+ return 0;
+
+ type = nft_obj_type_get(net, objtype, family);
+ if (WARN_ON_ONCE(IS_ERR(type)))
+ return PTR_ERR(type);
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ /* type->owner reference is put when transaction object is released. */
return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
}
@@ -7865,6 +8374,19 @@ cont:
return skb->len;
}
+static int nf_tables_dumpreset_obj(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
+ int ret;
+
+ mutex_lock(&nft_net->commit_mutex);
+ ret = nf_tables_dump_obj(skb, cb);
+ mutex_unlock(&nft_net->commit_mutex);
+
+ return ret;
+}
+
static int nf_tables_dump_obj_start(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@@ -7881,12 +8403,18 @@ static int nf_tables_dump_obj_start(struct netlink_callback *cb)
if (nla[NFTA_OBJ_TYPE])
ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
- ctx->reset = true;
-
return 0;
}
+static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
+{
+ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+
+ ctx->reset = true;
+
+ return nf_tables_dump_obj_start(cb);
+}
+
static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@@ -7896,9 +8424,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
return 0;
}
-/* called with rcu_read_lock held */
-static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
- const struct nlattr * const nla[])
+/* Caller must hold rcu read lock or transaction mutex */
+static struct sk_buff *
+nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
+ const struct nlattr * const nla[], bool reset)
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
@@ -7907,72 +8436,109 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
struct net *net = info->net;
struct nft_object *obj;
struct sk_buff *skb2;
- bool reset = false;
u32 objtype;
int err;
- if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
- struct netlink_dump_control c = {
- .start = nf_tables_dump_obj_start,
- .dump = nf_tables_dump_obj,
- .done = nf_tables_dump_obj_done,
- .module = THIS_MODULE,
- .data = (void *)nla,
- };
-
- return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
- }
-
if (!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_TYPE])
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
- return PTR_ERR(table);
+ return ERR_CAST(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
- return PTR_ERR(obj);
+ return ERR_CAST(obj);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ err = nf_tables_fill_obj_info(skb2, net, portid,
+ info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
+ family, table, obj, reset);
+ if (err < 0) {
+ kfree_skb(skb2);
+ return ERR_PTR(err);
+ }
- if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
- reset = true;
+ return skb2;
+}
- if (reset) {
- const struct nftables_pernet *nft_net;
- char *buf;
+static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const nla[])
+{
+ u32 portid = NETLINK_CB(skb).portid;
+ struct sk_buff *skb2;
- nft_net = nft_pernet(net);
- buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
+ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = nf_tables_dump_obj_start,
+ .dump = nf_tables_dump_obj,
+ .done = nf_tables_dump_obj_done,
+ .module = THIS_MODULE,
+ .data = (void *)nla,
+ };
- audit_log_nfcfg(buf,
- family,
- 1,
- AUDIT_NFT_OP_OBJ_RESET,
- GFP_ATOMIC);
- kfree(buf);
+ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
- err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
- info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
- family, table, obj, reset);
- if (err < 0)
- goto err_fill_obj_info;
+ skb2 = nf_tables_getobj_single(portid, info, nla, false);
+ if (IS_ERR(skb2))
+ return PTR_ERR(skb2);
- return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, info->net, portid);
+}
-err_fill_obj_info:
- kfree_skb(skb2);
- return err;
+static int nf_tables_getobj_reset(struct sk_buff *skb,
+ const struct nfnl_info *info,
+ const struct nlattr * const nla[])
+{
+ struct nftables_pernet *nft_net = nft_pernet(info->net);
+ u32 portid = NETLINK_CB(skb).portid;
+ struct net *net = info->net;
+ struct sk_buff *skb2;
+ char *buf;
+
+ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = nf_tables_dumpreset_obj_start,
+ .dump = nf_tables_dumpreset_obj,
+ .done = nf_tables_dump_obj_done,
+ .module = THIS_MODULE,
+ .data = (void *)nla,
+ };
+
+ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+ rcu_read_unlock();
+ mutex_lock(&nft_net->commit_mutex);
+ skb2 = nf_tables_getobj_single(portid, info, nla, true);
+ mutex_unlock(&nft_net->commit_mutex);
+ rcu_read_lock();
+ module_put(THIS_MODULE);
+
+ if (IS_ERR(skb2))
+ return PTR_ERR(skb2);
+
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_OBJ_TABLE]),
+ (char *)nla_data(nla[NFTA_OBJ_TABLE]),
+ nft_net->base_seq);
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+
+ return nfnetlink_unicast(skb2, net, portid);
}
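
nf_tables_getobj_reset() above rebuilds the audit string directly from the NFTA_OBJ_TABLE attribute with a "%.*s" length-bounded format instead of going through the table pointer; "%.*s" prints at most the given number of bytes, so a payload without a NUL terminator is still safe to format. Standard printf behaves the same way:

        #include <stdio.h>

        int main(void)
        {
                /* attribute-style payload: length known, no NUL terminator included */
                const char payload[6] = { 'f', 'i', 'l', 't', 'e', 'r' };
                char buf[64];

                snprintf(buf, sizeof(buf), "%.*s:%u",
                         (int)sizeof(payload), payload, 7u);
                puts(buf);      /* prints "filter:7" */
                return 0;
        }
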
static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
@@ -8127,12 +8693,14 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
[NFTA_FLOWTABLE_FLAGS] = { .type = NLA_U32 },
};
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nft_flowtable *flowtable;
- list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+ list_for_each_entry_rcu(flowtable, &table->flowtables, list,
+ lockdep_commit_lock_is_held(net)) {
if (!nla_strcmp(nla, flowtable->name) &&
nft_active_genmask(flowtable, genmask))
return flowtable;
@@ -8255,11 +8823,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
return err;
}
+/* call under rcu_read_lock */
static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
{
const struct nf_flowtable_type *type;
- list_for_each_entry(type, &nf_tables_flowtables, list) {
+ list_for_each_entry_rcu(type, &nf_tables_flowtables, list) {
if (family == type->family)
return type;
}
@@ -8271,9 +8840,13 @@ nft_flowtable_type_get(struct net *net, u8 family)
{
const struct nf_flowtable_type *type;
+ rcu_read_lock();
type = __nft_flowtable_type_get(family);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -8296,6 +8869,7 @@ static void nft_unregister_flowtable_hook(struct net *net,
}
static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable,
struct list_head *hook_list,
bool release_netdev)
{
@@ -8303,6 +8877,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
if (release_netdev) {
list_del(&hook->list);
kfree_rcu(hook, rcu);
@@ -8311,9 +8887,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
}
static void nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable,
struct list_head *hook_list)
{
- __nft_unregister_flowtable_net_hooks(net, hook_list, false);
+ __nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false);
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -8321,7 +8898,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
struct list_head *hook_list,
struct nft_flowtable *flowtable)
{
- struct nft_hook *hook, *hook2, *next;
+ struct nft_hook *hook, *next;
struct nft_flowtable *ft;
int err, i = 0;
@@ -8330,12 +8907,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
if (!nft_is_active_next(net, ft))
continue;
- list_for_each_entry(hook2, &ft->hook_list, list) {
- if (hook->ops.dev == hook2->ops.dev &&
- hook->ops.pf == hook2->ops.pf) {
- err = -EEXIST;
- goto err_unregister_net_hooks;
- }
+ if (nft_hook_list_find(&ft->hook_list, hook)) {
+ err = -EEXIST;
+ goto err_unregister_net_hooks;
}
}
@@ -8483,7 +9057,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
return PTR_ERR(table);
}
- flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+ flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
genmask);
if (IS_ERR(flowtable)) {
err = PTR_ERR(flowtable);
@@ -8677,7 +9251,7 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask);
} else {
attr = nla[NFTA_FLOWTABLE_NAME];
- flowtable = nft_flowtable_lookup(table, attr, genmask);
+ flowtable = nft_flowtable_lookup(net, table, attr, genmask);
}
if (IS_ERR(flowtable)) {
@@ -8747,8 +9321,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
if (!hook_list)
hook_list = &flowtable->hook_list;
- list_for_each_entry_rcu(hook, hook_list, list) {
- if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name))
+ list_for_each_entry_rcu(hook, hook_list, list,
+ lockdep_commit_lock_is_held(net)) {
+ if (nla_put(skb, NFTA_DEVICE_NAME,
+ hook->ifnamelen, hook->ifname))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
@@ -8889,7 +9465,7 @@ static int nf_tables_getflowtable(struct sk_buff *skb,
return PTR_ERR(table);
}
- flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+ flowtable = nft_flowtable_lookup(net, table, nla[NFTA_FLOWTABLE_NAME],
genmask);
if (IS_ERR(flowtable)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
@@ -8954,10 +9530,8 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
flowtable->data.type->free(&flowtable->data);
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
- flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
- FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
- kfree(hook);
+ kfree_rcu(hook, rcu);
}
kfree(flowtable->name);
module_put(flowtable->data.type->owner);
@@ -9250,7 +9824,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.policy = nft_obj_policy,
},
[NFT_MSG_GETOBJ_RESET] = {
- .call = nf_tables_getobj,
+ .call = nf_tables_getobj_reset,
.type = NFNL_CB_RCU,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
@@ -9312,51 +9886,53 @@ static int nf_tables_validate(struct net *net)
*
* We defer the drop policy until the transaction has been finalized.
*/
-static void nft_chain_commit_drop_policy(struct nft_trans *trans)
+static void nft_chain_commit_drop_policy(struct nft_trans_chain *trans)
{
struct nft_base_chain *basechain;
- if (nft_trans_chain_policy(trans) != NF_DROP)
+ if (trans->policy != NF_DROP)
return;
- if (!nft_is_base_chain(trans->ctx.chain))
+ if (!nft_is_base_chain(trans->chain))
return;
- basechain = nft_base_chain(trans->ctx.chain);
+ basechain = nft_base_chain(trans->chain);
basechain->policy = NF_DROP;
}
-static void nft_chain_commit_update(struct nft_trans *trans)
+static void nft_chain_commit_update(struct nft_trans_chain *trans)
{
+ struct nft_table *table = trans->nft_trans_binding.nft_trans.table;
struct nft_base_chain *basechain;
- if (nft_trans_chain_name(trans)) {
- rhltable_remove(&trans->ctx.table->chains_ht,
- &trans->ctx.chain->rhlhead,
+ if (trans->name) {
+ rhltable_remove(&table->chains_ht,
+ &trans->chain->rhlhead,
nft_chain_ht_params);
- swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
- rhltable_insert_key(&trans->ctx.table->chains_ht,
- trans->ctx.chain->name,
- &trans->ctx.chain->rhlhead,
+ swap(trans->chain->name, trans->name);
+ rhltable_insert_key(&table->chains_ht,
+ trans->chain->name,
+ &trans->chain->rhlhead,
nft_chain_ht_params);
}
- if (!nft_is_base_chain(trans->ctx.chain))
+ if (!nft_is_base_chain(trans->chain))
return;
nft_chain_stats_replace(trans);
- basechain = nft_base_chain(trans->ctx.chain);
+ basechain = nft_base_chain(trans->chain);
- switch (nft_trans_chain_policy(trans)) {
+ switch (trans->policy) {
case NF_DROP:
case NF_ACCEPT:
- basechain->policy = nft_trans_chain_policy(trans);
+ basechain->policy = trans->policy;
break;
}
}
-static void nft_obj_commit_update(struct nft_trans *trans)
+static void nft_obj_commit_update(const struct nft_ctx *ctx,
+ struct nft_trans *trans)
{
struct nft_object *newobj;
struct nft_object *obj;
@@ -9364,18 +9940,25 @@ static void nft_obj_commit_update(struct nft_trans *trans)
obj = nft_trans_obj(trans);
newobj = nft_trans_obj_newobj(trans);
- if (obj->ops->update)
- obj->ops->update(obj, newobj);
+ if (WARN_ON_ONCE(!obj->ops->update))
+ return;
- nft_obj_destroy(&trans->ctx, newobj);
+ obj->ops->update(obj, newobj);
+ nft_obj_destroy(ctx, newobj);
}
static void nft_commit_release(struct nft_trans *trans)
{
+ struct nft_ctx ctx = {
+ .net = trans->net,
+ };
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- nf_tables_table_destroy(&trans->ctx);
+ nf_tables_table_destroy(trans->table);
break;
case NFT_MSG_NEWCHAIN:
free_percpu(nft_trans_chain_stats(trans));
@@ -9386,25 +9969,23 @@ static void nft_commit_release(struct nft_trans *trans)
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
- nf_tables_chain_destroy(&trans->ctx);
+ nf_tables_chain_destroy(nft_trans_chain(trans));
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
- nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ nf_tables_rule_destroy(&ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
- nft_set_destroy(&trans->ctx, nft_trans_set(trans));
+ nft_set_destroy(&ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- nf_tables_set_elem_destroy(&trans->ctx,
- nft_trans_elem_set(trans),
- nft_trans_elem_priv(trans));
+ nft_trans_elems_destroy(&ctx, nft_trans_container_elem(trans));
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
- nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj(trans));
break;
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
@@ -9416,18 +9997,19 @@ static void nft_commit_release(struct nft_trans *trans)
}
if (trans->put_net)
- put_net(trans->ctx.net);
+ put_net(trans->net);
kfree(trans);
}
static void nf_tables_trans_destroy_work(struct work_struct *w)
{
+ struct nftables_pernet *nft_net = container_of(w, struct nftables_pernet, destroy_work);
struct nft_trans *trans, *next;
LIST_HEAD(head);
spin_lock(&nf_tables_destroy_list_lock);
- list_splice_init(&nf_tables_destroy_list, &head);
+ list_splice_init(&nft_net->destroy_list, &head);
spin_unlock(&nf_tables_destroy_list_lock);
if (list_empty(&head))
@@ -9441,9 +10023,11 @@ static void nf_tables_trans_destroy_work(struct work_struct *w)
}
}
-void nf_tables_trans_destroy_flush_work(void)
+void nf_tables_trans_destroy_flush_work(struct net *net)
{
- flush_work(&trans_destroy_work);
+ struct nftables_pernet *nft_net = nft_pernet(net);
+
+ flush_work(&nft_net->destroy_work);
}
EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
@@ -9535,10 +10119,10 @@ static void nf_tables_commit_chain_prepare_cancel(struct net *net)
struct nft_trans *trans, *next;
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
- struct nft_chain *chain = trans->ctx.chain;
-
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
+ struct nft_chain *chain = nft_trans_rule_chain(trans);
+
kvfree(chain->blob_next);
chain->blob_next = NULL;
}
@@ -9896,16 +10480,16 @@ static void nf_tables_commit_release(struct net *net)
trans = list_last_entry(&nft_net->commit_list,
struct nft_trans, list);
- get_net(trans->ctx.net);
+ get_net(trans->net);
WARN_ON_ONCE(trans->put_net);
trans->put_net = true;
spin_lock(&nf_tables_destroy_list_lock);
- list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list);
+ list_splice_tail_init(&nft_net->commit_list, &nft_net->destroy_list);
spin_unlock(&nf_tables_destroy_list_lock);
nf_tables_module_autoload_cleanup(net);
- schedule_work(&trans_destroy_work);
+ schedule_work(&nft_net->destroy_work);
mutex_unlock(&nft_net->commit_mutex);
}
@@ -9973,9 +10557,24 @@ static void nf_tables_commit_audit_free(struct list_head *adl)
}
}
+/* nft audit emits the number of elements that get added/removed/updated,
+ * so NEW/DELSETELEM needs to increment based on the total elem count.
+ */
+static unsigned int nf_tables_commit_audit_entrycount(const struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSETELEM:
+ case NFT_MSG_DELSETELEM:
+ return nft_trans_container_elem(trans)->nelems;
+ }
+
+ return 1;
+}
+
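
nf_tables_commit_audit_entrycount() above feeds the audit log the real number of elements carried by a batched NEWSETELEM/DELSETELEM transaction instead of counting the transaction as a single entry. In sketch form, assuming a transaction object that records its message type and element count:

        #include <stdio.h>

        enum msg_type { MSG_NEWRULE, MSG_NEWSETELEM, MSG_DELSETELEM };

        struct trans {
                enum msg_type type;
                unsigned int nelems;    /* only meaningful for *SETELEM transactions */
        };

        static unsigned int audit_entrycount(const struct trans *t)
        {
                switch (t->type) {
                case MSG_NEWSETELEM:
                case MSG_DELSETELEM:
                        return t->nelems;
                default:
                        return 1;
                }
        }

        int main(void)
        {
                struct trans add3 = { .type = MSG_NEWSETELEM, .nelems = 3 };
                struct trans rule = { .type = MSG_NEWRULE };

                /* a three-element batch accounts for 3 audit entries, a rule for 1 */
                printf("%u %u\n", audit_entrycount(&add3), audit_entrycount(&rule));
                return 0;
        }
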
static void nf_tables_commit_audit_collect(struct list_head *adl,
- struct nft_table *table, u32 op)
+ const struct nft_trans *trans, u32 op)
{
+ const struct nft_table *table = trans->table;
struct nft_audit_data *adp;
list_for_each_entry(adp, adl, list) {
@@ -9985,7 +10584,7 @@ static void nf_tables_commit_audit_collect(struct list_head *adl,
WARN_ONCE(1, "table=%s not expected in commit list", table->name);
return;
found:
- adp->entries++;
+ adp->entries += nf_tables_commit_audit_entrycount(trans);
if (!adp->op || adp->op > op)
adp->op = op;
}
@@ -10040,12 +10639,15 @@ static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
struct nftables_pernet *nft_net = nft_pernet(net);
+ const struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct nft_trans_binding *trans_binding;
struct nft_trans *trans, *next;
unsigned int base_seq, gc_seq;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
struct nft_chain *chain;
struct nft_table *table;
+ struct nft_ctx ctx;
LIST_HEAD(adl);
int err;
@@ -10054,7 +10656,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return 0;
}
- list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
+ nft_ctx_init(&ctx, net, skb, nlh, NFPROTO_UNSPEC, NULL, NULL, NULL);
+
+ list_for_each_entry(trans_binding, &nft_net->binding_list, binding_list) {
+ trans = &trans_binding->nft_trans;
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (!nft_trans_set_update(trans) &&
@@ -10072,6 +10677,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return -EINVAL;
}
break;
+ default:
+ WARN_ONCE(1, "Unhandled bind type %d", trans->msg_type);
+ break;
}
}
@@ -10087,9 +10695,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
/* 1. Allocate space for next generation rules_gen_X[] */
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
+ struct nft_table *table = trans->table;
int ret;
- ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
+ ret = nf_tables_commit_audit_alloc(&adl, table);
if (ret) {
nf_tables_commit_chain_prepare_cancel(net);
nf_tables_commit_audit_free(&adl);
@@ -10097,7 +10706,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
- chain = trans->ctx.chain;
+ chain = nft_trans_rule_chain(trans);
ret = nf_tables_commit_chain_prepare(net, chain);
if (ret < 0) {
@@ -10130,68 +10739,71 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
net->nft.gencursor = nft_gencursor_next(net);
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
- nf_tables_commit_audit_collect(&adl, trans->ctx.table,
- trans->msg_type);
+ struct nft_table *table = trans->table;
+
+ nft_ctx_update(&ctx, trans);
+
+ nf_tables_commit_audit_collect(&adl, trans, trans->msg_type);
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
- if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
- nf_tables_table_disable(net, trans->ctx.table);
+ if (table->flags & NFT_TABLE_F_DORMANT)
+ nf_tables_table_disable(net, table);
- trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
+ table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
- nft_clear(net, trans->ctx.table);
+ nft_clear(net, table);
}
- nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
+ nf_tables_table_notify(&ctx, NFT_MSG_NEWTABLE);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- list_del_rcu(&trans->ctx.table->list);
- nf_tables_table_notify(&trans->ctx, trans->msg_type);
+ list_del_rcu(&table->list);
+ nf_tables_table_notify(&ctx, trans->msg_type);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- nft_chain_commit_update(trans);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN,
+ nft_chain_commit_update(nft_trans_container_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN,
&nft_trans_chain_hooks(trans));
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
/* trans destroyed after rcu grace period */
} else {
- nft_chain_commit_drop_policy(trans);
- nft_clear(net, trans->ctx.chain);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, NULL);
+ nft_chain_commit_drop_policy(nft_trans_container_chain(trans));
+ nft_clear(net, nft_trans_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, NULL);
nft_trans_destroy(trans);
}
break;
case NFT_MSG_DELCHAIN:
case NFT_MSG_DESTROYCHAIN:
if (nft_trans_chain_update(trans)) {
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+ nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN,
&nft_trans_chain_hooks(trans));
- nft_netdev_unregister_hooks(net,
- &nft_trans_chain_hooks(trans),
- true);
+ if (!(table->flags & NFT_TABLE_F_DORMANT)) {
+ nft_netdev_unregister_hooks(net,
+ &nft_trans_chain_hooks(trans),
+ true);
+ }
} else {
- nft_chain_del(trans->ctx.chain);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+ nft_chain_del(nft_trans_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN,
NULL);
- nf_tables_unregister_hook(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain);
+ nf_tables_unregister_hook(ctx.net, ctx.table,
+ nft_trans_chain(trans));
}
break;
case NFT_MSG_NEWRULE:
- nft_clear(trans->ctx.net, nft_trans_rule(trans));
- nf_tables_rule_notify(&trans->ctx,
- nft_trans_rule(trans),
+ nft_clear(net, nft_trans_rule(trans));
+ nf_tables_rule_notify(&ctx, nft_trans_rule(trans),
NFT_MSG_NEWRULE);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
@@ -10199,17 +10811,16 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
list_del_rcu(&nft_trans_rule(trans)->list);
- nf_tables_rule_notify(&trans->ctx,
- nft_trans_rule(trans),
+ nf_tables_rule_notify(&ctx, nft_trans_rule(trans),
trans->msg_type);
- nft_rule_expr_deactivate(&trans->ctx,
- nft_trans_rule(trans),
+ nft_rule_expr_deactivate(&ctx, nft_trans_rule(trans),
NFT_TRANS_COMMIT);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_NEWSET:
+ list_del(&nft_trans_container_set(trans)->list_trans_newset);
if (nft_trans_set_update(trans)) {
struct nft_set *set = nft_trans_set(trans);
@@ -10225,9 +10836,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
*/
if (nft_set_is_anonymous(nft_trans_set(trans)) &&
!list_empty(&nft_trans_set(trans)->bindings))
- nft_use_dec(&trans->ctx.table->use);
+ nft_use_dec(&table->use);
}
- nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ nf_tables_set_notify(&ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
break;
@@ -10235,16 +10846,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DESTROYSET:
nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
- nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ nf_tables_set_notify(&ctx, nft_trans_set(trans),
trans->msg_type, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
+
+ nft_trans_elems_add(&ctx, te);
- nft_setelem_activate(net, te->set, te->elem_priv);
- nf_tables_setelem_notify(&trans->ctx, te->set,
- te->elem_priv,
- NFT_MSG_NEWSETELEM);
if (te->set->ops->commit &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
@@ -10254,16 +10863,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- te = (struct nft_trans_elem *)trans->data;
-
- nf_tables_setelem_notify(&trans->ctx, te->set,
- te->elem_priv,
- trans->msg_type);
- nft_setelem_remove(net, te->set, te->elem_priv);
- if (!nft_setelem_is_catchall(te->set, te->elem_priv)) {
- atomic_dec(&te->set->nelems);
- te->set->ndeact--;
- }
+ te = nft_trans_container_elem(trans);
+
+ nft_trans_elems_remove(&ctx, te);
+
if (te->set->ops->commit &&
list_empty(&te->set->pending_update)) {
list_add_tail(&te->set->pending_update,
@@ -10272,13 +10875,13 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- nft_obj_commit_update(trans);
- nf_tables_obj_notify(&trans->ctx,
+ nft_obj_commit_update(&ctx, trans);
+ nf_tables_obj_notify(&ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
} else {
nft_clear(net, nft_trans_obj(trans));
- nf_tables_obj_notify(&trans->ctx,
+ nf_tables_obj_notify(&ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
nft_trans_destroy(trans);
@@ -10287,14 +10890,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
nft_obj_del(nft_trans_obj(trans));
- nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
+ nf_tables_obj_notify(&ctx, nft_trans_obj(trans),
trans->msg_type);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_trans_flowtable(trans)->data.flags =
nft_trans_flowtable_flags(trans);
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
NFT_MSG_NEWFLOWTABLE);
@@ -10302,7 +10905,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
&nft_trans_flowtable(trans)->hook_list);
} else {
nft_clear(net, nft_trans_flowtable(trans));
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
NULL,
NFT_MSG_NEWFLOWTABLE);
@@ -10312,19 +10915,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans));
} else {
list_del_rcu(&nft_trans_flowtable(trans)->list);
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
NULL,
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list);
}
break;
@@ -10362,28 +10967,31 @@ static void nf_tables_module_autoload(struct net *net)
static void nf_tables_abort_release(struct nft_trans *trans)
{
+ struct nft_ctx ctx = { };
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
- nf_tables_table_destroy(&trans->ctx);
+ nf_tables_table_destroy(trans->table);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
- nf_tables_chain_destroy(&trans->ctx);
+ nf_tables_chain_destroy(nft_trans_chain(trans));
break;
case NFT_MSG_NEWRULE:
- nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ nf_tables_rule_destroy(&ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_set_destroy(&trans->ctx, nft_trans_set(trans));
+ nft_set_destroy(&ctx, nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
- nft_set_elem_destroy(nft_trans_elem_set(trans),
- nft_trans_elem_priv(trans), true);
+ nft_trans_set_elem_destroy(&ctx, nft_trans_container_elem(trans));
break;
case NFT_MSG_NEWOBJ:
- nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj(trans));
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans))
@@ -10415,42 +11023,56 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
struct nft_trans *trans, *next;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
+ struct nft_ctx ctx = {
+ .net = net,
+ };
+ int err = 0;
if (action == NFNL_ABORT_VALIDATE &&
nf_tables_validate(net) < 0)
- return -EAGAIN;
+ err = -EAGAIN;
list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
+ struct nft_table *table = trans->table;
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
- if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
- nf_tables_table_disable(net, trans->ctx.table);
- trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
- } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
- trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (table->flags & __NFT_TABLE_F_WAS_DORMANT) {
+ nf_tables_table_disable(net, table);
+ table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+ table->flags &= ~NFT_TABLE_F_DORMANT;
+ }
+ if (table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ table->nlpid = 0;
}
- trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
+ table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
- list_del_rcu(&trans->ctx.table->list);
+ list_del_rcu(&table->list);
}
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- nft_clear(trans->ctx.net, trans->ctx.table);
+ nft_clear(trans->net, table);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- nft_netdev_unregister_hooks(net,
- &nft_trans_chain_hooks(trans),
- true);
+ if (!(table->flags & NFT_TABLE_F_DORMANT)) {
+ nft_netdev_unregister_hooks(net,
+ &nft_trans_chain_hooks(trans),
+ true);
+ }
free_percpu(nft_trans_chain_stats(trans));
kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
@@ -10459,11 +11081,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.table->use);
- nft_chain_del(trans->ctx.chain);
- nf_tables_unregister_hook(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain);
+ nft_use_dec_restore(&table->use);
+ nft_chain_del(nft_trans_chain(trans));
+ nf_tables_unregister_hook(trans->net, table,
+ nft_trans_chain(trans));
}
break;
case NFT_MSG_DELCHAIN:
@@ -10472,8 +11093,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
} else {
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, trans->ctx.chain);
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_chain(trans));
}
nft_trans_destroy(trans);
break;
@@ -10482,30 +11103,31 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.chain->use);
+ nft_use_dec_restore(&nft_trans_rule_chain(trans)->use);
list_del_rcu(&nft_trans_rule(trans)->list);
- nft_rule_expr_deactivate(&trans->ctx,
+ nft_rule_expr_deactivate(&ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
- nft_use_inc_restore(&trans->ctx.chain->use);
- nft_clear(trans->ctx.net, nft_trans_rule(trans));
- nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_use_inc_restore(&nft_trans_rule_chain(trans)->use);
+ nft_clear(trans->net, nft_trans_rule(trans));
+ nft_rule_expr_activate(&ctx, nft_trans_rule(trans));
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
+ list_del(&nft_trans_container_set(trans)->list_trans_newset);
if (nft_trans_set_update(trans)) {
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
@@ -10515,10 +11137,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_set(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_set(trans));
if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
- nft_map_activate(&trans->ctx, nft_trans_set(trans));
+ nft_map_activate(&ctx, nft_trans_set(trans));
nft_trans_destroy(trans);
break;
@@ -10527,10 +11149,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- te = (struct nft_trans_elem *)trans->data;
- nft_setelem_remove(net, te->set, te->elem_priv);
- if (!nft_setelem_is_catchall(te->set, te->elem_priv))
- atomic_dec(&te->set->nelems);
+ te = nft_trans_container_elem(trans);
+ if (!nft_trans_elems_new_abort(&ctx, te)) {
+ nft_trans_destroy(trans);
+ break;
+ }
if (te->set->ops->abort &&
list_empty(&te->set->pending_update)) {
@@ -10540,12 +11163,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
- nft_setelem_data_activate(net, te->set, te->elem_priv);
- nft_setelem_activate(net, te->set, te->elem_priv);
- if (!nft_setelem_is_catchall(te->set, te->elem_priv))
- te->set->ndeact--;
+ nft_trans_elems_destroy_abort(&ctx, te);
if (te->set->ops->abort &&
list_empty(&te->set->pending_update)) {
@@ -10556,27 +11176,29 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
nft_obj_del(nft_trans_obj(trans));
}
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_obj(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_obj(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans));
} else {
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list);
}
break;
@@ -10586,14 +11208,16 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
} else {
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_flowtable(trans));
}
nft_trans_destroy(trans);
break;
}
}
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_set_list));
+
nft_set_abort_update(&set_update_list);
synchronize_rcu();
@@ -10604,12 +11228,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nf_tables_abort_release(trans);
}
- if (action == NFNL_ABORT_AUTOLOAD)
- nf_tables_module_autoload(net);
- else
- nf_tables_module_autoload_cleanup(net);
-
- return 0;
+ return err;
}
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
@@ -10622,6 +11241,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
gc_seq = nft_gc_seq_begin(nft_net);
ret = __nf_tables_abort(net, action);
nft_gc_seq_end(nft_net, gc_seq);
+
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+ /* module autoload needs to happen after GC sequence update because it
+ * temporarily releases and grabs mutex again.
+ */
+ if (action == NFNL_ABORT_AUTOLOAD)
+ nf_tables_module_autoload(net);
+ else
+ nf_tables_module_autoload_cleanup(net);
+
mutex_unlock(&nft_net->commit_mutex);
return ret;
@@ -10686,146 +11316,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
}
EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
-/*
- * Loop detection - walk through the ruleset beginning at the destination chain
- * of a new jump until either the source chain is reached (loop) or all
- * reachable chains have been traversed.
- *
- * The loop check is performed whenever a new jump verdict is added to an
- * expression or verdict map or a verdict map is bound to a new chain.
- */
-
-static int nf_tables_check_loops(const struct nft_ctx *ctx,
- const struct nft_chain *chain);
-
-static int nft_check_loops(const struct nft_ctx *ctx,
- const struct nft_set_ext *ext)
-{
- const struct nft_data *data;
- int ret;
-
- data = nft_set_ext_data(ext);
- switch (data->verdict.code) {
- case NFT_JUMP:
- case NFT_GOTO:
- ret = nf_tables_check_loops(ctx, data->verdict.chain);
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
- struct nft_set *set,
- const struct nft_set_iter *iter,
- struct nft_elem_priv *elem_priv)
-{
- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
-
- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
- return 0;
-
- return nft_check_loops(ctx, ext);
-}
-
-static int nft_set_catchall_loops(const struct nft_ctx *ctx,
- struct nft_set *set)
-{
- u8 genmask = nft_genmask_next(ctx->net);
- struct nft_set_elem_catchall *catchall;
- struct nft_set_ext *ext;
- int ret = 0;
-
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
- ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_active(ext, genmask))
- continue;
-
- ret = nft_check_loops(ctx, ext);
- if (ret < 0)
- return ret;
- }
-
- return ret;
-}
-
-static int nf_tables_check_loops(const struct nft_ctx *ctx,
- const struct nft_chain *chain)
-{
- const struct nft_rule *rule;
- const struct nft_expr *expr, *last;
- struct nft_set *set;
- struct nft_set_binding *binding;
- struct nft_set_iter iter;
-
- if (ctx->chain == chain)
- return -ELOOP;
-
- if (fatal_signal_pending(current))
- return -EINTR;
-
- list_for_each_entry(rule, &chain->rules, list) {
- nft_rule_for_each_expr(expr, last, rule) {
- struct nft_immediate_expr *priv;
- const struct nft_data *data;
- int err;
-
- if (strcmp(expr->ops->type->name, "immediate"))
- continue;
-
- priv = nft_expr_priv(expr);
- if (priv->dreg != NFT_REG_VERDICT)
- continue;
-
- data = &priv->data;
- switch (data->verdict.code) {
- case NFT_JUMP:
- case NFT_GOTO:
- err = nf_tables_check_loops(ctx,
- data->verdict.chain);
- if (err < 0)
- return err;
- break;
- default:
- break;
- }
- }
- }
-
- list_for_each_entry(set, &ctx->table->sets, list) {
- if (!nft_is_active_next(ctx->net, set))
- continue;
- if (!(set->flags & NFT_SET_MAP) ||
- set->dtype != NFT_DATA_VERDICT)
- continue;
-
- list_for_each_entry(binding, &set->bindings, list) {
- if (!(binding->flags & NFT_SET_MAP) ||
- binding->chain != chain)
- continue;
-
- iter.genmask = nft_genmask_next(ctx->net);
- iter.skip = 0;
- iter.count = 0;
- iter.err = 0;
- iter.fn = nf_tables_loop_check_setelem;
-
- set->ops->walk(ctx, set, &iter);
- if (!iter.err)
- iter.err = nft_set_catchall_loops(ctx, set);
-
- if (iter.err < 0)
- return iter.err;
- }
- }
-
- return 0;
-}
-
/**
* nft_parse_u32_check - fetch u32 attribute and check for maximum value
*
@@ -10904,10 +11394,11 @@ static int nft_validate_register_load(enum nft_registers reg, unsigned int len)
return 0;
}
-int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
+int nft_parse_register_load(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *sreg, u32 len)
{
- u32 reg;
- int err;
+ int err, invalid_reg;
+ u32 reg, next_register;
err = nft_parse_register(attr, &reg);
if (err < 0)
@@ -10917,11 +11408,36 @@ int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
if (err < 0)
return err;
+ next_register = DIV_ROUND_UP(len, NFT_REG32_SIZE) + reg;
+
+ /* Can't happen: nft_validate_register_load() should have failed */
+ if (WARN_ON_ONCE(next_register > NFT_REG32_NUM))
+ return -EINVAL;
+
+ /* find first register that did not see an earlier store. */
+ invalid_reg = find_next_zero_bit(ctx->reg_inited, NFT_REG32_NUM, reg);
+
+ /* invalid register within the range that we're loading from? */
+ if (invalid_reg < next_register)
+ return -ENODATA;
+
*sreg = reg;
return 0;
}
EXPORT_SYMBOL_GPL(nft_parse_register_load);
+static void nft_saw_register_store(const struct nft_ctx *__ctx,
+ int reg, unsigned int len)
+{
+ unsigned int registers = DIV_ROUND_UP(len, NFT_REG32_SIZE);
+ struct nft_ctx *ctx = (struct nft_ctx *)__ctx;
+
+ if (WARN_ON_ONCE(len == 0 || reg < 0))
+ return;
+
+ bitmap_set(ctx->reg_inited, reg, registers);
+}
+
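
Together, nft_parse_register_load() and nft_saw_register_store() above add a cheap data-flow check at rule-load time: every store marks the 32-bit registers it covers in ctx->reg_inited, and a later load fails with -ENODATA if any register in its range was never written. A userspace sketch of the same bookkeeping, using a plain bool array instead of the kernel bitmap helpers and an illustrative register count:

        #include <stdbool.h>
        #include <stdio.h>

        #define NREGS           16      /* illustrative; not the kernel's count */
        #define REG32_SIZE      4       /* bytes per 32-bit register */

        static bool reg_inited[NREGS];

        /* A store of `len` bytes starting at `reg` initializes the registers it covers. */
        static void saw_store(unsigned int reg, unsigned int len)
        {
                unsigned int n = (len + REG32_SIZE - 1) / REG32_SIZE;

                while (n--)
                        reg_inited[reg++] = true;
        }

        /* A load is only valid if every register in its range saw an earlier store. */
        static int check_load(unsigned int reg, unsigned int len)
        {
                unsigned int end = reg + (len + REG32_SIZE - 1) / REG32_SIZE;

                for (; reg < end; reg++)
                        if (!reg_inited[reg])
                                return -1;      /* the kernel returns -ENODATA here */
                return 0;
        }

        int main(void)
        {
                saw_store(2, 8);                        /* initializes registers 2 and 3 */
                printf("%d\n", check_load(2, 8));       /* 0: both registers written */
                printf("%d\n", check_load(3, 8));       /* -1: register 4 never written */
                return 0;
        }
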
static int nft_validate_register_store(const struct nft_ctx *ctx,
enum nft_registers reg,
const struct nft_data *data,
@@ -10938,13 +11454,16 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
if (data != NULL &&
(data->verdict.code == NFT_GOTO ||
data->verdict.code == NFT_JUMP)) {
- err = nf_tables_check_loops(ctx, data->verdict.chain);
+ err = nft_chain_validate(ctx, data->verdict.chain);
if (err < 0)
return err;
}
- return 0;
+ break;
default:
+ if (type != NFT_DATA_VALUE)
+ return -EINVAL;
+
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
if (len == 0)
@@ -10953,10 +11472,11 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
sizeof_field(struct nft_regs, data))
return -ERANGE;
- if (data != NULL && type != NFT_DATA_VALUE)
- return -EINVAL;
- return 0;
+ break;
}
+
+ nft_saw_register_store(ctx, reg, len);
+ return 0;
}
int nft_parse_register_store(const struct nft_ctx *ctx,
@@ -11222,27 +11742,6 @@ int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
}
EXPORT_SYMBOL_GPL(nft_data_dump);
-int __nft_release_basechain(struct nft_ctx *ctx)
-{
- struct nft_rule *rule, *nr;
-
- if (WARN_ON(!nft_is_base_chain(ctx->chain)))
- return 0;
-
- nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
- list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
- list_del(&rule->list);
- nft_use_dec(&ctx->chain->use);
- nf_tables_rule_release(ctx, rule);
- }
- nft_chain_del(ctx->chain);
- nft_use_dec(&ctx->table->use);
- nf_tables_chain_destroy(ctx);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__nft_release_basechain);
-
static void __nft_release_hook(struct net *net, struct nft_table *table)
{
struct nft_flowtable *flowtable;
@@ -11251,7 +11750,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
list_for_each_entry(chain, &table->chains, list)
__nf_tables_unregister_hook(net, table, chain, true);
list_for_each_entry(flowtable, &table->flowtables, list)
- __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
+ __nft_unregister_flowtable_net_hooks(net, flowtable,
+ &flowtable->hook_list,
true);
}
@@ -11312,12 +11812,11 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
nft_obj_destroy(&ctx, obj);
}
list_for_each_entry_safe(chain, nc, &table->chains, list) {
- ctx.chain = chain;
nft_chain_del(chain);
nft_use_dec(&table->use);
- nf_tables_chain_destroy(&ctx);
+ nf_tables_chain_destroy(chain);
}
- nf_tables_table_destroy(&ctx);
+ nf_tables_table_destroy(table);
}
static void __nft_release_tables(struct net *net)
@@ -11355,12 +11854,15 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
gc_seq = nft_gc_seq_begin(nft_net);
- if (!list_empty(&nf_tables_destroy_list))
- nf_tables_trans_destroy_flush_work();
+ nf_tables_trans_destroy_flush_work(net);
again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
+ if (table->flags & NFT_TABLE_F_PERSIST) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ continue;
+ }
__nft_release_hook(net, table);
list_del_rcu(&table->list);
to_delete[deleted++] = table;
@@ -11394,6 +11896,8 @@ static int __net_init nf_tables_init_net(struct net *net)
INIT_LIST_HEAD(&nft_net->tables);
INIT_LIST_HEAD(&nft_net->commit_list);
+ INIT_LIST_HEAD(&nft_net->destroy_list);
+ INIT_LIST_HEAD(&nft_net->commit_set_list);
INIT_LIST_HEAD(&nft_net->binding_list);
INIT_LIST_HEAD(&nft_net->module_list);
INIT_LIST_HEAD(&nft_net->notify_list);
@@ -11401,6 +11905,7 @@ static int __net_init nf_tables_init_net(struct net *net)
nft_net->base_seq = 1;
nft_net->gc_seq = 0;
nft_net->validate_state = NFT_VALIDATE_SKIP;
+ INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
return 0;
}
@@ -11423,18 +11928,23 @@ static void __net_exit nf_tables_exit_net(struct net *net)
gc_seq = nft_gc_seq_begin(nft_net);
- if (!list_empty(&nft_net->commit_list) ||
- !list_empty(&nft_net->module_list))
- __nf_tables_abort(net, NFNL_ABORT_NONE);
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_set_list));
+
+ if (!list_empty(&nft_net->module_list))
+ nf_tables_module_autoload_cleanup(net);
+ cancel_work_sync(&nft_net->destroy_work);
__nft_release_tables(net);
nft_gc_seq_end(nft_net, gc_seq);
mutex_unlock(&nft_net->commit_mutex);
+
WARN_ON_ONCE(!list_empty(&nft_net->tables));
WARN_ON_ONCE(!list_empty(&nft_net->module_list));
WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
+ WARN_ON_ONCE(!list_empty(&nft_net->destroy_list));
}
static void nf_tables_exit_batch(struct list_head *net_exit_list)
@@ -11455,6 +11965,14 @@ static int __init nf_tables_module_init(void)
{
int err;
+ BUILD_BUG_ON(offsetof(struct nft_trans_table, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_chain, nft_trans_binding.nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_rule, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_set, nft_trans_binding.nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_elem, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_obj, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_flowtable, nft_trans) != 0);
+
err = register_pernet_subsys(&nf_tables_net_ops);
if (err < 0)
return err;
@@ -11519,7 +12037,6 @@ static void __exit nf_tables_module_exit(void)
nft_chain_route_fini();
unregister_pernet_subsys(&nf_tables_net_ops);
cancel_work_sync(&trans_gc_work);
- cancel_work_sync(&trans_destroy_work);
rcu_barrier();
rhltable_destroy(&nft_objname_ht);
nf_tables_core_module_exit();
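/*
 * Illustrative sketch, not part of the patch above: the BUILD_BUG_ON()
 * checks added to nf_tables_module_init() pin the generic transaction
 * header at offset 0 of every per-type transaction container, so a
 * generic pointer can be converted back to its container with a plain
 * cast.  The structures below (xact, xact_rule) are invented for this
 * stand-alone example.
 */
#include <assert.h>
#include <stddef.h>

struct xact {				/* generic transaction header */
	int msg_type;
};

struct xact_rule {			/* per-type container, header first */
	struct xact base;
	int rule_id;
};

static_assert(offsetof(struct xact_rule, base) == 0,
	      "header must stay at offset 0 for the cast below to be valid");

static int xact_rule_id(const struct xact *t)
{
	/* safe only because 'base' is the first member of struct xact_rule */
	return ((const struct xact_rule *)t)->rule_id;
}

int main(void)
{
	struct xact_rule r = { .base = { .msg_type = 1 }, .rule_id = 42 };

	return xact_rule_id(&r.base) == 42 ? 0 : 1;
}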
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index c3e635364701..75598520b0fa 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -21,7 +21,7 @@
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nft_meta.h>
-#if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86)
+#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_X86)
static struct static_key_false nf_tables_skip_direct_calls;
@@ -207,7 +207,7 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_regs *regs,
struct nft_pktinfo *pkt)
{
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
unsigned long e;
if (nf_skip_indirect_calls())
@@ -236,7 +236,7 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
X(e, nft_objref_map_eval);
#undef X
indirect_call:
-#endif /* CONFIG_RETPOLINE */
+#endif /* CONFIG_MITIGATION_RETPOLINE */
expr->ops->eval(expr, regs, pkt);
}
@@ -256,7 +256,7 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
const struct net *net = nft_net(pkt);
const struct nft_expr *expr, *last;
const struct nft_rule_dp *rule;
- struct nft_regs regs = {};
+ struct nft_regs regs;
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
bool genbit = READ_ONCE(net->nft.gencursor);
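/*
 * Illustrative sketch, not part of the patch above: under
 * CONFIG_MITIGATION_RETPOLINE the X(e, ...) macro in expr_call_ops_eval()
 * turns the usual indirect expression call into a series of cheap direct
 * calls.  The dispatcher below mirrors that shape; eval_a, eval_b and
 * dispatch() are invented for this example.
 */
#include <stdio.h>

static void eval_a(int *v) { *v += 1; }
static void eval_b(int *v) { *v *= 2; }

static void dispatch(void (*fn)(int *), int *v)
{
#define X(ptr, handler) do { if ((ptr) == (handler)) { handler(v); return; } } while (0)
	X(fn, eval_a);
	X(fn, eval_b);
#undef X
	fn(v);			/* fallback: a genuine indirect call */
}

int main(void)
{
	int v = 3;

	dispatch(eval_b, &v);	/* matched above, called directly */
	printf("%d\n", v);	/* prints 6 */
	return 0;
}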
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 12ab78fa5d84..64675f1c7f29 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -513,38 +513,38 @@ static void nft_flow_rule_offload_abort(struct net *net,
int err = 0;
list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
+ if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
- err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_DELCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
@@ -564,46 +564,46 @@ int nft_flow_rule_offload_commit(struct net *net)
u8 policy;
list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
+ if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
policy = nft_trans_chain_policy(trans);
- err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_DELCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
policy = nft_trans_chain_policy(trans);
- err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_NEWRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- if (trans->ctx.flags & NLM_F_REPLACE ||
- !(trans->ctx.flags & NLM_F_APPEND)) {
+ if (trans->flags & NLM_F_REPLACE ||
+ !(trans->flags & NLM_F_APPEND)) {
err = -EOPNOTSUPP;
break;
}
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
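/*
 * Illustrative sketch, not part of the patch above:
 * nft_flow_rule_offload_abort() walks the commit list in reverse from the
 * entry that failed and undoes only the offload work already applied.
 * The loop below shows the same commit/unwind-in-reverse pattern with a
 * plain array; apply() and undo() are invented for this example.
 */
#include <stdio.h>

#define NENTRIES 4

static int apply(int i)
{
	printf("apply %d\n", i);
	return i == 2 ? -1 : 0;		/* third entry fails on purpose */
}

static void undo(int i)
{
	printf("undo %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NENTRIES; i++) {
		if (apply(i) < 0)
			goto abort;
	}
	return 0;

abort:
	/* unwind everything applied before the failing entry, newest first */
	while (--i >= 0)
		undo(i);
	return 1;
}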
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index a83637e3f455..580c55268f65 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -317,7 +317,7 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
net_get_random_once(&trace_key, sizeof(trace_key));
info->skbid = (u32)siphash_3u32(hash32_ptr(skb),
- skb_get_hash(skb),
+ skb_get_hash_net(nft_net(pkt), skb),
skb->skb_iif,
&trace_key);
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index c9fbe0f707b5..e598a2a252b0 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -402,31 +402,36 @@ replay_abort:
{
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
- return kfree_skb(skb);
+ return consume_skb(skb);
}
}
if (!ss->valid_genid || !ss->commit || !ss->abort) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
- return kfree_skb(skb);
+ return consume_skb(skb);
}
if (!try_module_get(ss->owner)) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
- return kfree_skb(skb);
+ return consume_skb(skb);
}
if (!ss->valid_genid(net, genid)) {
module_put(ss->owner);
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -ERESTART, NULL);
- return kfree_skb(skb);
+ return consume_skb(skb);
}
nfnl_unlock(subsys_id);
+ if (nlh->nlmsg_flags & NLM_F_ACK) {
+ memset(&extack, 0, sizeof(extack));
+ nfnl_err_add(&err_list, nlh, 0, &extack);
+ }
+
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
@@ -512,7 +517,7 @@ replay_abort:
err = nla_parse_deprecated(cda,
ss->cb[cb_id].attr_count,
attr, attrlen,
- ss->cb[cb_id].policy, NULL);
+ ss->cb[cb_id].policy, &extack);
if (err < 0)
goto ack;
@@ -562,7 +567,7 @@ done:
if (status & NFNL_BATCH_REPLAY) {
ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
nfnl_err_reset(&err_list);
- kfree_skb(skb);
+ consume_skb(skb);
module_put(ss->owner);
goto replay;
} else if (status == NFNL_BATCH_DONE) {
@@ -573,6 +578,9 @@ done:
} else if (err) {
ss->abort(net, oskb, NFNL_ABORT_NONE);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
+ } else if (nlh->nlmsg_flags & NLM_F_ACK) {
+ memset(&extack, 0, sizeof(extack));
+ nfnl_err_add(&err_list, nlh, 0, &extack);
}
} else {
enum nfnl_abort_action abort_action;
@@ -585,7 +593,7 @@ done:
err = ss->abort(net, oskb, abort_action);
if (err == -EAGAIN) {
nfnl_err_reset(&err_list);
- kfree_skb(skb);
+ consume_skb(skb);
module_put(ss->owner);
status |= NFNL_BATCH_FAILURE;
goto replay_abort;
@@ -593,7 +601,7 @@ done:
}
nfnl_err_deliver(&err_list, oskb);
- kfree_skb(skb);
+ consume_skb(skb);
module_put(ss->owner);
}
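/*
 * Illustrative sketch, not part of the patch above: the batch code now
 * uses consume_skb() where the buffer was handled normally and keeps
 * kfree_skb() for genuine drops, so drop monitoring no longer counts a
 * completed batch as packet loss.  The helper below only mirrors that
 * consumed-vs-dropped split; struct buf and finish() are invented for
 * this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct buf { const char *name; };

static void free_consumed(struct buf *b) { printf("%s: consumed\n", b->name); }
static void free_dropped(struct buf *b)  { printf("%s: dropped\n", b->name); }

static void finish(struct buf *b, bool processed_ok)
{
	/* a successfully processed buffer is released, not "dropped" */
	if (processed_ok)
		free_consumed(b);
	else
		free_dropped(b);
}

int main(void)
{
	struct buf batch = { "batch" }, bad = { "bad-msg" };

	finish(&batch, true);
	finish(&bad, false);
	return 0;
}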
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index f466af4f8531..eab4f476b47f 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -366,8 +366,7 @@ static int cttimeout_default_set(struct sk_buff *skb,
__u8 l4num;
int ret;
- if (!cda[CTA_TIMEOUT_L3PROTO] ||
- !cda[CTA_TIMEOUT_L4PROTO] ||
+ if (!cda[CTA_TIMEOUT_L4PROTO] ||
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5cf38fc0a366..5c913987901a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
rcu);
+ rcu_read_lock();
nfqnl_flush(inst, NULL, 0);
+ rcu_read_unlock();
kfree(inst);
module_put(THIS_MODULE);
}
@@ -225,6 +227,148 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
return entry;
}
+static unsigned int nf_iterate(struct sk_buff *skb,
+ struct nf_hook_state *state,
+ const struct nf_hook_entries *hooks,
+ unsigned int *index)
+{
+ const struct nf_hook_entry *hook;
+ unsigned int verdict, i = *index;
+
+ while (i < hooks->num_hook_entries) {
+ hook = &hooks->hooks[i];
+repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
+ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+ }
+ i++;
+ }
+
+ *index = i;
+ return NF_ACCEPT;
+}
+
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+ switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+ case NFPROTO_IPV4:
+ return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+ case NFPROTO_IPV6:
+ return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, entry->state.sk,
+ skb, RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
+static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ ret = nf_ip_reroute(skb, entry);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->reroute(skb, entry);
+ break;
+ }
+ return ret;
+}
+
+/* caller must hold rcu read-side lock */
+static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+ const struct nf_hook_entry *hook_entry;
+ const struct nf_hook_entries *hooks;
+ struct sk_buff *skb = entry->skb;
+ const struct net *net;
+ unsigned int i;
+ int err;
+ u8 pf;
+
+ net = entry->state.net;
+ pf = entry->state.pf;
+
+ hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+
+ i = entry->hook_index;
+ if (!hooks || i >= hooks->num_hook_entries) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+ nf_queue_entry_free(entry);
+ return;
+ }
+
+ hook_entry = &hooks->hooks[i];
+
+ /* Continue traversal iff userspace said ok... */
+ if (verdict == NF_REPEAT)
+ verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
+
+ if (verdict == NF_ACCEPT) {
+ if (nf_reroute(skb, entry) < 0)
+ verdict = NF_DROP;
+ }
+
+ if (verdict == NF_ACCEPT) {
+next_hook:
+ ++i;
+ verdict = nf_iterate(skb, &entry->state, hooks, &i);
+ }
+
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ case NF_STOP:
+ local_bh_disable();
+ entry->state.okfn(entry->state.net, entry->state.sk, skb);
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
+ err = nf_queue(skb, &entry->state, i, verdict);
+ if (err == 1)
+ goto next_hook;
+ break;
+ case NF_STOLEN:
+ break;
+ default:
+ kfree_skb(skb);
+ }
+
+ nf_queue_entry_free(entry);
+}
+
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_ct_hook *ct_hook;
@@ -326,18 +470,18 @@ static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
return 0;
}
-static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
+static int nfqnl_get_sk_secctx(struct sk_buff *skb, struct lsm_context *ctx)
{
- u32 seclen = 0;
+ int seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
+
if (!skb || !sk_fullsock(skb->sk))
return 0;
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->secmark)
- security_secid_to_secctx(skb->secmark, secdata, &seclen);
-
+ seclen = security_secid_to_secctx(skb->secmark, ctx);
read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
return seclen;
@@ -396,6 +540,14 @@ nla_put_failure:
return -1;
}
+static int nf_queue_checksum_help(struct sk_buff *entskb)
+{
+ if (skb_csum_is_sctp(entskb))
+ return skb_crc32c_csum_help(entskb);
+
+ return skb_checksum_help(entskb);
+}
+
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry,
@@ -415,8 +567,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
enum ip_conntrack_info ctinfo = 0;
const struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
- char *secdata = NULL;
- u32 seclen = 0;
+ struct lsm_context ctx;
+ int seclen = 0;
ktime_t tstamp;
size = nlmsg_total_size(sizeof(struct nfgenmsg))
@@ -458,7 +610,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
case NFQNL_COPY_PACKET:
if (!(queue->flags & NFQA_CFG_F_GSO) &&
entskb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(entskb))
+ nf_queue_checksum_help(entskb))
return NULL;
data_len = READ_ONCE(queue->copy_range);
@@ -490,7 +642,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
}
if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
- seclen = nfqnl_get_sk_secctx(entskb, &secdata);
+ seclen = nfqnl_get_sk_secctx(entskb, &ctx);
+ if (seclen < 0)
+ return NULL;
if (seclen)
size += nla_total_size(seclen);
}
@@ -630,7 +784,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
goto nla_put_failure;
- if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
+ if (seclen > 0 && nla_put(skb, NFQA_SECCTX, ctx.len, ctx.context))
goto nla_put_failure;
if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
@@ -658,8 +812,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
}
nlh->nlmsg_len = skb->len;
- if (seclen)
- security_release_secctx(secdata, seclen);
+ if (seclen >= 0)
+ security_release_secctx(&ctx);
return skb;
nla_put_failure:
@@ -667,8 +821,8 @@ nla_put_failure:
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
- if (seclen)
- security_release_secctx(secdata, seclen);
+ if (seclen >= 0)
+ security_release_secctx(&ctx);
return NULL;
}
@@ -676,10 +830,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
- const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
+ struct nf_conn *ct = (void *)skb_nfct(entry->skb);
+ unsigned long status;
+ unsigned int use;
+
+ if (!ct)
+ return false;
- if (ct && ((ct->status & flags) == IPS_DYING))
+ status = READ_ONCE(ct->status);
+ if ((status & flags) == IPS_DYING)
return true;
+
+ if (status & IPS_CONFIRMED)
+ return false;
+
+ /* in some cases skb_clone() can occur after initial conntrack
+ * pickup, but conntrack assumes exclusive skb->_nfct ownership for
+ * unconfirmed entries.
+ *
+ * This happens for br_netfilter and with ip multicast routing.
+ * This can't be solved with serialization here because one clone could
+ * have been queued for local delivery.
+ */
+ use = refcount_read(&ct->ct_general.use);
+ if (likely(use == 1))
+ return false;
+
+ /* Can't decrement further? Exclusive ownership. */
+ if (!refcount_dec_not_one(&ct->ct_general.use))
+ return false;
+
+ skb_set_nfct(entry->skb, 0);
+ /* No nf_ct_put(): we already decremented .use and it cannot
+ * drop down to 0.
+ */
+ return true;
#endif
return false;
}
@@ -839,7 +1024,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
break;
}
- if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
+ if (!skb_is_gso(skb) || ((queue->flags & NFQA_CFG_F_GSO) && !skb_is_gso_sctp(skb)))
return __nfqnl_enqueue_packet(net, queue, entry);
nf_bridge_adjust_skb_data(skb);
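/*
 * Illustrative sketch, not part of the patch above: nf_ct_drop_unconfirmed()
 * treats a conntrack use count of exactly 1 as proof of exclusive skb
 * ownership and otherwise drops its own reference before detaching the
 * entry.  The C11-atomics helper below shows the same "decrement unless it
 * would hit the last reference" test; dec_not_one() is written for this
 * example and is not the kernel refcount API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool dec_not_one(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 1) {
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return true;	/* released one shared reference */
	}
	return false;			/* last reference, caller keeps ownership */
}

int main(void)
{
	atomic_int shared = 3, exclusive = 1;

	printf("shared: %s\n", dec_not_one(&shared) ? "released" : "exclusive");
	printf("exclusive: %s\n", dec_not_one(&exclusive) ? "released" : "exclusive");
	return 0;
}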
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index ca857afbf061..d550910aabec 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -17,6 +17,7 @@
struct nft_bitwise {
u8 sreg;
+ u8 sreg2;
u8 dreg;
enum nft_bitwise_ops op:8;
u8 len;
@@ -25,8 +26,8 @@ struct nft_bitwise {
struct nft_data data;
};
-static void nft_bitwise_eval_bool(u32 *dst, const u32 *src,
- const struct nft_bitwise *priv)
+static void nft_bitwise_eval_mask_xor(u32 *dst, const u32 *src,
+ const struct nft_bitwise *priv)
{
unsigned int i;
@@ -60,28 +61,72 @@ static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src,
}
}
+static void nft_bitwise_eval_and(u32 *dst, const u32 *src, const u32 *src2,
+ const struct nft_bitwise *priv)
+{
+ unsigned int i, n;
+
+ for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++)
+ dst[i] = src[i] & src2[i];
+}
+
+static void nft_bitwise_eval_or(u32 *dst, const u32 *src, const u32 *src2,
+ const struct nft_bitwise *priv)
+{
+ unsigned int i, n;
+
+ for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++)
+ dst[i] = src[i] | src2[i];
+}
+
+static void nft_bitwise_eval_xor(u32 *dst, const u32 *src, const u32 *src2,
+ const struct nft_bitwise *priv)
+{
+ unsigned int i, n;
+
+ for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++)
+ dst[i] = src[i] ^ src2[i];
+}
+
void nft_bitwise_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
- const u32 *src = &regs->data[priv->sreg];
+ const u32 *src = &regs->data[priv->sreg], *src2;
u32 *dst = &regs->data[priv->dreg];
- switch (priv->op) {
- case NFT_BITWISE_BOOL:
- nft_bitwise_eval_bool(dst, src, priv);
- break;
- case NFT_BITWISE_LSHIFT:
+ if (priv->op == NFT_BITWISE_MASK_XOR) {
+ nft_bitwise_eval_mask_xor(dst, src, priv);
+ return;
+ }
+ if (priv->op == NFT_BITWISE_LSHIFT) {
nft_bitwise_eval_lshift(dst, src, priv);
- break;
- case NFT_BITWISE_RSHIFT:
+ return;
+ }
+ if (priv->op == NFT_BITWISE_RSHIFT) {
nft_bitwise_eval_rshift(dst, src, priv);
- break;
+ return;
+ }
+
+ src2 = priv->sreg2 ? &regs->data[priv->sreg2] : priv->data.data;
+
+ if (priv->op == NFT_BITWISE_AND) {
+ nft_bitwise_eval_and(dst, src, src2, priv);
+ return;
+ }
+ if (priv->op == NFT_BITWISE_OR) {
+ nft_bitwise_eval_or(dst, src, src2, priv);
+ return;
+ }
+ if (priv->op == NFT_BITWISE_XOR) {
+ nft_bitwise_eval_xor(dst, src, src2, priv);
+ return;
}
}
static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
[NFTA_BITWISE_SREG] = { .type = NLA_U32 },
+ [NFTA_BITWISE_SREG2] = { .type = NLA_U32 },
[NFTA_BITWISE_DREG] = { .type = NLA_U32 },
[NFTA_BITWISE_LEN] = { .type = NLA_U32 },
[NFTA_BITWISE_MASK] = { .type = NLA_NESTED },
@@ -90,8 +135,8 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
[NFTA_BITWISE_DATA] = { .type = NLA_NESTED },
};
-static int nft_bitwise_init_bool(struct nft_bitwise *priv,
- const struct nlattr *const tb[])
+static int nft_bitwise_init_mask_xor(struct nft_bitwise *priv,
+ const struct nlattr *const tb[])
{
struct nft_data_desc mask = {
.type = NFT_DATA_VALUE,
@@ -105,7 +150,8 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv,
};
int err;
- if (tb[NFTA_BITWISE_DATA])
+ if (tb[NFTA_BITWISE_DATA] ||
+ tb[NFTA_BITWISE_SREG2])
return -EINVAL;
if (!tb[NFTA_BITWISE_MASK] ||
@@ -139,7 +185,8 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv,
int err;
if (tb[NFTA_BITWISE_MASK] ||
- tb[NFTA_BITWISE_XOR])
+ tb[NFTA_BITWISE_XOR] ||
+ tb[NFTA_BITWISE_SREG2])
return -EINVAL;
if (!tb[NFTA_BITWISE_DATA])
@@ -157,6 +204,41 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv,
return 0;
}
+static int nft_bitwise_init_bool(const struct nft_ctx *ctx,
+ struct nft_bitwise *priv,
+ const struct nlattr *const tb[])
+{
+ int err;
+
+ if (tb[NFTA_BITWISE_MASK] ||
+ tb[NFTA_BITWISE_XOR])
+ return -EINVAL;
+
+ if ((!tb[NFTA_BITWISE_DATA] && !tb[NFTA_BITWISE_SREG2]) ||
+ (tb[NFTA_BITWISE_DATA] && tb[NFTA_BITWISE_SREG2]))
+ return -EINVAL;
+
+ if (tb[NFTA_BITWISE_DATA]) {
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data),
+ .len = priv->len,
+ };
+
+ err = nft_data_init(NULL, &priv->data, &desc,
+ tb[NFTA_BITWISE_DATA]);
+ if (err < 0)
+ return err;
+ } else {
+ err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG2],
+ &priv->sreg2, priv->len);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static int nft_bitwise_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -171,7 +253,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG], &priv->sreg,
priv->len);
if (err < 0)
return err;
@@ -185,32 +267,40 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
if (tb[NFTA_BITWISE_OP]) {
priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP]));
switch (priv->op) {
- case NFT_BITWISE_BOOL:
+ case NFT_BITWISE_MASK_XOR:
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
+ case NFT_BITWISE_AND:
+ case NFT_BITWISE_OR:
+ case NFT_BITWISE_XOR:
break;
default:
return -EOPNOTSUPP;
}
} else {
- priv->op = NFT_BITWISE_BOOL;
+ priv->op = NFT_BITWISE_MASK_XOR;
}
switch(priv->op) {
- case NFT_BITWISE_BOOL:
- err = nft_bitwise_init_bool(priv, tb);
+ case NFT_BITWISE_MASK_XOR:
+ err = nft_bitwise_init_mask_xor(priv, tb);
break;
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
err = nft_bitwise_init_shift(priv, tb);
break;
+ case NFT_BITWISE_AND:
+ case NFT_BITWISE_OR:
+ case NFT_BITWISE_XOR:
+ err = nft_bitwise_init_bool(ctx, priv, tb);
+ break;
}
return err;
}
-static int nft_bitwise_dump_bool(struct sk_buff *skb,
- const struct nft_bitwise *priv)
+static int nft_bitwise_dump_mask_xor(struct sk_buff *skb,
+ const struct nft_bitwise *priv)
{
if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
NFT_DATA_VALUE, priv->len) < 0)
@@ -232,6 +322,21 @@ static int nft_bitwise_dump_shift(struct sk_buff *skb,
return 0;
}
+static int nft_bitwise_dump_bool(struct sk_buff *skb,
+ const struct nft_bitwise *priv)
+{
+ if (priv->sreg2) {
+ if (nft_dump_register(skb, NFTA_BITWISE_SREG2, priv->sreg2))
+ return -1;
+ } else {
+ if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data,
+ NFT_DATA_VALUE, sizeof(u32)) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
static int nft_bitwise_dump(struct sk_buff *skb,
const struct nft_expr *expr, bool reset)
{
@@ -248,13 +353,18 @@ static int nft_bitwise_dump(struct sk_buff *skb,
return -1;
switch (priv->op) {
- case NFT_BITWISE_BOOL:
- err = nft_bitwise_dump_bool(skb, priv);
+ case NFT_BITWISE_MASK_XOR:
+ err = nft_bitwise_dump_mask_xor(skb, priv);
break;
case NFT_BITWISE_LSHIFT:
case NFT_BITWISE_RSHIFT:
err = nft_bitwise_dump_shift(skb, priv);
break;
+ case NFT_BITWISE_AND:
+ case NFT_BITWISE_OR:
+ case NFT_BITWISE_XOR:
+ err = nft_bitwise_dump_bool(skb, priv);
+ break;
}
return err;
@@ -269,7 +379,7 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
const struct nft_bitwise *priv = nft_expr_priv(expr);
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
- if (priv->op != NFT_BITWISE_BOOL)
+ if (priv->op != NFT_BITWISE_MASK_XOR)
return -EOPNOTSUPP;
if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
@@ -299,6 +409,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
track->regs[priv->dreg].bitwise &&
track->regs[priv->dreg].bitwise->ops == expr->ops &&
priv->sreg == bitwise->sreg &&
+ priv->sreg2 == bitwise->sreg2 &&
priv->dreg == bitwise->dreg &&
priv->op == bitwise->op &&
priv->len == bitwise->len &&
@@ -365,7 +476,7 @@ static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
int err;
- err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG], &priv->sreg,
sizeof(u32));
if (err < 0)
return err;
@@ -375,7 +486,8 @@ static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- if (tb[NFTA_BITWISE_DATA])
+ if (tb[NFTA_BITWISE_DATA] ||
+ tb[NFTA_BITWISE_SREG2])
return -EINVAL;
if (!tb[NFTA_BITWISE_MASK] ||
@@ -406,7 +518,7 @@ nft_bitwise_fast_dump(struct sk_buff *skb,
return -1;
if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(sizeof(u32))))
return -1;
- if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(NFT_BITWISE_BOOL)))
+ if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(NFT_BITWISE_MASK_XOR)))
return -1;
data.data[0] = priv->mask;
@@ -501,7 +613,7 @@ nft_bitwise_select_ops(const struct nft_ctx *ctx,
return &nft_bitwise_ops;
if (tb[NFTA_BITWISE_OP] &&
- ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])) != NFT_BITWISE_BOOL)
+ ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])) != NFT_BITWISE_MASK_XOR)
return &nft_bitwise_ops;
return &nft_bitwise_fast_ops;
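/*
 * Illustrative sketch, not part of the patch above: the new AND/OR/XOR
 * handlers in nft_bitwise operate on 32-bit register words and round the
 * operand length up to whole words, as in the DIV_ROUND_UP() loops above.
 * bitwise_and() below is a stand-alone version for an invented register
 * array.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void bitwise_and(uint32_t *dst, const uint32_t *a, const uint32_t *b,
			unsigned int len_bytes)
{
	unsigned int i, words = DIV_ROUND_UP(len_bytes, (unsigned int)sizeof(uint32_t));

	for (i = 0; i < words; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	uint32_t a[2] = { 0xffff0000u, 0x0000ffffu };
	uint32_t b[2] = { 0x12345678u, 0x9abcdef0u };
	uint32_t out[2];

	bitwise_and(out, a, b, 6);	/* 6 bytes round up to 2 words */
	printf("%08x %08x\n", out[0], out[1]);
	return 0;
}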
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index f6e791a68101..af9206a3afd1 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -5,7 +5,7 @@
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -139,7 +139,7 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_BYTEORDER_SREG], &priv->sreg,
priv->len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 274b6f7e6bb5..19a553550c76 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -319,38 +319,21 @@ static const struct nft_chain_type nft_chain_filter_netdev = {
};
static void nft_netdev_event(unsigned long event, struct net_device *dev,
- struct nft_ctx *ctx)
+ struct nft_base_chain *basechain)
{
- struct nft_base_chain *basechain = nft_base_chain(ctx->chain);
- struct nft_hook *hook, *found = NULL;
- int n = 0;
-
- if (event != NETDEV_UNREGISTER)
- return;
+ struct nft_hook *hook;
list_for_each_entry(hook, &basechain->hook_list, list) {
- if (hook->ops.dev == dev)
- found = hook;
+ if (hook->ops.dev != dev)
+ continue;
- n++;
- }
- if (!found)
- return;
-
- if (n > 1) {
- nf_unregister_net_hook(ctx->net, &found->ops);
- list_del_rcu(&found->list);
- kfree_rcu(found, rcu);
- return;
- }
+ if (!(basechain->chain.table->flags & NFT_TABLE_F_DORMANT))
+ nf_unregister_net_hook(dev_net(dev), &hook->ops);
- /* UNREGISTER events are also happening on netns exit.
- *
- * Although nf_tables core releases all tables/chains, only this event
- * handler provides guarantee that hook->ops.dev is still accessible,
- * so we cannot skip exiting net namespaces.
- */
- __nft_release_basechain(ctx);
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
+ break;
+ }
}
static int nf_tables_netdev_event(struct notifier_block *this,
@@ -359,26 +342,20 @@ static int nf_tables_netdev_event(struct notifier_block *this,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nft_base_chain *basechain;
struct nftables_pernet *nft_net;
- struct nft_chain *chain, *nr;
+ struct nft_chain *chain;
struct nft_table *table;
- struct nft_ctx ctx = {
- .net = dev_net(dev),
- };
- if (event != NETDEV_UNREGISTER &&
- event != NETDEV_CHANGENAME)
+ if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- nft_net = nft_pernet(ctx.net);
+ nft_net = nft_pernet(dev_net(dev));
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV &&
table->family != NFPROTO_INET)
continue;
- ctx.family = table->family;
- ctx.table = table;
- list_for_each_entry_safe(chain, nr, &table->chains, list) {
+ list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain))
continue;
@@ -387,8 +364,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
basechain->ops.hooknum != NF_INET_INGRESS)
continue;
- ctx.chain = chain;
- nft_netdev_event(event, dev, &ctx);
+ nft_netdev_event(event, dev, basechain);
}
}
mutex_unlock(&nft_net->commit_mutex);
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index cd4652259095..2605f43737bc 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -83,7 +83,7 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (err < 0)
return err;
- err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
@@ -222,7 +222,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
@@ -323,7 +323,7 @@ static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index d3d11dede545..72711d62fddf 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -228,7 +228,7 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
return 0;
}
-static void nft_compat_wait_for_destructors(void)
+static void nft_compat_wait_for_destructors(struct net *net)
{
/* xtables matches or targets can have side effects, e.g.
* creation/destruction of /proc files.
@@ -236,7 +236,7 @@ static void nft_compat_wait_for_destructors(void)
* work queue. If we have pending invocations we thus
* need to wait for those to finish.
*/
- nf_tables_trans_destroy_flush_work();
+ nf_tables_trans_destroy_flush_work(net);
}
static int
@@ -262,7 +262,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
- nft_compat_wait_for_destructors();
+ nft_compat_wait_for_destructors(ctx->net);
ret = xt_check_target(&par, size, proto, inv);
if (ret < 0) {
@@ -350,8 +350,7 @@ nla_put_failure:
}
static int nft_target_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
unsigned int hook_mask = 0;
@@ -516,7 +515,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
- nft_compat_wait_for_destructors();
+ nft_compat_wait_for_destructors(ctx->net);
return xt_check_match(&par, size, proto, inv);
}
@@ -536,7 +535,7 @@ nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_match *m = expr->ops->data;
int ret;
- priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
+ priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL_ACCOUNT);
if (!priv->info)
return -ENOMEM;
@@ -611,8 +610,7 @@ static int nft_match_large_dump(struct sk_buff *skb,
}
static int nft_match_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct xt_match *match = expr->ops->data;
unsigned int hook_mask = 0;
@@ -810,7 +808,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
@@ -900,7 +898,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index de9d1980df69..92b984fa8175 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -210,12 +210,12 @@ static void nft_connlimit_destroy(const struct nft_ctx *ctx,
nft_connlimit_do_destroy(ctx, priv);
}
-static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
{
struct nft_connlimit *priv_dst = nft_expr_priv(dst);
struct nft_connlimit *priv_src = nft_expr_priv(src);
- priv_dst->list = kmalloc(sizeof(*priv_dst->list), GFP_ATOMIC);
+ priv_dst->list = kmalloc(sizeof(*priv_dst->list), gfp);
if (!priv_dst->list)
return -ENOMEM;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index dccc68a5135a..cc7325329496 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/seqlock.h>
+#include <linux/u64_stats_sync.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
@@ -17,6 +17,11 @@
#include <net/netfilter/nf_tables_offload.h>
struct nft_counter {
+ u64_stats_t bytes;
+ u64_stats_t packets;
+};
+
+struct nft_counter_tot {
s64 bytes;
s64 packets;
};
@@ -25,25 +30,24 @@ struct nft_counter_percpu_priv {
struct nft_counter __percpu *counter;
};
-static DEFINE_PER_CPU(seqcount_t, nft_counter_seq);
+static DEFINE_PER_CPU(struct u64_stats_sync, nft_counter_sync);
static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
+ struct u64_stats_sync *nft_sync;
struct nft_counter *this_cpu;
- seqcount_t *myseq;
local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
- myseq = this_cpu_ptr(&nft_counter_seq);
-
- write_seqcount_begin(myseq);
+ nft_sync = this_cpu_ptr(&nft_counter_sync);
- this_cpu->bytes += pkt->skb->len;
- this_cpu->packets++;
+ u64_stats_update_begin(nft_sync);
+ u64_stats_add(&this_cpu->bytes, pkt->skb->len);
+ u64_stats_inc(&this_cpu->packets);
+ u64_stats_update_end(nft_sync);
- write_seqcount_end(myseq);
local_bh_enable();
}
@@ -66,17 +70,16 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
if (cpu_stats == NULL)
return -ENOMEM;
- preempt_disable();
- this_cpu = this_cpu_ptr(cpu_stats);
+ this_cpu = raw_cpu_ptr(cpu_stats);
if (tb[NFTA_COUNTER_PACKETS]) {
- this_cpu->packets =
- be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+ u64_stats_set(&this_cpu->packets,
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])));
}
if (tb[NFTA_COUNTER_BYTES]) {
- this_cpu->bytes =
- be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+ u64_stats_set(&this_cpu->bytes,
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])));
}
- preempt_enable();
+
priv->counter = cpu_stats;
return 0;
}
@@ -104,35 +107,41 @@ static void nft_counter_obj_destroy(const struct nft_ctx *ctx,
}
static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
- struct nft_counter *total)
+ struct nft_counter_tot *total)
{
+ struct u64_stats_sync *nft_sync;
struct nft_counter *this_cpu;
local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
- this_cpu->packets -= total->packets;
- this_cpu->bytes -= total->bytes;
+ nft_sync = this_cpu_ptr(&nft_counter_sync);
+
+ u64_stats_update_begin(nft_sync);
+ u64_stats_add(&this_cpu->packets, -total->packets);
+ u64_stats_add(&this_cpu->bytes, -total->bytes);
+ u64_stats_update_end(nft_sync);
+
local_bh_enable();
}
static void nft_counter_fetch(struct nft_counter_percpu_priv *priv,
- struct nft_counter *total)
+ struct nft_counter_tot *total)
{
struct nft_counter *this_cpu;
- const seqcount_t *myseq;
u64 bytes, packets;
unsigned int seq;
int cpu;
memset(total, 0, sizeof(*total));
for_each_possible_cpu(cpu) {
- myseq = per_cpu_ptr(&nft_counter_seq, cpu);
+ struct u64_stats_sync *nft_sync = per_cpu_ptr(&nft_counter_sync, cpu);
+
this_cpu = per_cpu_ptr(priv->counter, cpu);
do {
- seq = read_seqcount_begin(myseq);
- bytes = this_cpu->bytes;
- packets = this_cpu->packets;
- } while (read_seqcount_retry(myseq, seq));
+ seq = u64_stats_fetch_begin(nft_sync);
+ bytes = u64_stats_read(&this_cpu->bytes);
+ packets = u64_stats_read(&this_cpu->packets);
+ } while (u64_stats_fetch_retry(nft_sync, seq));
total->bytes += bytes;
total->packets += packets;
@@ -143,7 +152,7 @@ static int nft_counter_do_dump(struct sk_buff *skb,
struct nft_counter_percpu_priv *priv,
bool reset)
{
- struct nft_counter total;
+ struct nft_counter_tot total;
nft_counter_fetch(priv, &total);
@@ -226,25 +235,23 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
nft_counter_do_destroy(priv);
}
-static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
struct nft_counter __percpu *cpu_stats;
struct nft_counter *this_cpu;
- struct nft_counter total;
+ struct nft_counter_tot total;
nft_counter_fetch(priv, &total);
- cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_ATOMIC);
+ cpu_stats = alloc_percpu_gfp(struct nft_counter, gfp);
if (cpu_stats == NULL)
return -ENOMEM;
- preempt_disable();
- this_cpu = this_cpu_ptr(cpu_stats);
- this_cpu->packets = total.packets;
- this_cpu->bytes = total.bytes;
- preempt_enable();
+ this_cpu = raw_cpu_ptr(cpu_stats);
+ u64_stats_set(&this_cpu->packets, total.packets);
+ u64_stats_set(&this_cpu->bytes, total.bytes);
priv_clone->counter = cpu_stats;
return 0;
@@ -262,18 +269,18 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
const struct flow_stats *stats)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct u64_stats_sync *nft_sync;
struct nft_counter *this_cpu;
- seqcount_t *myseq;
- preempt_disable();
+ local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
- myseq = this_cpu_ptr(&nft_counter_seq);
+ nft_sync = this_cpu_ptr(&nft_counter_sync);
- write_seqcount_begin(myseq);
- this_cpu->packets += stats->pkts;
- this_cpu->bytes += stats->bytes;
- write_seqcount_end(myseq);
- preempt_enable();
+ u64_stats_update_begin(nft_sync);
+ u64_stats_add(&this_cpu->packets, stats->pkts);
+ u64_stats_add(&this_cpu->bytes, stats->bytes);
+ u64_stats_update_end(nft_sync);
+ local_bh_enable();
}
void nft_counter_init_seqcount(void)
@@ -281,7 +288,7 @@ void nft_counter_init_seqcount(void)
int cpu;
for_each_possible_cpu(cpu)
- seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
+ u64_stats_init(per_cpu_ptr(&nft_counter_sync, cpu));
}
struct nft_expr_type nft_counter_type;
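/*
 * Illustrative sketch, not part of the patch above: u64_stats_sync lets
 * readers retry until they see a consistent pair of 64-bit counters
 * without locking the update path on 32-bit machines.  The sequence
 * counter below is a simplified, single-writer user-space rendering of
 * that idea; struct pkt_stats and its helpers are invented for this
 * example.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pkt_stats {
	atomic_uint seq;	/* odd while an update is in flight */
	uint64_t bytes;
	uint64_t packets;
};

static void stat_add(struct pkt_stats *s, uint64_t len)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->bytes += len;
	s->packets++;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void stat_read(struct pkt_stats *s, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		while ((start = atomic_load_explicit(&s->seq,
						     memory_order_acquire)) & 1)
			;		/* writer in progress, wait */
		*bytes = s->bytes;
		*packets = s->packets;
	} while (atomic_load_explicit(&s->seq, memory_order_acquire) != start);
}

int main(void)
{
	struct pkt_stats s = { 0 };
	uint64_t b, p;

	stat_add(&s, 1500);
	stat_add(&s, 40);
	stat_read(&s, &b, &p);
	printf("%llu bytes, %llu packets\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}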
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 255640013ab8..d526e69a2a2b 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -230,6 +230,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
enum ip_conntrack_info ctinfo;
u16 value = nft_reg_load16(&regs->data[priv->sreg]);
struct nf_conn *ct;
+ int oldcnt;
ct = nf_ct_get(skb, &ctinfo);
if (ct) /* already tracked */
@@ -250,10 +251,11 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
ct = this_cpu_read(nft_ct_pcpu_template);
- if (likely(refcount_read(&ct->ct_general.use) == 1)) {
- refcount_inc(&ct->ct_general.use);
+ __refcount_inc(&ct->ct_general.use, &oldcnt);
+ if (likely(oldcnt == 1)) {
nf_ct_zone_add(ct, &zone);
} else {
+ refcount_dec(&ct->ct_general.use);
/* previous skb got queued to userspace, allocate temporary
* one until percpu template can be reused.
*/
@@ -606,7 +608,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
}
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
+ err = nft_parse_register_load(ctx, tb[NFTA_CT_SREG], &priv->sreg, len);
if (err < 0)
goto err1;
@@ -754,7 +756,7 @@ static bool nft_ct_set_reduce(struct nft_regs_track *track,
return false;
}
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
static const struct nft_expr_ops nft_ct_get_fast_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
@@ -799,7 +801,7 @@ nft_ct_select_ops(const struct nft_ctx *ctx,
return ERR_PTR(-EINVAL);
if (tb[NFTA_CT_DREG]) {
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (k) {
@@ -929,7 +931,7 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
*/
values = nf_ct_timeout_data(timeout);
if (values)
- nf_ct_refresh(ct, pkt->skb, values[0]);
+ nf_ct_refresh(ct, values[0]);
}
static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index e5739a59ebf1..0573f96ce079 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -40,7 +40,7 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_DEV] == NULL)
return -EINVAL;
- return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
+ return nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
sizeof(int));
}
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index c09dba57354c..88922e0e8e83 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -35,7 +35,7 @@ static int nft_dynset_expr_setup(const struct nft_dynset *priv,
for (i = 0; i < priv->num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
- if (nft_expr_clone(expr, priv->expr_array[i]) < 0)
+ if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0)
return -1;
elem_expr->size += priv->expr_array[i]->ops->size;
@@ -56,7 +56,7 @@ static struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
if (!atomic_add_unless(&set->nelems, 1, set->size))
return NULL;
- timeout = priv->timeout ? : set->timeout;
+ timeout = priv->timeout ? : READ_ONCE(set->timeout);
elem_priv = nft_set_elem_init(set, &priv->tmpl,
&regs->data[priv->sreg_key], NULL,
&regs->data[priv->sreg_data],
@@ -94,9 +94,10 @@ void nft_dynset_eval(const struct nft_expr *expr,
if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
expr, regs, &ext)) {
if (priv->op == NFT_DYNSET_OP_UPDATE &&
- nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
- timeout = priv->timeout ? : set->timeout;
- *nft_set_ext_expiration(ext) = get_jiffies_64() + timeout;
+ nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) != 0) {
+ timeout = priv->timeout ? : READ_ONCE(set->timeout);
+ WRITE_ONCE(nft_set_ext_timeout(ext)->expiration, get_jiffies_64() + timeout);
}
nft_set_elem_update_expr(ext, regs, pkt);
@@ -215,7 +216,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
return err;
}
- err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
+ err = nft_parse_register_load(ctx, tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
set->klen);
if (err < 0)
return err;
@@ -226,7 +227,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (set->dtype == NFT_DATA_VERDICT)
return -EOPNOTSUPP;
- err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA],
+ err = nft_parse_register_load(ctx, tb[NFTA_DYNSET_SREG_DATA],
&priv->sreg_data, set->dlen);
if (err < 0)
return err;
@@ -312,12 +313,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (priv->num_exprs)
nft_dynset_ext_add_expr(priv);
- if (set->flags & NFT_SET_TIMEOUT) {
- if (timeout || set->timeout) {
- nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
- nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
- }
- }
+ if (set->flags & NFT_SET_TIMEOUT &&
+ (timeout || READ_ONCE(set->timeout)))
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
priv->timeout = timeout;
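/*
 * Illustrative sketch, not part of the patch above: nft_dynset now samples
 * the set's default timeout with READ_ONCE() because another CPU may
 * update it while packets are evaluated.  The macros below are simplified
 * user-space stand-ins for the kernel helpers, and pick_timeout() is
 * invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static uint64_t set_timeout;		/* shared default, may change at runtime */

static uint64_t pick_timeout(uint64_t elem_timeout)
{
	/* a per-element timeout wins; otherwise sample the default once */
	return elem_timeout ? elem_timeout : READ_ONCE(set_timeout);
}

int main(void)
{
	WRITE_ONCE(set_timeout, 30);
	printf("element: %llu\n", (unsigned long long)pick_timeout(10));
	printf("default: %llu\n", (unsigned long long)pick_timeout(0));
	return 0;
}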
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 6eb571d0c3fd..c74012c99125 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -5,7 +5,7 @@
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
@@ -85,7 +85,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct iphdr *iph, _iph;
- unsigned int start;
bool found = false;
__be32 info;
int optlen;
@@ -93,7 +92,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (!iph)
return -EBADMSG;
- start = sizeof(struct iphdr);
optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
if (optlen <= 0)
@@ -103,7 +101,7 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
/* Copy the options since __ip_options_compile() modifies
* the options.
*/
- if (skb_copy_bits(skb, start, opt->__data, optlen))
+ if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen))
return -EBADMSG;
opt->optlen = optlen;
@@ -118,18 +116,18 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
found = target == IPOPT_SSRR ? opt->is_strictroute :
!opt->is_strictroute;
if (found)
- *offset = opt->srr + start;
+ *offset = opt->srr;
break;
case IPOPT_RR:
if (!opt->rr)
break;
- *offset = opt->rr + start;
+ *offset = opt->rr;
found = true;
break;
case IPOPT_RA:
if (!opt->router_alert)
break;
- *offset = opt->router_alert + start;
+ *offset = opt->router_alert;
found = true;
break;
default:
@@ -588,7 +586,7 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
priv->flags = flags;
priv->op = op;
- return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
+ return nft_parse_register_load(ctx, tb[NFTA_EXTHDR_SREG], &priv->sreg,
priv->len);
}
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 37cfe6dd712d..96e02a83c045 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -26,8 +26,7 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = {
};
EXPORT_SYMBOL(nft_fib_policy);
-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data)
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
const struct nft_fib *priv = nft_expr_priv(expr);
unsigned int hooks;
@@ -35,11 +34,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
- hooks = (1 << NF_INET_PRE_ROUTING);
- if (priv->flags & NFTA_FIB_F_IIF) {
- hooks |= (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_FORWARD);
- }
+ hooks = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index ab9576098701..221d50223018 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -8,7 +8,8 @@
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
-#include <net/ip.h> /* for ipv4 options. */
+#include <net/ip.h>
+#include <net/inet_dscp.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -235,7 +236,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
- fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
+ fl.u.ip4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(pkt->skb)));
fl.u.ip4.flowi4_mark = pkt->skb->mark;
fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
break;
@@ -288,6 +289,15 @@ static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
return false;
}
+static void flow_offload_ct_tcp(struct nf_conn *ct)
+{
+ /* conntrack will not see all packets, disable tcp window validation. */
+ spin_lock_bh(&ct->lock);
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ spin_unlock_bh(&ct->lock);
+}
+
static void nft_flow_offload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -355,11 +365,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
goto err_flow_alloc;
flow_offload_route_init(flow, &route);
-
- if (tcph) {
- ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
- ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
- }
+ if (tcph)
+ flow_offload_ct_tcp(ct);
__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
ret = flow_offload_add(flowtable, flow);
@@ -380,8 +387,7 @@ out:
}
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
unsigned int hook_mask = (1 << NF_INET_FORWARD);
@@ -409,8 +415,8 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
if (!tb[NFTA_FLOW_TABLE_NAME])
return -EINVAL;
- flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
- genmask);
+ flowtable = nft_flowtable_lookup(ctx->net, ctx->table,
+ tb[NFTA_FLOW_TABLE_NAME], genmask);
if (IS_ERR(flowtable))
return PTR_ERR(flowtable);
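/*
 * Illustrative sketch, not part of the patch above: flow_offload_ct_tcp()
 * wraps the two per-direction BE_LIBERAL flag updates in the conntrack
 * spinlock so they cannot race with the TCP tracker.  The example below
 * shows the same shape with a pthread mutex; struct conn and
 * conn_set_liberal() are invented for this example.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_BE_LIBERAL 0x1u

struct conn {
	pthread_mutex_t lock;
	unsigned int dir_flags[2];	/* original and reply direction */
};

static void conn_set_liberal(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->dir_flags[0] |= FLAG_BE_LIBERAL;
	c->dir_flags[1] |= FLAG_BE_LIBERAL;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct conn c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	conn_set_liberal(&c);
	printf("%x %x\n", c.dir_flags[0], c.dir_flags[1]);
	return 0;
}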
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 358e742afad7..152a9fb4d23a 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -52,7 +52,7 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
if (tb[NFTA_FWD_SREG_DEV] == NULL)
return -EINVAL;
- return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
+ return nft_parse_register_load(ctx, tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
sizeof(int));
}
@@ -178,12 +178,12 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
+ err = nft_parse_register_load(ctx, tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
sizeof(int));
if (err < 0)
return err;
- return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
+ return nft_parse_register_load(ctx, tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
addr_len);
}
@@ -204,8 +204,7 @@ nla_put_failure:
}
static int nft_fwd_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) |
(1 << NF_NETDEV_EGRESS));
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 92d47e469204..5d034bbb6913 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -51,7 +51,8 @@ static void nft_symhash_eval(const struct nft_expr *expr,
struct sk_buff *skb = pkt->skb;
u32 h;
- h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
+ h = reciprocal_scale(__skb_get_hash_symmetric_net(nft_net(pkt), skb),
+ priv->modulus);
regs->data[priv->dreg] = h + priv->offset;
}
@@ -91,7 +92,7 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len);
+ err = nft_parse_register_load(ctx, tb[NFTA_HASH_SREG], &priv->sreg, len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 6475c7abc1fe..02ee5fb69871 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -221,7 +221,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
list_del(&rule->list);
nf_tables_rule_destroy(&chain_ctx, rule);
}
- nf_tables_chain_destroy(&chain_ctx);
+ nf_tables_chain_destroy(chain);
break;
default:
break;
@@ -244,8 +244,7 @@ nla_put_failure:
}
static int nft_immediate_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **d)
+ const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
struct nft_ctx *pctx = (struct nft_ctx *)ctx;
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
index 928312d01eb1..817ab978d24a 100644
--- a/net/netfilter/nft_inner.c
+++ b/net/netfilter/nft_inner.c
@@ -210,35 +210,66 @@ static int nft_inner_parse(const struct nft_inner *priv,
struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *tun_ctx)
{
- struct nft_inner_tun_ctx ctx = {};
u32 off = pkt->inneroff;
if (priv->flags & NFT_INNER_HDRSIZE &&
- nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
+ nft_inner_parse_tunhdr(priv, pkt, tun_ctx, &off) < 0)
return -1;
if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
- if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
+ if (nft_inner_parse_l2l3(priv, pkt, tun_ctx, off) < 0)
return -1;
} else if (priv->flags & NFT_INNER_TH) {
- ctx.inner_thoff = off;
- ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ tun_ctx->inner_thoff = off;
+ tun_ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
}
- *tun_ctx = ctx;
tun_ctx->type = priv->type;
+ tun_ctx->cookie = (unsigned long)pkt->skb;
pkt->flags |= NFT_PKTINFO_INNER_FULL;
return 0;
}
+static bool nft_inner_restore_tun_ctx(const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx)
+{
+ struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+ local_bh_disable();
+ this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ if (this_cpu_tun_ctx->cookie != (unsigned long)pkt->skb) {
+ local_bh_enable();
+ return false;
+ }
+ *tun_ctx = *this_cpu_tun_ctx;
+ local_bh_enable();
+
+ return true;
+}
+
+static void nft_inner_save_tun_ctx(const struct nft_pktinfo *pkt,
+ const struct nft_inner_tun_ctx *tun_ctx)
+{
+ struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+ local_bh_disable();
+ this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ if (this_cpu_tun_ctx->cookie != tun_ctx->cookie)
+ *this_cpu_tun_ctx = *tun_ctx;
+ local_bh_enable();
+}
+
static bool nft_inner_parse_needed(const struct nft_inner *priv,
const struct nft_pktinfo *pkt,
- const struct nft_inner_tun_ctx *tun_ctx)
+ struct nft_inner_tun_ctx *tun_ctx)
{
if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
return true;
+ if (!nft_inner_restore_tun_ctx(pkt, tun_ctx))
+ return true;
+
if (priv->type != tun_ctx->type)
return true;
@@ -248,27 +279,29 @@ static bool nft_inner_parse_needed(const struct nft_inner *priv,
static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
const struct nft_inner *priv = nft_expr_priv(expr);
+ struct nft_inner_tun_ctx tun_ctx = {};
if (nft_payload_inner_offset(pkt) < 0)
goto err;
- if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
- nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
+ if (nft_inner_parse_needed(priv, pkt, &tun_ctx) &&
+ nft_inner_parse(priv, (struct nft_pktinfo *)pkt, &tun_ctx) < 0)
goto err;
switch (priv->expr_type) {
case NFT_INNER_EXPR_PAYLOAD:
- nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
break;
case NFT_INNER_EXPR_META:
- nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
break;
default:
WARN_ON_ONCE(1);
goto err;
}
+ nft_inner_save_tun_ctx(pkt, &tun_ctx);
+
return;
err:
regs->verdict.code = NFT_BREAK;
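
The nft_inner.c hunks above stop evaluating directly on the shared per-CPU parse context: the result is built in an on-stack struct and then saved to (or restored from) a per-CPU slot that is only trusted when its cookie still matches the current skb. A minimal sketch of that cookie-validated per-CPU cache, using made-up names (struct tun_cache, tun_cache_slot) rather than the kernel's nft_inner structures:

#include <linux/percpu.h>
#include <linux/bottom_half.h>

/* Illustrative only: cache one parse result per CPU and trust it only
 * while the stored cookie still identifies the same object. */
struct tun_cache {
        unsigned long cookie;   /* e.g. (unsigned long)skb, as in the hunk above */
        int data;               /* cached parse result */
};

static DEFINE_PER_CPU(struct tun_cache, tun_cache_slot);

static bool tun_cache_get(unsigned long cookie, int *out)
{
        struct tun_cache *c;
        bool hit;

        local_bh_disable();             /* keep this CPU's slot stable */
        c = this_cpu_ptr(&tun_cache_slot);
        hit = (c->cookie == cookie);
        if (hit)
                *out = c->data;
        local_bh_enable();

        return hit;
}

static void tun_cache_put(unsigned long cookie, int data)
{
        struct tun_cache *c;

        local_bh_disable();
        c = this_cpu_ptr(&tun_cache_slot);
        c->cookie = cookie;
        c->data = data;
        local_bh_enable();
}

As in the hunks above, disabling bottom halves around the access is what keeps this_cpu_ptr() safe against the softirq packet path touching the same slot.
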
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
index 8e6d7eaf9dc8..de1b6066bfa8 100644
--- a/net/netfilter/nft_last.c
+++ b/net/netfilter/nft_last.c
@@ -102,12 +102,12 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
kfree(priv->last);
}
-static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
{
struct nft_last_priv *priv_dst = nft_expr_priv(dst);
struct nft_last_priv *priv_src = nft_expr_priv(src);
- priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
+ priv_dst->last = kzalloc(sizeof(*priv_dst->last), gfp);
if (!priv_dst->last)
return -ENOMEM;
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index cefa25e0dbb0..21d26b79b460 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -150,7 +150,7 @@ static void nft_limit_destroy(const struct nft_ctx *ctx,
}
static int nft_limit_clone(struct nft_limit_priv *priv_dst,
- const struct nft_limit_priv *priv_src)
+ const struct nft_limit_priv *priv_src, gfp_t gfp)
{
priv_dst->tokens_max = priv_src->tokens_max;
priv_dst->rate = priv_src->rate;
@@ -158,7 +158,7 @@ static int nft_limit_clone(struct nft_limit_priv *priv_dst,
priv_dst->burst = priv_src->burst;
priv_dst->invert = priv_src->invert;
- priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), GFP_ATOMIC);
+ priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), gfp);
if (!priv_dst->limit)
return -ENOMEM;
@@ -223,14 +223,15 @@ static void nft_limit_pkts_destroy(const struct nft_ctx *ctx,
nft_limit_destroy(ctx, &priv->limit);
}
-static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src,
+ gfp_t gfp)
{
struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
priv_dst->cost = priv_src->cost;
- return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
+ return nft_limit_clone(&priv_dst->limit, &priv_src->limit, gfp);
}
static struct nft_expr_type nft_limit_type;
@@ -281,12 +282,13 @@ static void nft_limit_bytes_destroy(const struct nft_ctx *ctx,
nft_limit_destroy(ctx, priv);
}
-static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src,
+ gfp_t gfp)
{
struct nft_limit_priv *priv_dst = nft_expr_priv(dst);
struct nft_limit_priv *priv_src = nft_expr_priv(src);
- return nft_limit_clone(priv_dst, priv_src);
+ return nft_limit_clone(priv_dst, priv_src, gfp);
}
static const struct nft_expr_ops nft_limit_bytes_ops = {
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5defe6e4fd98..e35588137995 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -163,7 +163,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
- priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+ priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL_ACCOUNT);
if (priv->prefix == NULL)
return -ENOMEM;
nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 870e5b113d13..63ef832b8aa7 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -24,7 +24,7 @@ struct nft_lookup {
struct nft_set_binding binding;
};
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
@@ -113,7 +113,7 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
if (IS_ERR(set))
return PTR_ERR(set);
- err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_LOOKUP_SREG], &priv->sreg,
set->klen);
if (err < 0)
return err;
@@ -132,7 +132,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return -EINVAL;
err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
- &priv->dreg, NULL, set->dtype,
+ &priv->dreg, NULL,
+ nft_set_datatype(set),
set->dlen);
if (err < 0)
return err;
@@ -205,8 +206,7 @@ nla_put_failure:
}
static int nft_lookup_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **d)
+ const struct nft_expr *expr)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
struct nft_set_iter iter;
@@ -216,6 +216,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
return 0;
iter.genmask = nft_genmask_next(ctx->net);
+ iter.type = NFT_ITER_UPDATE;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 8a14aaca93bb..868bd4d73555 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -27,8 +27,7 @@ static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
};
static int nft_masq_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
int err;
@@ -52,13 +51,13 @@ static int nft_masq_init(const struct nft_ctx *ctx,
priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
- err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
+ err = nft_parse_register_load(ctx, tb[NFTA_MASQ_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
- err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
+ err = nft_parse_register_load(ctx, tb[NFTA_MASQ_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index ba0d3683a45d..05cd1e6e6a2f 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -581,8 +581,7 @@ static int nft_meta_get_validate_xfrm(const struct nft_ctx *ctx)
}
static int nft_meta_get_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
const struct nft_meta *priv = nft_expr_priv(expr);
@@ -600,8 +599,7 @@ static int nft_meta_get_validate(const struct nft_ctx *ctx,
}
int nft_meta_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
@@ -657,7 +655,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
}
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
+ err = nft_parse_register_load(ctx, tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
@@ -839,6 +837,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
+ if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
+ return -EINVAL;
+
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
@@ -951,7 +952,7 @@ static int nft_secmark_obj_init(const struct nft_ctx *ctx,
if (tb[NFTA_SECMARK_CTX] == NULL)
return -EINVAL;
- priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
+ priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL_ACCOUNT);
if (!priv->ctx)
return -ENOMEM;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 808f5802c270..6e21f72c5b57 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -137,8 +137,7 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
};
static int nft_nat_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct nft_nat *priv = nft_expr_priv(expr);
int err;
@@ -214,13 +213,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
priv->family = family;
if (tb[NFTA_NAT_REG_ADDR_MIN]) {
- err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN],
+ err = nft_parse_register_load(ctx, tb[NFTA_NAT_REG_ADDR_MIN],
&priv->sreg_addr_min, alen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_ADDR_MAX]) {
- err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX],
+ err = nft_parse_register_load(ctx, tb[NFTA_NAT_REG_ADDR_MAX],
&priv->sreg_addr_max,
alen);
if (err < 0)
@@ -234,13 +233,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
- err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
+ err = nft_parse_register_load(ctx, tb[NFTA_NAT_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_PROTO_MAX]) {
- err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX],
+ err = nft_parse_register_load(ctx, tb[NFTA_NAT_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 7d29db7c2ac0..bd058babfc82 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -66,7 +66,7 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
+ priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL_ACCOUNT);
if (!priv->counter)
return -ENOMEM;
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 509011b1ef59..09da7a3f9f96 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -143,7 +143,7 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
if (!(set->flags & NFT_SET_OBJECT))
return -EINVAL;
- err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
set->klen);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 7f61506e5b44..1c0b493ef0a9 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -63,7 +63,6 @@ static int nft_osf_init(const struct nft_ctx *ctx,
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 flags;
- int err;
u8 ttl;
if (!tb[NFTA_OSF_DREG])
@@ -83,13 +82,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->flags = flags;
}
- err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
- NULL, NFT_DATA_VALUE,
- NFT_OSF_MAXGENRELEN);
- if (err < 0)
- return err;
-
- return 0;
+ return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE,
+ NFT_OSF_MAXGENRELEN);
}
static int nft_osf_dump(struct sk_buff *skb,
@@ -113,8 +108,7 @@ nla_put_failure:
}
static int nft_osf_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
unsigned int hooks;
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 0a689c8e0295..7dfc5343dae4 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
- u8 vlan_hlen = 0;
-
- if ((skb->protocol == htons(ETH_P_8021AD) ||
- skb->protocol == htons(ETH_P_8021Q)) &&
- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
- vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ if (offset < VLAN_ETH_HLEN) {
u8 ethlen = len;
- if (vlan_hlen &&
- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
- return false;
- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+ if (offset + len > VLAN_ETH_HLEN)
+ ethlen -= offset + len - VLAN_ETH_HLEN;
- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+ memcpy(dst_u8, vlanh + offset, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN + vlan_hlen;
+ offset = ETH_HLEN;
} else {
- offset -= VLAN_HLEN + vlan_hlen;
+ offset -= VLAN_HLEN;
}
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
return pkt->inneroff;
}
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
{
- unsigned int len = priv->offset + priv->len;
+ unsigned int boundary = offset + len;
/* data past ether src/dst requested, copy needed */
- if (len > offsetof(struct ethhdr, h_proto))
+ if (boundary > offsetof(struct ethhdr, h_proto))
return true;
return false;
@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
if (skb_vlan_tag_present(skb) &&
- nft_payload_need_vlan_copy(priv)) {
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
@@ -659,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
struct nft_payload *priv = nft_expr_priv(expr);
u32 base;
+ if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
+ !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
+ return -EINVAL;
+
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_TUN_HEADER:
@@ -810,21 +805,79 @@ struct nft_payload_set {
u8 csum_flags;
};
+/* This is not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+ int *vlan_hlen)
+{
+ struct nft_payload_vlan_hdr *vlanh;
+ __be16 vlan_proto;
+ u16 vlan_tci;
+
+ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+ *vlan_hlen = VLAN_HLEN;
+ return true;
+ }
+
+ switch (offset) {
+ case offsetof(struct vlan_ethhdr, h_vlan_proto):
+ if (len == 2) {
+ vlan_proto = nft_reg_load_be16(src);
+ skb->vlan_proto = vlan_proto;
+ } else if (len == 4) {
+ vlanh = (struct nft_payload_vlan_hdr *)src;
+ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+ ntohs(vlanh->h_vlan_TCI));
+ } else {
+ return false;
+ }
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (len != 2)
+ return false;
+
+ vlan_tci = ntohs(nft_reg_load_be16(src));
+ skb->vlan_tci = vlan_tci;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
const u32 *src = &regs->data[priv->sreg];
- int offset, csum_offset;
+ int offset, csum_offset, vlan_hlen = 0;
+ struct sk_buff *skb = pkt->skb;
__wsum fsum, tsum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
- offset = skb_mac_header(skb) - skb->data;
+
+ if (skb_vlan_tag_present(skb) &&
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_set_vlan(src, skb,
+ priv->offset, priv->len,
+ &vlan_hlen))
+ goto err;
+
+ if (!vlan_hlen)
+ return;
+ }
+
+ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
@@ -851,6 +904,9 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
priv->base != NFT_PAYLOAD_INNER_HEADER) ||
skb->ip_summed != CHECKSUM_PARTIAL)) {
+ if (offset + priv->len > skb->len)
+ goto err;
+
fsum = skb_checksum(skb, offset, priv->len, 0);
tsum = csum_partial(src, priv->len, 0);
@@ -928,7 +984,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
}
priv->csum_type = csum_type;
- return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
+ return nft_parse_register_load(ctx, tb[NFTA_PAYLOAD_SREG], &priv->sreg,
priv->len);
}
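
The payload-set hunk above lets writes reach a hardware-accelerated VLAN tag by redirecting them into skb->vlan_proto / skb->vlan_tci, or by shifting the mac-header offset when the write lands past the tag. The offsets it switches on are simply the fixed 802.1Q framing; a small standalone illustration (plain userspace C, not kernel code, with a locally redefined header struct) of where those boundaries fall:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Same layout as the kernel's struct vlan_ethhdr, redefined here so the
 * example builds on its own. */
struct vlan_ethhdr_example {
        uint8_t  h_dest[6];
        uint8_t  h_source[6];
        uint16_t h_vlan_proto;                  /* offset 12 */
        uint16_t h_vlan_TCI;                    /* offset 14 */
        uint16_t h_vlan_encapsulated_proto;     /* offset 16 */
};

/* Mirrors the check named nft_payload_need_vlan_adjust() above:
 * anything reaching past the MAC addresses touches the tag area. */
static bool need_vlan_adjust(size_t offset, size_t len)
{
        return offset + len > offsetof(struct vlan_ethhdr_example, h_vlan_proto);
}

int main(void)
{
        printf("vlan_proto at %zu, TCI at %zu, inner proto at %zu\n",
               offsetof(struct vlan_ethhdr_example, h_vlan_proto),
               offsetof(struct vlan_ethhdr_example, h_vlan_TCI),
               offsetof(struct vlan_ethhdr_example, h_vlan_encapsulated_proto));
        printf("write of 6 bytes at offset 0 needs adjust: %d\n",
               need_vlan_adjust(0, 6));
        printf("write of 2 bytes at offset 14 needs adjust: %d\n",
               need_vlan_adjust(14, 2));
        return 0;
}
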
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index b2b8127c8d43..344fe311878f 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -69,8 +69,7 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
}
static int nft_queue_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
@@ -136,7 +135,7 @@ static int nft_queue_sreg_init(const struct nft_ctx *ctx,
struct nft_queue *priv = nft_expr_priv(expr);
int err;
- err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM],
+ err = nft_parse_register_load(ctx, tb[NFTA_QUEUE_SREG_QNUM],
&priv->sreg_qnum, sizeof(u32));
if (err < 0)
return err;
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 3ba12a7471b0..9b2d7463d3d3 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -233,7 +233,7 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
return nft_quota_do_destroy(ctx, priv);
}
-static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
+static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
{
struct nft_quota *priv_dst = nft_expr_priv(dst);
struct nft_quota *priv_src = nft_expr_priv(src);
@@ -241,7 +241,7 @@ static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
priv_dst->quota = priv_src->quota;
priv_dst->flags = priv_src->flags;
- priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
+ priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), gfp);
if (!priv_dst->consumed)
return -ENOMEM;
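
The nft_last, nft_limit and nft_quota hunks above all follow the same pattern: the expression's ->clone() callback now receives a gfp_t from the core, so transaction-time copies can allocate with GFP_KERNEL instead of always forcing GFP_ATOMIC. A hedged sketch of the resulting callback shape, using an illustrative private struct rather than the real ones:

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative: propagate the caller-supplied gfp flags into the
 * allocation instead of hard-coding GFP_ATOMIC. */
struct example_priv {
        u64 limit;              /* plain configuration, copied by value */
        u64 *consumed;          /* runtime state, lives in its own allocation */
};

static int example_clone(struct example_priv *dst,
                         const struct example_priv *src, gfp_t gfp)
{
        dst->limit = src->limit;

        dst->consumed = kmalloc(sizeof(*dst->consumed), gfp);
        if (!dst->consumed)
                return -ENOMEM;

        *dst->consumed = *src->consumed;
        return 0;
}
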
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 51ae64cd268f..ea382f7bbd78 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -83,7 +83,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
goto err2;
}
- err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg,
+ err = nft_parse_register_load(ctx, tb[NFTA_RANGE_SREG], &priv->sreg,
desc_from.len);
if (err < 0)
goto err2;
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index a58bd8d291ff..95eedad85c83 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -27,8 +27,7 @@ static const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
};
static int nft_redir_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
int err;
@@ -51,13 +50,13 @@ static int nft_redir_init(const struct nft_ctx *ctx,
plen = sizeof_field(struct nf_nat_range, min_proto.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
- err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
+ err = nft_parse_register_load(ctx, tb[NFTA_REDIR_REG_PROTO_MIN],
&priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
- err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
+ err = nft_parse_register_load(ctx, tb[NFTA_REDIR_REG_PROTO_MAX],
&priv->sreg_proto_max,
plen);
if (err < 0)
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index ed2e668474d6..196a92c7ea09 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -24,8 +24,7 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
EXPORT_SYMBOL_GPL(nft_reject_policy);
int nft_reject_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 973fa31a9dd6..49020e67304a 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -61,8 +61,7 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
}
static int nft_reject_inet_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
diff --git a/net/netfilter/nft_reject_netdev.c b/net/netfilter/nft_reject_netdev.c
index 7865cd8b11bb..2558ce1505d9 100644
--- a/net/netfilter/nft_reject_netdev.c
+++ b/net/netfilter/nft_reject_netdev.c
@@ -145,8 +145,7 @@ out:
}
static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
}
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 24d977138572..dc50b9a5bd68 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -73,14 +73,14 @@ void nft_rt_get_eval(const struct nft_expr *expr,
if (nft_pf(pkt) != NFPROTO_IPV4)
goto err;
- *dest = (__force u32)rt_nexthop((const struct rtable *)dst,
+ *dest = (__force u32)rt_nexthop(dst_rtable(dst),
ip_hdr(skb)->daddr);
break;
case NFT_RT_NEXTHOP6:
if (nft_pf(pkt) != NFPROTO_IPV6)
goto err;
- memcpy(dest, rt6_nexthop((struct rt6_info *)dst,
+ memcpy(dest, rt6_nexthop(dst_rt6_info(dst),
&ipv6_hdr(skb)->daddr),
sizeof(struct in6_addr));
break;
@@ -160,8 +160,7 @@ nla_put_failure:
return -1;
}
-static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data)
+static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
const struct nft_rt *priv = nft_expr_priv(expr);
unsigned int hooks;
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 32df7a16835d..12390d2e994f 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -88,13 +88,15 @@ bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
}
static struct nft_bitmap_elem *
-nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+nft_bitmap_elem_find(const struct net *net,
+ const struct nft_set *set, struct nft_bitmap_elem *this,
u8 genmask)
{
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
- list_for_each_entry_rcu(be, &priv->list, head) {
+ list_for_each_entry_rcu(be, &priv->list, head,
+ lockdep_is_held(&nft_pernet(net)->commit_mutex)) {
if (memcmp(nft_set_ext_key(&be->ext),
nft_set_ext_key(&this->ext), set->klen) ||
!nft_set_elem_active(&be->ext, genmask))
@@ -132,7 +134,7 @@ static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- be = nft_bitmap_elem_find(set, new, genmask);
+ be = nft_bitmap_elem_find(net, set, new, genmask);
if (be) {
*elem_priv = &be->priv;
return -EEXIST;
@@ -172,7 +174,7 @@ static void nft_bitmap_activate(const struct net *net,
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off);
- nft_set_elem_change_active(net, set, &be->ext);
+ nft_clear(net, &be->ext);
}
static void nft_bitmap_flush(const struct net *net,
@@ -201,7 +203,7 @@ nft_bitmap_deactivate(const struct net *net, const struct nft_set *set,
nft_bitmap_location(set, elem->key.val.data, &idx, &off);
- be = nft_bitmap_elem_find(set, this, genmask);
+ be = nft_bitmap_elem_find(net, set, this, genmask);
if (!be)
return NULL;
@@ -222,8 +224,6 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
list_for_each_entry_rcu(be, &priv->list, head) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&be->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &be->priv);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 6968a3b34236..8bfac4185ac7 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -24,11 +24,13 @@
struct nft_rhash {
struct rhashtable ht;
struct delayed_work gc_work;
+ u32 wq_gc_seq;
};
struct nft_rhash_elem {
struct nft_elem_priv priv;
struct rhash_head node;
+ u32 wq_gc_seq;
struct nft_set_ext ext;
};
@@ -199,7 +201,7 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
{
struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &he->ext);
+ nft_clear(net, &he->ext);
}
static void nft_rhash_flush(const struct net *net,
@@ -286,8 +288,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&he->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &he->priv);
if (iter->err < 0)
@@ -340,6 +340,10 @@ static void nft_rhash_gc(struct work_struct *work)
if (!gc)
goto done;
+ /* Elements never collected use a zero gc worker sequence number. */
+ if (unlikely(++priv->wq_gc_seq == 0))
+ priv->wq_gc_seq++;
+
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
@@ -357,6 +361,14 @@ static void nft_rhash_gc(struct work_struct *work)
goto try_later;
}
+ /* rhashtable walk is unstable, already seen in this gc run?
+ * Then, skip this element. In case of (unlikely) sequence
+ * wraparound and stale element wq_gc_seq, next gc run will
+ * just find this expired element.
+ */
+ if (he->wq_gc_seq == priv->wq_gc_seq)
+ continue;
+
if (nft_set_elem_is_dead(&he->ext))
goto dead_elem;
@@ -373,6 +385,8 @@ dead_elem:
if (!gc)
goto try_later;
+ /* annotate gc sequence for this attempt. */
+ he->wq_gc_seq = priv->wq_gc_seq;
nft_trans_gc_elem_add(gc, he);
}
@@ -599,7 +613,7 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
{
struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &he->ext);
+ nft_clear(net, &he->ext);
}
static void nft_hash_flush(const struct net *net,
@@ -649,11 +663,10 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
int i;
for (i = 0; i < priv->buckets; i++) {
- hlist_for_each_entry_rcu(he, &priv->table[i], node) {
+ hlist_for_each_entry_rcu(he, &priv->table[i], node,
+ lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&he->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &he->priv);
if (iter->err < 0)
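
The nft_set_hash.c hunks above tag each element with the sequence number of the GC run that queued it, because an rhashtable walk is unstable and may return the same element more than once within one run. Zero is reserved to mean "never collected", so the run counter skips it on wraparound. A minimal sketch of that bookkeeping in isolation (illustrative names, not the kernel's wq_gc_seq fields):

#include <linux/types.h>
#include <linux/compiler.h>

static u32 run_seq;

/* Start a new run; 0 is reserved for "never visited", skip it on wrap. */
static void run_begin(void)
{
        if (unlikely(++run_seq == 0))
                run_seq++;
}

/* Returns true if @elem_seq shows the element was already handled in the
 * current run; otherwise marks it as handled and returns false. */
static bool seen_this_run(u32 *elem_seq)
{
        if (*elem_seq == run_seq)
                return true;

        *elem_seq = run_seq;
        return false;
}

A stale tag from a wrapped-around counter is harmless: at worst the element is skipped once and picked up by the next GC run, which is the trade-off the comment in the hunk above describes.
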
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index aa1d9e93a9a0..7be342b495f5 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -359,11 +359,13 @@
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only)
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
- int k, ret = -1;
+ unsigned int k;
+ int ret = -1;
for (k = 0; k < len; k++) {
bitset = map[k];
@@ -412,9 +414,9 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo_scratch *scratch;
unsigned long *res_map, *fill_map;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
bool map_index;
int i;
@@ -432,7 +434,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
res_map = scratch->map + (map_index ? m->bsize_max : 0);
fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
- memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+ pipapo_resmap_init(m, res_map);
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
@@ -502,9 +504,11 @@ out:
* pipapo_get() - Get matching element reference given key data
* @net: Network namespace
* @set: nftables API set representation
+ * @m: storage containing active/existing elements
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
* @tstamp: timestamp to check for expired elements
+ * @gfp: the type of memory to allocate (see kmalloc).
*
* This is essentially the same as the lookup function, except that it matches
* key data against the uncommitted copy and doesn't use preallocated maps for
@@ -514,29 +518,31 @@ out:
*/
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
const struct nft_set *set,
+ const struct nft_pipapo_match *m,
const u8 *data, u8 genmask,
- u64 tstamp)
+ u64 tstamp, gfp_t gfp)
{
struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
- struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *m = priv->clone;
unsigned long *res_map, *fill_map = NULL;
- struct nft_pipapo_field *f;
+ const struct nft_pipapo_field *f;
int i;
- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (m->bsize_max == 0)
+ return ret;
+
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
if (!res_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
if (!fill_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
- memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+ pipapo_resmap_init(m, res_map);
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
@@ -605,10 +611,13 @@ static struct nft_elem_priv *
nft_pipapo_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m = rcu_dereference(priv->match);
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, (const u8 *)elem->key.val.data,
- nft_genmask_cur(net), get_jiffies_64());
+ e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
if (IS_ERR(e))
return ERR_CAST(e);
@@ -616,6 +625,65 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
}
/**
+ * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
+ * @f: Field containing mapping table
+ * @old_rules: Amount of existing mapped rules
+ * @rules: Amount of new rules to map
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
+{
+ union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
+ const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
+ unsigned int rules_alloc = rules;
+
+ might_sleep();
+
+ if (unlikely(rules == 0))
+ goto out_free;
+
+ /* growing and enough space left, no action needed */
+ if (rules > old_rules && f->rules_alloc > rules)
+ return 0;
+
+ /* downsize and extra slack has not grown too large */
+ if (rules < old_rules) {
+ unsigned int remove = f->rules_alloc - rules;
+
+ if (remove < (2u * extra))
+ return 0;
+ }
+
+ /* If set needs more than one page of memory for rules then
+ * allocate another extra page to avoid frequent reallocation.
+ */
+ if (rules > extra &&
+ check_add_overflow(rules, extra, &rules_alloc))
+ return -EOVERFLOW;
+
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
+ if (!new_mt)
+ return -ENOMEM;
+
+ if (old_mt)
+ memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));
+
+ if (rules > old_rules) {
+ memset(new_mt + old_rules, 0,
+ (rules - old_rules) * sizeof(*new_mt));
+ }
+out_free:
+ f->rules_alloc = rules_alloc;
+ f->mt = new_mt;
+
+ kvfree(old_mt);
+
+ return 0;
+}
+
+/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
* @old_rules: Previous amount of rules in field
@@ -627,12 +695,15 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
*
* Return: 0 on success, -ENOMEM on allocation failure.
*/
-static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
+static int pipapo_resize(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
{
long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
- union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
- size_t new_bucket_size, copy;
- int group, bucket;
+ unsigned int new_bucket_size, copy;
+ int group, bucket, err;
+
+ if (rules >= NFT_PIPAPO_RULE0_MAX)
+ return -ENOSPC;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
@@ -672,27 +743,18 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
}
mt:
- new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
- if (!new_mt) {
+ err = pipapo_realloc_mt(f, old_rules, rules);
+ if (err) {
kvfree(new_lt);
- return -ENOMEM;
- }
-
- memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
- if (rules > old_rules) {
- memset(new_mt + old_rules, 0,
- (rules - old_rules) * sizeof(*new_mt));
+ return err;
}
if (new_lt) {
f->bsize = new_bucket_size;
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
kvfree(old_lt);
}
- f->mt = new_mt;
- kvfree(old_mt);
-
return 0;
}
@@ -843,8 +905,8 @@ static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
*/
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
+ unsigned int groups, bb;
unsigned long *new_lt;
- int groups, bb;
size_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
@@ -874,7 +936,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
return;
}
- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
+ new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
if (!new_lt)
return;
@@ -894,7 +956,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
f->groups = groups;
f->bb = bb;
kvfree(f->lt);
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
}
/**
@@ -911,7 +973,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
- int rule = f->rules, group, ret, bit_offset = 0;
+ unsigned int rule = f->rules, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules, f->rules + 1);
if (ret)
@@ -1150,7 +1212,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
scratch = kzalloc_node(struct_size(scratch, map,
bsize_max * 2) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL, cpu_to_node(i));
+ GFP_KERNEL_ACCOUNT, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
* allocations: this means that some scratch maps have
@@ -1186,6 +1248,40 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
return 0;
}
+static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
+{
+#ifdef CONFIG_PROVE_LOCKING
+ const struct net *net = read_pnet(&set->net);
+
+ return lockdep_is_held(&nft_pernet(net)->commit_mutex);
+#else
+ return true;
+#endif
+}
+
+static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old);
+
+/**
+ * pipapo_maybe_clone() - Build clone for pending data changes, if not existing
+ * @set: nftables API set representation
+ *
+ * Return: newly created or existing clone, if any. NULL on allocation failure
+ */
+static struct nft_pipapo_match *pipapo_maybe_clone(const struct nft_set *set)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+
+ if (priv->clone)
+ return priv->clone;
+
+ m = rcu_dereference_protected(priv->match,
+ nft_pipapo_transaction_mutex_held(set));
+ priv->clone = pipapo_clone(m);
+
+ return priv->clone;
+}
+
/**
* nft_pipapo_insert() - Validate and insert ranged elements
* @net: Network namespace
@@ -1202,8 +1298,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
const u8 *start = (const u8 *)elem->key.val.data, *end;
- struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *m = priv->clone;
+ struct nft_pipapo_match *m = pipapo_maybe_clone(set);
u8 genmask = nft_genmask_next(net);
struct nft_pipapo_elem *e, *dup;
u64 tstamp = nft_net_tstamp(net);
@@ -1211,12 +1306,15 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
const u8 *start_p, *end_p;
int i, bsize_max, err = 0;
+ if (!m)
+ return -ENOMEM;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
end = (const u8 *)nft_set_ext_key_end(ext)->data;
else
end = start;
- dup = pipapo_get(net, set, start, genmask, tstamp);
+ dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
if (!IS_ERR(dup)) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
@@ -1238,7 +1336,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
if (PTR_ERR(dup) == -ENOENT) {
/* Look for partially overlapping entries */
- dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp);
+ dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
}
if (PTR_ERR(dup) != -ENOENT) {
@@ -1251,8 +1350,14 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
/* Validate */
start_p = start;
end_p = end;
+
+ /* some helpers return -1, or 0 >= for valid rule pos,
+ * so we cannot support more than INT_MAX rules at this time.
+ */
+ BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);
+
nft_pipapo_for_each_field(f, i, m) {
- if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+ if (f->rules >= NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;
if (memcmp(start_p, end_p,
@@ -1264,8 +1369,6 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
}
/* Insert */
- priv->dirty = true;
-
bsize_max = m->bsize_max;
nft_pipapo_for_each_field(f, i, m) {
@@ -1316,7 +1419,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
* pipapo_clone() - Clone matching data to create new working copy
* @old: Existing matching data
*
- * Return: copy of matching data passed as 'old', error pointer on failure
+ * Return: copy of matching data passed as 'old' or NULL.
*/
static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
{
@@ -1324,9 +1427,9 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
struct nft_pipapo_match *new;
int i;
- new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL);
+ new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT);
if (!new)
- return ERR_PTR(-ENOMEM);
+ return NULL;
new->field_count = old->field_count;
new->bsize_max = old->bsize_max;
@@ -1354,22 +1457,30 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
src->bsize * sizeof(*dst->lt) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!new_lt)
goto out_lt;
- NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
+ dst->lt = new_lt;
memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
src->groups * NFT_PIPAPO_BUCKETS(src->bb));
- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
- if (!dst->mt)
- goto out_mt;
+ if (src->rules > 0) {
+ dst->mt = kvmalloc_array(src->rules_alloc,
+ sizeof(*src->mt),
+ GFP_KERNEL_ACCOUNT);
+ if (!dst->mt)
+ goto out_mt;
+
+ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ } else {
+ dst->mt = NULL;
+ dst->rules_alloc = 0;
+ }
- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
src++;
dst++;
}
@@ -1391,7 +1502,7 @@ out_scratch:
free_percpu(new->scratch);
kfree(new);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
/**
@@ -1423,10 +1534,10 @@ out_scratch:
*
* Return: Number of rules that originated from the same entry as @first.
*/
-static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
+static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
{
struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
- int r;
+ unsigned int r;
for (r = first; r < f->rules; r++) {
if (r != first && e != f->mt[r].e)
@@ -1479,8 +1590,9 @@ static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
* 0 1 2
* element pointers: 0x42 0x42 0x44
*/
-static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
- int start, int n, int to_offset, bool is_last)
+static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
+ unsigned int start, unsigned int n,
+ unsigned int to_offset, bool is_last)
{
int i;
@@ -1586,8 +1698,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
+ unsigned int rules_f0, first_rule = 0;
u64 tstamp = nft_net_tstamp(net);
- int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
struct nft_trans_gc *gc;
@@ -1597,8 +1709,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
- struct nft_pipapo_field *f;
- int i, start, rules_fx;
+ const struct nft_pipapo_field *f;
+ unsigned int i, start, rules_fx;
start = first_rule;
rules_fx = rules_f0;
@@ -1622,8 +1734,6 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
* NFT_SET_ELEM_DEAD_BIT.
*/
if (__nft_set_elem_expired(&e->ext, tstamp)) {
- priv->dirty = true;
-
gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
if (!gc)
return;
@@ -1701,57 +1811,30 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
static void nft_pipapo_commit(struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *new_clone, *old;
+ struct nft_pipapo_match *old;
- if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
- pipapo_gc(set, priv->clone);
-
- if (!priv->dirty)
+ if (!priv->clone)
return;
- new_clone = pipapo_clone(priv->clone);
- if (IS_ERR(new_clone))
- return;
+ if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
+ pipapo_gc(set, priv->clone);
- priv->dirty = false;
+ old = rcu_replace_pointer(priv->match, priv->clone,
+ nft_pipapo_transaction_mutex_held(set));
+ priv->clone = NULL;
- old = rcu_access_pointer(priv->match);
- rcu_assign_pointer(priv->match, priv->clone);
if (old)
call_rcu(&old->rcu, pipapo_reclaim_match);
-
- priv->clone = new_clone;
-}
-
-static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
-{
-#ifdef CONFIG_PROVE_LOCKING
- const struct net *net = read_pnet(&set->net);
-
- return lockdep_is_held(&nft_pernet(net)->commit_mutex);
-#else
- return true;
-#endif
}
static void nft_pipapo_abort(const struct nft_set *set)
{
struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *new_clone, *m;
- if (!priv->dirty)
+ if (!priv->clone)
return;
-
- m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
-
- new_clone = pipapo_clone(m);
- if (IS_ERR(new_clone))
- return;
-
- priv->dirty = false;
-
pipapo_free_match(priv->clone);
- priv->clone = new_clone;
+ priv->clone = NULL;
}
/**
@@ -1771,55 +1854,42 @@ static void nft_pipapo_activate(const struct net *net,
{
struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &e->ext);
+ nft_clear(net, &e->ext);
}
/**
- * pipapo_deactivate() - Check that element is in set, mark as inactive
+ * nft_pipapo_deactivate() - Search for element and make it inactive
* @net: Network namespace
* @set: nftables API set representation
- * @data: Input key data
- * @ext: nftables API extension pointer, used to check for end element
- *
- * This is a convenience function that can be called from both
- * nft_pipapo_deactivate() and nft_pipapo_flush(), as they are in fact the same
- * operation.
+ * @elem: nftables API element representation containing key data
*
* Return: deactivated element if found, NULL otherwise.
*/
-static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
- const u8 *data, const struct nft_set_ext *ext)
+static struct nft_elem_priv *
+nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
+ struct nft_pipapo_match *m = pipapo_maybe_clone(set);
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, data, nft_genmask_next(net), nft_net_tstamp(net));
+ /* removal must occur on priv->clone, if we are low on memory
+ * we have no choice and must fail the removal request.
+ */
+ if (!m)
+ return NULL;
+
+ e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
+ nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
if (IS_ERR(e))
return NULL;
nft_set_elem_change_active(net, set, &e->ext);
- return e;
-}
-
-/**
- * nft_pipapo_deactivate() - Call pipapo_deactivate() to make element inactive
- * @net: Network namespace
- * @set: nftables API set representation
- * @elem: nftables API element representation containing key data
- *
- * Return: deactivated element if found, NULL otherwise.
- */
-static struct nft_elem_priv *
-nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
-
- return pipapo_deactivate(net, set, (const u8 *)elem->key.val.data, ext);
+ return &e->priv;
}
/**
- * nft_pipapo_flush() - Call pipapo_deactivate() to make element inactive
+ * nft_pipapo_flush() - make element inactive
* @net: Network namespace
* @set: nftables API set representation
* @elem_priv: nftables API element representation containing key data
@@ -1976,7 +2046,7 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
- int rules_f0, first_rule = 0;
+ unsigned int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
const u8 *data;
@@ -2000,6 +2070,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
rules_fx = rules_f0;
nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+
if (!pipapo_match_field(f, start, rules_fx,
match_start, match_end))
break;
@@ -2012,45 +2084,36 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
- }
- if (i == m->field_count) {
- priv->dirty = true;
- pipapo_drop(m, rulemap);
- return;
+ if (last && f->mt[rulemap[i].to].e == e) {
+ pipapo_drop(m, rulemap);
+ return;
+ }
}
first_rule += rules_f0;
}
+
+ WARN_ON_ONCE(1); /* elem_priv not found */
}
/**
- * nft_pipapo_walk() - Walk over elements
+ * nft_pipapo_do_walk() - Walk over elements in m
* @ctx: nftables API context
* @set: nftables API set representation
+ * @m: matching data pointing to key mapping array
* @iter: Iterator
*
* As elements are referenced in the mapping array for the last field, directly
* scan that array: there's no need to follow rule mappings from the first
- * field.
+ * field. @m is protected either by RCU read lock or by transaction mutex.
*/
-static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_iter *iter)
+static void nft_pipapo_do_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_pipapo_match *m,
+ struct nft_set_iter *iter)
{
- struct nft_pipapo *priv = nft_set_priv(set);
- struct net *net = read_pnet(&set->net);
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
- int i, r;
-
- rcu_read_lock();
- if (iter->genmask == nft_genmask_cur(net))
- m = rcu_dereference(priv->match);
- else
- m = priv->clone;
-
- if (unlikely(!m))
- goto out;
+ const struct nft_pipapo_field *f;
+ unsigned int i, r;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
@@ -2066,19 +2129,51 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
e = f->mt[r].e;
- if (!nft_set_elem_active(&e->ext, iter->genmask))
- goto cont;
-
iter->err = iter->fn(ctx, set, iter, &e->priv);
if (iter->err < 0)
- goto out;
+ return;
cont:
iter->count++;
}
+}
-out:
- rcu_read_unlock();
+/**
+ * nft_pipapo_walk() - Walk over elements
+ * @ctx: nftables API context
+ * @set: nftables API set representation
+ * @iter: Iterator
+ *
+ * Test if destructive action is needed or not, clone active backend if needed
+ * and call the real function to work on the data.
+ */
+static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_pipapo_match *m;
+
+ switch (iter->type) {
+ case NFT_ITER_UPDATE:
+ m = pipapo_maybe_clone(set);
+ if (!m) {
+ iter->err = -ENOMEM;
+ return;
+ }
+
+ nft_pipapo_do_walk(ctx, set, m, iter);
+ break;
+ case NFT_ITER_READ:
+ rcu_read_lock();
+ m = rcu_dereference(priv->match);
+ nft_pipapo_do_walk(ctx, set, m, iter);
+ rcu_read_unlock();
+ break;
+ default:
+ iter->err = -EINVAL;
+ WARN_ON_ONCE(1);
+ break;
+ }
}
/**
@@ -2145,6 +2240,9 @@ static int nft_pipapo_init(const struct nft_set *set,
field_count = desc->field_count ? : 1;
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);
+
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
@@ -2166,7 +2264,11 @@ static int nft_pipapo_init(const struct nft_set *set,
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
- int len = desc->field_len[i] ? : set->klen;
+ unsigned int len = desc->field_len[i] ? : set->klen;
+
+ /* f->groups is u8 */
+ BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
+ BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);
f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
@@ -2175,25 +2277,15 @@ static int nft_pipapo_init(const struct nft_set *set,
f->bsize = 0;
f->rules = 0;
- NFT_PIPAPO_LT_ASSIGN(f, NULL);
+ f->rules_alloc = 0;
+ f->lt = NULL;
f->mt = NULL;
}
- /* Create an initial clone of matching data for next insertion */
- priv->clone = pipapo_clone(m);
- if (IS_ERR(priv->clone)) {
- err = PTR_ERR(priv->clone);
- goto out_free;
- }
-
- priv->dirty = false;
-
rcu_assign_pointer(priv->match, m);
return 0;
-out_free:
- free_percpu(m->scratch);
out_scratch:
kfree(m);
@@ -2211,7 +2303,7 @@ static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
- int i, r;
+ unsigned int i, r;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
@@ -2238,36 +2330,18 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m;
- int cpu;
m = rcu_dereference_protected(priv->match, true);
- if (m) {
- rcu_barrier();
-
- nft_set_pipapo_match_destroy(ctx, set, m);
-
- for_each_possible_cpu(cpu)
- pipapo_free_scratch(m, cpu);
- free_percpu(m->scratch);
- pipapo_free_fields(m);
- kfree(m);
- priv->match = NULL;
- }
if (priv->clone) {
- m = priv->clone;
-
- if (priv->dirty)
- nft_set_pipapo_match_destroy(ctx, set, m);
-
- for_each_possible_cpu(cpu)
- pipapo_free_scratch(priv->clone, cpu);
- free_percpu(priv->clone->scratch);
-
- pipapo_free_fields(priv->clone);
- kfree(priv->clone);
+ nft_set_pipapo_match_destroy(ctx, set, priv->clone);
+ pipapo_free_match(priv->clone);
priv->clone = NULL;
+ } else {
+ nft_set_pipapo_match_destroy(ctx, set, m);
}
+
+ pipapo_free_match(m);
}
/**
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 3842c7341a9f..4a2ff85ce1c4 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -70,15 +70,9 @@
#define NFT_PIPAPO_ALIGN_HEADROOM \
(NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
#define NFT_PIPAPO_LT_ALIGN(lt) (PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
-#define NFT_PIPAPO_LT_ASSIGN(field, x) \
- do { \
- (field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x); \
- (field)->lt = (x); \
- } while (0)
#else
#define NFT_PIPAPO_ALIGN_HEADROOM 0
#define NFT_PIPAPO_LT_ALIGN(lt) (lt)
-#define NFT_PIPAPO_LT_ASSIGN(field, x) ((field)->lt = (x))
#endif /* NFT_PIPAPO_ALIGN */
#define nft_pipapo_for_each_field(field, index, match) \
@@ -110,22 +104,20 @@ union nft_pipapo_map_bucket {
/**
* struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups: Amount of bit groups
* @rules: Number of inserted rules
* @bsize: Size of each bucket in lookup table, in longs
+ * @rules_alloc: Number of allocated rules, always >= rules
+ * @groups: Amount of bit groups
* @bb: Number of bits grouped together in lookup table buckets
* @lt: Lookup table: 'groups' rows of buckets
- * @lt_aligned: Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
* @mt: Mapping table: one bucket per rule
*/
struct nft_pipapo_field {
- int groups;
- unsigned long rules;
- size_t bsize;
- int bb;
-#ifdef NFT_PIPAPO_ALIGN
- unsigned long *lt_aligned;
-#endif
+ unsigned int rules;
+ unsigned int bsize;
+ unsigned int rules_alloc;
+ u8 groups;
+ u8 bb;
unsigned long *lt;
union nft_pipapo_map_bucket *mt;
};
@@ -145,15 +137,15 @@ struct nft_pipapo_scratch {
/**
* struct nft_pipapo_match - Data used for lookup and matching
* @field_count: Amount of fields in set
- * @scratch: Preallocated per-CPU maps for partial matching results
* @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @scratch: Preallocated per-CPU maps for partial matching results
* @rcu: Matching data is swapped on commits
* @f: Fields, with lookup and mapping tables
*/
struct nft_pipapo_match {
- int field_count;
+ u8 field_count;
+ unsigned int bsize_max;
struct nft_pipapo_scratch * __percpu *scratch;
- size_t bsize_max;
struct rcu_head rcu;
struct nft_pipapo_field f[] __counted_by(field_count);
};
@@ -163,14 +155,12 @@ struct nft_pipapo_match {
* @match: Currently in-use matching data
* @clone: Copy where pending insertions and deletions are kept
* @width: Total bytes to be matched for one packet, including padding
- * @dirty: Working copy has pending insertions or deletions
* @last_gc: Timestamp of last garbage collection run, jiffies
*/
struct nft_pipapo {
struct nft_pipapo_match __rcu *match;
struct nft_pipapo_match *clone;
int width;
- bool dirty;
unsigned long last_gc;
};
@@ -186,8 +176,9 @@ struct nft_pipapo_elem {
struct nft_set_ext ext;
};
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only);
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only);
/**
* pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -195,7 +186,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
@@ -223,7 +214,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
@@ -287,4 +278,25 @@ static u64 pipapo_estimate_size(const struct nft_set_desc *desc)
return size;
}
+/**
+ * pipapo_resmap_init() - Initialise result map before first use
+ * @m: Matching data, including mapping table
+ * @res_map: Result map
+ *
+ * Initialize all bits covered by the first field to one, so that after
+ * the first step, only the matching bits of the first bit group remain.
+ *
+ * If other fields have a large bitmap, set remainder of res_map to 0.
+ */
+static inline void pipapo_resmap_init(const struct nft_pipapo_match *m, unsigned long *res_map)
+{
+ const struct nft_pipapo_field *f = m->f;
+ int i;
+
+ for (i = 0; i < f->bsize; i++)
+ res_map[i] = ULONG_MAX;
+
+ for (i = f->bsize; i < m->bsize_max; i++)
+ res_map[i] = 0ul;
+}
#endif /* _NFT_SET_PIPAPO_H */
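
The pipapo rework above replaces the always-present working copy plus "dirty" flag with a clone that is built on demand (pipapo_maybe_clone()) and published at commit time via rcu_replace_pointer(); a NULL priv->clone now simply means "nothing pending". A hedged sketch of that publish/abort pattern with a stand-in struct (not the kernel's nft_pipapo code), assuming all writers hold an external mutex as the nftables commit mutex does above:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct cfg {
        struct rcu_head rcu;
        int value;
};

struct holder {
        struct cfg __rcu *live; /* what the packet path dereferences */
        struct cfg *pending;    /* working copy, built lazily */
};

/* Build the working copy on first use; NULL means allocation failed. */
static struct cfg *holder_get_pending(struct holder *h)
{
        if (h->pending)
                return h->pending;

        h->pending = kmemdup(rcu_dereference_protected(h->live, true),
                             sizeof(*h->pending), GFP_KERNEL);
        return h->pending;
}

static void holder_commit(struct holder *h)
{
        struct cfg *old;

        if (!h->pending)        /* no changes queued, nothing to publish */
                return;

        old = rcu_replace_pointer(h->live, h->pending, true);
        h->pending = NULL;
        if (old)
                kfree_rcu(old, rcu);
}

static void holder_abort(struct holder *h)
{
        kfree(h->pending);      /* kfree(NULL) is a no-op */
        h->pending = NULL;
}

nft_pipapo_commit() and nft_pipapo_abort() in the hunks above have the same shape, with pipapo_free_match() and call_rcu() in place of kfree()/kfree_rcu() and with garbage collection run on the clone before it is published.
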
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index a3a8ddca9918..b8d3c3213efe 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -274,8 +275,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -350,8 +352,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -445,8 +448,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -534,8 +538,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -669,8 +674,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -726,8 +732,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -790,8 +797,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -865,8 +873,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -950,8 +959,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1026,6 +1036,7 @@ nothing:
/**
* nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes
+ * @mdata: Matching data, including mapping table
* @map: Previous match result, used as initial bitmap
* @fill: Destination bitmap to be filled with current match result
* @f: Field, containing lookup and mapping tables
@@ -1041,15 +1052,17 @@ nothing:
* Return: -1 on no match, rule index of match if @last, otherwise first long
* word index to be checked next (i.e. first filled word).
*/
-static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+static int nft_pipapo_avx2_lookup_slow(const struct nft_pipapo_match *mdata,
+ unsigned long *map, unsigned long *fill,
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
unsigned long bsize = f->bsize;
int i, ret = -1, b;
if (first)
- memset(map, 0xff, bsize * sizeof(*map));
+ pipapo_resmap_init(mdata, map);
for (i = offset; i < bsize; i++) {
if (f->bb == 8)
@@ -1119,15 +1132,21 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_scratch *scratch;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
unsigned long *res, *fill;
bool map_index;
int i, ret = 0;
- if (unlikely(!irq_fpu_usable()))
- return nft_pipapo_lookup(net, set, key, ext);
+ local_bh_disable();
+
+ if (unlikely(!irq_fpu_usable())) {
+ bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
+
+ local_bh_enable();
+ return fallback_res;
+ }
m = rcu_dereference(priv->match);
@@ -1142,6 +1161,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
scratch = *raw_cpu_ptr(m->scratch);
if (unlikely(!scratch)) {
kernel_fpu_end();
+ local_bh_enable();
return false;
}
@@ -1175,7 +1195,7 @@ next_match:
} else if (f->groups == 16) {
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
} else {
- ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
+ ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
ret, rp,
first, last);
}
@@ -1191,7 +1211,7 @@ next_match:
} else if (f->groups == 32) {
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
} else {
- ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
+ ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
ret, rp,
first, last);
}
@@ -1222,6 +1242,7 @@ out:
if (i % 2)
scratch->map_index = !map_index;
kernel_fpu_end();
+ local_bh_enable();
return ret >= 0;
}
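Editorial note: the AVX2 lookup now brackets the whole FPU section with local_bh_disable()/local_bh_enable(), and every exit (FPU-unusable fallback, missing scratch map, normal completion) re-enables bottom halves. A hedged userspace sketch of the "every exit path undoes what the entry did" shape; section_enter()/section_exit() and the other helpers are stubs standing in for the kernel primitives.

#include <stdbool.h>
#include <stdio.h>

/* Stubs for local_bh_disable()/enable() and kernel_fpu_begin()/end(). */
static void section_enter(const char *what) { printf("enter %s\n", what); }
static void section_exit(const char *what)  { printf("exit  %s\n", what); }

static bool fpu_usable(void)       { return true; }
static bool slow_path_lookup(void) { return false; }
static bool have_scratch(void)     { return true; }
static int  do_vector_lookup(void) { return 1; }

static bool lookup(void)
{
	int match;

	section_enter("bh");			/* local_bh_disable() */

	if (!fpu_usable()) {
		bool ret = slow_path_lookup();	/* scalar fallback */

		section_exit("bh");
		return ret;
	}

	section_enter("fpu");			/* kernel_fpu_begin() */

	if (!have_scratch()) {
		section_exit("fpu");
		section_exit("bh");
		return false;
	}

	match = do_vector_lookup();

	section_exit("fpu");			/* kernel_fpu_end() */
	section_exit("bh");			/* local_bh_enable() */
	return match >= 0;
}

int main(void)
{
	return lookup() ? 0 : 1;
}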
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 9944fe479e53..2e8ef16ff191 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -532,7 +532,7 @@ static void nft_rbtree_activate(const struct net *net,
{
struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &rbe->ext);
+ nft_clear(net, &rbe->ext);
}
static void nft_rbtree_flush(const struct net *net,
@@ -600,8 +600,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&rbe->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &rbe->priv);
if (iter->err < 0) {
@@ -752,6 +750,46 @@ static void nft_rbtree_gc_init(const struct nft_set *set)
priv->last_gc = jiffies;
}
+/* rbtree stores ranges as singleton elements, each range is composed of two
+ * elements ...
+ */
+static u32 nft_rbtree_ksize(u32 size)
+{
+ return size * 2;
+}
+
+/* ... hide this detail to userspace. */
+static u32 nft_rbtree_usize(u32 size)
+{
+ if (!size)
+ return 0;
+
+ return size / 2;
+}
+
+static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+ struct nft_rbtree_elem *rbe;
+ struct rb_node *node;
+ const void *key;
+
+ node = rb_last(&priv->root);
+ if (!node)
+ return 0;
+
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ if (!nft_rbtree_interval_end(rbe))
+ return 0;
+
+ key = nft_set_ext_key(&rbe->ext);
+ if (memchr(key, 1, set->klen))
+ return 0;
+
+ /* this is the all-zero no-match element. */
+ return 1;
+}
+
const struct nft_set_type nft_set_rbtree_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
.ops = {
@@ -770,5 +808,8 @@ const struct nft_set_type nft_set_rbtree_type = {
.lookup = nft_rbtree_lookup,
.walk = nft_rbtree_walk,
.get = nft_rbtree_get,
+ .ksize = nft_rbtree_ksize,
+ .usize = nft_rbtree_usize,
+ .adjust_maxsize = nft_rbtree_adjust_maxsize,
},
};
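Editorial note: the rbtree backend stores each user-visible interval as two tree nodes (start and end), so the new ksize()/usize() callbacks translate element counts in both directions. A minimal sketch of the conversion, assuming the same two-nodes-per-range representation:

#include <stdint.h>
#include <assert.h>

/* User asks for N ranges; the backend needs 2*N nodes. */
static uint32_t ranges_to_nodes(uint32_t nranges)
{
	return nranges * 2;
}

/* Report the node budget back to userspace as whole ranges. */
static uint32_t nodes_to_ranges(uint32_t nnodes)
{
	if (!nnodes)
		return 0;
	return nnodes / 2;
}

int main(void)
{
	assert(ranges_to_nodes(10) == 20);
	assert(nodes_to_ranges(20) == 10);
	assert(nodes_to_ranges(0) == 0);
	return 0;
}

The companion adjust_maxsize() callback shown above appears to grant one extra slot only when the tree's last node is the all-zero interval-end sentinel, so that element does not eat into the user-visible size.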
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index f30163e2ca62..35d0409b0095 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -9,7 +9,8 @@
struct nft_socket {
enum nft_socket_keys key:8;
- u8 level;
+ u8 level; /* cgroupv2 level to extract */
+ u8 level_user; /* cgroupv2 level provided by userspace */
u8 len;
union {
u8 dreg;
@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
memcpy(dest, &cgid, sizeof(u64));
return true;
}
+
+/* process context only, uses current->nsproxy. */
+static noinline int nft_socket_cgroup_subtree_level(void)
+{
+ struct cgroup *cgrp = cgroup_get_from_path("/");
+ int level;
+
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
+
+ level = cgrp->level;
+
+ cgroup_put(cgrp);
+
+ if (level > 255)
+ return -ERANGE;
+
+ if (WARN_ON_ONCE(level < 0))
+ return -EINVAL;
+
+ return level;
+}
#endif
static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
@@ -110,13 +133,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
*dest = READ_ONCE(sk->sk_mark);
} else {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
break;
case NFT_SOCKET_WILDCARD:
if (!sk_fullsock(sk)) {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
nft_socket_wildcard(pkt, regs, sk, dest);
break;
@@ -124,7 +147,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
case NFT_SOCKET_CGROUPV2:
if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
break;
#endif
@@ -133,6 +156,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
}
+out_put_sk:
if (sk != skb->sk)
sock_gen_put(sk);
}
@@ -173,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx,
case NFT_SOCKET_MARK:
len = sizeof(u32);
break;
-#ifdef CONFIG_CGROUPS
+#ifdef CONFIG_SOCK_CGROUP_DATA
case NFT_SOCKET_CGROUPV2: {
unsigned int level;
+ int err;
if (!tb[NFTA_SOCKET_LEVEL])
return -EINVAL;
@@ -184,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx,
if (level > 255)
return -EOPNOTSUPP;
+ err = nft_socket_cgroup_subtree_level();
+ if (err < 0)
+ return err;
+
+ priv->level_user = level;
+
+ level += err;
+ /* Implies a giant cgroup tree */
+ if (WARN_ON_ONCE(level > 255))
+ return -EOPNOTSUPP;
+
priv->level = level;
len = sizeof(u64);
break;
@@ -208,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb,
if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
return -1;
if (priv->key == NFT_SOCKET_CGROUPV2 &&
- nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
+ nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user)))
return -1;
return 0;
}
@@ -239,8 +275,7 @@ static bool nft_socket_reduce(struct nft_regs_track *track,
}
static int nft_socket_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
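Editorial note: nft_socket now rebases the user-supplied cgroupv2 level on the level of the namespace's cgroup root, keeps the original value in level_user for dumps, and rejects combinations that do not fit in a u8. A small sketch of that arithmetic; subtree_root_level() is an illustrative stand-in for the cgroup_get_from_path("/") lookup.

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 255

/* Illustrative stand-in for querying the depth of the cgroup root
 * as seen from the current namespace.
 */
static int subtree_root_level(void)
{
	return 2;	/* e.g. running inside a two-level-deep container */
}

/* Returns 0 and fills the user-visible and absolute levels, or -1. */
static int rebase_level(unsigned int user_level, uint8_t *out_user,
			uint8_t *out_abs)
{
	int root = subtree_root_level();
	unsigned int abs;

	if (root < 0)
		return root;
	if (user_level > MAX_LEVEL)
		return -1;		/* -EOPNOTSUPP in the kernel */

	abs = user_level + (unsigned int)root;
	if (abs > MAX_LEVEL)
		return -1;		/* combined depth too large */

	*out_user = (uint8_t)user_level;	/* reported back on dump */
	*out_abs = (uint8_t)abs;		/* used at evaluation time */
	return 0;
}

int main(void)
{
	uint8_t user, abs;

	if (rebase_level(3, &user, &abs) == 0)
		printf("user level %u -> absolute level %u\n", user, abs);
	return 0;
}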
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 1d737f89dfc1..5d3e51825985 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -248,8 +248,7 @@ static void nft_synproxy_eval(const struct nft_expr *expr,
}
static int nft_synproxy_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index 71412adb73d4..50481280abd2 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -254,14 +254,14 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
}
if (tb[NFTA_TPROXY_REG_ADDR]) {
- err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR],
+ err = nft_parse_register_load(ctx, tb[NFTA_TPROXY_REG_ADDR],
&priv->sreg_addr, alen);
if (err < 0)
return err;
}
if (tb[NFTA_TPROXY_REG_PORT]) {
- err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT],
+ err = nft_parse_register_load(ctx, tb[NFTA_TPROXY_REG_PORT],
&priv->sreg_port, sizeof(u16));
if (err < 0)
return err;
@@ -313,8 +313,7 @@ static int nft_tproxy_dump(struct sk_buff *skb,
}
static int nft_tproxy_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index f735d79d8be5..681301b46aa4 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -174,8 +174,8 @@ struct nft_tunnel_opts {
struct erspan_metadata erspan;
u8 data[IP_TUNNEL_OPTS_MAX];
} u;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
u32 len;
- __be16 flags;
};
struct nft_tunnel_obj {
@@ -271,7 +271,8 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
opts->len = sizeof(struct vxlan_metadata);
- opts->flags = TUNNEL_VXLAN_OPT;
+ ip_tunnel_flags_zero(opts->flags);
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags);
return 0;
}
@@ -325,7 +326,8 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
opts->u.erspan.version = version;
opts->len = sizeof(struct erspan_metadata);
- opts->flags = TUNNEL_ERSPAN_OPT;
+ ip_tunnel_flags_zero(opts->flags);
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags);
return 0;
}
@@ -366,7 +368,8 @@ static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
opt->length = data_len / 4;
opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
- opts->flags = TUNNEL_GENEVE_OPT;
+ ip_tunnel_flags_zero(opts->flags);
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags);
return 0;
}
@@ -385,8 +388,8 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
struct nft_tunnel_opts *opts)
{
struct nlattr *nla;
- __be16 type = 0;
int err, rem;
+ u32 type = 0;
err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
nft_tunnel_opts_policy, NULL);
@@ -401,7 +404,7 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
err = nft_tunnel_obj_vxlan_init(nla, opts);
if (err)
return err;
- type = TUNNEL_VXLAN_OPT;
+ type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
if (type)
@@ -409,15 +412,15 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
err = nft_tunnel_obj_erspan_init(nla, opts);
if (err)
return err;
- type = TUNNEL_ERSPAN_OPT;
+ type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
case NFTA_TUNNEL_KEY_OPTS_GENEVE:
- if (type && type != TUNNEL_GENEVE_OPT)
+ if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
return -EINVAL;
err = nft_tunnel_obj_geneve_init(nla, opts);
if (err)
return err;
- type = TUNNEL_GENEVE_OPT;
+ type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
default:
return -EOPNOTSUPP;
@@ -454,7 +457,9 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
memset(&info, 0, sizeof(info));
info.mode = IP_TUNNEL_INFO_TX;
info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
- info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+ __set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags);
+ __set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
+ __set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags);
if (tb[NFTA_TUNNEL_KEY_IP]) {
err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
@@ -483,18 +488,16 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
- info.key.tun_flags &= ~TUNNEL_CSUM;
+ __clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
- info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+ __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
+ info.key.tun_flags);
if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
- info.key.tun_flags |= TUNNEL_SEQ;
+ __set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags);
}
if (tb[NFTA_TUNNEL_KEY_TOS])
info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
- if (tb[NFTA_TUNNEL_KEY_TTL])
- info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
- else
- info.key.ttl = U8_MAX;
+ info.key.ttl = nla_get_u8_default(tb[NFTA_TUNNEL_KEY_TTL], U8_MAX);
if (tb[NFTA_TUNNEL_KEY_OPTS]) {
err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
@@ -503,13 +506,14 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return err;
}
- md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
+ md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
+ GFP_KERNEL_ACCOUNT);
if (!md)
return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
- err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
+ err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
if (err < 0) {
metadata_dst_free(md);
return err;
@@ -583,7 +587,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
if (!nest)
return -1;
- if (opts->flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
if (!inner)
goto failure;
@@ -591,7 +595,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
htonl(opts->u.vxlan.gbp)))
goto inner_failure;
nla_nest_end(skb, inner);
- } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
+ } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
if (!inner)
goto failure;
@@ -613,7 +617,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
break;
}
nla_nest_end(skb, inner);
- } else if (opts->flags & TUNNEL_GENEVE_OPT) {
+ } else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) {
struct geneve_opt *opt;
int offset = 0;
@@ -658,11 +662,11 @@ static int nft_tunnel_flags_dump(struct sk_buff *skb,
{
u32 flags = 0;
- if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
- if (!(info->key.tun_flags & TUNNEL_CSUM))
+ if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
- if (info->key.tun_flags & TUNNEL_SEQ)
+ if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_SEQ_NUMBER;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
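Editorial note: nft_tunnel drops the __be16 flags word in favour of the IP_TUNNEL_DECLARE_FLAGS() bitmap, so options are now set and queried with __set_bit()/test_bit() on named bit positions. A hedged userspace sketch of the same bitmap-of-flags pattern in plain C; the bit names and helpers here are illustrative, not the kernel macros.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum tun_opt_bit {
	OPT_VXLAN_BIT,
	OPT_ERSPAN_BIT,
	OPT_GENEVE_BIT,
	OPT_NBITS,
};

#define BITS_PER_LONG_SZ (8 * sizeof(unsigned long))
#define LONGS_FOR(bits)  (((bits) + BITS_PER_LONG_SZ - 1) / BITS_PER_LONG_SZ)

struct tun_flags {
	unsigned long bits[LONGS_FOR(OPT_NBITS)];
};

static void flags_zero(struct tun_flags *f)
{
	memset(f->bits, 0, sizeof(f->bits));
}

static void flag_set(struct tun_flags *f, unsigned int bit)
{
	f->bits[bit / BITS_PER_LONG_SZ] |= 1UL << (bit % BITS_PER_LONG_SZ);
}

static bool flag_test(const struct tun_flags *f, unsigned int bit)
{
	return f->bits[bit / BITS_PER_LONG_SZ] & (1UL << (bit % BITS_PER_LONG_SZ));
}

int main(void)
{
	struct tun_flags flags;

	flags_zero(&flags);
	flag_set(&flags, OPT_VXLAN_BIT);  /* like __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, ...) */

	printf("vxlan=%d geneve=%d\n",
	       flag_test(&flags, OPT_VXLAN_BIT),
	       flag_test(&flags, OPT_GENEVE_BIT));
	return 0;
}

The point of the bitmap form appears to be that the flag space is no longer limited to the 16 bits the old __be16 field could hold.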
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
index 1c866757db55..3210cfc966ab 100644
--- a/net/netfilter/nft_xfrm.c
+++ b/net/netfilter/nft_xfrm.c
@@ -112,7 +112,8 @@ static bool xfrm_state_addr_ok(enum nft_xfrm_keys k, u8 family, u8 mode)
return true;
}
- return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL;
+ return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL ||
+ mode == XFRM_MODE_IPTFS;
}
static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
@@ -229,8 +230,7 @@ static int nft_xfrm_get_dump(struct sk_buff *skb,
return 0;
}
-static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data)
+static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
const struct nft_xfrm *priv = nft_expr_priv(expr);
unsigned int hooks;
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index acef4155f0da..008419db815a 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -179,43 +179,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
-static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
-#ifdef CONFIG_INET
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, entry->state.sk,
- skb, RTN_UNSPEC);
- }
-#endif
- return 0;
-}
-
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
-{
- const struct nf_ipv6_ops *v6ops;
- int ret = 0;
-
- switch (entry->state.pf) {
- case AF_INET:
- ret = nf_ip_reroute(skb, entry);
- break;
- case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- ret = v6ops->reroute(skb, entry);
- break;
- }
- return ret;
-}
-
/* Only get and check the lengths, not do any hop-by-hop stuff. */
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
{
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 21624d68314f..709840612f0d 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1142,7 +1142,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
- memcpy(t->data, ct->data, tsize - sizeof(*ct));
+ unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
+ /* UAPI 0-sized destination */);
tsize += off;
t->u.user.target_size = tsize;
@@ -1268,7 +1269,7 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
/* and once again: */
list_for_each_entry(t, &xt_net->tables[af], list)
- if (strcmp(t->name, name) == 0)
+ if (strcmp(t->name, name) == 0 && owner == t->me)
return t;
module_put(owner);
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
index c8a639f56168..9d99f5a3d176 100644
--- a/net/netfilter/xt_CHECKSUM.c
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -63,24 +63,37 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
return 0;
}
-static struct xt_target checksum_tg_reg __read_mostly = {
- .name = "CHECKSUM",
- .family = NFPROTO_UNSPEC,
- .target = checksum_tg,
- .targetsize = sizeof(struct xt_CHECKSUM_info),
- .table = "mangle",
- .checkentry = checksum_tg_check,
- .me = THIS_MODULE,
+static struct xt_target checksum_tg_reg[] __read_mostly = {
+ {
+ .name = "CHECKSUM",
+ .family = NFPROTO_IPV4,
+ .target = checksum_tg,
+ .targetsize = sizeof(struct xt_CHECKSUM_info),
+ .table = "mangle",
+ .checkentry = checksum_tg_check,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "CHECKSUM",
+ .family = NFPROTO_IPV6,
+ .target = checksum_tg,
+ .targetsize = sizeof(struct xt_CHECKSUM_info),
+ .table = "mangle",
+ .checkentry = checksum_tg_check,
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init checksum_tg_init(void)
{
- return xt_register_target(&checksum_tg_reg);
+ return xt_register_targets(checksum_tg_reg, ARRAY_SIZE(checksum_tg_reg));
}
static void __exit checksum_tg_exit(void)
{
- xt_unregister_target(&checksum_tg_reg);
+ xt_unregister_targets(checksum_tg_reg, ARRAY_SIZE(checksum_tg_reg));
}
module_init(checksum_tg_init);
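Editorial note: xt_CHECKSUM, like the xt_* modules that follow, replaces a single NFPROTO_UNSPEC registration with an array holding one entry per family, the IPv6 entry compiled in only when ip6tables support is enabled, and switches to the plural register/unregister helpers. A compact sketch of the register-array-with-rollback pattern; register_one()/unregister_one() are stubs standing in for xt_register_target()/xt_unregister_target().

#include <stddef.h>
#include <stdio.h>

struct target_reg {
	const char *name;
	int family;		/* e.g. 2 = IPv4, 10 = IPv6 */
};

#define HAVE_IPV6 1

static const struct target_reg checksum_reg[] = {
	{ .name = "CHECKSUM", .family = 2 },
#if HAVE_IPV6
	{ .name = "CHECKSUM", .family = 10 },
#endif
};

static int register_one(const struct target_reg *t)
{
	printf("register %s family %d\n", t->name, t->family);
	return 0;
}

static void unregister_one(const struct target_reg *t)
{
	printf("unregister %s family %d\n", t->name, t->family);
}

/* Register all entries; on failure, roll back the ones already done. */
static int register_all(const struct target_reg *regs, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		int err = register_one(&regs[i]);

		if (err) {
			while (i--)
				unregister_one(&regs[i]);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	size_t n = sizeof(checksum_reg) / sizeof(checksum_reg[0]);

	if (register_all(checksum_reg, n))
		return 1;
	while (n--)
		unregister_one(&checksum_reg[n]);
	return 0;
}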
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 0accac98dea7..0ae8d8a1216e 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -38,9 +38,9 @@ static struct xt_target classify_tg_reg[] __read_mostly = {
{
.name = "CLASSIFY",
.revision = 0,
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
- (1 << NF_INET_POST_ROUTING),
+ (1 << NF_INET_POST_ROUTING),
.target = classify_tg,
.targetsize = sizeof(struct xt_classify_target_info),
.me = THIS_MODULE,
@@ -54,6 +54,18 @@ static struct xt_target classify_tg_reg[] __read_mostly = {
.targetsize = sizeof(struct xt_classify_target_info),
.me = THIS_MODULE,
},
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_POST_ROUTING),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init classify_tg_init(void)
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 76acecf3e757..1494b3ee30e1 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -114,25 +114,39 @@ static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
nf_ct_netns_put(par->net, par->family);
}
-static struct xt_target connsecmark_tg_reg __read_mostly = {
- .name = "CONNSECMARK",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connsecmark_tg_check,
- .destroy = connsecmark_tg_destroy,
- .target = connsecmark_tg,
- .targetsize = sizeof(struct xt_connsecmark_target_info),
- .me = THIS_MODULE,
+static struct xt_target connsecmark_tg_reg[] __read_mostly = {
+ {
+ .name = "CONNSECMARK",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .checkentry = connsecmark_tg_check,
+ .destroy = connsecmark_tg_destroy,
+ .target = connsecmark_tg,
+ .targetsize = sizeof(struct xt_connsecmark_target_info),
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "CONNSECMARK",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .checkentry = connsecmark_tg_check,
+ .destroy = connsecmark_tg_destroy,
+ .target = connsecmark_tg,
+ .targetsize = sizeof(struct xt_connsecmark_target_info),
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init connsecmark_tg_init(void)
{
- return xt_register_target(&connsecmark_tg_reg);
+ return xt_register_targets(connsecmark_tg_reg, ARRAY_SIZE(connsecmark_tg_reg));
}
static void __exit connsecmark_tg_exit(void)
{
- xt_unregister_target(&connsecmark_tg_reg);
+ xt_unregister_targets(connsecmark_tg_reg, ARRAY_SIZE(connsecmark_tg_reg));
}
module_init(connsecmark_tg_init);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2be2f7a7b60f..3ba94c34297c 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -313,10 +313,30 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
xt_ct_tg_destroy(par, par->targinfo);
}
+static unsigned int
+notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ /* Previously seen (loopback)? Ignore. */
+ if (skb->_nfct != 0)
+ return XT_CONTINUE;
+
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+
+ return XT_CONTINUE;
+}
+
static struct xt_target xt_ct_tg_reg[] __read_mostly = {
{
+ .name = "NOTRACK",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = notrack_tg,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
.name = "CT",
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.targetsize = sizeof(struct xt_ct_target_info),
.usersize = offsetof(struct xt_ct_target_info, ct),
.checkentry = xt_ct_tg_check_v0,
@@ -327,7 +347,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
},
{
.name = "CT",
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.revision = 1,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
@@ -339,7 +359,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
},
{
.name = "CT",
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.revision = 2,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
@@ -349,49 +369,61 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
.table = "raw",
.me = THIS_MODULE,
},
-};
-
-static unsigned int
-notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
- /* Previously seen (loopback)? Ignore. */
- if (skb->_nfct != 0)
- return XT_CONTINUE;
-
- nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-
- return XT_CONTINUE;
-}
-
-static struct xt_target notrack_tg_reg __read_mostly = {
- .name = "NOTRACK",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .target = notrack_tg,
- .table = "raw",
- .me = THIS_MODULE,
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "NOTRACK",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .target = notrack_tg,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CT",
+ .family = NFPROTO_IPV6,
+ .targetsize = sizeof(struct xt_ct_target_info),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+ .checkentry = xt_ct_tg_check_v0,
+ .destroy = xt_ct_tg_destroy_v0,
+ .target = xt_ct_target_v0,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CT",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+ .checkentry = xt_ct_tg_check_v1,
+ .destroy = xt_ct_tg_destroy_v1,
+ .target = xt_ct_target_v1,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CT",
+ .family = NFPROTO_IPV6,
+ .revision = 2,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+ .checkentry = xt_ct_tg_check_v2,
+ .destroy = xt_ct_tg_destroy_v1,
+ .target = xt_ct_target_v1,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init xt_ct_tg_init(void)
{
- int ret;
-
- ret = xt_register_target(&notrack_tg_reg);
- if (ret < 0)
- return ret;
-
- ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
- if (ret < 0) {
- xt_unregister_target(&notrack_tg_reg);
- return ret;
- }
- return 0;
+ return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
static void __exit xt_ct_tg_exit(void)
{
xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
- xt_unregister_target(&notrack_tg_reg);
}
module_init(xt_ct_tg_init);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index db720efa811d..9f54819eb52c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -107,14 +107,12 @@ static void idletimer_tg_expired(struct timer_list *t)
schedule_work(&timer->work);
}
-static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
- ktime_t now)
+static void idletimer_tg_alarmproc(struct alarm *alarm, ktime_t now)
{
struct idletimer_tg *timer = alarm->data;
pr_debug("alarm %s expired\n", timer->attr.attr.name);
schedule_work(&timer->work);
- return ALARMTIMER_NORESTART;
}
static int idletimer_check_sysfs_name(const char *name, unsigned int size)
@@ -409,21 +407,23 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
mutex_lock(&list_mutex);
- if (--info->timer->refcnt == 0) {
- pr_debug("deleting timer %s\n", info->label);
-
- list_del(&info->timer->entry);
- timer_shutdown_sync(&info->timer->timer);
- cancel_work_sync(&info->timer->work);
- sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
- kfree(info->timer->attr.attr.name);
- kfree(info->timer);
- } else {
+ if (--info->timer->refcnt > 0) {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
+ mutex_unlock(&list_mutex);
+ return;
}
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
mutex_unlock(&list_mutex);
+
+ timer_shutdown_sync(&info->timer->timer);
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
}
static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
@@ -434,52 +434,75 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
mutex_lock(&list_mutex);
- if (--info->timer->refcnt == 0) {
- pr_debug("deleting timer %s\n", info->label);
-
- list_del(&info->timer->entry);
- if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
- alarm_cancel(&info->timer->alarm);
- } else {
- timer_shutdown_sync(&info->timer->timer);
- }
- cancel_work_sync(&info->timer->work);
- sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
- kfree(info->timer->attr.attr.name);
- kfree(info->timer);
- } else {
+ if (--info->timer->refcnt > 0) {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
+ mutex_unlock(&list_mutex);
+ return;
}
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
mutex_unlock(&list_mutex);
+
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ alarm_cancel(&info->timer->alarm);
+ } else {
+ timer_shutdown_sync(&info->timer->timer);
+ }
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
}
static struct xt_target idletimer_tg[] __read_mostly = {
{
- .name = "IDLETIMER",
- .family = NFPROTO_UNSPEC,
- .target = idletimer_tg_target,
- .targetsize = sizeof(struct idletimer_tg_info),
- .usersize = offsetof(struct idletimer_tg_info, timer),
- .checkentry = idletimer_tg_checkentry,
- .destroy = idletimer_tg_destroy,
- .me = THIS_MODULE,
+ .name = "IDLETIMER",
+ .family = NFPROTO_IPV4,
+ .target = idletimer_tg_target,
+ .targetsize = sizeof(struct idletimer_tg_info),
+ .usersize = offsetof(struct idletimer_tg_info, timer),
+ .checkentry = idletimer_tg_checkentry,
+ .destroy = idletimer_tg_destroy,
+ .me = THIS_MODULE,
},
{
- .name = "IDLETIMER",
- .family = NFPROTO_UNSPEC,
- .revision = 1,
- .target = idletimer_tg_target_v1,
- .targetsize = sizeof(struct idletimer_tg_info_v1),
- .usersize = offsetof(struct idletimer_tg_info_v1, timer),
- .checkentry = idletimer_tg_checkentry_v1,
- .destroy = idletimer_tg_destroy_v1,
- .me = THIS_MODULE,
+ .name = "IDLETIMER",
+ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .target = idletimer_tg_target_v1,
+ .targetsize = sizeof(struct idletimer_tg_info_v1),
+ .usersize = offsetof(struct idletimer_tg_info_v1, timer),
+ .checkentry = idletimer_tg_checkentry_v1,
+ .destroy = idletimer_tg_destroy_v1,
+ .me = THIS_MODULE,
},
-
-
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "IDLETIMER",
+ .family = NFPROTO_IPV6,
+ .target = idletimer_tg_target,
+ .targetsize = sizeof(struct idletimer_tg_info),
+ .usersize = offsetof(struct idletimer_tg_info, timer),
+ .checkentry = idletimer_tg_checkentry,
+ .destroy = idletimer_tg_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "IDLETIMER",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .target = idletimer_tg_target_v1,
+ .targetsize = sizeof(struct idletimer_tg_info_v1),
+ .usersize = offsetof(struct idletimer_tg_info_v1, timer),
+ .checkentry = idletimer_tg_checkentry_v1,
+ .destroy = idletimer_tg_destroy_v1,
+ .me = THIS_MODULE,
+ },
+#endif
};
static struct class *idletimer_tg_class;
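Editorial note: the IDLETIMER destroy paths above are restructured so the list mutex only covers the refcount decrement and the list_del(); the timer shutdown, work cancellation and sysfs teardown now run after the mutex is released. A hedged sketch of the unlink-under-lock, tear-down-outside-lock shape, using a pthread mutex in place of the kernel mutex and a stubbed teardown helper.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct timer_obj {
	unsigned int refcnt;
	struct timer_obj *next;	/* toy intrusive list link */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct timer_obj *timer_list;

/* Stand-in for timer_shutdown_sync()/cancel_work_sync()/sysfs removal;
 * these may sleep, so they must not run under list_lock.
 */
static void teardown(struct timer_obj *t)
{
	printf("tearing down %p outside the lock\n", (void *)t);
	free(t);
}

static void put_timer(struct timer_obj *t)
{
	pthread_mutex_lock(&list_lock);
	if (--t->refcnt > 0) {
		pthread_mutex_unlock(&list_lock);
		return;
	}
	/* Last reference: unlink while still holding the lock... */
	for (struct timer_obj **p = &timer_list; *p; p = &(*p)->next) {
		if (*p == t) {
			*p = t->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* ...then do the potentially sleeping teardown unlocked. */
	teardown(t);
}

int main(void)
{
	struct timer_obj *t = calloc(1, sizeof(*t));

	if (!t)
		return 1;
	t->refcnt = 1;
	timer_list = t;
	put_timer(t);
	return 0;
}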
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 36c9720ad8d6..8a80fd76fe45 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -96,7 +96,9 @@ static int led_tg_check(const struct xt_tgchk_param *par)
struct xt_led_info_internal *ledinternal;
int err;
- if (ledinfo->id[0] == '\0')
+ /* Bail out if empty string or not a string at all. */
+ if (ledinfo->id[0] == '\0' ||
+ !memchr(ledinfo->id, '\0', sizeof(ledinfo->id)))
return -EINVAL;
mutex_lock(&xt_led_mutex);
@@ -175,26 +177,41 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
kfree(ledinternal);
}
-static struct xt_target led_tg_reg __read_mostly = {
- .name = "LED",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .target = led_tg,
- .targetsize = sizeof(struct xt_led_info),
- .usersize = offsetof(struct xt_led_info, internal_data),
- .checkentry = led_tg_check,
- .destroy = led_tg_destroy,
- .me = THIS_MODULE,
+static struct xt_target led_tg_reg[] __read_mostly = {
+ {
+ .name = "LED",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = led_tg,
+ .targetsize = sizeof(struct xt_led_info),
+ .usersize = offsetof(struct xt_led_info, internal_data),
+ .checkentry = led_tg_check,
+ .destroy = led_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "LED",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .target = led_tg,
+ .targetsize = sizeof(struct xt_led_info),
+ .usersize = offsetof(struct xt_led_info, internal_data),
+ .checkentry = led_tg_check,
+ .destroy = led_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init led_tg_init(void)
{
- return xt_register_target(&led_tg_reg);
+ return xt_register_targets(led_tg_reg, ARRAY_SIZE(led_tg_reg));
}
static void __exit led_tg_exit(void)
{
- xt_unregister_target(&led_tg_reg);
+ xt_unregister_targets(led_tg_reg, ARRAY_SIZE(led_tg_reg));
}
module_init(led_tg_init);
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index e660c3710a10..6dcf4bc7e30b 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -64,25 +64,39 @@ static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
nf_logger_put(par->family, NF_LOG_TYPE_ULOG);
}
-static struct xt_target nflog_tg_reg __read_mostly = {
- .name = "NFLOG",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = nflog_tg_check,
- .destroy = nflog_tg_destroy,
- .target = nflog_tg,
- .targetsize = sizeof(struct xt_nflog_info),
- .me = THIS_MODULE,
+static struct xt_target nflog_tg_reg[] __read_mostly = {
+ {
+ .name = "NFLOG",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .checkentry = nflog_tg_check,
+ .destroy = nflog_tg_destroy,
+ .target = nflog_tg,
+ .targetsize = sizeof(struct xt_nflog_info),
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "NFLOG",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .checkentry = nflog_tg_check,
+ .destroy = nflog_tg_destroy,
+ .target = nflog_tg,
+ .targetsize = sizeof(struct xt_nflog_info),
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init nflog_tg_init(void)
{
- return xt_register_target(&nflog_tg_reg);
+ return xt_register_targets(nflog_tg_reg, ARRAY_SIZE(nflog_tg_reg));
}
static void __exit nflog_tg_exit(void)
{
- xt_unregister_target(&nflog_tg_reg);
+ xt_unregister_targets(nflog_tg_reg, ARRAY_SIZE(nflog_tg_reg));
}
module_init(nflog_tg_init);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 80f6624e2355..4f49cfc27831 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -179,16 +179,31 @@ static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
xt_rateest_put(par->net, info->est);
}
-static struct xt_target xt_rateest_tg_reg __read_mostly = {
- .name = "RATEEST",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .target = xt_rateest_tg,
- .checkentry = xt_rateest_tg_checkentry,
- .destroy = xt_rateest_tg_destroy,
- .targetsize = sizeof(struct xt_rateest_target_info),
- .usersize = offsetof(struct xt_rateest_target_info, est),
- .me = THIS_MODULE,
+static struct xt_target xt_rateest_tg_reg[] __read_mostly = {
+ {
+ .name = "RATEEST",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = xt_rateest_tg,
+ .checkentry = xt_rateest_tg_checkentry,
+ .destroy = xt_rateest_tg_destroy,
+ .targetsize = sizeof(struct xt_rateest_target_info),
+ .usersize = offsetof(struct xt_rateest_target_info, est),
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "RATEEST",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .target = xt_rateest_tg,
+ .checkentry = xt_rateest_tg_checkentry,
+ .destroy = xt_rateest_tg_destroy,
+ .targetsize = sizeof(struct xt_rateest_target_info),
+ .usersize = offsetof(struct xt_rateest_target_info, est),
+ .me = THIS_MODULE,
+ },
+#endif
};
static __net_init int xt_rateest_net_init(struct net *net)
@@ -214,12 +229,12 @@ static int __init xt_rateest_tg_init(void)
if (err)
return err;
- return xt_register_target(&xt_rateest_tg_reg);
+ return xt_register_targets(xt_rateest_tg_reg, ARRAY_SIZE(xt_rateest_tg_reg));
}
static void __exit xt_rateest_tg_fini(void)
{
- xt_unregister_target(&xt_rateest_tg_reg);
+ xt_unregister_targets(xt_rateest_tg_reg, ARRAY_SIZE(xt_rateest_tg_reg));
unregister_pernet_subsys(&xt_rateest_net_ops);
}
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 498a0bf6f044..5bc5ea505eb9 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -157,7 +157,7 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
{
.name = "SECMARK",
.revision = 0,
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.checkentry = secmark_tg_check_v0,
.destroy = secmark_tg_destroy,
.target = secmark_tg_v0,
@@ -167,7 +167,7 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
{
.name = "SECMARK",
.revision = 1,
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.checkentry = secmark_tg_check_v1,
.destroy = secmark_tg_destroy,
.target = secmark_tg_v1,
@@ -175,6 +175,29 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
.usersize = offsetof(struct xt_secmark_target_info_v1, secid),
.me = THIS_MODULE,
},
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "SECMARK",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .checkentry = secmark_tg_check_v0,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v0,
+ .targetsize = sizeof(struct xt_secmark_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "SECMARK",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .checkentry = secmark_tg_check_v1,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v1,
+ .targetsize = sizeof(struct xt_secmark_target_info_v1),
+ .usersize = offsetof(struct xt_secmark_target_info_v1, secid),
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init secmark_tg_init(void)
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
index 5582dce98cae..a642ff09fc8e 100644
--- a/net/netfilter/xt_TRACE.c
+++ b/net/netfilter/xt_TRACE.c
@@ -29,25 +29,39 @@ trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static struct xt_target trace_tg_reg __read_mostly = {
- .name = "TRACE",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .table = "raw",
- .target = trace_tg,
- .checkentry = trace_tg_check,
- .destroy = trace_tg_destroy,
- .me = THIS_MODULE,
+static struct xt_target trace_tg_reg[] __read_mostly = {
+ {
+ .name = "TRACE",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .table = "raw",
+ .target = trace_tg,
+ .checkentry = trace_tg_check,
+ .destroy = trace_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "TRACE",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .table = "raw",
+ .target = trace_tg,
+ .checkentry = trace_tg_check,
+ .destroy = trace_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init trace_tg_init(void)
{
- return xt_register_target(&trace_tg_reg);
+ return xt_register_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg));
}
static void __exit trace_tg_exit(void)
{
- xt_unregister_target(&trace_tg_reg);
+ xt_unregister_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg));
}
module_init(trace_tg_init);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index e9b2181e8c42..a77088943107 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -208,13 +208,24 @@ static struct xt_match addrtype_mt_reg[] __read_mostly = {
},
{
.name = "addrtype",
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.revision = 1,
.match = addrtype_mt_v1,
.checkentry = addrtype_mt_checkentry_v1,
.matchsize = sizeof(struct xt_addrtype_info_v1),
.me = THIS_MODULE
- }
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "addrtype",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .match = addrtype_mt_v1,
+ .checkentry = addrtype_mt_checkentry_v1,
+ .matchsize = sizeof(struct xt_addrtype_info_v1),
+ .me = THIS_MODULE
+ },
+#endif
};
static int __init addrtype_mt_init(void)
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index a047a545371e..908fd5f2c3c8 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -146,24 +146,37 @@ static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
nf_ct_netns_put(par->net, par->family);
}
-static struct xt_match xt_cluster_match __read_mostly = {
- .name = "cluster",
- .family = NFPROTO_UNSPEC,
- .match = xt_cluster_mt,
- .checkentry = xt_cluster_mt_checkentry,
- .matchsize = sizeof(struct xt_cluster_match_info),
- .destroy = xt_cluster_mt_destroy,
- .me = THIS_MODULE,
+static struct xt_match xt_cluster_match[] __read_mostly = {
+ {
+ .name = "cluster",
+ .family = NFPROTO_IPV4,
+ .match = xt_cluster_mt,
+ .checkentry = xt_cluster_mt_checkentry,
+ .matchsize = sizeof(struct xt_cluster_match_info),
+ .destroy = xt_cluster_mt_destroy,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "cluster",
+ .family = NFPROTO_IPV6,
+ .match = xt_cluster_mt,
+ .checkentry = xt_cluster_mt_checkentry,
+ .matchsize = sizeof(struct xt_cluster_match_info),
+ .destroy = xt_cluster_mt_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init xt_cluster_mt_init(void)
{
- return xt_register_match(&xt_cluster_match);
+ return xt_register_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
}
static void __exit xt_cluster_mt_fini(void)
{
- xt_unregister_match(&xt_cluster_match);
+ xt_unregister_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
}
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 93cb018c3055..2aabdcea8707 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -111,9 +111,11 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
ret = nf_ct_netns_get(par->net, par->family);
- if (ret < 0)
+ if (ret < 0) {
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
+ return ret;
+ }
/*
* This filter cannot function correctly unless connection tracking
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 5d04ef80a61d..0189f8b6b0bd 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -86,6 +86,7 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_connlimit_info *info = par->matchinfo;
unsigned int keylen;
+ int ret;
keylen = sizeof(u32);
if (par->family == NFPROTO_IPV6)
@@ -93,8 +94,17 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
else
keylen += sizeof(struct in_addr);
+ ret = nf_ct_netns_get(par->net, par->family);
+ if (ret < 0) {
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
+ }
+
/* init private data */
- info->data = nf_conncount_init(par->net, par->family, keylen);
+ info->data = nf_conncount_init(par->net, keylen);
+ if (IS_ERR(info->data))
+ nf_ct_netns_put(par->net, par->family);
return PTR_ERR_OR_ZERO(info->data);
}
@@ -103,29 +113,45 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_connlimit_info *info = par->matchinfo;
- nf_conncount_destroy(par->net, par->family, info->data);
+ nf_conncount_destroy(par->net, info->data);
+ nf_ct_netns_put(par->net, par->family);
}
-static struct xt_match connlimit_mt_reg __read_mostly = {
- .name = "connlimit",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .usersize = offsetof(struct xt_connlimit_info, data),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+ {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .usersize = offsetof(struct xt_connlimit_info, data),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .usersize = offsetof(struct xt_connlimit_info, data),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
};
static int __init connlimit_mt_init(void)
{
- return xt_register_match(&connlimit_mt_reg);
+ return xt_register_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_match(&connlimit_mt_reg);
+ xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
}
module_init(connlimit_mt_init);
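Editorial note: connlimit_mt_check() now takes a conntrack netns reference before allocating the conncount data and drops it again if the allocation fails, with the matching put kept in the destroy path. A short sketch of the acquire-then-undo-on-error shape; ct_get()/ct_put()/count_alloc() are illustrative stand-ins for nf_ct_netns_get()/nf_ct_netns_put()/nf_conncount_init().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int  ct_get(void)        { printf("ct get\n");  return 0; }
static void ct_put(void)        { printf("ct put\n"); }
static void *count_alloc(void)  { return malloc(16); }
static void count_free(void *p) { free(p); }

struct match_priv {
	void *data;
};

static int match_check(struct match_priv *priv)
{
	int err = ct_get();

	if (err < 0)
		return err;

	priv->data = count_alloc();
	if (!priv->data) {
		ct_put();		/* undo the get on failure */
		return -ENOMEM;
	}
	return 0;
}

static void match_destroy(struct match_priv *priv)
{
	count_free(priv->data);
	ct_put();			/* pairs with the get in check */
}

int main(void)
{
	struct match_priv priv;

	if (match_check(&priv) == 0)
		match_destroy(&priv);
	return 0;
}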
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index ad3c033db64e..4277084de2e7 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -151,7 +151,7 @@ static struct xt_target connmark_tg_reg[] __read_mostly = {
{
.name = "CONNMARK",
.revision = 1,
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.checkentry = connmark_tg_check,
.target = connmark_tg,
.targetsize = sizeof(struct xt_connmark_tginfo1),
@@ -161,13 +161,35 @@ static struct xt_target connmark_tg_reg[] __read_mostly = {
{
.name = "CONNMARK",
.revision = 2,
- .family = NFPROTO_UNSPEC,
+ .family = NFPROTO_IPV4,
.checkentry = connmark_tg_check,
.target = connmark_tg_v2,
.targetsize = sizeof(struct xt_connmark_tginfo2),
.destroy = connmark_tg_destroy,
.me = THIS_MODULE,
- }
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "CONNMARK",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg,
+ .targetsize = sizeof(struct xt_connmark_tginfo1),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CONNMARK",
+ .revision = 2,
+ .family = NFPROTO_IPV6,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg_v2,
+ .targetsize = sizeof(struct xt_connmark_tginfo2),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
};
static struct xt_match connmark_mt_reg __read_mostly = {
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 0859b8f76764..fa02aab56724 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -363,11 +363,15 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select
unsigned int i;
for (i = 0; i < ht->cfg.size; i++) {
+ struct hlist_head *head = &ht->hash[i];
struct dsthash_ent *dh;
struct hlist_node *n;
+ if (hlist_empty(head))
+ continue;
+
spin_lock_bh(&ht->lock);
- hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
+ hlist_for_each_entry_safe(dh, n, head, node) {
if (time_after_eq(jiffies, dh->expires) || select_all)
dsthash_free(ht, dh);
}
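Editorial note: the hashlimit cleanup loop now tests hlist_empty() before taking the per-table spinlock, so fully empty buckets are skipped without any locking. A minimal sketch of the same skip-empty-bucket optimisation over a toy hash table, with a counter standing in for the lock acquisitions that are avoided.

#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
};

#define NBUCKETS 8

static struct node *buckets[NBUCKETS];
static unsigned long lock_acquisitions;

static void expire_bucket(struct node **head)
{
	/* Walk and free expired entries; elided in this sketch. */
	(void)head;
}

static void cleanup(void)
{
	for (size_t i = 0; i < NBUCKETS; i++) {
		if (!buckets[i])	/* like hlist_empty(): no lock needed */
			continue;

		lock_acquisitions++;	/* spin_lock_bh(&ht->lock) */
		expire_bucket(&buckets[i]);
		/* spin_unlock_bh(&ht->lock) */
	}
}

int main(void)
{
	static struct node one;

	buckets[3] = &one;
	cleanup();
	printf("locked %lu of %d buckets\n", lock_acquisitions, NBUCKETS);
	return 0;
}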
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 1ad74b5920b5..65b965ca40ea 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -39,13 +39,35 @@ mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
-static struct xt_target mark_tg_reg __read_mostly = {
- .name = "MARK",
- .revision = 2,
- .family = NFPROTO_UNSPEC,
- .target = mark_tg,
- .targetsize = sizeof(struct xt_mark_tginfo2),
- .me = THIS_MODULE,
+static struct xt_target mark_tg_reg[] __read_mostly = {
+ {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_IPV4,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES)
+ {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_ARP,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+ },
+#endif
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_IPV6,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+ },
+#endif
};
static struct xt_match mark_mt_reg __read_mostly = {
@@ -61,12 +83,12 @@ static int __init mark_mt_init(void)
{
int ret;
- ret = xt_register_target(&mark_tg_reg);
+ ret = xt_register_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
if (ret < 0)
return ret;
ret = xt_register_match(&mark_mt_reg);
if (ret < 0) {
- xt_unregister_target(&mark_tg_reg);
+ xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
return ret;
}
return 0;
@@ -75,7 +97,7 @@ static int __init mark_mt_init(void)
static void __exit mark_mt_exit(void)
{
xt_unregister_match(&mark_mt_reg);
- xt_unregister_target(&mark_tg_reg);
+ xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
}
module_init(mark_mt_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index ef93e0d3bee0..588a5e6ad899 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -59,9 +59,9 @@ MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* fil
/* retained for backwards compatibility */
static unsigned int ip_pkt_list_tot __read_mostly;
module_param(ip_pkt_list_tot, uint, 0400);
-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 65535)");
-#define XT_RECENT_MAX_NSTAMPS 256
+#define XT_RECENT_MAX_NSTAMPS 65536
struct recent_entry {
struct list_head list;
@@ -69,7 +69,7 @@ struct recent_entry {
union nf_inet_addr addr;
u_int16_t family;
u_int8_t ttl;
- u_int8_t index;
+ u_int16_t index;
u_int16_t nstamps;
unsigned long stamps[];
};
@@ -80,7 +80,7 @@ struct recent_table {
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
- u8 nstamps_max_mask;
+ u_int16_t nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[];
};
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 27511c90a26f..cd9160bbc919 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -610,7 +610,7 @@ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *iter;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -666,8 +666,8 @@ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *prev = NULL;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmask;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmask;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -857,7 +857,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
- iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
+ iter->bitmap[idx] |= (u64)bitmap
<< (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
@@ -876,7 +876,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
* Description:
* Starting at @offset, walk the bitmap from left to right until either the
* desired bit is found or we reach the end. Return the bit offset, -1 if
- * not found, or -2 if error.
+ * not found.
*/
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
u32 offset, u8 state)
@@ -965,6 +965,7 @@ int netlbl_enabled(void)
* @sk: the socket to label
* @family: protocol family
* @secattr: the security attributes
+ * @sk_locked: true if caller holds the socket lock
*
* Description:
* Attach the correct label to the given socket using the security attributes
@@ -977,7 +978,8 @@ int netlbl_enabled(void)
*/
int netlbl_sock_setattr(struct sock *sk,
u16 family,
- const struct netlbl_lsm_secattr *secattr)
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked)
{
int ret_val;
struct netlbl_dom_map *dom_entry;
@@ -997,7 +999,7 @@ int netlbl_sock_setattr(struct sock *sk,
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
dom_entry->def.cipso,
- secattr);
+ secattr, sk_locked);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
@@ -1091,6 +1093,28 @@ int netlbl_sock_getattr(struct sock *sk,
}
/**
+ * netlbl_sk_lock_check - Check if the socket lock has been acquired.
+ * @sk: the socket to be checked
+ *
+ * Return: true if socket @sk is locked or if lock debugging is disabled at
+ * runtime or compile-time; false otherwise
+ *
+ */
+#ifdef CONFIG_LOCKDEP
+bool netlbl_sk_lock_check(struct sock *sk)
+{
+ if (debug_locks)
+ return lockdep_sock_is_held(sk);
+ return true;
+}
+#else
+bool netlbl_sk_lock_check(struct sock *sk)
+{
+ return true;
+}
+#endif
+
+/**
* netlbl_conn_setattr - Label a connected socket using the correct protocol
* @sk: the socket to label
* @addr: the destination address
@@ -1126,7 +1150,8 @@ int netlbl_conn_setattr(struct sock *sk,
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
- entry->cipso, secattr);
+ entry->cipso, secattr,
+ netlbl_sk_lock_check(sk));
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
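Editorial note: the new netlbl_sk_lock_check() helper only asserts socket-lock ownership when lockdep is built in and lock debugging is still enabled at runtime, otherwise it returns true and trusts the caller. A hedged sketch of the same compile-time/runtime gated assertion pattern in plain C; LOCK_DEBUG and debug_enabled are illustrative stand-ins for CONFIG_LOCKDEP and debug_locks.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_DEBUG 1			/* stand-in for CONFIG_LOCKDEP */

static bool debug_enabled = true;	/* stand-in for debug_locks */

struct sock_stub {
	pthread_mutex_t lock;
	bool locked;			/* bookkeeping for the sketch only */
};

#if LOCK_DEBUG
static bool lock_is_held(const struct sock_stub *sk)
{
	if (debug_enabled)
		return sk->locked;	/* real code: lockdep_sock_is_held(sk) */
	return true;			/* debugging off at runtime: trust caller */
}
#else
static bool lock_is_held(const struct sock_stub *sk)
{
	(void)sk;
	return true;			/* no lock debugging compiled in */
}
#endif

int main(void)
{
	static struct sock_stub sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&sk.lock);
	sk.locked = true;
	printf("held: %d\n", lock_is_held(&sk));
	sk.locked = false;
	pthread_mutex_unlock(&sk.lock);
	return 0;
}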
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 689eaa2afbec..079fe72a6384 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -107,11 +107,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
- if (info->attrs[NLBL_MGMT_A_FAMILY])
- entry->family =
- nla_get_u16(info->attrs[NLBL_MGMT_A_FAMILY]);
- else
- entry->family = AF_UNSPEC;
+ entry->family =
+ nla_get_u16_default(info->attrs[NLBL_MGMT_A_FAMILY],
+ AF_UNSPEC);
break;
case NETLBL_NLTYPE_CIPSOV4:
if (!info->attrs[NLBL_MGMT_A_CV4DOI])
@@ -601,10 +599,7 @@ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info)
struct netlbl_dom_map *entry;
u16 family;
- if (info->attrs[NLBL_MGMT_A_FAMILY])
- family = nla_get_u16(info->attrs[NLBL_MGMT_A_FAMILY]);
- else
- family = AF_INET;
+ family = nla_get_u16_default(info->attrs[NLBL_MGMT_A_FAMILY], AF_INET);
ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (ans_skb == NULL)
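
The nla_get_u16_default() conversions above collapse the presence check into the accessor. A before/after sketch (the attribute default is the one used in this file; the helper names are otherwise illustrative):

#include <net/netlink.h>
#include <linux/socket.h>

static u16 family_before(const struct nlattr *attr)
{
	if (attr)
		return nla_get_u16(attr);
	return AF_UNSPEC;
}

static u16 family_after(const struct nlattr *attr)
{
	return nla_get_u16_default(attr, AF_UNSPEC);
}
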
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 9996883bf2b7..dfda9ea61971 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -374,8 +374,7 @@ int netlbl_unlhsh_add(struct net *net,
struct net_device *dev;
struct netlbl_unlhsh_iface *iface;
struct audit_buffer *audit_buf = NULL;
- char *secctx = NULL;
- u32 secctx_len;
+ struct lsm_context ctx;
if (addr_len != sizeof(struct in_addr) &&
addr_len != sizeof(struct in6_addr))
@@ -438,11 +437,9 @@ int netlbl_unlhsh_add(struct net *net,
unlhsh_add_return:
rcu_read_unlock();
if (audit_buf != NULL) {
- if (security_secid_to_secctx(secid,
- &secctx,
- &secctx_len) == 0) {
- audit_log_format(audit_buf, " sec_obj=%s", secctx);
- security_release_secctx(secctx, secctx_len);
+ if (security_secid_to_secctx(secid, &ctx) >= 0) {
+ audit_log_format(audit_buf, " sec_obj=%s", ctx.context);
+ security_release_secctx(&ctx);
}
audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
audit_log_end(audit_buf);
@@ -473,8 +470,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
struct netlbl_unlhsh_addr4 *entry;
struct audit_buffer *audit_buf;
struct net_device *dev;
- char *secctx;
- u32 secctx_len;
+ struct lsm_context ctx;
spin_lock(&netlbl_unlhsh_lock);
list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
@@ -494,10 +490,9 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
addr->s_addr, mask->s_addr);
dev_put(dev);
if (entry != NULL &&
- security_secid_to_secctx(entry->secid,
- &secctx, &secctx_len) == 0) {
- audit_log_format(audit_buf, " sec_obj=%s", secctx);
- security_release_secctx(secctx, secctx_len);
+ security_secid_to_secctx(entry->secid, &ctx) >= 0) {
+ audit_log_format(audit_buf, " sec_obj=%s", ctx.context);
+ security_release_secctx(&ctx);
}
audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
@@ -534,8 +529,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
struct netlbl_unlhsh_addr6 *entry;
struct audit_buffer *audit_buf;
struct net_device *dev;
- char *secctx;
- u32 secctx_len;
+ struct lsm_context ctx;
spin_lock(&netlbl_unlhsh_lock);
list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
@@ -554,10 +548,9 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
addr, mask);
dev_put(dev);
if (entry != NULL &&
- security_secid_to_secctx(entry->secid,
- &secctx, &secctx_len) == 0) {
- audit_log_format(audit_buf, " sec_obj=%s", secctx);
- security_release_secctx(secctx, secctx_len);
+ security_secid_to_secctx(entry->secid, &ctx) >= 0) {
+ audit_log_format(audit_buf, " sec_obj=%s", ctx.context);
+ security_release_secctx(&ctx);
}
audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
@@ -1069,10 +1062,9 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
int ret_val = -ENOMEM;
struct netlbl_unlhsh_walk_arg *cb_arg = arg;
struct net_device *dev;
+ struct lsm_context ctx;
void *data;
u32 secid;
- char *secctx;
- u32 secctx_len;
data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
cb_arg->seq, &netlbl_unlabel_gnl_family,
@@ -1127,14 +1119,14 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
secid = addr6->secid;
}
- ret_val = security_secid_to_secctx(secid, &secctx, &secctx_len);
- if (ret_val != 0)
+ ret_val = security_secid_to_secctx(secid, &ctx);
+ if (ret_val < 0)
goto list_cb_failure;
ret_val = nla_put(cb_arg->skb,
NLBL_UNLABEL_A_SECCTX,
- secctx_len,
- secctx);
- security_release_secctx(secctx, secctx_len);
+ ctx.len,
+ ctx.context);
+ security_release_secctx(&ctx);
if (ret_val != 0)
goto list_cb_failure;
@@ -1538,7 +1530,7 @@ int __init netlbl_unlabel_defconf(void)
/* Only the kernel is allowed to call this function and the only time
* it is called is at bootup before the audit subsystem is reporting
	 * messages so don't worry too much about these values. */
- security_current_getsecid_subj(&audit_info.secid);
+ security_current_getlsmprop_subj(&audit_info.prop);
audit_info.loginuid = GLOBAL_ROOT_UID;
audit_info.sessionid = 0;
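
The conversions above all follow the same lsm_context idiom: the buffer and its length live in one struct, success is a non-negative length, and release takes the struct. A reduced sketch of that idiom (the wrapper function is illustrative):

#include <linux/security.h>
#include <linux/audit.h>

static void example_audit_secctx(struct audit_buffer *ab, u32 secid)
{
	struct lsm_context ctx;

	if (security_secid_to_secctx(secid, &ctx) >= 0) {
		audit_log_format(ab, " sec_obj=%s", ctx.context);
		security_release_secctx(&ctx);
	}
}
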
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 3ed4fea2a2de..0d04d23aafe7 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -84,8 +84,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
struct netlbl_audit *audit_info)
{
struct audit_buffer *audit_buf;
- char *secctx;
- u32 secctx_len;
+ struct lsm_context ctx;
if (audit_enabled == AUDIT_OFF)
return NULL;
@@ -98,12 +97,10 @@ struct audit_buffer *netlbl_audit_start_common(int type,
from_kuid(&init_user_ns, audit_info->loginuid),
audit_info->sessionid);
- if (audit_info->secid != 0 &&
- security_secid_to_secctx(audit_info->secid,
- &secctx,
- &secctx_len) == 0) {
- audit_log_format(audit_buf, " subj=%s", secctx);
- security_release_secctx(secctx, secctx_len);
+ if (lsmprop_is_set(&audit_info->prop) &&
+ security_lsmprop_to_secctx(&audit_info->prop, &ctx) > 0) {
+ audit_log_format(audit_buf, " subj=%s", ctx.context);
+ security_release_secctx(&ctx);
}
return audit_buf;
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index d6c5b31eb4eb..d4c434956212 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -32,7 +32,7 @@
*/
static inline void netlbl_netlink_auditinfo(struct netlbl_audit *audit_info)
{
- security_current_getsecid_subj(&audit_info->secid);
+ security_current_getlsmprop_subj(&audit_info->prop);
audit_info->loginuid = audit_get_loginuid(current);
audit_info->sessionid = audit_get_sessionid(current);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ff315351269f..a53ea60d0a78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -59,7 +59,6 @@
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
-#include <linux/genetlink.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>
#include <linux/btf_ids.h>
@@ -73,6 +72,7 @@
#include <trace/events/netlink.h>
#include "af_netlink.h"
+#include "genetlink.h"
struct listeners {
struct rcu_head rcu;
@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-MAX_LINKS"
};
-static int netlink_dump(struct sock *sk);
+static int netlink_dump(struct sock *sk, bool lock_taken);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
@@ -393,15 +393,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
static void netlink_sock_destruct(struct sock *sk)
{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (nlk->cb_running) {
- if (nlk->cb.done)
- nlk->cb.done(&nlk->cb);
- module_put(nlk->cb.module);
- kfree_skb(nlk->cb.skb);
- }
-
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
@@ -414,14 +405,6 @@ static void netlink_sock_destruct(struct sock *sk)
WARN_ON(nlk_sk(sk)->groups);
}
-static void netlink_sock_destruct_work(struct work_struct *work)
-{
- struct netlink_sock *nlk = container_of(work, struct netlink_sock,
- work);
-
- sk_free(&nlk->sk);
-}
-
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
* SMP. Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
@@ -636,8 +619,7 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *cb_mutex, int protocol,
- int kern)
+ int protocol, int kern)
{
struct sock *sk;
struct netlink_sock *nlk;
@@ -651,15 +633,10 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
- if (cb_mutex) {
- nlk->cb_mutex = cb_mutex;
- } else {
- nlk->cb_mutex = &nlk->cb_def_mutex;
- mutex_init(nlk->cb_mutex);
- lockdep_set_class_and_name(nlk->cb_mutex,
+ mutex_init(&nlk->nl_cb_mutex);
+ lockdep_set_class_and_name(&nlk->nl_cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
- }
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
@@ -671,7 +648,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
- struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
@@ -700,7 +676,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
- cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
release = nl_table[protocol].release;
@@ -709,7 +684,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (err < 0)
goto out;
- err = __netlink_create(net, sock, cb_mutex, protocol, kern);
+ err = __netlink_create(net, sock, protocol, kern);
if (err < 0)
goto out_module;
@@ -739,12 +714,6 @@ static void deferred_put_nlk_sk(struct rcu_head *head)
if (!refcount_dec_and_test(&sk->sk_refcnt))
return;
- if (nlk->cb_running && nlk->cb.done) {
- INIT_WORK(&nlk->work, netlink_sock_destruct_work);
- schedule_work(&nlk->work);
- return;
- }
-
sk_free(sk);
}
@@ -796,6 +765,14 @@ static int netlink_release(struct socket *sock)
NETLINK_URELEASE, &n);
}
+ /* Terminate any outstanding dump */
+ if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
+ module_put(nlk->cb.module);
+ kfree_skb(nlk->cb.skb);
+ }
+
module_put(nlk->module);
if (netlink_is_kernel(sk)) {
@@ -818,16 +795,6 @@ static int netlink_release(struct socket *sock)
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- /* Because struct net might disappear soon, do not keep a pointer. */
- if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
- /* Because of deferred_put_nlk_sk and use of work queue,
- * it is possible netns will be freed before this socket.
- */
- sock_net_set(sk, &init_net);
- __netns_tracker_alloc(&init_net, &sk->ns_tracker,
- false, GFP_KERNEL);
- }
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
@@ -1188,11 +1155,16 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
return sock;
}
-struct sock *netlink_getsockbyfilp(struct file *filp)
+struct sock *netlink_getsockbyfd(int fd)
{
- struct inode *inode = file_inode(filp);
+ CLASS(fd, f)(fd);
+ struct inode *inode;
struct sock *sock;
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+
+ inode = file_inode(fd_file(f));
if (!S_ISSOCK(inode->i_mode))
return ERR_PTR(-ENOTSOCK);
@@ -1206,23 +1178,21 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{
+ size_t head_size = SKB_HEAD_ALIGN(size);
struct sk_buff *skb;
void *data;
- if (size <= NLMSG_GOODSIZE || broadcast)
+ if (head_size <= PAGE_SIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
- size = SKB_DATA_ALIGN(size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- data = vmalloc(size);
- if (data == NULL)
+ data = kvmalloc(head_size, GFP_KERNEL);
+ if (!data)
return NULL;
- skb = __build_skb(data, size);
- if (skb == NULL)
- vfree(data);
- else
+ skb = __build_skb(data, head_size);
+ if (!skb)
+ kvfree(data);
+ else if (is_vmalloc_addr(data))
skb->destructor = netlink_skb_destructor;
return skb;
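
Why the destructor is now conditional: kvmalloc() may hand back either slab or vmalloc memory, and only the vmalloc case needs the vfree-based netlink_skb_destructor. A freestanding sketch of the same dispatch, under the assumption that the head was obtained from kvmalloc():

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void example_free_head(void *head)
{
	if (is_vmalloc_addr(head))
		vfree(head);
	else
		kfree(head);
	/* kvfree(head) performs exactly this dispatch in one call */
}
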
@@ -1307,6 +1277,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
int delta;
+ skb_assert_len(skb);
WARN_ON(skb->sk != NULL);
delta = skb->end - skb->tail;
if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
@@ -1779,6 +1750,9 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
netlink_unlock_table();
return err;
}
+ case NETLINK_LISTEN_ALL_NSID:
+ flag = NETLINK_F_LISTEN_ALL_NSID;
+ break;
case NETLINK_CAP_ACK:
flag = NETLINK_F_CAP_ACK;
break;
@@ -1987,7 +1961,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, false);
if (ret) {
WRITE_ONCE(sk->sk_err, -ret);
sk_error_report(sk);
@@ -2019,7 +1993,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
- struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
unsigned int groups;
BUG_ON(!nl_table);
@@ -2030,7 +2003,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
return NULL;
- if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
+ if (__netlink_create(net, sock, unit, 1) < 0)
goto out_sock_release_nosk;
sk = sock->sk;
@@ -2058,7 +2031,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
rcu_assign_pointer(nl_table[unit].listeners, listeners);
- nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
if (cfg) {
nl_table[unit].bind = cfg->bind;
@@ -2145,8 +2117,9 @@ void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
struct sock *sk;
struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+ struct hlist_node *tmp;
- sk_for_each_bound(sk, &tbl->mc_list)
+ sk_for_each_bound_safe(sk, tmp, &tbl->mc_list)
netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
@@ -2168,6 +2141,70 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int fla
}
EXPORT_SYMBOL(__nlmsg_put);
+static size_t
+netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
+ const struct netlink_ext_ack *extack)
+{
+ size_t tlvlen;
+
+ if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
+ return 0;
+
+ tlvlen = 0;
+ if (extack->_msg)
+ tlvlen += nla_total_size(strlen(extack->_msg) + 1);
+ if (extack->cookie_len)
+ tlvlen += nla_total_size(extack->cookie_len);
+
+ /* Following attributes are only reported as error (not warning) */
+ if (!err)
+ return tlvlen;
+
+ if (extack->bad_attr)
+ tlvlen += nla_total_size(sizeof(u32));
+ if (extack->policy)
+ tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
+ if (extack->miss_type)
+ tlvlen += nla_total_size(sizeof(u32));
+ if (extack->miss_nest)
+ tlvlen += nla_total_size(sizeof(u32));
+
+ return tlvlen;
+}
+
+static bool nlmsg_check_in_payload(const struct nlmsghdr *nlh, const void *addr)
+{
+ return !WARN_ON(addr < nlmsg_data(nlh) ||
+ addr - (const void *) nlh >= nlh->nlmsg_len);
+}
+
+static void
+netlink_ack_tlv_fill(struct sk_buff *skb, const struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack)
+{
+ if (extack->_msg)
+ WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg));
+ if (extack->cookie_len)
+ WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
+ extack->cookie_len, extack->cookie));
+
+ if (!err)
+ return;
+
+ if (extack->bad_attr && nlmsg_check_in_payload(nlh, extack->bad_attr))
+ WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
+ (u8 *)extack->bad_attr - (const u8 *)nlh));
+ if (extack->policy)
+ netlink_policy_dump_write_attr(skb, extack->policy,
+ NLMSGERR_ATTR_POLICY);
+ if (extack->miss_type)
+ WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
+ extack->miss_type));
+ if (extack->miss_nest && nlmsg_check_in_payload(nlh, extack->miss_nest))
+ WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
+ (u8 *)extack->miss_nest - (const u8 *)nlh));
+}
+
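
These helpers only serialize what request handlers have put into the extended ACK. A minimal producer-side sketch (the handler and message text are hypothetical):

#include <linux/errno.h>
#include <net/netlink.h>

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "example: request type not supported");
	return -EOPNOTSUPP;
}
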
/*
* It looks a bit ugly.
* It would be better to create kernel thread.
@@ -2178,6 +2215,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
struct netlink_ext_ack *extack)
{
struct nlmsghdr *nlh;
+ size_t extack_len;
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
NLM_F_MULTI | cb->answer_flags);
@@ -2187,16 +2225,20 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
nl_dump_check_consistent(cb, nlh);
memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
- if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
+ extack_len = netlink_ack_tlv_len(nlk, nlk->dump_done_errno, extack);
+ if (extack_len) {
nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
- if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
+ if (skb_tailroom(skb) >= extack_len) {
+ netlink_ack_tlv_fill(skb, cb->nlh,
+ nlk->dump_done_errno, extack);
nlmsg_end(skb, nlh);
+ }
}
return 0;
}
-static int netlink_dump(struct sock *sk)
+static int netlink_dump(struct sock *sk, bool lock_taken)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
@@ -2208,7 +2250,8 @@ static int netlink_dump(struct sock *sk)
int alloc_min_size;
int alloc_size;
- mutex_lock(nlk->cb_mutex);
+ if (!lock_taken)
+ mutex_lock(&nlk->nl_cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
@@ -2218,7 +2261,7 @@ static int netlink_dump(struct sock *sk)
goto errout_skb;
/* NLMSG_GOODSIZE is small to avoid high order allocations being
- * required, but it makes sense to _attempt_ a 16K bytes allocation
+ * required, but it makes sense to _attempt_ a 32KiB allocation
* to reduce number of system calls on dump operations, if user
* ever provided a big enough buffer.
*/
@@ -2240,7 +2283,7 @@ static int netlink_dump(struct sock *sk)
goto errout_skb;
/* Trim skb to allocated size. User is expected to provide buffer as
- * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
+ * large as max(min_dump_alloc, 32KiB (max_recvmsg_len capped at
* netlink_recvmsg())). dump will pack as many smaller messages as
* could fit within the allocated skb. skb is typically allocated
* with larger space than required (could be as much as near 2x the
@@ -2261,13 +2304,24 @@ static int netlink_dump(struct sock *sk)
if (nlk->dump_done_errno > 0) {
cb->extack = &extack;
+
nlk->dump_done_errno = cb->dump(skb, cb);
+
+ /* EMSGSIZE plus something already in the skb means
+ * that there's more to dump but current skb has filled up.
+ * If the callback really wants to return EMSGSIZE to user space
+ * it needs to do so again, on the next cb->dump() call,
+ * without putting data in the skb.
+ */
+ if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
+ nlk->dump_done_errno = skb->len;
+
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
@@ -2301,13 +2355,13 @@ static int netlink_dump(struct sock *sk)
WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
module_put(module);
consume_skb(skb);
return 0;
errout_skb:
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
kfree_skb(skb);
return err;
}
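
A dump callback that follows the contract spelled out in the new comment: pack messages until the skb is full, keep a cursor in cb->args[], return a positive value while there is more and 0 when finished. The message type and item count below are hypothetical.

#include <net/netlink.h>

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;

	for (idx = cb->args[0]; idx < 1024; idx++) {
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLMSG_MIN_TYPE,
				0, NLM_F_MULTI);
		if (!nlh)
			break;		/* skb full, resume at idx next call */
		nlmsg_end(skb, nlh);
	}
	cb->args[0] = idx;

	/* positive: more to come; 0: done, netlink_dump() sends NLMSG_DONE */
	return idx < 1024 ? skb->len : 0;
}
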
@@ -2330,7 +2384,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
}
nlk = nlk_sk(sk);
- mutex_lock(nlk->cb_mutex);
+ mutex_lock(&nlk->nl_cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
@@ -2350,6 +2404,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
+ cb->flags = control->flags;
cb->skb = skb;
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
@@ -2365,9 +2420,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
- mutex_unlock(nlk->cb_mutex);
-
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, true);
sock_put(sk);
@@ -2383,76 +2436,13 @@ error_put:
module_put(control->module);
error_unlock:
sock_put(sk);
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
error_free:
kfree_skb(skb);
return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
-static size_t
-netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
- const struct netlink_ext_ack *extack)
-{
- size_t tlvlen;
-
- if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
- return 0;
-
- tlvlen = 0;
- if (extack->_msg)
- tlvlen += nla_total_size(strlen(extack->_msg) + 1);
- if (extack->cookie_len)
- tlvlen += nla_total_size(extack->cookie_len);
-
- /* Following attributes are only reported as error (not warning) */
- if (!err)
- return tlvlen;
-
- if (extack->bad_attr)
- tlvlen += nla_total_size(sizeof(u32));
- if (extack->policy)
- tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
- if (extack->miss_type)
- tlvlen += nla_total_size(sizeof(u32));
- if (extack->miss_nest)
- tlvlen += nla_total_size(sizeof(u32));
-
- return tlvlen;
-}
-
-static void
-netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
- struct nlmsghdr *nlh, int err,
- const struct netlink_ext_ack *extack)
-{
- if (extack->_msg)
- WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg));
- if (extack->cookie_len)
- WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
- extack->cookie_len, extack->cookie));
-
- if (!err)
- return;
-
- if (extack->bad_attr &&
- !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
- (u8 *)extack->bad_attr >= in_skb->data + in_skb->len))
- WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
- (u8 *)extack->bad_attr - (u8 *)nlh));
- if (extack->policy)
- netlink_policy_dump_write_attr(skb, extack->policy,
- NLMSGERR_ATTR_POLICY);
- if (extack->miss_type)
- WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
- extack->miss_type));
- if (extack->miss_nest &&
- !WARN_ON((u8 *)extack->miss_nest < in_skb->data ||
- (u8 *)extack->miss_nest > in_skb->data + in_skb->len))
- WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
- (u8 *)extack->miss_nest - (u8 *)nlh));
-}
-
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack)
{
@@ -2498,7 +2488,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
}
if (tlvlen)
- netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
+ netlink_ack_tlv_fill(skb, nlh, err, extack);
nlmsg_end(skb, rep);
@@ -2927,12 +2917,8 @@ static int __init netlink_proto_init(void)
for (i = 0; i < MAX_LINKS; i++) {
if (rhashtable_init(&nl_table[i].hash,
- &netlink_rhashtable_params) < 0) {
- while (--i > 0)
- rhashtable_destroy(&nl_table[i].hash);
- kfree(nl_table);
+ &netlink_rhashtable_params) < 0)
goto panic;
- }
}
netlink_add_usersock_entry();
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 2145979b9986..778a3809361f 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -4,7 +4,6 @@
#include <linux/rhashtable.h>
#include <linux/atomic.h>
-#include <linux/workqueue.h>
#include <net/sock.h>
/* flags */
@@ -39,8 +38,8 @@ struct netlink_sock {
bool cb_running;
int dump_done_errno;
struct netlink_callback cb;
- struct mutex *cb_mutex;
- struct mutex cb_def_mutex;
+ struct mutex nl_cb_mutex;
+
void (*netlink_rcv)(struct sk_buff *skb);
int (*netlink_bind)(struct net *net, int group);
void (*netlink_unbind)(struct net *net, int group);
@@ -50,7 +49,6 @@ struct netlink_sock {
struct rhash_head node;
struct rcu_head rcu;
- struct work_struct work;
};
static inline struct netlink_sock *nlk_sk(struct sock *sk)
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 1eeff9422856..61981e01fd6f 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -207,7 +207,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
- return err < 0 ? err : skb->len;
+ return err <= 0 ? err : skb->len;
}
static int netlink_diag_dump_done(struct netlink_callback *cb)
@@ -241,6 +241,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler netlink_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_NETLINK,
.dump = netlink_diag_handler_dump,
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8c7af02f8454..104732d34543 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -22,6 +22,8 @@
#include <net/sock.h>
#include <net/genetlink.h>
+#include "genetlink.h"
+
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);
@@ -995,7 +997,7 @@ static int genl_start(struct netlink_callback *cb)
info->info.attrs = attrs;
genl_info_net_set(&info->info, sock_net(cb->skb->sk));
info->info.extack = cb->extack;
- memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));
+ memset(&info->info.ctx, 0, sizeof(info->info.ctx));
cb->data = info;
if (ops->start) {
@@ -1102,7 +1104,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
info.attrs = attrbuf;
info.extack = extack;
genl_info_net_set(&info, net);
- memset(&info.user_ptr, 0, sizeof(info.user_ptr));
+ memset(&info.ctx, 0, sizeof(info.ctx));
if (ops->pre_doit) {
err = ops->pre_doit(ops, skb, &info);
@@ -1232,7 +1234,7 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
- return -1;
+ return -EMSGSIZE;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
@@ -1355,6 +1357,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int fams_to_skip = cb->args[0];
unsigned int id;
+ int err = 0;
idr_for_each_entry(&genl_fam_idr, rt, id) {
if (!rt->netnsok && !net_eq(net, &init_net))
@@ -1363,16 +1366,17 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (n++ < fams_to_skip)
continue;
- if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0) {
+ err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, CTRL_CMD_NEWFAMILY);
+ if (err) {
n--;
break;
}
}
cb->args[0] = n;
- return skb->len;
+ return err;
}
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
@@ -1497,15 +1501,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
if (IS_ERR(msg))
return PTR_ERR(msg);
- if (!family->netnsok) {
+ if (!family->netnsok)
genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
0, GFP_KERNEL);
- } else {
- rcu_read_lock();
- genlmsg_multicast_allns(&genl_ctrl, msg, 0,
- 0, GFP_ATOMIC);
- rcu_read_unlock();
- }
+ else
+ genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
return 0;
}
@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (family->bind)
+ family->bind(i);
+
break;
}
@@ -1843,12 +1846,39 @@ static int genl_bind(struct net *net, int group)
return ret;
}
+static void genl_unbind(struct net *net, int group)
+{
+ const struct genl_family *family;
+ unsigned int id;
+
+ down_read(&cb_lock);
+
+ idr_for_each_entry(&genl_fam_idr, family, id) {
+ int i;
+
+ if (family->n_mcgrps == 0)
+ continue;
+
+ i = group - family->mcgrp_offset;
+ if (i < 0 || i >= family->n_mcgrps)
+ continue;
+
+ if (family->unbind)
+ family->unbind(i);
+
+ break;
+ }
+
+ up_read(&cb_lock);
+}
+
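
Consumer-side sketch of the per-family bind()/unbind() hooks wired up here; the index handed to the callbacks is relative to the family's mcgrp_offset, matching how genl_bind()/genl_unbind() compute it above. Family name, group, and callbacks are hypothetical, and the callback signatures are assumed from the call sites in this patch.

#include <linux/module.h>
#include <net/genetlink.h>

static void example_bind(int mcgrp)
{
	pr_debug("mcast group %d gained a subscriber\n", mcgrp);
}

static void example_unbind(int mcgrp)
{
	pr_debug("mcast group %d lost a subscriber\n", mcgrp);
}

static const struct genl_multicast_group example_mcgrps[] = {
	{ .name = "events" },
};

static struct genl_family example_family __ro_after_init = {
	.name		= "example",
	.version	= 1,
	.module		= THIS_MODULE,
	.mcgrps		= example_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(example_mcgrps),
	.bind		= example_bind,
	.unbind		= example_unbind,
};
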
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = genl_bind,
+ .unbind = genl_unbind,
.release = genl_release,
};
@@ -1895,23 +1925,23 @@ problem:
core_initcall(genl_init);
-static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
- gfp_t flags)
+static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
bool delivered = false;
int err;
+ rcu_read_lock();
for_each_net_rcu(net) {
if (prev) {
- tmp = skb_clone(skb, flags);
+ tmp = skb_clone(skb, GFP_ATOMIC);
if (!tmp) {
err = -ENOMEM;
goto error;
}
err = nlmsg_multicast(prev->genl_sock, tmp,
- portid, group, flags);
+ portid, group, GFP_ATOMIC);
if (!err)
delivered = true;
else if (err != -ESRCH)
@@ -1920,27 +1950,31 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
prev = net;
}
+ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
+
+ rcu_read_unlock();
- err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
return err;
return delivered ? 0 : -ESRCH;
error:
+ rcu_read_unlock();
+
kfree_skb(skb);
return err;
}
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
- unsigned int group, gfp_t flags)
+ unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
- return genlmsg_mcast(skb, portid, group, flags);
+ return genlmsg_mcast(skb, portid, group);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
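
Caller-visible effect of the new genlmsg_multicast_allns() signature: the gfp argument is gone because the namespace walk now runs entirely under rcu_read_lock() with GFP_ATOMIC clones. A hypothetical caller before and after:

#include <net/genetlink.h>

static int example_notify_all_namespaces(const struct genl_family *family,
					 struct sk_buff *msg, unsigned int group)
{
	/* before: genlmsg_multicast_allns(family, msg, 0, group, GFP_KERNEL); */
	return genlmsg_multicast_allns(family, msg, 0, group);
}
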
diff --git a/net/netlink/genetlink.h b/net/netlink/genetlink.h
new file mode 100644
index 000000000000..89bd9d2631c3
--- /dev/null
+++ b/net/netlink/genetlink.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GENETLINK_H
+#define __NET_GENETLINK_H
+
+#include <linux/wait.h>
+
+/* for synchronisation between af_netlink and genetlink */
+extern atomic_t genl_sk_destructing_cnt;
+extern wait_queue_head_t genl_sk_destructing_waitq;
+
+#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 104a80b75477..6ee148f0e6d0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -772,8 +772,8 @@ out_release:
return err;
}
-static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int nr_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sk_buff *skb;
struct sock *newsk;
@@ -805,7 +805,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
if (skb)
break;
- if (flags & O_NONBLOCK) {
+ if (arg->flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70480869ad1c..b94cb2ffbaf8 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -189,7 +189,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
}
nr_node->callsign = *nr;
- strcpy(nr_node->mnemonic, mnemonic);
+ strscpy(nr_node->mnemonic, mnemonic);
nr_node->which = 0;
nr_node->count = 1;
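
The two-argument strscpy() used above infers the destination size from the array type and guarantees NUL termination, which is why the explicit length argument disappears. A minimal sketch with a hypothetical fixed-size field:

#include <linux/string.h>

struct example_node {
	char mnemonic[7];
};

static void example_set_mnemonic(struct example_node *n, const char *name)
{
	/* expands to strscpy(n->mnemonic, name, sizeof(n->mnemonic)) */
	strscpy(n->mnemonic, name);
}
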
@@ -214,7 +214,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_node_lock(nr_node);
if (quality != 0)
- strcpy(nr_node->mnemonic, mnemonic);
+ strscpy(nr_node->mnemonic, mnemonic);
for (found = 0, i = 0; i < nr_node->count; i++) {
if (nr_node->routes[i].neighbour == nr_neigh) {
@@ -285,22 +285,14 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
return 0;
}
-static inline void __nr_remove_node(struct nr_node *nr_node)
+static void nr_remove_node_locked(struct nr_node *nr_node)
{
+ lockdep_assert_held(&nr_node_list_lock);
+
hlist_del_init(&nr_node->node_node);
nr_node_put(nr_node);
}
-#define nr_remove_node_locked(__node) \
- __nr_remove_node(__node)
-
-static void nr_remove_node(struct nr_node *nr_node)
-{
- spin_lock_bh(&nr_node_list_lock);
- __nr_remove_node(nr_node);
- spin_unlock_bh(&nr_node_list_lock);
-}
-
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
hlist_del_init(&nr_neigh->neigh_node);
@@ -339,6 +331,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
return -EINVAL;
}
+ spin_lock_bh(&nr_node_list_lock);
nr_node_lock(nr_node);
for (i = 0; i < nr_node->count; i++) {
if (nr_node->routes[i].neighbour == nr_neigh) {
@@ -352,7 +345,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
nr_node->count--;
if (nr_node->count == 0) {
- nr_remove_node(nr_node);
+ nr_remove_node_locked(nr_node);
} else {
switch (i) {
case 0:
@@ -367,12 +360,14 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
nr_node_put(nr_node);
}
nr_node_unlock(nr_node);
+ spin_unlock_bh(&nr_node_list_lock);
return 0;
}
}
nr_neigh_put(nr_neigh);
nr_node_unlock(nr_node);
+ spin_unlock_bh(&nr_node_list_lock);
nr_node_put(nr_node);
return -EINVAL;
@@ -759,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
int ret;
struct sk_buff *skbn;
+ /*
+ * Reject malformed packets early. Check that it contains at least 2
+ * addresses and 1 byte more for Time-To-Live
+ */
+ if (skb->len < 2 * sizeof(ax25_address) + 1)
+ return 0;
nr_src = (ax25_address *)(skb->data + 0);
nr_dest = (ax25_address *)(skb->data + 7);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 4e7c968cde2d..5e3ca068f04e 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
- sock_hold(sk);
+ if (sk->sk_state == TCP_LISTEN)
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 79fb2d3f477b..7dc0fa628f2e 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -140,7 +140,6 @@ static struct ctl_table nr_table[] = {
.extra1 = &min_reset,
.extra2 = &max_reset
},
- { }
};
int __init nr_register_sysctl(void)
diff --git a/net/nfc/core.c b/net/nfc/core.c
index eb2c0959e5b6..e58dc6405054 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1015,7 +1015,7 @@ static void nfc_check_pres_timeout(struct timer_list *t)
schedule_work(&dev->check_pres_work);
}
-struct class nfc_class = {
+const struct class nfc_class = {
.name = "nfc",
.dev_release = nfc_release,
};
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index 2140f6724644..ba91284f4086 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -30,15 +30,19 @@ exit:
return r;
}
+static void nfc_llc_del_engine(struct nfc_llc_engine *llc_engine)
+{
+ list_del(&llc_engine->entry);
+ kfree_const(llc_engine->name);
+ kfree(llc_engine);
+}
+
void nfc_llc_exit(void)
{
struct nfc_llc_engine *llc_engine, *n;
- list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
- }
+ list_for_each_entry_safe(llc_engine, n, &llc_engines, entry)
+ nfc_llc_del_engine(llc_engine);
}
int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
@@ -49,7 +53,7 @@ int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
if (llc_engine == NULL)
return -ENOMEM;
- llc_engine->name = kstrdup(name, GFP_KERNEL);
+ llc_engine->name = kstrdup_const(name, GFP_KERNEL);
if (llc_engine->name == NULL) {
kfree(llc_engine);
return -ENOMEM;
@@ -82,9 +86,7 @@ void nfc_llc_unregister(const char *name)
if (llc_engine == NULL)
return;
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
+ nfc_llc_del_engine(llc_engine);
}
struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 819157bbb5a2..57a2f97004e1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt > LLCP_MAX_RW) {
err = -EINVAL;
@@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt > LLCP_MAX_MIUX) {
err = -EINVAL;
@@ -447,7 +447,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
}
static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *new_sk;
@@ -463,7 +463,7 @@ static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
goto error;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
/* Wait for an incoming connection. */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
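
The setsockopt hunks above switch to copy_safe_from_sockptr(), which checks optlen against the destination size before copying, so the open-coded -EFAULT branch disappears. A reduced sketch of that pattern (the option bound and helper name are illustrative):

#include <linux/errno.h>
#include <linux/sockptr.h>
#include <linux/types.h>

static int example_set_u32_opt(sockptr_t optval, unsigned int optlen,
			       u32 max, u32 *out)
{
	u32 opt;
	int err;

	err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)
		return err;
	if (opt > max)
		return -EINVAL;
	*out = opt;
	return 0;
}
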
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index cdad47b140fa..1ec5955fe469 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -757,6 +757,14 @@ int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
}
EXPORT_SYMBOL(nci_core_conn_close);
+static void nci_set_target_ats(struct nfc_target *target, struct nci_dev *ndev)
+{
+ if (ndev->target_ats_len > 0) {
+ target->ats_len = ndev->target_ats_len;
+ memcpy(target->ats, ndev->target_ats, target->ats_len);
+ }
+}
+
static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -939,8 +947,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
}
- if (!rc)
+ if (!rc) {
ndev->target_active_prot = protocol;
+ if (protocol == NFC_PROTO_ISO14443)
+ nci_set_target_ats(target, ndev);
+ }
return rc;
}
@@ -1463,6 +1474,19 @@ int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
ndev->ops->n_core_ops);
}
+static bool nci_valid_size(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+ !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
+ return true;
+}
+
/* ---- NCI TX Data worker thread ---- */
static void nci_tx_work(struct work_struct *work)
@@ -1516,6 +1540,11 @@ static void nci_rx_work(struct work_struct *work)
nfc_send_to_raw_sock(ndev->nfc_dev, skb,
RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+ if (!nci_valid_size(skb)) {
+ kfree_skb(skb);
+ continue;
+ }
+
/* Process frame */
switch (nci_mt(skb->data)) {
case NCI_MT_RSP_PKT:
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index de175318a3a0..082ab66f120b 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -542,6 +542,8 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
pr_debug("pipe created=%d\n", pipe);
+ if (pipe >= NCI_HCI_MAX_PIPES)
+ pipe = NCI_HCI_INVALID_PIPE;
return pipe;
}
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 994a0a1efb58..a818eff27e6b 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -402,7 +402,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
- nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
+ nfca_poll->rats_res_len = min_t(__u8, *data++, NFC_ATS_MAXSIZE);
pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
@@ -531,6 +531,28 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
return NCI_STATUS_OK;
}
+static int nci_store_ats_nfc_iso_dep(struct nci_dev *ndev,
+ const struct nci_rf_intf_activated_ntf *ntf)
+{
+ ndev->target_ats_len = 0;
+
+ if (ntf->activation_params_len <= 0)
+ return NCI_STATUS_OK;
+
+ if (ntf->activation_params.nfca_poll_iso_dep.rats_res_len > NFC_ATS_MAXSIZE) {
+ pr_debug("ATS too long\n");
+ return NCI_STATUS_RF_PROTOCOL_ERROR;
+ }
+
+ if (ntf->activation_params.nfca_poll_iso_dep.rats_res_len > 0) {
+ ndev->target_ats_len = ntf->activation_params.nfca_poll_iso_dep.rats_res_len;
+ memcpy(ndev->target_ats, ntf->activation_params.nfca_poll_iso_dep.rats_res,
+ ndev->target_ats_len);
+ }
+
+ return NCI_STATUS_OK;
+}
+
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
const struct sk_buff *skb)
{
@@ -660,6 +682,14 @@ exit:
if (err != NCI_STATUS_OK)
pr_err("unable to store general bytes\n");
}
+
+ /* store ATS to be reported later in nci_activate_target */
+ if (ntf.rf_interface == NCI_RF_INTERFACE_ISO_DEP &&
+ ntf.activation_rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) {
+ err = nci_store_ats_nfc_iso_dep(ndev, &ntf);
+ if (err != NCI_STATUS_OK)
+ pr_err("unable to store ATS\n");
+ }
}
if (!(ntf.activation_rf_tech_and_mode & NCI_RF_TECH_MODE_LISTEN_MASK)) {
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index aa1dbf654c3e..6a40b8d0350d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -96,6 +96,11 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
goto nla_put_failure;
}
+ if (target->ats_len > 0 &&
+ nla_put(msg, NFC_ATTR_TARGET_ATS, target->ats_len,
+ target->ats))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
@@ -969,8 +974,7 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
int rc;
u32 idx;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_TARGET_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -1018,8 +1022,7 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg = NULL;
u32 idx;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index f4a38bd6a7e0..bfb7758063f3 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int outer_hlen, mac_len, nsh_len;
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
- unsigned int nsh_len, mac_len;
- __be16 proto;
+ __be16 outer_proto, proto;
skb_reset_network_header(skb);
+ outer_proto = skb->protocol;
+ outer_hlen = skb_mac_header_len(skb);
mac_len = skb->mac_len;
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
}
for (skb = segs; skb; skb = skb->next) {
- skb->protocol = htons(ETH_P_NSH);
- __skb_push(skb, nsh_len);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
+ skb->protocol = outer_proto;
+ __skb_push(skb, nsh_len + outer_hlen);
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, outer_hlen);
skb->mac_len = mac_len;
}
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 29a7081858cd..2535f3f9f462 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -10,6 +10,7 @@ config OPENVSWITCH
(NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
(!NF_NAT || NF_NAT) && \
(!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT)))
+ depends on PSAMPLE || !PSAMPLE
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 6fcd7e2ca81f..704c858cf209 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -24,6 +24,11 @@
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
+
+#if IS_ENABLED(CONFIG_PSAMPLE)
+#include <net/psample.h>
+#endif
+
#include <net/sctp/checksum.h>
#include "datapath.h"
@@ -232,14 +237,18 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
+ int err;
+
if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
} else {
key->eth.vlan.tci = vlan->vlan_tci;
key->eth.vlan.tpid = vlan->vlan_tpid;
}
- return skb_vlan_push(skb, vlan->vlan_tpid,
- ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
+ err = skb_vlan_push(skb, vlan->vlan_tpid,
+ ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
+ skb_reset_mac_len(skb);
+ return err;
}
/* 'src' is already properly masked. */
@@ -925,7 +934,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
- if (likely(vport && netif_carrier_ok(vport->dev))) {
+ if (likely(vport &&
+ netif_running(vport->dev) &&
+ netif_carrier_ok(vport->dev))) {
u16 mru = OVS_CB(skb)->mru;
u32 cutlen = OVS_CB(skb)->cutlen;
@@ -936,6 +947,12 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
pskb_trim(skb, ovs_mac_header_len(key));
}
+ /* Need to set the pkt_type to involve the routing layer. The
+ * packet movement through the OVS datapath doesn't generally
+ * use routing, but this is needed for tunnel cases.
+ */
+ skb->pkt_type = PACKET_OUTGOING;
+
if (likely(!mru ||
(skb->len <= mru + vport->dev->hard_header_len))) {
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
@@ -1037,12 +1054,15 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
struct nlattr *sample_arg;
int rem = nla_len(attr);
const struct sample_arg *arg;
+ u32 init_probability;
bool clone_flow_key;
+ int err;
/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
sample_arg = nla_data(attr);
arg = nla_data(sample_arg);
actions = nla_next(sample_arg, &rem);
+ init_probability = OVS_CB(skb)->probability;
if ((arg->probability != U32_MAX) &&
(!arg->probability || get_random_u32() > arg->probability)) {
@@ -1051,9 +1071,16 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
return 0;
}
+ OVS_CB(skb)->probability = arg->probability;
+
clone_flow_key = !arg->exec;
- return clone_execute(dp, skb, key, 0, actions, rem, last,
- clone_flow_key);
+ err = clone_execute(dp, skb, key, 0, actions, rem, last,
+ clone_flow_key);
+
+ if (!last)
+ OVS_CB(skb)->probability = init_probability;
+
+ return err;
}
/* When 'last' is true, clone() should always consume the 'skb'.
@@ -1293,6 +1320,44 @@ static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
return 0;
}
+#if IS_ENABLED(CONFIG_PSAMPLE)
+static void execute_psample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ struct psample_group psample_group = {};
+ struct psample_metadata md = {};
+ const struct nlattr *a;
+ u32 rate;
+ int rem;
+
+ nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
+ switch (nla_type(a)) {
+ case OVS_PSAMPLE_ATTR_GROUP:
+ psample_group.group_num = nla_get_u32(a);
+ break;
+
+ case OVS_PSAMPLE_ATTR_COOKIE:
+ md.user_cookie = nla_data(a);
+ md.user_cookie_len = nla_len(a);
+ break;
+ }
+ }
+
+ psample_group.net = ovs_dp_get_net(dp);
+ md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
+ md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
+ md.rate_as_probability = 1;
+
+ rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;
+
+ psample_sample_packet(&psample_group, skb, rate, &md);
+}
+#else
+static void execute_psample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{}
+#endif
+
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
@@ -1496,6 +1561,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
ovs_kfree_skb_reason(skb, reason);
return 0;
}
+
+ case OVS_ACTION_ATTR_PSAMPLE:
+ execute_psample(dp, skb, a);
+ OVS_CB(skb)->cutlen = 0;
+ if (nla_is_last(a, rem)) {
+ consume_skb(skb);
+ return 0;
+ }
+ break;
}
if (unlikely(err)) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 3019a4406ca4..e573e9221302 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -168,8 +168,13 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
static void ovs_ct_get_labels(const struct nf_conn *ct,
struct ovs_key_ct_labels *labels)
{
- struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+ struct nf_conn_labels *cl = NULL;
+ if (ct) {
+ if (ct->master && !nf_ct_is_confirmed(ct))
+ ct = ct->master;
+ cl = nf_ct_labels_find(ct);
+ }
if (cl)
memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
else
@@ -674,6 +679,8 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
action |= BIT(NF_NAT_MANIP_DST);
err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);
+ if (err != NF_ACCEPT)
+ return err;
if (action & BIT(NF_NAT_MANIP_SRC))
ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC);
@@ -692,6 +699,22 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
}
#endif
+static int verdict_to_errno(unsigned int verdict)
+{
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ return 0;
+ case NF_DROP:
+ return -EINVAL;
+ case NF_STOLEN:
+ return -EINPROGRESS;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
* not done already. Update key with new CT state after passing the packet
* through conntrack.
@@ -730,7 +753,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
- return -ENOENT;
+ return verdict_to_errno(err);
/* Clear CT state NAT flags to mark that we have not yet done
* NAT after the nf_conntrack_in() call. We can actually clear
@@ -757,9 +780,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
* the key->ct_state.
*/
if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
- (nf_ct_is_confirmed(ct) || info->commit) &&
- ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
- return -EINVAL;
+ (nf_ct_is_confirmed(ct) || info->commit)) {
+ int err = ovs_ct_nat(net, key, info, skb, ct, ctinfo);
+
+ err = verdict_to_errno(err);
+ if (err)
+ return err;
}
/* Userspace may decide to perform a ct lookup without a helper
@@ -790,9 +816,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
* - When committing an unconfirmed connection.
*/
if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
- info->commit) &&
- nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) {
- return -EINVAL;
+ info->commit)) {
+ int err = nf_ct_helper(skb, ct, ctinfo, info->family);
+
+ err = verdict_to_errno(err);
+ if (err)
+ return err;
}
if (nf_ct_protonum(ct) == IPPROTO_TCP &&
@@ -996,10 +1025,9 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
/* This will take care of sending queued events even if the connection
* is already confirmed.
*/
- if (nf_conntrack_confirm(skb) != NF_ACCEPT)
- return -EINVAL;
+ err = nf_conntrack_confirm(skb);
- return 0;
+ return verdict_to_errno(err);
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
@@ -1034,6 +1062,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
else
err = ovs_ct_lookup(net, key, info, skb);
+ /* conntrack core returned NF_STOLEN */
+ if (err == -EINPROGRESS)
+ return err;
+
skb_push_rcsum(skb, nh_ofs);
if (err)
ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK);
@@ -1380,8 +1412,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (ct_info.timeout[0]) {
if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
ct_info.timeout))
- pr_info_ratelimited("Failed to associated timeout "
- "policy `%s'\n", ct_info.timeout);
+ OVS_NLERR(log,
+				  "Failed to associate timeout policy '%s'",
+ ct_info.timeout);
else
ct_info.nf_ct_timeout = rcu_dereference(
nf_ct_timeout_find(ct_info.ct)->timeout);
@@ -1570,8 +1603,7 @@ static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);
- ovs_net->ct_limit_info->data =
- nf_conncount_init(net, NFPROTO_INET, sizeof(u32));
+ ovs_net->ct_limit_info->data = nf_conncount_init(net, sizeof(u32));
if (IS_ERR(ovs_net->ct_limit_info->data)) {
err = PTR_ERR(ovs_net->ct_limit_info->data);
@@ -1588,13 +1620,13 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
int i;
- nf_conncount_destroy(net, NFPROTO_INET, info->data);
+ nf_conncount_destroy(net, info->data);
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
struct hlist_head *head = &info->limits[i];
struct ovs_ct_limit *ct_limit;
+ struct hlist_node *next;
- hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
- lockdep_ovsl_is_held())
+ hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
kfree_rcu(ct_limit, rcu);
}
kfree(info->limits);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 11c69415c605..5d548eda742d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
-#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
@@ -1829,8 +1828,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
parms.dp = dp;
parms.port_no = OVSP_LOCAL;
parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
- parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
- ? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
+ parms.desired_ifindex = nla_get_s32_default(a[OVS_DP_ATTR_IFINDEX], 0);
/* So far only local changes have been made, now need the lock. */
ovs_lock();
@@ -2103,6 +2101,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
{
struct ovs_header *ovs_header;
struct ovs_vport_stats vport_stats;
+ struct net *net_vport;
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
@@ -2119,12 +2118,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
goto nla_put_failure;
- if (!net_eq(net, dev_net(vport->dev))) {
- int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
+ rcu_read_lock();
+ net_vport = dev_net_rcu(vport->dev);
+ if (!net_eq(net, net_vport)) {
+ int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);
if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
- goto nla_put_failure;
+ goto nla_put_failure_unlock;
}
+ rcu_read_unlock();
ovs_vport_get_stats(vport, &vport_stats);
if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
@@ -2145,6 +2147,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
genlmsg_end(skb, ovs_header);
return 0;
+nla_put_failure_unlock:
+ rcu_read_unlock();
nla_put_failure:
err = -EMSGSIZE;
error:
@@ -2267,8 +2271,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (a[OVS_VPORT_ATTR_IFINDEX] && parms.type != OVS_VPORT_TYPE_INTERNAL)
return -EOPNOTSUPP;
- port_no = a[OVS_VPORT_ATTR_PORT_NO]
- ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
+ port_no = nla_get_u32_default(a[OVS_VPORT_ATTR_PORT_NO], 0);
if (port_no >= DP_MAX_PORTS)
return -EFBIG;
@@ -2305,8 +2308,8 @@ restart:
parms.dp = dp;
parms.port_no = port_no;
parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
- parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
- ? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
+ parms.desired_ifindex = nla_get_s32_default(a[OVS_VPORT_ATTR_IFINDEX],
+ 0);
vport = new_vport(&parms);
err = PTR_ERR(vport);
@@ -2707,7 +2710,7 @@ static struct pernet_operations ovs_net_ops = {
};
static const char * const ovs_drop_reasons[] = {
-#define S(x) (#x),
+#define S(x) [(x) & ~SKB_DROP_REASON_SUBSYS_MASK] = (#x),
OVS_DROP_REASONS(S)
#undef S
};
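
The reworked S() macro emits designated initializers, so every reason string lands at its subsystem-relative index even if the OVS drop-reason enum is sparse. A reduced standalone sketch of the same indexing trick (mask and values are made up):

#define EX_SUBSYS_MASK	0xffff0000
#define EX_DROP_FIRST	0x00050000	/* subsystem 5, reason 0 */
#define EX_DROP_LAST	0x00050007	/* subsystem 5, reason 7 */

static const char * const ex_drop_reasons[] = {
	[EX_DROP_FIRST & ~EX_SUBSYS_MASK] = "EX_DROP_FIRST",	/* index 0 */
	[EX_DROP_LAST  & ~EX_SUBSYS_MASK] = "EX_DROP_LAST",	/* index 7 */
};
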
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 0cd29971a907..9ca6231ea647 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -115,12 +115,15 @@ struct datapath {
* fragmented.
* @acts_origlen: The netlink size of the flow actions applied to this skb.
* @cutlen: The number of bytes from the packet end to be removed.
+ * @probability: The sampling probability that was applied to this skb; 0 means
+ * no sampling has occurred; U32_MAX means 100% probability.
*/
struct ovs_skb_cb {
struct vport *input_vport;
u16 mru;
u16 acts_origlen;
u32 cutlen;
+ u32 probability;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 33b21a0c0548..8a848ce72e29 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -561,7 +561,6 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
*/
key->tp.src = htons(icmp->icmp6_type);
key->tp.dst = htons(icmp->icmp6_code);
- memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -570,6 +569,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
struct nd_msg *nd;
int offset;
+ memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
+
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index ebc5728aab4e..95e0dd14dc1a 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -64,6 +64,7 @@ static bool actions_may_change_flow(const struct nlattr *actions)
case OVS_ACTION_ATTR_TRUNC:
case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_DROP:
+ case OVS_ACTION_ATTR_PSAMPLE:
break;
case OVS_ACTION_ATTR_CT:
@@ -152,6 +153,13 @@ static void update_range(struct sw_flow_match *match,
sizeof((match)->key->field)); \
} while (0)
+#define SW_FLOW_KEY_BITMAP_COPY(match, field, value_p, nbits, is_mask) ({ \
+ update_range(match, offsetof(struct sw_flow_key, field), \
+ bitmap_size(nbits), is_mask); \
+ bitmap_copy(is_mask ? (match)->mask->key.field : (match)->key->field, \
+ value_p, nbits); \
+})
+
static bool match_validate(const struct sw_flow_match *match,
u64 key_attrs, u64 mask_attrs, bool log)
{
@@ -670,8 +678,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
bool log)
{
bool ttl = false, ipv4 = false, ipv6 = false;
+ IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { };
bool info_bridge_mode = false;
- __be16 tun_flags = 0;
int opts_type = 0;
struct nlattr *a;
int rem;
@@ -697,7 +705,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
case OVS_TUNNEL_KEY_ATTR_ID:
SW_FLOW_KEY_PUT(match, tun_key.tun_id,
nla_get_be64(a), is_mask);
- tun_flags |= TUNNEL_KEY;
+ __set_bit(IP_TUNNEL_KEY_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
@@ -729,10 +737,10 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
- tun_flags |= TUNNEL_DONT_FRAGMENT;
+ __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
- tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_TP_SRC:
SW_FLOW_KEY_PUT(match, tun_key.tp_src,
@@ -743,7 +751,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
nla_get_be16(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_OAM:
- tun_flags |= TUNNEL_OAM;
+ __set_bit(IP_TUNNEL_OAM_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
if (opts_type) {
@@ -755,7 +763,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
- tun_flags |= TUNNEL_GENEVE_OPT;
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
@@ -768,7 +776,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
- tun_flags |= TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_PAD:
@@ -784,7 +792,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
- tun_flags |= TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
@@ -798,7 +806,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
}
}
- SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
+ SW_FLOW_KEY_BITMAP_COPY(match, tun_key.tun_flags, tun_flags,
+ __IP_TUNNEL_FLAG_NUM, is_mask);
if (is_mask)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
else
@@ -823,13 +832,15 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
}
if (ipv4) {
if (info_bridge_mode) {
+ __clear_bit(IP_TUNNEL_KEY_BIT, tun_flags);
+
if (match->key->tun_key.u.ipv4.src ||
match->key->tun_key.u.ipv4.dst ||
match->key->tun_key.tp_src ||
match->key->tun_key.tp_dst ||
match->key->tun_key.ttl ||
match->key->tun_key.tos ||
- tun_flags & ~TUNNEL_KEY) {
+ !ip_tunnel_flags_empty(tun_flags)) {
OVS_NLERR(log, "IPv4 tun info is not correct");
return -EINVAL;
}
@@ -874,7 +885,7 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
const void *tun_opts, int swkey_tun_opts_len,
unsigned short tun_proto, u8 mode)
{
- if (output->tun_flags & TUNNEL_KEY &&
+ if (test_bit(IP_TUNNEL_KEY_BIT, output->tun_flags) &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
OVS_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
@@ -910,10 +921,10 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
- if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
- if ((output->tun_flags & TUNNEL_CSUM) &&
+ if (test_bit(IP_TUNNEL_CSUM_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (output->tp_src &&
@@ -922,18 +933,20 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
if (output->tp_dst &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
return -EMSGSIZE;
- if ((output->tun_flags & TUNNEL_OAM) &&
+ if (test_bit(IP_TUNNEL_OAM_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (swkey_tun_opts_len) {
- if (output->tun_flags & TUNNEL_GENEVE_OPT &&
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, output->tun_flags) &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
- else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
+ else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT,
+ output->tun_flags) &&
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
- else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
+ else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ output->tun_flags) &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
@@ -1925,7 +1938,7 @@ int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
- return attr ? nla_get_u32(attr) : 0;
+ return nla_get_u32_default(attr, 0);
}
/**
@@ -2029,7 +2042,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if ((swkey->tun_proto || is_mask)) {
const void *opts = NULL;
- if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
+ if (ip_tunnel_is_options_present(output->tun_key.tun_flags))
opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
@@ -2304,14 +2317,10 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
OVS_FLOW_ATTR_MASK, true, skb);
}
-#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-
static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
struct sw_flow_actions *sfa;
- WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
-
sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
if (!sfa)
return ERR_PTR(-ENOMEM);
@@ -2397,7 +2406,7 @@ static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
/* Whenever new actions are added, the need to update this
* function should be considered.
*/
- BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 24);
+ BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 25);
if (!actions)
return;
@@ -2467,18 +2476,9 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
- if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
- if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
- OVS_NLERR(log, "Flow action size exceeds max %u",
- MAX_ACTIONS_BUFSIZE);
- return ERR_PTR(-EMSGSIZE);
- }
- new_acts_size = MAX_ACTIONS_BUFSIZE;
- }
-
acts = nla_alloc_flow_actions(new_acts_size);
if (IS_ERR(acts))
- return (void *)acts;
+ return ERR_CAST(acts);
memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
acts->actions_len = (*sfa)->actions_len;
@@ -2752,7 +2752,8 @@ static int validate_geneve_opts(struct sw_flow_key *key)
opts_len -= len;
}
- key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+ if (crit_opt)
+ __set_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_key.tun_flags);
return 0;
}
@@ -2760,6 +2761,7 @@ static int validate_geneve_opts(struct sw_flow_key *key)
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
+ IP_TUNNEL_DECLARE_FLAGS(dst_opt_type) = { };
struct sw_flow_match match;
struct sw_flow_key key;
struct metadata_dst *tun_dst;
@@ -2767,9 +2769,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
- __be16 dst_opt_type;
- dst_opt_type = 0;
ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
@@ -2781,13 +2781,14 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
err = validate_geneve_opts(&key);
if (err < 0)
return err;
- dst_opt_type = TUNNEL_GENEVE_OPT;
+
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, dst_opt_type);
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
- dst_opt_type = TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, dst_opt_type);
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
- dst_opt_type = TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, dst_opt_type);
break;
}
}
@@ -3144,6 +3145,28 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return 0;
}
+static int validate_psample(const struct nlattr *attr)
+{
+ static const struct nla_policy policy[OVS_PSAMPLE_ATTR_MAX + 1] = {
+ [OVS_PSAMPLE_ATTR_GROUP] = { .type = NLA_U32 },
+ [OVS_PSAMPLE_ATTR_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = OVS_PSAMPLE_COOKIE_MAX_SIZE,
+ },
+ };
+ struct nlattr *a[OVS_PSAMPLE_ATTR_MAX + 1];
+ int err;
+
+ if (!IS_ENABLED(CONFIG_PSAMPLE))
+ return -EOPNOTSUPP;
+
+ err = nla_parse_nested(a, OVS_PSAMPLE_ATTR_MAX, attr, policy, NULL);
+ if (err)
+ return err;
+
+ return a[OVS_PSAMPLE_ATTR_GROUP] ? 0 : -EINVAL;
+}
+
static int copy_action(const struct nlattr *from,
struct sw_flow_actions **sfa, bool log)
{
@@ -3199,6 +3222,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
[OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
[OVS_ACTION_ATTR_DEC_TTL] = (u32)-1,
[OVS_ACTION_ATTR_DROP] = sizeof(u32),
+ [OVS_ACTION_ATTR_PSAMPLE] = (u32)-1,
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -3477,6 +3501,12 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
break;
+ case OVS_ACTION_ATTR_PSAMPLE:
+ err = validate_psample(a);
+ if (err)
+ return err;
+ break;
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
@@ -3502,7 +3532,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
int err;
u32 mpls_label_count = 0;
- *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
+ *sfa = nla_alloc_flow_actions(nla_len(attr));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
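
The flow_netlink.c hunks above convert the tunnel flags from a single __be16 word (tun_flags |= TUNNEL_KEY) to a fixed-size bitmap driven by IP_TUNNEL_DECLARE_FLAGS(), __set_bit() and test_bit(), which removes the 16-flag ceiling a 16-bit word imposes. The stand-alone sketch below mimics that migration in plain C with invented DEMO_* flag names and a hand-rolled one-word bitmap; it shows the pattern, not the kernel helpers.

#include <stdbool.h>
#include <stdio.h>

/* Invented flag bits for the sketch; a 16-bit flags word caps how many of
 * these can exist, which is exactly what the bitmap form lifts.
 */
enum demo_tun_flag {
        DEMO_TUNNEL_KEY_BIT,
        DEMO_TUNNEL_CSUM_BIT,
        DEMO_TUNNEL_DONT_FRAGMENT_BIT,
        DEMO_TUNNEL_GENEVE_OPT_BIT,
        __DEMO_TUNNEL_FLAG_NUM,
};

#define BITS_PER_WORD (8 * sizeof(unsigned long))
#define WORDS(nbits)  (((nbits) + BITS_PER_WORD - 1) / BITS_PER_WORD)

/* Counterpart of IP_TUNNEL_DECLARE_FLAGS(): an unsigned long bitmap. */
#define DECLARE_FLAGS(name) unsigned long name[WORDS(__DEMO_TUNNEL_FLAG_NUM)]

static void set_flag(unsigned long *map, unsigned int bit)
{
        map[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static bool test_flag(const unsigned long *map, unsigned int bit)
{
        return map[bit / BITS_PER_WORD] & (1UL << (bit % BITS_PER_WORD));
}

int main(void)
{
        DECLARE_FLAGS(tun_flags) = { 0 };

        /* Old style: tun_flags |= TUNNEL_KEY;  new style: set the bit. */
        set_flag(tun_flags, DEMO_TUNNEL_KEY_BIT);
        set_flag(tun_flags, DEMO_TUNNEL_CSUM_BIT);

        printf("key: %d, csum: %d, dont-frag: %d\n",
               test_flag(tun_flags, DEMO_TUNNEL_KEY_BIT),
               test_flag(tun_flags, DEMO_TUNNEL_CSUM_BIT),
               test_flag(tun_flags, DEMO_TUNNEL_DONT_FRAGMENT_BIT));
        return 0;
}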
diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
index ed11cd12b512..8bbf983cd244 100644
--- a/net/openvswitch/meter.h
+++ b/net/openvswitch/meter.h
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/openvswitch.h>
-#include <linux/genetlink.h>
#include <linux/skbuff.h>
#include <linux/bits.h>
diff --git a/net/openvswitch/openvswitch_trace.h b/net/openvswitch/openvswitch_trace.h
index 3eb35d9eb700..74d75aaebef4 100644
--- a/net/openvswitch/openvswitch_trace.h
+++ b/net/openvswitch/openvswitch_trace.h
@@ -43,8 +43,8 @@ TRACE_EVENT(ovs_do_execute_action,
TP_fast_assign(
__entry->dpaddr = dp;
- __assign_str(dp_name, ovs_dp_name(dp));
- __assign_str(dev_name, skb->dev->name);
+ __assign_str(dp_name);
+ __assign_str(dev_name);
__entry->skbaddr = skb;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
@@ -113,8 +113,8 @@ TRACE_EVENT(ovs_dp_upcall,
TP_fast_assign(
__entry->dpaddr = dp;
- __assign_str(dp_name, ovs_dp_name(dp));
- __assign_str(dev_name, skb->dev->name);
+ __assign_str(dp_name);
+ __assign_str(dev_name);
__entry->skbaddr = skb;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 74c88a6baa43..2412d7813d24 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -85,7 +85,6 @@ static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = dev_get_tstats64,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -103,19 +102,20 @@ static void do_setup(struct net_device *netdev)
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
IFF_NO_QUEUE;
+ netdev->lltx = true;
netdev->needs_free_netdev = true;
netdev->priv_destructor = NULL;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
- netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
- NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;
+ netdev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_GSO_ENCAP_ALL;
netdev->vlan_features = netdev->features;
netdev->hw_enc_features = netdev->features;
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
- netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+ netdev->hw_features = netdev->features;
eth_hw_addr_random(netdev);
}
@@ -140,11 +140,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
err = -ENOMEM;
goto error_free_vport;
}
- vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!vport->dev->tstats) {
- err = -ENOMEM;
- goto error_free_netdev;
- }
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
dev->ifindex = parms->desired_ifindex;
@@ -153,7 +149,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
/* Restrict bridge port to current netns. */
if (vport->port_no == OVSP_LOCAL)
- vport->dev->features |= NETIF_F_NETNS_LOCAL;
+ vport->dev->netns_local = true;
rtnl_lock();
err = register_netdevice(vport->dev);
@@ -169,8 +165,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
error_unlock:
rtnl_unlock();
- free_percpu(dev->tstats);
-error_free_netdev:
free_netdev(dev);
error_free_vport:
ovs_vport_free(vport);
@@ -186,7 +180,6 @@ static void internal_dev_destroy(struct vport *vport)
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(vport->dev);
- free_percpu(vport->dev->tstats);
rtnl_unlock();
}
@@ -202,7 +195,6 @@ static int internal_dev_recv(struct sk_buff *skb)
skb_dst_drop(skb);
nf_reset_ct(skb);
- secpath_reset(skb);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 903537a5da22..91a11067e458 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -82,6 +82,13 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name)
err = -ENODEV;
goto error_free_vport;
}
+ /* Ensure that the device exists and that the provided
+ * name is not one of its aliases.
+ */
+ if (strcmp(name, ovs_vport_name(vport))) {
+ err = -ENODEV;
+ goto error_put;
+ }
netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
if (vport->dev->flags & IFF_LOOPBACK ||
(vport->dev->type != ARPHRD_ETHER &&
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 972ae01a70f7..8732f6e51ae5 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -500,6 +500,7 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
OVS_CB(skb)->input_vport = vport;
OVS_CB(skb)->mru = 0;
OVS_CB(skb)->cutlen = 0;
+ OVS_CB(skb)->probability = 0;
if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
u32 mark;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9bbc2686690..c131e5ceea37 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,6 +538,47 @@ static void *packet_current_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, rb->head, status);
}
+static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
+{
+ struct vlan_hdr vhdr, *vh;
+ unsigned int header_len;
+
+ if (!dev)
+ return 0;
+
+ /* In the SOCK_DGRAM scenario, skb data starts at the network
+ * protocol, which is after the VLAN headers. The outer VLAN
+ * header is at the hard_header_len offset in non-variable
+ * length link layer headers. If it's a VLAN device, the
+ * min_header_len should be used to exclude the VLAN header
+ * size.
+ */
+ if (dev->min_header_len == dev->hard_header_len)
+ header_len = dev->hard_header_len;
+ else if (is_vlan_dev(dev))
+ header_len = dev->min_header_len;
+ else
+ return 0;
+
+ vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
+ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh))
+ return 0;
+
+ return ntohs(vh->h_vlan_TCI);
+}
+
+static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
+{
+ __be16 proto = skb->protocol;
+
+ if (unlikely(eth_type_vlan(proto)))
+ proto = __vlan_get_protocol_offset(skb, proto,
+ skb_mac_offset(skb), NULL);
+
+ return proto;
+}
+
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
del_timer_sync(&pkc->retire_blk_timer);
@@ -1007,10 +1048,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
+ struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
+
if (skb_vlan_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
+ ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
+ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
+ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
ppd->hv1.tp_vlan_tpid = 0;
@@ -1785,21 +1832,22 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
err = -EINVAL;
spin_lock(&po->bind_lock);
- if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
+ if (po->num &&
match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (refcount_read(&match->sk_ref) < match->max_num_members) {
- __dev_remove_pack(&po->prot_hook);
-
/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
WRITE_ONCE(po->fanout, match);
po->rollover = rollover;
rollover = NULL;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
- __fanout_link(sk, po);
+ if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
+ __dev_remove_pack(&po->prot_hook);
+ __fanout_link(sk, po);
+ }
err = 0;
}
}
@@ -2056,9 +2104,8 @@ retry:
skb->dev = dev;
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
- skb->tstamp = sockc.transmit_time;
-
- skb_setup_tx_timestamp(skb, sockc.tsflags);
+ skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
+ skb_setup_tx_timestamp(skb, &sockc);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -2122,7 +2169,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
enum skb_drop_reason drop_reason = SKB_CONSUMED;
- struct sock *sk;
+ struct sock *sk = NULL;
struct sockaddr_ll *sll;
struct packet_sock *po;
u8 *skb_head = skb->data;
@@ -2156,7 +2203,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
- snaplen = skb->len;
+ snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
res = run_filter(skb, sk, snaplen);
if (!res)
@@ -2227,7 +2274,7 @@ drop_n_restore:
skb->len = skb_len;
}
drop:
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
}
@@ -2235,7 +2282,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
enum skb_drop_reason drop_reason = SKB_CONSUMED;
- struct sock *sk;
+ struct sock *sk = NULL;
struct packet_sock *po;
struct sockaddr_ll *sll;
union tpacket_uhdr h;
@@ -2276,7 +2323,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
- snaplen = skb->len;
+ snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
res = run_filter(skb, sk, snaplen);
if (!res)
@@ -2318,7 +2365,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
- if (po->copy_thresh &&
+ if (READ_ONCE(po->copy_thresh) &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
@@ -2428,6 +2475,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
+ h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
+ h.h2->tp_vlan_tpid = ntohs(skb->protocol);
+ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
h.h2->tp_vlan_tpid = 0;
@@ -2457,7 +2508,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
- sll->sll_protocol = skb->protocol;
+ sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
+ vlan_get_protocol_dgram(skb) : skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
@@ -2495,7 +2547,7 @@ drop_n_restore:
skb->len = skb_len;
}
drop:
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
drop_n_account:
@@ -2504,7 +2556,7 @@ drop_n_account:
drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
sk->sk_data_ready(sk);
- kfree_skb_reason(copy_skb, drop_reason);
+ sk_skb_reason_drop(sk, copy_skb, drop_reason);
goto drop_n_restore;
}
@@ -2522,8 +2574,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
- if (!packet_read_pending(&po->tx_ring))
- complete(&po->skb_completion);
+ complete(&po->skb_completion);
}
sock_wfree(skb);
@@ -2585,8 +2636,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->dev = dev;
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
- skb->tstamp = sockc->transmit_time;
- skb_setup_tx_timestamp(skb, sockc->tsflags);
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid);
+ skb_setup_tx_timestamp(skb, sockc);
skb_zcopy_set_nouarg(skb, ph.raw);
skb_reserve(skb, hlen);
@@ -3051,7 +3102,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
}
- skb_setup_tx_timestamp(skb, sockc.tsflags);
+ skb_setup_tx_timestamp(skb, &sockc);
if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
@@ -3061,9 +3112,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->protocol = proto;
skb->dev = dev;
- skb->priority = READ_ONCE(sk->sk_priority);
+ skb->priority = sockc.priority;
skb->mark = sockc.mark;
- skb->tstamp = sockc.transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -3357,17 +3408,17 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
+ po = pkt_sk(sk);
+ err = packet_alloc_pending(po);
+ if (err)
+ goto out_sk_free;
+
sock_init_data(sock, sk);
- po = pkt_sk(sk);
init_completion(&po->skb_completion);
sk->sk_family = PF_PACKET;
po->num = proto;
- err = packet_alloc_pending(po);
- if (err)
- goto out2;
-
packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
@@ -3399,7 +3450,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
sock_prot_inuse_add(net, &packet_proto, 1);
return 0;
-out2:
+out_sk_free:
sk_free(sk);
out:
return err;
@@ -3483,7 +3534,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
/* Original length was stored in sockaddr_ll fields */
origlen = PACKET_SKB_CB(skb)->sa.origlen;
sll->sll_family = AF_PACKET;
- sll->sll_protocol = skb->protocol;
+ sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
+ vlan_get_protocol_dgram(skb) : skb->protocol;
}
sock_recv_cmsgs(msg, sk, skb);
@@ -3540,6 +3592,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
aux.tp_vlan_tci = skb_vlan_tag_get(skb);
aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
+ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
+ if (dev) {
+ aux.tp_vlan_tci = vlan_get_tci(skb, dev);
+ aux.tp_vlan_tpid = ntohs(skb->protocol);
+ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else {
+ aux.tp_vlan_tci = 0;
+ aux.tp_vlan_tpid = 0;
+ }
+ rcu_read_unlock();
} else {
aux.tp_vlan_tci = 0;
aux.tp_vlan_tpid = 0;
@@ -3800,28 +3867,30 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
- int len;
+ ret = -EINVAL;
lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
- len = sizeof(req_u.req);
+ if (optlen < sizeof(req_u.req))
+ break;
+ ret = copy_from_sockptr(&req_u.req, optval,
+ sizeof(req_u.req)) ?
+ -EINVAL : 0;
break;
case TPACKET_V3:
default:
- len = sizeof(req_u.req3);
+ if (optlen < sizeof(req_u.req3))
+ break;
+ ret = copy_from_sockptr(&req_u.req3, optval,
+ sizeof(req_u.req3)) ?
+ -EINVAL : 0;
break;
}
- if (optlen < len) {
- ret = -EINVAL;
- } else {
- if (copy_from_sockptr(&req_u.req, optval, len))
- ret = -EFAULT;
- else
- ret = packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
- }
+ if (!ret)
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
release_sock(sk);
return ret;
}
@@ -3834,7 +3903,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
- pkt_sk(sk)->copy_thresh = val;
+ WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
return 0;
}
case PACKET_VERSION:
@@ -3998,7 +4067,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (val < 0 || val > 1)
return -EINVAL;
- po->prot_hook.ignore_outgoing = !!val;
+ WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
return 0;
}
case PACKET_TX_HAS_OFF:
@@ -4088,6 +4157,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_VNET_HDR_SZ:
val = READ_ONCE(po->vnet_hdr_sz);
break;
+ case PACKET_COPY_THRESH:
+ val = READ_ONCE(pkt_sk(sk)->copy_thresh);
+ break;
case PACKET_VERSION:
val = po->tp_version;
break;
@@ -4129,7 +4201,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_IGNORE_OUTGOING:
- val = po->prot_hook.ignore_outgoing;
+ val = READ_ONCE(po->prot_hook.ignore_outgoing);
break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
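
For SOCK_DGRAM packet sockets the link-layer header has already been pulled, so when the VLAN tag is not carried in skb metadata the new vlan_get_tci() and vlan_get_protocol_dgram() helpers above re-read the TCI and the encapsulated protocol from the mac header bytes. The self-contained sketch below does the equivalent parse on a raw tagged Ethernet frame held in a byte array; the struct layout, the ntohs_() helper (which assumes a little-endian host) and the sample frame are written out by hand for the example rather than taken from <linux/if_vlan.h>.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN        6
#define ETH_P_8021Q     0x8100

/* Hand-rolled 802.1Q view of a frame: dst, src, TPID, TCI, inner proto. */
struct vlan_eth_hdr {
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
        uint16_t tpid;          /* 0x8100 for a tagged frame (network order) */
        uint16_t tci;           /* PCP(3) | DEI(1) | VLAN ID(12) */
        uint16_t inner_proto;   /* what the payload protocol really is */
} __attribute__((packed));

static uint16_t ntohs_(uint16_t v)
{
        return (uint16_t)((v >> 8) | (v << 8));   /* little-endian host only */
}

int main(void)
{
        /* A tagged IPv4 frame: VLAN ID 5, priority 3. */
        unsigned char frame[] = {
                0xff,0xff,0xff,0xff,0xff,0xff,  0x02,0x00,0x00,0x00,0x00,0x01,
                0x81,0x00,              /* TPID 0x8100 */
                0x60,0x05,              /* TCI: PCP=3, VID=5 */
                0x08,0x00,              /* inner protocol: IPv4 */
        };
        const struct vlan_eth_hdr *vh = (const void *)frame;

        if (ntohs_(vh->tpid) == ETH_P_8021Q) {
                int tci = ntohs_(vh->tci);

                printf("vlan id %d, prio %d, inner proto 0x%04x\n",
                       tci & 0x0fff, tci >> 13, ntohs_(vh->inner_proto));
        }
        return 0;
}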
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 9a7980e3309d..47f69f3dbf73 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -17,7 +17,7 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_index = po->ifindex;
pinfo.pdi_version = po->tp_version;
pinfo.pdi_reserve = po->tp_reserve;
- pinfo.pdi_copy_thresh = po->copy_thresh;
+ pinfo.pdi_copy_thresh = READ_ONCE(po->copy_thresh);
pinfo.pdi_tstamp = READ_ONCE(po->tp_tstamp);
pinfo.pdi_flags = 0;
@@ -245,6 +245,7 @@ static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler packet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_PACKET,
.dump = packet_diag_handler_dump,
};
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 2b582da1e88c..a27efa4faa4e 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/sock.h>
#include <linux/if_phonet.h>
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 3dd5f52bc1b5..53a858478e22 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -759,8 +759,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
sock_put(sk);
}
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
- bool kern)
+static struct sock *pep_sock_accept(struct sock *sk,
+ struct proto_accept_arg *arg)
{
struct pep_sock *pn = pep_sk(sk), *newpn;
struct sock *newsk = NULL;
@@ -772,8 +772,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
- skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
- errp);
+ skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ &arg->err);
if (!skb)
return NULL;
@@ -836,7 +836,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
/* Create a new to-be-accepted sock */
newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
- kern);
+ arg->kern);
if (!newsk) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
err = -ENOBUFS;
@@ -878,7 +878,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
drop:
release_sock(sk);
kfree_skb(skb);
- *errp = err;
+ arg->err = err;
return newsk;
}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index cde671d29d5d..5c36bae37b8f 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -22,7 +22,7 @@
#include <net/phonet/pn_dev.h>
struct phonet_routes {
- struct mutex lock;
+ spinlock_t lock;
struct net_device __rcu *table[64];
};
@@ -54,7 +54,7 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
pnd->netdev = dev;
bitmap_zero(pnd->addrs, 64);
- BUG_ON(!mutex_is_locked(&pndevs->lock));
+ lockdep_assert_held(&pndevs->lock);
list_add_rcu(&pnd->list, &pndevs->list);
return pnd;
}
@@ -64,7 +64,8 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
- BUG_ON(!mutex_is_locked(&pndevs->lock));
+ lockdep_assert_held(&pndevs->lock);
+
list_for_each_entry(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
@@ -91,17 +92,22 @@ static void phonet_device_destroy(struct net_device *dev)
ASSERT_RTNL();
- mutex_lock(&pndevs->lock);
+ spin_lock(&pndevs->lock);
+
pnd = __phonet_get(dev);
if (pnd)
list_del_rcu(&pnd->list);
- mutex_unlock(&pndevs->lock);
+
+ spin_unlock(&pndevs->lock);
if (pnd) {
+ struct net *net = dev_net(dev);
+ u32 ifindex = dev->ifindex;
u8 addr;
for_each_set_bit(addr, pnd->addrs, 64)
- phonet_address_notify(RTM_DELADDR, dev, addr);
+ phonet_address_notify(net, RTM_DELADDR, ifindex, addr);
+
kfree(pnd);
}
}
@@ -133,7 +139,8 @@ int phonet_address_add(struct net_device *dev, u8 addr)
struct phonet_device *pnd;
int err = 0;
- mutex_lock(&pndevs->lock);
+ spin_lock(&pndevs->lock);
+
/* Find or create Phonet-specific device data */
pnd = __phonet_get(dev);
if (pnd == NULL)
@@ -142,7 +149,9 @@ int phonet_address_add(struct net_device *dev, u8 addr)
err = -ENOMEM;
else if (test_and_set_bit(addr >> 2, pnd->addrs))
err = -EEXIST;
- mutex_unlock(&pndevs->lock);
+
+ spin_unlock(&pndevs->lock);
+
return err;
}
@@ -152,7 +161,8 @@ int phonet_address_del(struct net_device *dev, u8 addr)
struct phonet_device *pnd;
int err = 0;
- mutex_lock(&pndevs->lock);
+ spin_lock(&pndevs->lock);
+
pnd = __phonet_get(dev);
if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
err = -EADDRNOTAVAIL;
@@ -161,7 +171,8 @@ int phonet_address_del(struct net_device *dev, u8 addr)
list_del_rcu(&pnd->list);
else
pnd = NULL;
- mutex_unlock(&pndevs->lock);
+
+ spin_unlock(&pndevs->lock);
if (pnd)
kfree_rcu(pnd, rcu);
@@ -244,32 +255,39 @@ static int phonet_device_autoconf(struct net_device *dev)
ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
if (ret)
return ret;
- phonet_address_notify(RTM_NEWADDR, dev,
- req.ifr_phonet_autoconf.device);
+
+ phonet_address_notify(dev_net(dev), RTM_NEWADDR, dev->ifindex,
+ req.ifr_phonet_autoconf.device);
return 0;
}
static void phonet_route_autodel(struct net_device *dev)
{
- struct phonet_net *pnn = phonet_pernet(dev_net(dev));
- unsigned int i;
+ struct net *net = dev_net(dev);
DECLARE_BITMAP(deleted, 64);
+ u32 ifindex = dev->ifindex;
+ struct phonet_net *pnn;
+ unsigned int i;
+
+ pnn = phonet_pernet(net);
/* Remove left-over Phonet routes */
bitmap_zero(deleted, 64);
- mutex_lock(&pnn->routes.lock);
- for (i = 0; i < 64; i++)
+
+ spin_lock(&pnn->routes.lock);
+ for (i = 0; i < 64; i++) {
if (rcu_access_pointer(pnn->routes.table[i]) == dev) {
RCU_INIT_POINTER(pnn->routes.table[i], NULL);
set_bit(i, deleted);
}
- mutex_unlock(&pnn->routes.lock);
+ }
+ spin_unlock(&pnn->routes.lock);
if (bitmap_empty(deleted, 64))
return; /* short-circuit RCU */
synchronize_rcu();
for_each_set_bit(i, deleted, 64) {
- rtm_phonet_notify(RTM_DELROUTE, dev, i);
+ rtm_phonet_notify(net, RTM_DELROUTE, ifindex, i);
dev_put(dev);
}
}
@@ -309,8 +327,8 @@ static int __net_init phonet_init_net(struct net *net)
return -ENOMEM;
INIT_LIST_HEAD(&pnn->pndevs.list);
- mutex_init(&pnn->pndevs.lock);
- mutex_init(&pnn->routes.lock);
+ spin_lock_init(&pnn->pndevs.lock);
+ spin_lock_init(&pnn->routes.lock);
return 0;
}
@@ -360,13 +378,15 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
int err = -EEXIST;
daddr = daddr >> 2;
- mutex_lock(&routes->lock);
+
+ spin_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
rcu_assign_pointer(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}
- mutex_unlock(&routes->lock);
+ spin_unlock(&routes->lock);
+
return err;
}
@@ -376,17 +396,19 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
struct phonet_routes *routes = &pnn->routes;
daddr = daddr >> 2;
- mutex_lock(&routes->lock);
+
+ spin_lock(&routes->lock);
if (rcu_access_pointer(routes->table[daddr]) == dev)
RCU_INIT_POINTER(routes->table[daddr], NULL);
else
dev = NULL;
- mutex_unlock(&routes->lock);
+ spin_unlock(&routes->lock);
if (!dev)
return -ENOENT;
- synchronize_rcu();
- dev_put(dev);
+
+ /* Note : our caller must call synchronize_rcu() and dev_put(dev) */
+
return 0;
}
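
phonet_route_del() above no longer calls synchronize_rcu() and dev_put() itself: with the route table now guarded by a spinlock and the netlink handlers running under rcu_read_lock() (see pn_netlink.c below), it cannot sleep, so it only clears the table slot and leaves the grace-period wait and the reference drop to its caller, as the added comment says. The toy sketch below mirrors only the shape of that split; the types are invented and wait_for_readers() is an empty stand-in for synchronize_rcu().

#include <stdio.h>

struct device { const char *name; int refs; };

static struct device eth0 = { "eth0", 1 };
static struct device *route_table[4] = { NULL, &eth0, NULL, NULL };

/* Stand-in for synchronize_rcu(): wait until no reader can still see the
 * old table entry.  A no-op here because the sketch has no readers.
 */
static void wait_for_readers(void) { }

/* Non-sleeping part: runs under the lock, only unlinks the entry. */
static struct device *route_del_locked(unsigned int idx)
{
        struct device *dev = route_table[idx];

        route_table[idx] = NULL;
        return dev;
}

/* Sleeping part: done by the caller once the lock is dropped. */
static void route_del_finish(struct device *dev)
{
        wait_for_readers();
        dev->refs--;
        printf("released %s, refs now %d\n", dev->name, dev->refs);
}

int main(void)
{
        struct device *dev;

        /* lock would be taken here */
        dev = route_del_locked(1);
        /* lock dropped here; now it is safe to sleep */
        if (dev)
                route_del_finish(dev);
        return 0;
}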
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 59aebe296890..b9043c92dc24 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -19,10 +19,10 @@
/* Device address handling */
-static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
+static int fill_addr(struct sk_buff *skb, u32 ifindex, u8 addr,
u32 portid, u32 seq, int event);
-void phonet_address_notify(int event, struct net_device *dev, u8 addr)
+void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr)
{
struct sk_buff *skb;
int err = -ENOBUFS;
@@ -31,17 +31,18 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr)
nla_total_size(1), GFP_KERNEL);
if (skb == NULL)
goto errout;
- err = fill_addr(skb, dev, addr, 0, 0, event);
+
+ err = fill_addr(skb, ifindex, addr, 0, 0, event);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
- rtnl_notify(skb, dev_net(dev), 0,
- RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
+
+ rtnl_notify(skb, net, 0, RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
return;
errout:
- rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_PHONET_IFADDR, err);
}
static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
@@ -64,8 +65,6 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- ASSERT_RTNL();
-
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
ifa_phonet_policy, extack);
if (err < 0)
@@ -79,21 +78,29 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Phonet addresses only have 6 high-order bits */
return -EINVAL;
- dev = __dev_get_by_index(net, ifm->ifa_index);
- if (dev == NULL)
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifm->ifa_index);
+ if (!dev) {
+ rcu_read_unlock();
return -ENODEV;
+ }
if (nlh->nlmsg_type == RTM_NEWADDR)
err = phonet_address_add(dev, pnaddr);
else
err = phonet_address_del(dev, pnaddr);
+
+ rcu_read_unlock();
+
if (!err)
- phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
+ phonet_address_notify(net, nlh->nlmsg_type, ifm->ifa_index, pnaddr);
+
return err;
}
-static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
- u32 portid, u32 seq, int event)
+static int fill_addr(struct sk_buff *skb, u32 ifindex, u8 addr,
+ u32 portid, u32 seq, int event)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
@@ -107,7 +114,7 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
ifm->ifa_prefixlen = 0;
ifm->ifa_flags = IFA_F_PERMANENT;
ifm->ifa_scope = RT_SCOPE_LINK;
- ifm->ifa_index = dev->ifindex;
+ ifm->ifa_index = ifindex;
if (nla_put_u8(skb, IFA_LOCAL, addr))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -120,14 +127,17 @@ nla_put_failure:
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
+ int addr_idx = 0, addr_start_idx = cb->args[1];
+ int dev_idx = 0, dev_start_idx = cb->args[0];
struct phonet_device_list *pndevs;
struct phonet_device *pnd;
- int dev_idx = 0, dev_start_idx = cb->args[0];
- int addr_idx = 0, addr_start_idx = cb->args[1];
+ int err = 0;
pndevs = phonet_device_list(sock_net(skb->sk));
+
rcu_read_lock();
list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ DECLARE_BITMAP(addrs, 64);
u8 addr;
if (dev_idx > dev_start_idx)
@@ -136,29 +146,32 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
continue;
addr_idx = 0;
- for_each_set_bit(addr, pnd->addrs, 64) {
+ memcpy(addrs, pnd->addrs, sizeof(pnd->addrs));
+
+ for_each_set_bit(addr, addrs, 64) {
if (addr_idx++ < addr_start_idx)
continue;
- if (fill_addr(skb, pnd->netdev, addr << 2,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
+ err = fill_addr(skb, READ_ONCE(pnd->netdev->ifindex),
+ addr << 2, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWADDR);
+ if (err < 0)
goto out;
}
}
-
out:
rcu_read_unlock();
+
cb->args[0] = dev_idx;
cb->args[1] = addr_idx;
- return skb->len;
+ return err;
}
/* Routes handling */
-static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
- u32 portid, u32 seq, int event)
+static int fill_route(struct sk_buff *skb, u32 ifindex, u8 dst,
+ u32 portid, u32 seq, int event)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
@@ -177,8 +190,7 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- if (nla_put_u8(skb, RTA_DST, dst) ||
- nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ if (nla_put_u8(skb, RTA_DST, dst) || nla_put_u32(skb, RTA_OIF, ifindex))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -188,26 +200,27 @@ nla_put_failure:
return -EMSGSIZE;
}
-void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
+void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst)
{
struct sk_buff *skb;
int err = -ENOBUFS;
- skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct rtmsg)) +
nla_total_size(1) + nla_total_size(4), GFP_KERNEL);
if (skb == NULL)
goto errout;
- err = fill_route(skb, dev, dst, 0, 0, event);
+
+ err = fill_route(skb, ifindex, dst, 0, 0, event);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
- rtnl_notify(skb, dev_net(dev), 0,
- RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
+
+ rtnl_notify(skb, net, 0, RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
return;
errout:
- rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_PHONET_ROUTE, err);
}
static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
@@ -220,8 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[RTA_MAX+1];
+ bool sync_needed = false;
struct net_device *dev;
struct rtmsg *rtm;
+ u32 ifindex;
int err;
u8 dst;
@@ -231,8 +246,6 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- ASSERT_RTNL();
-
err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_phonet_policy, extack);
if (err < 0)
@@ -247,22 +260,40 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (dst & 3) /* Phonet addresses only have 6 high-order bits */
return -EINVAL;
- dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF]));
- if (dev == NULL)
+ ifindex = nla_get_u32(tb[RTA_OIF]);
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
return -ENODEV;
+ }
- if (nlh->nlmsg_type == RTM_NEWROUTE)
+ if (nlh->nlmsg_type == RTM_NEWROUTE) {
err = phonet_route_add(dev, dst);
- else
+ } else {
err = phonet_route_del(dev, dst);
+ if (!err)
+ sync_needed = true;
+ }
+
+ rcu_read_unlock();
+
+ if (sync_needed) {
+ synchronize_rcu();
+ dev_put(dev);
+ }
if (!err)
- rtm_phonet_notify(nlh->nlmsg_type, dev, dst);
+ rtm_phonet_notify(net, nlh->nlmsg_type, ifindex, dst);
+
return err;
}
static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
+ int err = 0;
u8 addr;
rcu_read_lock();
@@ -272,35 +303,34 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
if (!dev)
continue;
- if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE) < 0)
- goto out;
+ err = fill_route(skb, READ_ONCE(dev->ifindex), addr << 2,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE);
+ if (err < 0)
+ break;
}
-
-out:
rcu_read_unlock();
cb->args[0] = addr;
- return skb->len;
+ return err;
}
+static const struct rtnl_msg_handler phonet_rtnl_msg_handlers[] __initdata_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_NEWADDR,
+ .doit = addr_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_DELADDR,
+ .doit = addr_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_GETADDR,
+ .dumpit = getaddr_dumpit, .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_NEWROUTE,
+ .doit = route_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_DELROUTE,
+ .doit = route_doit, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.owner = THIS_MODULE, .protocol = PF_PHONET, .msgtype = RTM_GETROUTE,
+ .dumpit = route_dumpit, .flags = RTNL_FLAG_DUMP_UNLOCKED},
+};
+
int __init phonet_netlink_register(void)
{
- int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
- addr_doit, NULL, 0);
- if (err)
- return err;
-
- /* Further rtnl_register_module() cannot fail */
- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
- addr_doit, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
- NULL, getaddr_dumpit, 0);
- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
- route_doit, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
- route_doit, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
- NULL, route_dumpit, 0);
- return 0;
+ return rtnl_register_many(phonet_rtnl_msg_handlers);
}
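
phonet_netlink_register() now hands a single table of rtnl_msg_handler entries to rtnl_register_many() instead of six individual rtnl_register_module() calls, of which only the first one's return value used to be checked. The generic sketch below shows the table-driven register/rollback pattern such a helper can provide; the struct, register_one() and register_many() are invented for the example and are not the kernel interface.

#include <stdio.h>

/* Invented handler descriptor, loosely mirroring one table entry. */
struct msg_handler {
        int msgtype;
        int (*doit)(int msgtype);
};

static int handle_newaddr(int t) { printf("doit %d\n", t); return 0; }
static int handle_deladdr(int t) { printf("doit %d\n", t); return 0; }

static const struct msg_handler handlers[] = {
        { .msgtype = 1, .doit = handle_newaddr },
        { .msgtype = 2, .doit = handle_deladdr },
};

/* Pretend per-entry registration that may fail. */
static int register_one(const struct msg_handler *h)
{
        printf("registered msgtype %d\n", h->msgtype);
        return 0;
}

static void unregister_one(const struct msg_handler *h)
{
        printf("unregistered msgtype %d\n", h->msgtype);
}

/* All-or-nothing: on failure, roll back everything registered so far. */
static int register_many(const struct msg_handler *tbl, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = register_one(&tbl[i]);
                if (err) {
                        while (--i >= 0)
                                unregister_one(&tbl[i]);
                        return err;
                }
        }
        return 0;
}

int main(void)
{
        return register_many(handlers, 2);
}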
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1018340d89a7..5ce0b3ee5def 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -292,18 +292,17 @@ out:
}
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
struct sock *newsk;
- int err;
if (unlikely(sk->sk_state != TCP_LISTEN))
return -EINVAL;
- newsk = sk->sk_prot->accept(sk, flags, &err, kern);
+ newsk = sk->sk_prot->accept(sk, arg);
if (!newsk)
- return err;
+ return arg->err;
lock_sock(newsk);
sock_graft(newsk, newsock);
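
Both phonet accept paths above switch to the bundled argument convention: pep_sock_accept() reads arg->flags and arg->kern and stores failures in arg->err, and pn_socket_accept() simply returns arg->err when no socket comes back. The sketch below imitates that calling convention with an invented accept_arg struct whose field names follow what the hunks show; nothing here is the actual struct proto_accept_arg definition.

#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for the argument bundle seen in the diff. */
struct accept_arg {
        int  flags;     /* in:  O_NONBLOCK etc. */
        int  err;       /* out: error when no socket is returned */
        bool kern;      /* in:  kernel-internal accept? */
};

struct sockish { int id; };

/* Callee: either returns a new "socket" or reports why not via arg->err. */
static struct sockish *demo_accept(struct accept_arg *arg)
{
        static struct sockish newsk = { 42 };

        if (arg->flags & O_NONBLOCK) {
                arg->err = -11;         /* -EAGAIN: nothing queued right now */
                return NULL;
        }
        return &newsk;
}

int main(void)
{
        struct accept_arg arg = { .flags = 0, .err = 0, .kern = false };
        struct sockish *sk = demo_accept(&arg);

        /* Caller mirrors pn_socket_accept(): the error travels in the struct. */
        if (sk)
                printf("accepted #%d\n", sk->id);
        else
                printf("accept failed: %d\n", arg.err);
        return 0;
}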
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 0d0bf41381c2..463a74a27d3e 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -48,7 +48,7 @@ void phonet_get_local_port_range(int *min, int *max)
} while (read_seqretry(&local_port_range_lock, seq));
}
-static int proc_local_port_range(struct ctl_table *table, int write,
+static int proc_local_port_range(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -81,7 +81,6 @@ static struct ctl_table phonet_table[] = {
.mode = 0644,
.proc_handler = proc_local_port_range,
},
- { }
};
int __init phonet_sysctl_init(void)
diff --git a/net/psample/psample.c b/net/psample/psample.c
index ddd211a151d0..25f92ba0840c 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -221,7 +221,7 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
const struct ip_tunnel_key *tun_key = &tun_info->key;
int tun_opts_len = tun_info->options_len;
- if (tun_key->tun_flags & TUNNEL_KEY &&
+ if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags) &&
nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
PSAMPLE_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
@@ -257,10 +257,10 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_CSUM) &&
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (tun_key->tp_src &&
@@ -269,15 +269,16 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
if (tun_key->tp_dst &&
nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_OAM) &&
+ if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (tun_opts_len) {
- if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags) &&
nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
tun_opts_len, tun_opts))
return -EMSGSIZE;
- else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
+ else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ tun_key->tun_flags) &&
nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
tun_opts_len, tun_opts))
return -EMSGSIZE;
@@ -314,7 +315,7 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
int tun_opts_len = tun_info->options_len;
int sum = nla_total_size(0); /* PSAMPLE_ATTR_TUNNEL */
- if (tun_key->tun_flags & TUNNEL_KEY)
+ if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags))
sum += nla_total_size_64bit(sizeof(u64));
if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
@@ -337,20 +338,21 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
if (tun_key->tos)
sum += nla_total_size(sizeof(u8));
sum += nla_total_size(sizeof(u8)); /* TTL */
- if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
- if (tun_key->tun_flags & TUNNEL_CSUM)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
if (tun_key->tp_src)
sum += nla_total_size(sizeof(u16));
if (tun_key->tp_dst)
sum += nla_total_size(sizeof(u16));
- if (tun_key->tun_flags & TUNNEL_OAM)
+ if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
if (tun_opts_len) {
- if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags))
sum += nla_total_size(tun_opts_len);
- else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
+ else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
+ tun_key->tun_flags))
sum += nla_total_size(tun_opts_len);
}
@@ -358,8 +360,9 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
}
#endif
-void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 sample_rate, const struct psample_metadata *md)
+void psample_sample_packet(struct psample_group *group,
+ const struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md)
{
ktime_t tstamp = ktime_get_real();
int out_ifindex = md->out_ifindex;
@@ -374,6 +377,10 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
void *data;
int ret;
+ if (!genl_has_listeners(&psample_nl_family, group->net,
+ PSAMPLE_NL_MCGRP_SAMPLE))
+ return;
+
meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
(out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
(md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
@@ -384,7 +391,11 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
nla_total_size(sizeof(u32)) + /* group_num */
nla_total_size(sizeof(u32)) + /* seq */
nla_total_size_64bit(sizeof(u64)) + /* timestamp */
- nla_total_size(sizeof(u16)); /* protocol */
+ nla_total_size(sizeof(u16)) + /* protocol */
+ (md->user_cookie_len ?
+ nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
+ (md->rate_as_probability ?
+ nla_total_size(0) : 0); /* rate as probability */
#ifdef CONFIG_INET
tun_info = skb_tunnel_info(skb);
@@ -484,6 +495,15 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
}
#endif
+ if (md->user_cookie && md->user_cookie_len &&
+ nla_put(nl_skb, PSAMPLE_ATTR_USER_COOKIE, md->user_cookie_len,
+ md->user_cookie))
+ goto error;
+
+ if (md->rate_as_probability &&
+ nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
+ goto error;
+
genlmsg_end(nl_skb, data);
genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index 41ece61eb57a..00c51cf693f3 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
mutex_lock(&qrtr_node_lock);
list_for_each_entry(node, &qrtr_all_nodes, item) {
- skbn = skb_clone(skb, GFP_KERNEL);
+ skbn = pskb_copy(skb, GFP_KERNEL);
if (!skbn)
break;
skb_set_owner_w(skbn, skb->sk);
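
qrtr_bcast_enqueue() now duplicates the skb with pskb_copy() instead of skb_clone(). A clone shares the underlying data buffer with the original, so if each per-node transmit path goes on to rewrite header bytes, every recipient would see the other paths' writes; a header copy keeps those writes private. That motivation is inferred here rather than quoted from a changelog, and the toy model below only demonstrates the aliasing difference with made-up structures, not the skb API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy buffer: a header region followed by payload, possibly shared. */
struct buf {
        char *data;     /* header + payload */
        int  *refcnt;   /* shared when "cloned" */
};

/* clone: new descriptor, same underlying bytes (like skb_clone). */
static struct buf clone_buf(struct buf *b)
{
        struct buf c = *b;

        (*c.refcnt)++;
        return c;
}

/* copy: private duplicate of the bytes (closer to pskb_copy for the head). */
static struct buf copy_buf(struct buf *b, size_t len)
{
        struct buf c;

        c.data = malloc(len);
        c.refcnt = malloc(sizeof(*c.refcnt));
        memcpy(c.data, b->data, len);
        *c.refcnt = 1;
        return c;
}

int main(void)
{
        char bytes[32] = "HDR0|payload";
        int refs = 1;
        struct buf orig = { bytes, &refs };

        struct buf shared = clone_buf(&orig);
        shared.data[3] = '7';           /* "per-node" header rewrite ...   */
        printf("original after clone write: %s\n", orig.data);  /* changed */

        struct buf private = copy_buf(&orig, sizeof(bytes));
        private.data[3] = '9';          /* private header rewrite ...      */
        printf("original after copy write:  %s\n", orig.data);  /* intact  */

        free(private.data);
        free(private.refcnt);
        return 0;
}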
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index 9ced13c0627a..69f53625a049 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -118,6 +118,51 @@ static const struct mhi_device_id qcom_mhi_qrtr_id_table[] = {
};
MODULE_DEVICE_TABLE(mhi, qcom_mhi_qrtr_id_table);
+static int __maybe_unused qcom_mhi_qrtr_pm_suspend_late(struct device *dev)
+{
+ struct mhi_device *mhi_dev = container_of(dev, struct mhi_device, dev);
+ enum mhi_state state;
+
+ state = mhi_get_mhi_state(mhi_dev->mhi_cntrl);
+ /*
+ * If the device is in suspend state, then no need for the
+ * client driver to unprepare the channels.
+ */
+ if (state == MHI_STATE_M3)
+ return 0;
+
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_mhi_qrtr_pm_resume_early(struct device *dev)
+{
+ struct mhi_device *mhi_dev = container_of(dev, struct mhi_device, dev);
+ enum mhi_state state;
+ int rc;
+
+ state = mhi_get_mhi_state(mhi_dev->mhi_cntrl);
+ /*
+ * If the device is in suspend state, we won't unprepare channels
+ * in suspend callback, therefore no need to prepare channels when
+ * resume.
+ */
+ if (state == MHI_STATE_M3)
+ return 0;
+
+ rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+ if (rc)
+ dev_err(dev, "failed to prepare for autoqueue transfer %d\n", rc);
+
+ return rc;
+}
+
+static const struct dev_pm_ops qcom_mhi_qrtr_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(qcom_mhi_qrtr_pm_suspend_late,
+ qcom_mhi_qrtr_pm_resume_early)
+};
+
static struct mhi_driver qcom_mhi_qrtr_driver = {
.probe = qcom_mhi_qrtr_probe,
.remove = qcom_mhi_qrtr_remove,
@@ -126,6 +171,7 @@ static struct mhi_driver qcom_mhi_qrtr_driver = {
.id_table = qcom_mhi_qrtr_id_table,
.driver = {
.name = "qcom_mhi_qrtr",
+ .pm = &qcom_mhi_qrtr_pm_ops,
},
};
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index abb0c70ffc8b..3de9350cbf30 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -132,8 +132,8 @@ static int service_announce_new(struct sockaddr_qrtr *dest,
return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
}
-static int service_announce_del(struct sockaddr_qrtr *dest,
- struct qrtr_server *srv)
+static void service_announce_del(struct sockaddr_qrtr *dest,
+ struct qrtr_server *srv)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
@@ -157,10 +157,10 @@ static int service_announce_del(struct sockaddr_qrtr *dest,
msg.msg_namelen = sizeof(*dest);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
pr_err("failed to announce del service\n");
- return ret;
+ return;
}
static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
@@ -188,7 +188,7 @@ static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
msg.msg_namelen = sizeof(*to);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
pr_err("failed to send lookup notification\n");
}
@@ -207,6 +207,9 @@ static int announce_servers(struct sockaddr_qrtr *sq)
xa_for_each(&node->servers, index, srv) {
ret = service_announce_new(sq, srv);
if (ret < 0) {
+ if (ret == -ENODEV)
+ continue;
+
pr_err("failed to announce new service\n");
return ret;
}
@@ -369,7 +372,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0) {
+ if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send bye cmd\n");
return ret;
}
@@ -443,7 +446,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0) {
+ if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send del client cmd\n");
return ret;
}
@@ -725,6 +728,24 @@ int qrtr_ns_init(void)
if (ret < 0)
goto err_wq;
+ /* As the qrtr ns socket owner and creator is the same module, we have
+ * to decrease the qrtr module reference count to guarantee that it
+ * remains zero after the ns socket is created, otherwise, executing
+ * "rmmod" command is unable to make the qrtr module deleted after the
+ * qrtr module is inserted successfully.
+ *
+ * However, the reference count is increased twice in
+ * sock_create_kern(): one is to increase the reference count of owner
+ * of qrtr socket's proto_ops struct; another is to increment the
+ * reference count of owner of qrtr proto struct. Therefore, we must
+ * decrement the module reference count twice to ensure that it keeps
+ * zero after server's listening socket is created. Of course, we
+ * must bump the module reference count twice as well before the socket
+ * is closed.
+ */
+ module_put(qrtr_ns.sock->ops->owner);
+ module_put(qrtr_ns.sock->sk->sk_prot_creator->owner);
+
return 0;
err_wq:
@@ -739,6 +760,15 @@ void qrtr_ns_remove(void)
{
cancel_work_sync(&qrtr_ns.work);
destroy_workqueue(qrtr_ns.workqueue);
+
+ /* sock_release() expects the two references that were put during
+ * qrtr_ns_init(). This function is only called during module remove,
+ * so try_stop_module() has already set the refcnt to 0. Use
+ * __module_get() instead of try_module_get() to successfully take two
+ * references.
+ */
+ __module_get(qrtr_ns.sock->ops->owner);
+ __module_get(qrtr_ns.sock->sk->sk_prot_creator->owner);
sock_release(qrtr_ns.sock);
}
EXPORT_SYMBOL_GPL(qrtr_ns_remove);
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 75cd696963b2..f007730aa2bb 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -26,3 +26,12 @@ config RDS_DEBUG
bool "RDS debugging messages"
depends on RDS
default n
+
+config GCOV_PROFILE_RDS
+ bool "Enable GCOV profiling on RDS"
+ depends on GCOV_KERNEL
+ help
+ Enable GCOV profiling on RDS for checking which functions/lines
+ are executed.
+
+ If unsure, say N.
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 8fdc118e2927..3af1ca1d965c 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -15,3 +15,8 @@ rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o
ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
+
+# for GCOV coverage profiling
+ifdef CONFIG_GCOV_PROFILE_RDS
+GCOV_PROFILE := y
+endif
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b4cc699c5fad..c749c5525b40 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -829,9 +829,7 @@ int rds_conn_init(void)
if (ret)
return ret;
- rds_conn_slab = kmem_cache_create("rds_connection",
- sizeof(struct rds_connection),
- 0, 0, NULL);
+ rds_conn_slab = KMEM_CACHE(rds_connection, 0);
if (!rds_conn_slab) {
rds_loop_net_exit();
return -ENOMEM;
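
For context, KMEM_CACHE() is a helper in <linux/slab.h> that derives the cache name and alignment from the struct itself, so the conversions in this series are behaviour-preserving apart from also passing the structure's natural alignment. Roughly (paraphrased, not the verbatim kernel macro):

#include <linux/slab.h>

/* Approximate shape of the helper used above (paraphrased). */
#define EXAMPLE_KMEM_CACHE(__struct, __flags)				\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)

/* So KMEM_CACHE(rds_connection, 0) expands to roughly:
 *
 *	kmem_cache_create("rds_connection", sizeof(struct rds_connection),
 *			  __alignof__(struct rds_connection), 0, NULL);
 */
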
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 2ba71102b1f1..8ef3178ed4d6 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -369,9 +369,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
-void rds_ib_state_change(struct sock *sk);
-int rds_ib_listen_init(void);
-void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -402,7 +399,6 @@ void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
struct rds_ib_ack_state *state);
-void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 8f070ee7e742..d1cfceeff133 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -40,10 +40,6 @@
#include "rds.h"
struct workqueue_struct *rds_ib_mr_wq;
-struct rds_ib_dereg_odp_mr {
- struct work_struct work;
- struct ib_mr *mr;
-};
static void rds_ib_odp_mr_worker(struct work_struct *work);
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index e4e41b3afce7..2af678e71e3c 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -103,7 +103,6 @@ static struct ctl_table rds_ib_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
void rds_ib_sysctl_exit(void)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index a4e3c5de998b..00dbcd4d28e6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -302,7 +302,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
}
ret = PTR_ERR(trans_private);
/* Trigger connection so that its ready for the next retry */
- if (ret == -ENODEV)
+ if (ret == -ENODEV && cp)
rds_conn_connect_if_down(cp->cp_conn);
goto out;
}
diff --git a/net/rds/send.c b/net/rds/send.c
index 2899def23865..09a280110654 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
static int acquire_in_xmit(struct rds_conn_path *cp)
{
- return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
+ return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
}
static void release_in_xmit(struct rds_conn_path *cp)
{
- clear_bit(RDS_IN_XMIT, &cp->cp_flags);
- smp_mb__after_atomic();
+ clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
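
Switching to the _lock/_unlock bitop variants above gives the RDS_IN_XMIT flag acquire/release ordering, so it behaves like a tiny lock around the transmit path rather than relying on a separate smp_mb__after_atomic(). A hedged sketch of the idiom with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_IN_XMIT	0	/* illustrative flag bit */

struct example_path {
	unsigned long flags;
};

/* Returns true if we took ownership; acts as an acquire barrier. */
static bool example_acquire(struct example_path *p)
{
	return !test_and_set_bit_lock(EXAMPLE_IN_XMIT, &p->flags);
}

/* Releases ownership; acts as a release barrier, so writes made while
 * holding the "lock" are visible before the bit is observed clear.
 */
static void example_release(struct example_path *p)
{
	clear_bit_unlock(EXAMPLE_IN_XMIT, &p->flags);
}
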
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index e381bbcd9cc1..025f518a4349 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -89,7 +89,6 @@ static struct ctl_table rds_sysctl_rds_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
};
void rds_sysctl_exit(void)
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2dba7505b414..3cc2f303bf78 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -61,8 +61,10 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
static struct kmem_cache *rds_tcp_conn_slab;
-static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
- void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -74,7 +76,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = rds_tcp_skbuf_handler,
+ .proc_handler = rds_tcp_sndbuf_handler,
.extra1 = &rds_tcp_min_sndbuf,
},
#define RDS_TCP_RCVBUF 1
@@ -83,10 +85,9 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = rds_tcp_skbuf_handler,
+ .proc_handler = rds_tcp_rcvbuf_handler,
.extra1 = &rds_tcp_min_rcvbuf,
},
- { }
};
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
@@ -503,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock)
release_sock(sk);
return false;
}
- /* Update ns_tracker to current stack trace and refcounted tracker */
- __netns_tracker_free(net, &sk->ns_tracker, false);
-
- sk->sk_net_refcnt = 1;
- netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
}
rtn = net_generic(net, rds_tcp_netid);
if (rtn->sndbuf_size > 0) {
@@ -683,10 +680,10 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_unlock_irq(&rds_tcp_conn_lock);
}
-static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
+static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
+ const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos)
{
- struct net *net = current->nsproxy->net_ns;
int err;
err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
@@ -695,11 +692,34 @@ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
*(int *)(ctl->extra1));
return err;
}
- if (write)
+
+ if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
+ struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);
+
rds_tcp_sysctl_reset(net);
+ }
+
return 0;
}
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos)
+{
+ struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+ sndbuf_size);
+
+ return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos)
+{
+ struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+ rcvbuf_size);
+
+ return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
static void rds_tcp_exit(void)
{
rds_tcp_set_unloading();
@@ -720,9 +740,7 @@ static int __init rds_tcp_init(void)
{
int ret;
- rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
- sizeof(struct rds_tcp_connection),
- 0, 0, NULL);
+ rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0);
if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
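
Splitting the handler above lets each sysctl recover its rds_tcp_net from ctl->data with container_of() and take the netns from the listen socket, instead of trusting current->nsproxy inside a sysctl write. The recovery step in isolation (illustrative struct name, not the real rds_tcp_net layout):

#include <linux/container_of.h>

struct example_tcp_net {
	int sndbuf_size;
	int rcvbuf_size;
};

/* ctl->data points at one of the two members; walk back to the
 * containing per-net structure.
 */
static struct example_tcp_net *example_net_from_sndbuf(void *data)
{
	return container_of(data, struct example_tcp_net, sndbuf_size);
}
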
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 05008ce5c421..d89bd8d0c354 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -105,6 +105,10 @@ int rds_tcp_accept_one(struct socket *sock)
int conn_state;
struct rds_conn_path *cp;
struct in6_addr *my_addr, *peer_addr;
+ struct proto_accept_arg arg = {
+ .flags = O_NONBLOCK,
+ .kern = true,
+ };
#if !IS_ENABLED(CONFIG_IPV6)
struct in6_addr saddr, daddr;
#endif
@@ -119,7 +123,7 @@ int rds_tcp_accept_one(struct socket *sock)
if (ret)
goto out;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
+ ret = sock->ops->accept(sock, new_sock, &arg);
if (ret < 0)
goto out;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c00f04a1a534..7997a19d1da3 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -337,9 +337,7 @@ out:
int rds_tcp_recv_init(void)
{
- rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
- sizeof(struct rds_tcp_incoming),
- 0, 0, NULL);
+ rds_tcp_incoming_slab = KMEM_CACHE(rds_tcp_incoming, 0);
if (!rds_tcp_incoming_slab)
return -ENOMEM;
return 0;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index c3feb4f49d09..7d3e82e4c2fc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -539,18 +539,14 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
#endif
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
- bool blocked, unsigned long reason)
+ bool blocked,
+ enum rfkill_hard_block_reasons reason)
{
unsigned long flags;
bool ret, prev;
BUG_ON(!rfkill);
- if (WARN(reason &
- ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
- "hw_state reason not supported: 0x%lx", reason))
- return blocked;
-
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->hard_block_reasons & reason);
if (blocked) {
@@ -1398,7 +1394,6 @@ static const struct file_operations rfkill_fops = {
.release = rfkill_fop_release,
.unlocked_ioctl = rfkill_fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
#define RFKILL_NAME "rfkill"
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 4e32d659524e..9fa019e0dcad 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -3,6 +3,7 @@
* Copyright (c) 2011, NVIDIA Corporation.
*/
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -31,8 +32,12 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
{
struct rfkill_gpio_data *rfkill = data;
- if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
- clk_enable(rfkill->clk);
+ if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) {
+ int ret = clk_enable(rfkill->clk);
+
+ if (ret)
+ return ret;
+ }
gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
@@ -72,6 +77,20 @@ static int rfkill_gpio_acpi_probe(struct device *dev,
return devm_acpi_dev_add_driver_gpios(dev, acpi_rfkill_default_gpios);
}
+/* List of DMI matches for devices on which rfkill-gpio should not load,
+ * to avoid firmware bugs.
+ */
+static const struct dmi_system_id rfkill_gpio_deny_table[] = {
+ {
+ /* Lenovo Yoga Tab 3 Pro YT3-X90, bogus "BCM4752" device in DSDT */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ },
+ { }
+};
+
static int rfkill_gpio_probe(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill;
@@ -81,6 +100,9 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
const char *type_name;
int ret;
+ if (dmi_check_system(rfkill_gpio_deny_table))
+ return -ENODEV;
+
rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
if (!rfkill)
return -ENOMEM;
@@ -156,14 +178,12 @@ err_destroy:
return ret;
}
-static int rfkill_gpio_remove(struct platform_device *pdev)
+static void rfkill_gpio_remove(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
-
- return 0;
}
#ifdef CONFIG_ACPI
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index ef81d019b20f..a4a668b88a8f 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -397,15 +397,15 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
- int opt;
+ unsigned int opt;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
- if (optlen < sizeof(int))
+ if (optlen < sizeof(unsigned int))
return -EINVAL;
- if (copy_from_sockptr(&opt, optval, sizeof(int)))
+ if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
return -EFAULT;
switch (optname) {
@@ -414,31 +414,31 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
return 0;
case ROSE_T1:
- if (opt < 1)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
rose->t1 = opt * HZ;
return 0;
case ROSE_T2:
- if (opt < 1)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
rose->t2 = opt * HZ;
return 0;
case ROSE_T3:
- if (opt < 1)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
rose->t3 = opt * HZ;
return 0;
case ROSE_HOLDBACK:
- if (opt < 1)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
rose->hb = opt * HZ;
return 0;
case ROSE_IDLE:
- if (opt < 0)
+ if (opt > UINT_MAX / (60 * HZ))
return -EINVAL;
rose->idle = opt * 60 * HZ;
return 0;
@@ -701,11 +701,9 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
+ int err = -EINVAL;
int n;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
-
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
@@ -718,8 +716,15 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
- if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
- return -EADDRNOTAVAIL;
+ lock_sock(sk);
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out_release;
+
+ err = -EADDRNOTAVAIL;
+ dev = rose_dev_get(&addr->srose_addr);
+ if (!dev)
+ goto out_release;
source = &addr->srose_call;
@@ -730,7 +735,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
- return -EACCES;
+ err = -EACCES;
+ goto out_release;
}
rose->source_call = *source;
}
@@ -753,8 +759,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
-
- return 0;
+ err = 0;
+out_release:
+ release_sock(sk);
+ return err;
}
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
@@ -919,8 +927,8 @@ out_release:
return err;
}
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int rose_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sk_buff *skb;
struct sock *newsk;
@@ -953,7 +961,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
if (skb)
break;
- if (flags & O_NONBLOCK) {
+ if (arg->flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
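
The added upper bounds above keep the jiffies conversions from overflowing: the option values are multiplied by HZ (or 60 * HZ for ROSE_IDLE) before being stored, so anything above UINT_MAX / HZ (for example 4294967 at HZ=1000) would wrap. A small illustrative helper, not part of the patch:

#include <linux/limits.h>	/* UINT_MAX */
#include <linux/types.h>

/* Reject values whose scaling by 'scale' would overflow unsigned int. */
static inline bool example_timeout_ok(unsigned int opt, unsigned int scale)
{
	return opt >= 1 && opt <= UINT_MAX / scale;
}

/* e.g. example_timeout_ok(opt, HZ) for ROSE_T1..T3/HOLDBACK, and
 * example_timeout_ok(opt, 60 * HZ) for ROSE_IDLE (where opt == 0 is
 * additionally allowed to disable the idle timer).
 */
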
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index f06ddbed3fed..1525773e94aa 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -122,6 +122,10 @@ static void rose_heartbeat_expiry(struct timer_list *t)
struct rose_sock *rose = rose_sk(sk);
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ/20);
+ goto out;
+ }
switch (rose->state) {
case ROSE_STATE_0:
/* Magic here: If we listen() and a new link dies before it
@@ -152,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
}
rose_start_heartbeat(sk);
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -162,6 +167,10 @@ static void rose_timer_expiry(struct timer_list *t)
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &rose->timer, jiffies + HZ/20);
+ goto out;
+ }
switch (rose->state) {
case ROSE_STATE_1: /* T1 */
case ROSE_STATE_4: /* T2 */
@@ -182,6 +191,7 @@ static void rose_timer_expiry(struct timer_list *t)
}
break;
}
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -192,6 +202,10 @@ static void rose_idletimer_expiry(struct timer_list *t)
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &rose->idletimer, jiffies + HZ/20);
+ goto out;
+ }
rose_clear_queues(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
@@ -207,6 +221,7 @@ static void rose_idletimer_expiry(struct timer_list *t)
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
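
Each of the three expiry handlers above gains the same guard: the timer fires in BH context, and if a process currently owns the socket lock the handler must not touch protocol state, so it re-arms itself HZ/20 jiffies (roughly 50 ms) later instead. A hedged sketch of the pattern with an illustrative handler:

#include <linux/timer.h>
#include <net/sock.h>

/* Illustrative expiry routine following the pattern used above; the
 * timer is assumed to hold a socket reference taken when it was armed.
 */
static void example_timer_expiry(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Process context holds the lock: retry shortly. */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		goto out;
	}

	/* ... safe to update protocol state here ... */
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
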
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index d391d7758f52..d801315b7083 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -112,7 +112,6 @@ static struct ctl_table rose_table[] = {
.extra1 = &min_window,
.extra2 = &max_window
},
- { }
};
void __init rose_register_sysctl(void)
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index ac5caf5a48e1..210b75e3179e 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -16,6 +16,7 @@ rxrpc-y := \
conn_object.o \
conn_service.o \
input.o \
+ input_rack.o \
insecure.o \
io_thread.o \
key.o \
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 465bfe5eb061..86873399f7d5 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -65,7 +65,7 @@ static void rxrpc_write_space(struct sock *sk)
if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
@@ -408,9 +408,9 @@ void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call)
/* Make sure we're not going to call back into a kernel service */
if (call->notify_rx) {
- spin_lock(&call->notify_lock);
+ spin_lock_irq(&call->notify_lock);
call->notify_rx = rxrpc_dummy_notify_rx;
- spin_unlock(&call->notify_lock);
+ spin_unlock_irq(&call->notify_lock);
}
}
mutex_unlock(&call->user_mutex);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
* rxrpc_kernel_set_max_life - Set maximum lifespan on a call
* @sock: The socket the call is on
* @call: The call to configure
- * @hard_timeout: The maximum lifespan of the call in jiffies
+ * @hard_timeout: The maximum lifespan of the call in ms
*
* Set the maximum lifespan of a call. The call will end with ETIME or
* ETIMEDOUT if it takes longer than this.
@@ -495,14 +495,14 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
unsigned long hard_timeout)
{
- unsigned long now;
+ ktime_t delay = ms_to_ktime(hard_timeout), expect_term_by;
mutex_lock(&call->user_mutex);
- now = jiffies;
- hard_timeout += now;
- WRITE_ONCE(call->expect_term_by, hard_timeout);
- rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
+ expect_term_by = ktime_add(ktime_get_real(), delay);
+ WRITE_ONCE(call->expect_term_by, expect_term_by);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
mutex_unlock(&call->user_mutex);
}
@@ -707,9 +707,10 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
ret = -EISCONN;
if (rx->sk.sk_state != RXRPC_UNBOUND)
goto error;
- ret = copy_from_sockptr(&min_sec_level, optval,
- sizeof(unsigned int));
- if (ret < 0)
+ ret = copy_safe_from_sockptr(&min_sec_level,
+ sizeof(min_sec_level),
+ optval, optlen);
+ if (ret)
goto error;
ret = -EINVAL;
if (min_sec_level > RXRPC_SECURITY_MAX)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7818aae1be8e..a64a0cab1bf7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -30,6 +30,7 @@ struct rxrpc_crypt {
struct key_preparsed_payload;
struct rxrpc_connection;
struct rxrpc_txbuf;
+struct rxrpc_txqueue;
/*
* Mark applied to socket buffers in skb->mark. skb->priority is used
@@ -98,6 +99,7 @@ struct rxrpc_net {
atomic_t stat_tx_data_send;
atomic_t stat_tx_data_send_frag;
atomic_t stat_tx_data_send_fail;
+ atomic_t stat_tx_data_send_msgsize;
atomic_t stat_tx_data_underflow;
atomic_t stat_tx_data_cwnd_reset;
atomic_t stat_rx_data;
@@ -109,6 +111,8 @@ struct rxrpc_net {
atomic_t stat_tx_ack_skip;
atomic_t stat_tx_acks[256];
atomic_t stat_rx_acks[256];
+ atomic_t stat_tx_jumbo[10];
+ atomic_t stat_rx_jumbo[10];
atomic_t stat_why_req_ack[8];
@@ -198,8 +202,8 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
union {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
struct {
u16 offset; /* Offset of data */
u16 len; /* Length of data */
@@ -208,9 +212,11 @@ struct rxrpc_skb_priv {
};
struct {
rxrpc_seq_t first_ack; /* First packet in acks table */
- u8 nr_acks; /* Number of acks+nacks */
- u8 nr_nacks; /* Number of nacks */
- };
+ rxrpc_seq_t prev_ack; /* Highest seq seen */
+ rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */
+ u16 nr_acks; /* Number of acks+nacks */
+ u8 reason; /* Reason for ack */
+ } ack;
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
@@ -248,10 +254,9 @@ struct rxrpc_security {
struct rxrpc_key_token *);
/* Work out how much data we can store in a packet, given an estimate
- * of the amount of data remaining.
+ * of the amount of data remaining and allocate a data buffer.
*/
- int (*how_much_data)(struct rxrpc_call *, size_t,
- size_t *, size_t *, size_t *);
+ struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp);
/* impose security on a packet */
int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);
@@ -292,6 +297,7 @@ struct rxrpc_local {
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
struct completion io_thread_ready; /* Indication that the I/O thread started */
+ struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
struct sk_buff_head rx_delay_queue; /* Delay injection queue */
@@ -317,6 +323,12 @@ struct rxrpc_local {
struct list_head new_client_calls; /* Newly created client calls need connection */
spinlock_t client_call_lock; /* Lock for ->new_client_calls */
struct sockaddr_rxrpc srx; /* local address */
+ /* Provide a kvec table sufficiently large to manage either a DATA
+ * packet with a maximum set of jumbo subpackets or a PING ACK padded
+ * out to 64K with zeropages for PMTUD.
+ */
+ struct kvec kvec[1 + RXRPC_MAX_NR_JUMBO > 3 + 16 ?
+ 1 + RXRPC_MAX_NR_JUMBO : 3 + 16];
};
/*
@@ -335,25 +347,27 @@ struct rxrpc_peer {
time64_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
- unsigned int if_mtu; /* interface MTU for this peer */
- unsigned int mtu; /* network MTU for this peer */
- unsigned int maxdata; /* data size (MTU - hdrsize) */
- unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
int debug_id; /* debug ID for printks */
struct sockaddr_rxrpc srx; /* remote address */
- /* calculated RTT cache */
-#define RXRPC_RTT_CACHE_SIZE 32
- spinlock_t rtt_input_lock; /* RTT lock for input routine */
- ktime_t rtt_last_req; /* Time of last RTT request */
- unsigned int rtt_count; /* Number of samples we've got */
+ /* Path MTU discovery [RFC8899] */
+ unsigned int pmtud_trial; /* Current MTU probe size */
+ unsigned int pmtud_good; /* Largest working MTU probe we've tried */
+ unsigned int pmtud_bad; /* Smallest non-working MTU probe we've tried */
+ bool pmtud_lost; /* T if MTU probe was lost */
+ bool pmtud_probing; /* T if we have an active probe outstanding */
+ bool pmtud_pending; /* T if a call to this peer should send a probe */
+ u8 pmtud_jumbo; /* Max jumbo packets for the MTU */
+ bool ackr_adv_pmtud; /* T if the peer advertises path-MTU */
+ unsigned int ackr_max_data; /* Maximum data advertised by peer */
+ unsigned int if_mtu; /* Local interface MTU (- hdrsize) for this peer */
+ unsigned int max_data; /* Maximum packet data capacity for this peer */
+ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
+ unsigned short tx_seg_max; /* Maximum number of transmissible segments */
- u32 srtt_us; /* smoothed round trip time << 3 in usecs */
- u32 mdev_us; /* medium deviation */
- u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u32 rttvar_us; /* smoothed mdev_max */
- u32 rto_j; /* Retransmission timeout in jiffies */
- u8 backoff; /* Backoff timeout */
+ /* Calculated RTT cache */
+ unsigned int recent_srtt_us;
+ unsigned int recent_rto_us;
u8 cong_ssthresh; /* Congestion slow-start threshold */
};
@@ -500,6 +514,8 @@ struct rxrpc_connection {
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+ struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */
+ struct mutex tx_data_alloc_lock;
struct mutex security_lock; /* Lock for security management */
const struct rxrpc_security *security; /* applied security module */
@@ -520,6 +536,8 @@ struct rxrpc_connection {
int debug_id; /* debug ID for printks */
rxrpc_serial_t tx_serial; /* Outgoing packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
+ rxrpc_serial_t pmtud_probe; /* Serial of MTU probe (or 0) */
+ unsigned int pmtud_call; /* ID of call used for probe */
u32 service_id; /* Service ID, possibly upgraded */
u32 security_level; /* Security level selected */
u8 security_ix; /* security type */
@@ -552,6 +570,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
RXRPC_CALL_TX_ALL_ACKED, /* Last packet has been hard-acked */
+ RXRPC_CALL_TX_NO_MORE, /* No more data to transmit (MSG_MORE deasserted) */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
@@ -562,6 +581,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */
RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */
RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */
+ RXRPC_CALL_CONN_CHALLENGING, /* The connection is being challenged */
};
/*
@@ -582,7 +602,6 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
- RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
@@ -594,13 +613,25 @@ enum rxrpc_call_state {
/*
* Call Tx congestion management modes.
*/
-enum rxrpc_congest_mode {
- RXRPC_CALL_SLOW_START,
- RXRPC_CALL_CONGEST_AVOIDANCE,
- RXRPC_CALL_PACKET_LOSS,
- RXRPC_CALL_FAST_RETRANSMIT,
- NR__RXRPC_CONGEST_MODES
-};
+enum rxrpc_ca_state {
+ RXRPC_CA_SLOW_START,
+ RXRPC_CA_CONGEST_AVOIDANCE,
+ RXRPC_CA_PACKET_LOSS,
+ RXRPC_CA_FAST_RETRANSMIT,
+ NR__RXRPC_CA_STATES
+} __mode(byte);
+
+/*
+ * Current purpose of call RACK timer. According to the RACK-TLP protocol
+ * [RFC8985], the transmission timer (call->rack_timo_at) may only be used for
+ * one of these at once.
+ */
+enum rxrpc_rack_timer_mode {
+ RXRPC_CALL_RACKTIMER_OFF, /* Timer not running */
+ RXRPC_CALL_RACKTIMER_RACK_REORDER, /* RACK reordering timer */
+ RXRPC_CALL_RACKTIMER_TLP_PTO, /* TLP timeout */
+ RXRPC_CALL_RACKTIMER_RTO, /* Retransmission timeout */
+} __mode(byte);
/*
* RxRPC call definition
@@ -618,17 +649,16 @@ struct rxrpc_call {
const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
struct sockaddr_rxrpc dest_srx; /* Destination address */
- unsigned long delay_ack_at; /* When DELAY ACK needs to happen */
- unsigned long ack_lost_at; /* When ACK is figured as lost */
- unsigned long resend_at; /* When next resend needs to happen */
- unsigned long ping_at; /* When next to send a ping */
- unsigned long keepalive_at; /* When next to send a keepalive ping */
- unsigned long expect_rx_by; /* When we expect to get a packet by */
- unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
- unsigned long expect_term_by; /* When we expect call termination by */
- u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
- u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
- u32 hard_timo; /* Maximum lifetime or 0 (jif) */
+ ktime_t delay_ack_at; /* When DELAY ACK needs to happen */
+ ktime_t rack_timo_at; /* When ACK is figured as lost */
+ ktime_t ping_at; /* When next to send a ping */
+ ktime_t keepalive_at; /* When next to send a keepalive ping */
+ ktime_t expect_rx_by; /* When we expect to get a packet by */
+ ktime_t expect_req_by; /* When we expect to get a request DATA packet by */
+ ktime_t expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (ms) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (ms) */
+ u32 hard_timo; /* Maximum lifetime or 0 (s) */
struct timer_list timer; /* Combined event timer */
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -665,21 +695,30 @@ struct rxrpc_call {
unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
unsigned short rx_pkt_len; /* Current recvmsg packet len */
+ /* Sendmsg data tracking. */
+ rxrpc_seq_t send_top; /* Highest Tx slot filled by sendmsg. */
+ struct rxrpc_txqueue *send_queue; /* Queue that sendmsg is writing into */
+
/* Transmitted data tracking. */
- spinlock_t tx_lock; /* Transmit queue lock */
- struct list_head tx_sendmsg; /* Sendmsg prepared packets */
- struct list_head tx_buffer; /* Buffer of transmissible packets */
+ struct rxrpc_txqueue *tx_queue; /* Start of transmission buffers */
+ struct rxrpc_txqueue *tx_qtail; /* End of transmission buffers */
+ rxrpc_seq_t tx_qbase; /* First slot in tx_queue */
rxrpc_seq_t tx_bottom; /* First packet in buffer */
rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */
- rxrpc_seq_t tx_prepared; /* Highest Tx slot prepared. */
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
- u16 tx_backoff; /* Delay to insert due to Tx failure */
- u8 tx_winsize; /* Maximum size of Tx window */
+ rxrpc_serial_t tx_last_serial; /* Serial of last DATA transmitted */
+ u16 tx_backoff; /* Delay to insert due to Tx failure (ms) */
+ u16 tx_nr_sent; /* Number of packets sent, but unacked */
+ u16 tx_nr_lost; /* Number of packets marked lost */
+ u16 tx_nr_resent; /* Number of packets resent, but unacked */
+ u16 tx_winsize; /* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW 128
+ u8 tx_jumbo_max; /* Maximum subpkts peer will accept */
ktime_t tx_last_sent; /* Last time a transmission occurred */
/* Received data tracking */
struct sk_buff_head recvmsg_queue; /* Queue of packets ready for recvmsg() */
+ struct sk_buff_head rx_queue; /* Queue of packets for this call to receive */
struct sk_buff_head rx_oos_queue; /* Queue of out of sequence packets */
rxrpc_seq_t rx_highest_seq; /* Highest sequence number received */
@@ -692,15 +731,33 @@ struct rxrpc_call {
* packets) rather than bytes.
*/
#define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
-#define RXRPC_MIN_CWND (RXRPC_TX_SMSS > 2190 ? 2 : RXRPC_TX_SMSS > 1095 ? 3 : 4)
- u8 cong_cwnd; /* Congestion window size */
+#define RXRPC_MIN_CWND 4
+ enum rxrpc_ca_state cong_ca_state; /* Congestion control state */
u8 cong_extra; /* Extra to send for congestion management */
- u8 cong_ssthresh; /* Slow-start threshold */
- enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
- u8 cong_dup_acks; /* Count of ACKs showing missing packets */
- u8 cong_cumul_acks; /* Cumulative ACK count */
+ u16 cong_cwnd; /* Congestion window size */
+ u16 cong_ssthresh; /* Slow-start threshold */
+ u16 cong_dup_acks; /* Count of ACKs showing missing packets */
+ u16 cong_cumul_acks; /* Cumulative ACK count */
ktime_t cong_tstamp; /* Last time cwnd was changed */
- struct sk_buff *cong_last_nack; /* Last ACK with nacks received */
+
+ /* RACK-TLP [RFC8985] state. */
+ ktime_t rack_xmit_ts; /* Latest transmission timestamp */
+ ktime_t rack_rtt; /* RTT of most recently ACK'd segment */
+ ktime_t rack_rtt_ts; /* Timestamp of rack_rtt */
+ ktime_t rack_reo_wnd; /* Reordering window */
+ unsigned int rack_reo_wnd_mult; /* Multiplier applied to rack_reo_wnd */
+ int rack_reo_wnd_persist; /* Num loss recoveries before reset reo_wnd */
+ rxrpc_seq_t rack_fack; /* Highest sequence so far ACK'd */
+ rxrpc_seq_t rack_end_seq; /* Highest sequence seen */
+ rxrpc_seq_t rack_dsack_round; /* DSACK opt recv'd in latest roundtrip */
+ bool rack_dsack_round_none; /* T if dsack_round is "None" */
+ bool rack_reordering_seen; /* T if detected reordering event */
+ enum rxrpc_rack_timer_mode rack_timer_mode; /* Current mode of RACK timer */
+ bool tlp_is_retrans; /* T if unacked TLP retransmission */
+ rxrpc_serial_t tlp_serial; /* Serial of TLP probe (or 0 if none in progress) */
+ rxrpc_seq_t tlp_seq; /* Sequence of TLP probe */
+ unsigned int tlp_rtt_taken; /* Last time RTT taken */
+ ktime_t tlp_max_ack_delay; /* Sender budget for max delayed ACK interval */
/* Receive-phase ACK management (ACKs we send). */
u8 ackr_reason; /* reason to ACK */
@@ -725,32 +782,45 @@ struct rxrpc_call {
/* Transmission-phase ACK management (ACKs we've received). */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
- rxrpc_seq_t acks_first_seq; /* first sequence number received */
+ rxrpc_seq_t acks_hard_ack; /* Highest sequence hard acked */
rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */
- rxrpc_seq_t acks_hard_ack; /* Latest hard-ack point */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */
+ unsigned short acks_nr_sacks; /* Number of soft acks recorded */
+ unsigned short acks_nr_snacks; /* Number of soft nacks recorded */
+
+ /* Calculated RTT cache */
+ ktime_t rtt_last_req; /* Time of last RTT request */
+ unsigned int rtt_count; /* Number of samples we've got */
+ unsigned int rtt_taken; /* Number of samples taken (wrapping) */
+ struct minmax min_rtt; /* Estimated minimum RTT */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 mdev_us; /* medium deviation */
+ u32 mdev_max_us; /* maximal mdev for the last rtt period */
+ u32 rttvar_us; /* smoothed mdev_max */
+ u32 rto_us; /* Retransmission timeout in usec */
+ u8 backoff; /* Backoff timeout (as shift) */
};
/*
* Summary of a new ACK and the changes it made to the Tx buffer packet states.
*/
struct rxrpc_ack_summary {
- u16 nr_acks; /* Number of ACKs in packet */
- u16 nr_new_acks; /* Number of new ACKs in packet */
- u16 nr_new_nacks; /* Number of new nacks in packet */
- u16 nr_retained_nacks; /* Number of nacks retained between ACKs */
- u8 ack_reason;
- bool saw_nacks; /* Saw NACKs in packet */
- bool new_low_nack; /* T if new low NACK found */
- bool retrans_timeo; /* T if reTx due to timeout happened */
- u8 flight_size; /* Number of unreceived transmissions */
- /* Place to stash values for tracing */
- enum rxrpc_congest_mode mode:8;
- u8 cwnd;
- u8 ssthresh;
- u8 dup_acks;
- u8 cumulative_acks;
+ rxrpc_serial_t ack_serial; /* Serial number of ACK */
+ rxrpc_serial_t acked_serial; /* Serial number ACK'd */
+ u16 in_flight; /* Number of unreceived transmissions */
+ u16 nr_new_hacks; /* Number of rotated new ACKs */
+ u16 nr_new_sacks; /* Number of new soft ACKs in packet */
+ u16 nr_new_snacks; /* Number of new soft nacks in packet */
+ u8 ack_reason;
+ bool new_low_snack:1; /* T if new low soft NACK found */
+ bool retrans_timeo:1; /* T if reTx due to timeout happened */
+ bool need_retransmit:1; /* T if we need transmission */
+ bool rtt_sample_avail:1; /* T if RTT sample available */
+ bool in_fast_or_rto_recovery:1;
+ bool exiting_fast_or_rto_recovery:1;
+ bool tlp_probe_acked:1; /* T if the TLP probe seq was acked */
+ u8 /*enum rxrpc_congest_change*/ change;
};
/*
@@ -788,40 +858,27 @@ struct rxrpc_send_params {
* Buffer of data to be output as a packet.
*/
struct rxrpc_txbuf {
- struct rcu_head rcu;
- struct list_head call_link; /* Link in call->tx_sendmsg/tx_buffer */
- struct list_head tx_link; /* Link in live Enc queue or Tx queue */
- ktime_t last_sent; /* Time at which last transmitted */
refcount_t ref;
rxrpc_seq_t seq; /* Sequence number of this packet */
+ rxrpc_serial_t serial; /* Last serial number transmitted with */
unsigned int call_debug_id;
unsigned int debug_id;
- unsigned int len; /* Amount of data in buffer */
- unsigned int space; /* Remaining data space */
- unsigned int offset; /* Offset of fill point */
- unsigned long flags;
-#define RXRPC_TXBUF_LAST 0 /* Set if last packet in Tx phase */
-#define RXRPC_TXBUF_RESENT 1 /* Set if has been resent */
- u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
- struct {
- /* The packet for encrypting and DMA'ing. We align it such
- * that data[] aligns correctly for any crypto blocksize.
- */
- u8 pad[64 - sizeof(struct rxrpc_wire_header)];
- struct rxrpc_wire_header wire; /* Network-ready header */
- union {
- u8 data[RXRPC_JUMBO_DATALEN]; /* Data packet */
- struct {
- struct rxrpc_ackpacket ack;
- DECLARE_FLEX_ARRAY(u8, acks);
- };
- };
- } __aligned(64);
+ unsigned short len; /* Amount of data in buffer */
+ unsigned short space; /* Remaining data space */
+ unsigned short offset; /* Offset of fill point */
+ unsigned short pkt_len; /* Size of packet content */
+ unsigned short alloc_size; /* Amount of bufferage allocated */
+ unsigned int flags;
+#define RXRPC_TXBUF_WIRE_FLAGS 0xff /* The wire protocol flags */
+#define RXRPC_TXBUF_RESENT 0x100 /* Set if has been resent */
+ __be16 cksum; /* Checksum to go in header */
+ bool jumboable; /* Can be non-terminal jumbo subpacket */
+ void *data; /* Data with preceding jumbo header */
};
static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
- return txb->wire.flags & RXRPC_CLIENT_INITIATED;
+ return txb->flags & RXRPC_CLIENT_INITIATED;
}
static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
@@ -829,6 +886,46 @@ static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
return !rxrpc_sending_to_server(txb);
}
+/*
+ * Transmit queue element, including RACK [RFC8985] per-segment metadata. The
+ * transmission timestamp is in usec from the base.
+ */
+struct rxrpc_txqueue {
+ /* Start with the members we want to prefetch. */
+ struct rxrpc_txqueue *next;
+ ktime_t xmit_ts_base;
+ rxrpc_seq_t qbase;
+ u8 nr_reported_acks; /* Number of segments explicitly acked/nacked */
+ unsigned long segment_acked; /* Bit-per-buf: Set if ACK'd */
+ unsigned long segment_lost; /* Bit-per-buf: Set if declared lost */
+ unsigned long segment_retransmitted; /* Bit-per-buf: Set if retransmitted */
+ unsigned long rtt_samples; /* Bit-per-buf: Set if available for RTT */
+ unsigned long ever_retransmitted; /* Bit-per-buf: Set if ever retransmitted */
+
+ /* The arrays we want to pack into as few cache lines as possible. */
+ struct {
+#define RXRPC_NR_TXQUEUE BITS_PER_LONG
+#define RXRPC_TXQ_MASK (RXRPC_NR_TXQUEUE - 1)
+ struct rxrpc_txbuf *bufs[RXRPC_NR_TXQUEUE];
+ unsigned int segment_serial[RXRPC_NR_TXQUEUE];
+ unsigned int segment_xmit_ts[RXRPC_NR_TXQUEUE];
+ } ____cacheline_aligned;
+};
+
+/*
+ * Data transmission request.
+ */
+struct rxrpc_send_data_req {
+ ktime_t now; /* Current time */
+ struct rxrpc_txqueue *tq; /* Tx queue segment holding first DATA */
+ rxrpc_seq_t seq; /* Sequence of first data */
+ int n; /* Number of DATA packets to glue into jumbo */
+ bool retrans; /* T if this is a retransmission */
+ bool did_send; /* T if did actually send */
+ bool tlp_probe; /* T if this is a TLP probe */
+ int /* enum rxrpc_txdata_trace */ trace;
+};
+
#include <trace/events/rxrpc.h>
/*
@@ -846,6 +943,21 @@ static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn
}
/*
+ * Allocate the next n serial numbers on a connection. Serial number 0 must be skipped.
+ */
+static inline rxrpc_serial_t rxrpc_get_next_serials(struct rxrpc_connection *conn,
+ unsigned int n)
+{
+ rxrpc_serial_t serial;
+
+ serial = conn->tx_serial;
+ if (serial + n <= n)
+ serial = 1;
+ conn->tx_serial = serial + n;
+ return serial;
+}
+
+/*
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_rx_skbs;
@@ -861,7 +973,6 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_connection *conn,
struct sockaddr_rxrpc *peer_srx,
struct sk_buff *skb);
-void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
/*
@@ -869,18 +980,12 @@ int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
*/
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why);
-void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
enum rxrpc_propose_ack_trace);
-void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
-void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);
-
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why);
-
-bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
+void rxrpc_resend_tlp(struct rxrpc_call *call);
+void rxrpc_transmit_some_data(struct rxrpc_call *call, unsigned int limit,
+ enum rxrpc_txdata_trace trace);
+bool rxrpc_input_call_event(struct rxrpc_call *call);
/*
* call_object.c
@@ -980,7 +1085,6 @@ void rxrpc_connect_client_calls(struct rxrpc_local *local);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
-void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);
@@ -1060,6 +1164,32 @@ void rxrpc_input_call_packet(struct rxrpc_call *, struct sk_buff *);
void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);
/*
+ * input_rack.c
+ */
+void rxrpc_input_rack_one(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned int ix);
+void rxrpc_input_rack(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned long new_acks);
+void rxrpc_rack_detect_loss_and_arm_timer(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary);
+ktime_t rxrpc_tlp_calc_pto(struct rxrpc_call *call, ktime_t now);
+void rxrpc_tlp_send_probe(struct rxrpc_call *call);
+void rxrpc_tlp_process_ack(struct rxrpc_call *call, struct rxrpc_ack_summary *summary);
+void rxrpc_rack_timer_expired(struct rxrpc_call *call, ktime_t overran_by);
+
+/* Initialise TLP state [RFC8985 7.1]. */
+static inline void rxrpc_tlp_init(struct rxrpc_call *call)
+{
+ call->tlp_serial = 0;
+ call->tlp_seq = call->acks_hard_ack;
+ call->tlp_is_retrans = false;
+}
+
+/*
* io_thread.c
*/
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
@@ -1069,7 +1199,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
- wake_up_process(local->io_thread);
+ wake_up_process(READ_ONCE(local->io_thread));
}
static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
@@ -1160,19 +1290,22 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why);
+void rxrpc_send_probe_for_pmtud(struct rxrpc_call *call);
int rxrpc_send_abort_packet(struct rxrpc_call *);
-int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
+void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req *req);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
-void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
/*
* peer_event.c
*/
void rxrpc_input_error(struct rxrpc_local *, struct sk_buff *);
void rxrpc_peer_keepalive_worker(struct work_struct *);
+void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t acked_serial,
+ bool sendmsg_fail);
/*
* peer_object.c
@@ -1221,10 +1354,12 @@ static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
/*
* rtt.c
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
- rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
-void rxrpc_peer_init_rtt(struct rxrpc_peer *);
+void rxrpc_call_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int rtt_slot,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ ktime_t send_time, ktime_t resp_time);
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_call *call, bool retrans);
+void rxrpc_call_init_rtt(struct rxrpc_call *call);
/*
* rxkad.c
@@ -1295,8 +1430,8 @@ static inline void rxrpc_sysctl_exit(void) {}
* txbuf.c
*/
extern atomic_t rxrpc_nr_txbuf;
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
@@ -1323,6 +1458,53 @@ static inline bool after_eq(u32 seq1, u32 seq2)
return (s32)(seq1 - seq2) >= 0;
}
+static inline u32 earliest(u32 seq1, u32 seq2)
+{
+ return before(seq1, seq2) ? seq1 : seq2;
+}
+
+static inline u32 latest(u32 seq1, u32 seq2)
+{
+ return after(seq1, seq2) ? seq1 : seq2;
+}
+
+static inline bool rxrpc_seq_in_txq(const struct rxrpc_txqueue *tq, rxrpc_seq_t seq)
+{
+ return (seq & ~RXRPC_TXQ_MASK) == tq->qbase;
+}
+
+static inline void rxrpc_queue_rx_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ rxrpc_get_skb(skb, rxrpc_skb_get_call_rx);
+ __skb_queue_tail(&call->rx_queue, skb);
+ rxrpc_poke_call(call, rxrpc_call_poke_rx_packet);
+}
+
+/*
+ * Calculate how much space there is for transmitting more DATA packets.
+ */
+static inline unsigned int rxrpc_tx_window_space(const struct rxrpc_call *call)
+{
+ int winsize = umin(call->tx_winsize, call->cong_cwnd + call->cong_extra);
+ int transmitted = call->tx_top - call->tx_bottom;
+
+ return max(winsize - transmitted, 0);
+}
+
+static inline unsigned int rxrpc_left_out(const struct rxrpc_call *call)
+{
+ return call->acks_nr_sacks + call->tx_nr_lost;
+}
+
+/*
+ * Calculate the number of transmitted DATA packets assumed to be in flight
+ * [approx RFC6675].
+ */
+static inline unsigned int rxrpc_tx_in_flight(const struct rxrpc_call *call)
+{
+ return call->tx_nr_sent - rxrpc_left_out(call) + call->tx_nr_resent;
+}
+
/*
* debug tracing
*/
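
The before()/after() family used throughout this header (and the new earliest()/latest() helpers) relies on signed 32-bit subtraction so comparisons stay correct across sequence-number wrap. A small self-contained illustration (example names only):

#include <linux/types.h>

/* Wrap-tolerant ordering, mirroring the helpers above. */
static inline bool example_seq_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

/* Worked example: example_seq_after(5, 0xfffffff0) is true, because
 * 5 - 0xfffffff0 == 0x15 (21) when reduced to a signed 32-bit value,
 * i.e. sequence 5 follows 0xfffffff0 once the counter has wrapped.
 */
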
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 0f5a1d77b890..e685034ce4f7 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -188,8 +188,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
/* Make sure that there aren't any incoming calls in progress before we
* clear the preallocation buffers.
*/
- spin_lock(&rx->incoming_lock);
- spin_unlock(&rx->incoming_lock);
+ spin_lock_irq(&rx->incoming_lock);
+ spin_unlock_irq(&rx->incoming_lock);
head = b->peer_backlog_head;
tail = b->peer_backlog_tail;
@@ -343,7 +343,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
- read_lock(&local->services_lock);
+ read_lock_irq(&local->services_lock);
/* Weed out packets to services we're not offering. Packets that would
* begin a call are explicitly rejected and the rest are just
@@ -399,34 +399,34 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
spin_unlock(&conn->state_lock);
spin_unlock(&rx->incoming_lock);
- read_unlock(&local->services_lock);
+ read_unlock_irq(&local->services_lock);
if (hlist_unhashed(&call->error_link)) {
- spin_lock(&call->peer->lock);
+ spin_lock_irq(&call->peer->lock);
hlist_add_head(&call->error_link, &call->peer->error_targets);
- spin_unlock(&call->peer->lock);
+ spin_unlock_irq(&call->peer->lock);
}
_leave(" = %p{%d}", call, call->debug_id);
- rxrpc_input_call_event(call, skb);
+ rxrpc_queue_rx_call_packet(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
return true;
unsupported_service:
- read_unlock(&local->services_lock);
+ read_unlock_irq(&local->services_lock);
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
- read_unlock(&local->services_lock);
+ read_unlock_irq(&local->services_lock);
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
spin_unlock(&rx->incoming_lock);
- read_unlock(&local->services_lock);
+ read_unlock_irq(&local->services_lock);
_leave(" = f [%u]", skb->mark);
return false;
discard:
- read_unlock(&local->services_lock);
+ read_unlock_irq(&local->services_lock);
return true;
}
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 0f78544d043b..8e477f7f8850 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -23,14 +23,14 @@
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long now = jiffies;
- unsigned long ping_at = now + rxrpc_idle_ack_delay;
-
- if (time_before(ping_at, call->ping_at)) {
- WRITE_ONCE(call->ping_at, ping_at);
- rxrpc_reduce_call_timer(call, ping_at, now,
- rxrpc_timer_set_for_ping);
- trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));
+ ktime_t now = ktime_get_real();
+ ktime_t ping_at = ktime_add(now, delay);
+
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ if (ktime_before(ping_at, call->ping_at)) {
+ call->ping_at = ping_at;
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_ping);
}
}
@@ -40,215 +40,119 @@ void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long expiry = rxrpc_soft_ack_delay;
- unsigned long now = jiffies, ack_at;
+ ktime_t now = ktime_get_real(), delay;
- if (rxrpc_soft_ack_delay < expiry)
- expiry = rxrpc_soft_ack_delay;
- if (call->peer->srtt_us != 0)
- ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
+
+ if (call->srtt_us)
+ delay = (call->srtt_us >> 3) * NSEC_PER_USEC;
else
- ack_at = expiry;
-
- ack_at += READ_ONCE(call->tx_backoff);
- ack_at += now;
- if (time_before(ack_at, call->delay_ack_at)) {
- WRITE_ONCE(call->delay_ack_at, ack_at);
- rxrpc_reduce_call_timer(call, ack_at, now,
- rxrpc_timer_set_for_ack);
- }
+ delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));
+ ktime_add_ms(delay, call->tx_backoff);
- trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
+ call->delay_ack_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_delayed_ack);
}
/*
- * Queue an ACK for immediate transmission.
+ * Retransmit one or more packets.
*/
-void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
- rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
+static bool rxrpc_retransmit_data(struct rxrpc_call *call,
+ struct rxrpc_send_data_req *req)
{
- struct rxrpc_txbuf *txb;
+ struct rxrpc_txqueue *tq = req->tq;
+ unsigned int ix = req->seq & RXRPC_TXQ_MASK;
+ struct rxrpc_txbuf *txb = tq->bufs[ix];
- if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return;
+ _enter("%x,%x,%x,%x", tq->qbase, req->seq, ix, txb->debug_id);
- rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+ req->retrans = true;
+ trace_rxrpc_retransmit(call, req, txb);
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
- rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
- if (!txb) {
- kleave(" = -ENOMEM");
- return;
- }
-
- txb->ack_why = why;
- txb->wire.seq = 0;
- txb->wire.type = RXRPC_PACKET_TYPE_ACK;
- txb->wire.flags |= RXRPC_SLOW_START_OK;
- txb->ack.bufferSpace = 0;
- txb->ack.maxSkew = 0;
- txb->ack.firstPacket = 0;
- txb->ack.previousPacket = 0;
- txb->ack.serial = htonl(serial);
- txb->ack.reason = ack_reason;
- txb->ack.nAcks = 0;
-
- trace_rxrpc_send_ack(call, why, ack_reason, serial);
- rxrpc_send_ack_packet(call, txb);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
-}
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_send_data_packet(call, req);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
-/*
- * Handle congestion being detected by the retransmit timeout.
- */
-static void rxrpc_congestion_timeout(struct rxrpc_call *call)
-{
- set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
+ req->tq = NULL;
+ req->n = 0;
+ req->did_send = true;
+ req->now = ktime_get_real();
+ return true;
}
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
-void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
+static void rxrpc_resend(struct rxrpc_call *call)
{
- struct rxrpc_ackpacket *ack = NULL;
- struct rxrpc_skb_priv *sp;
- struct rxrpc_txbuf *txb;
- unsigned long resend_at;
- rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
- ktime_t now, max_age, oldest, ack_ts;
- bool unacked = false;
- unsigned int i;
- LIST_HEAD(retrans_queue);
-
- _enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
-
- now = ktime_get_real();
- max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
- oldest = now;
-
- if (list_empty(&call->tx_buffer))
- goto no_resend;
-
- if (list_empty(&call->tx_buffer))
- goto no_further_resend;
+ struct rxrpc_send_data_req req = {
+ .now = ktime_get_real(),
+ .trace = rxrpc_txdata_retransmit,
+ };
+ struct rxrpc_txqueue *tq;
- trace_rxrpc_resend(call, ack_skb);
- txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
+ _enter("{%d,%d}", call->tx_bottom, call->tx_top);
- /* Scan the soft ACK table without dropping the lock and resend any
- * explicitly NAK'd packets.
- */
- if (ack_skb) {
- sp = rxrpc_skb(ack_skb);
- ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
+ trace_rxrpc_resend(call, call->acks_highest_serial);
- for (i = 0; i < sp->nr_acks; i++) {
- rxrpc_seq_t seq;
+ /* Scan the transmission queue, looking for lost packets. */
+ for (tq = call->tx_queue; tq; tq = tq->next) {
+ unsigned long lost = tq->segment_lost;
- if (ack->acks[i] & 1)
- continue;
- seq = sp->first_ack + i;
- if (after(txb->seq, transmitted))
- break;
- if (after(txb->seq, seq))
- continue; /* A new hard ACK probably came in */
- list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
- if (txb->seq == seq)
- goto found_txb;
- }
- goto no_further_resend;
-
- found_txb:
- if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
- continue; /* Ack point not yet reached */
+ if (after(tq->qbase, call->tx_transmitted))
+ break;
- rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
+ _debug("retr %16lx %u c=%08x [%x]",
+ tq->segment_acked, tq->nr_reported_acks, call->debug_id, tq->qbase);
+ _debug("lost %16lx", lost);
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- }
+ trace_rxrpc_resend_lost(call, tq, lost);
+ while (lost) {
+ unsigned int ix = __ffs(lost);
+ struct rxrpc_txbuf *txb = tq->bufs[ix];
- trace_rxrpc_retransmit(call, txb->seq,
- ktime_to_ns(ktime_sub(txb->last_sent,
- max_age)));
+ __clear_bit(ix, &lost);
+ rxrpc_see_txbuf(txb, rxrpc_txbuf_see_lost);
- if (list_is_last(&txb->call_link, &call->tx_buffer))
- goto no_further_resend;
- txb = list_next_entry(txb, call_link);
+ req.tq = tq;
+ req.seq = tq->qbase + ix;
+ req.n = 1;
+ rxrpc_retransmit_data(call, &req);
}
}
- /* Fast-forward through the Tx queue to the point the peer says it has
- * seen. Anything between the soft-ACK table and that point will get
- * ACK'd or NACK'd in due course, so don't worry about it here; here we
- * need to consider retransmitting anything beyond that point.
- *
- * Note that ACK for a packet can beat the update of tx_transmitted.
- */
- if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
- goto no_further_resend;
-
- list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
- if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
- continue;
- if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
- break; /* Not transmitted yet */
-
- if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
- before(ntohl(txb->wire.serial), ntohl(ack->serial)))
- goto do_resend; /* Wasn't accounted for by a more recent ping. */
-
- if (ktime_after(txb->last_sent, max_age)) {
- if (ktime_before(txb->last_sent, oldest))
- oldest = txb->last_sent;
- continue;
- }
-
- do_resend:
- unacked = true;
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
- }
- }
+ rxrpc_get_rto_backoff(call, req.did_send);
+ _leave("");
+}
-no_further_resend:
-no_resend:
- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
- resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
- !list_empty(&retrans_queue));
- WRITE_ONCE(call->resend_at, resend_at);
-
- if (unacked)
- rxrpc_congestion_timeout(call);
-
- /* If there was nothing that needed retransmission then it's likely
- * that an ACK got lost somewhere. Send a ping to find out instead of
- * retransmitting data.
- */
- if (list_empty(&retrans_queue)) {
- rxrpc_reduce_call_timer(call, resend_at, jiffies,
- rxrpc_timer_set_for_resend);
- ack_ts = ktime_sub(now, call->acks_latest_ts);
- if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
- goto out;
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
- goto out;
+/*
+ * Resend the highest-seq DATA packet so far transmitted for RACK-TLP [RFC8985 7.3].
+ */
+void rxrpc_resend_tlp(struct rxrpc_call *call)
+{
+ struct rxrpc_send_data_req req = {
+ .now = ktime_get_real(),
+ .seq = call->tx_transmitted,
+ .n = 1,
+ .tlp_probe = true,
+ .trace = rxrpc_txdata_tlp_retransmit,
+ };
+
+ /* There's a chance it'll be on the tail segment of the queue. */
+ req.tq = READ_ONCE(call->tx_qtail);
+ if (req.tq &&
+ before(call->tx_transmitted, req.tq->qbase + RXRPC_NR_TXQUEUE)) {
+ rxrpc_retransmit_data(call, &req);
+ return;
}
- /* Retransmit the queue */
- while ((txb = list_first_entry_or_null(&retrans_queue,
- struct rxrpc_txbuf, tx_link))) {
- list_del_init(&txb->tx_link);
- rxrpc_transmit_one(call, txb);
+ for (req.tq = call->tx_queue; req.tq; req.tq = req.tq->next) {
+ if (after_eq(call->tx_transmitted, req.tq->qbase) &&
+ before(call->tx_transmitted, req.tq->qbase + RXRPC_NR_TXQUEUE)) {
+ rxrpc_retransmit_data(call, &req);
+ return;
+ }
}
-
-out:
- _leave("");
}
/*
@@ -257,13 +161,11 @@ out:
*/
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
-
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
- WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -286,68 +188,93 @@ static void rxrpc_close_tx_phase(struct rxrpc_call *call)
}
}
-static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
-{
- unsigned int winsize = min_t(unsigned int, call->tx_winsize,
- call->cong_cwnd + call->cong_extra);
- rxrpc_seq_t window = call->acks_hard_ack, wtop = window + winsize;
- rxrpc_seq_t tx_top = call->tx_top;
- int space;
-
- space = wtop - tx_top;
- return space > 0;
-}
-
/*
- * Decant some if the sendmsg prepared queue into the transmission buffer.
+ * Transmit some as-yet untransmitted data, to a maximum of the supplied limit.
*/
-static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
+static void rxrpc_transmit_fresh_data(struct rxrpc_call *call, unsigned int limit,
+ enum rxrpc_txdata_trace trace)
{
- struct rxrpc_txbuf *txb;
+ int space = rxrpc_tx_window_space(call);
if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
- if (list_empty(&call->tx_sendmsg))
+ if (call->send_top == call->tx_top)
return;
rxrpc_expose_client_call(call);
}
- while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
- struct rxrpc_txbuf, call_link))) {
- spin_lock(&call->tx_lock);
- list_del(&txb->call_link);
- spin_unlock(&call->tx_lock);
+ while (space > 0) {
+ struct rxrpc_send_data_req req = {
+ .now = ktime_get_real(),
+ .seq = call->tx_transmitted + 1,
+ .n = 0,
+ .trace = trace,
+ };
+ struct rxrpc_txqueue *tq;
+ struct rxrpc_txbuf *txb;
+ rxrpc_seq_t send_top, seq;
+ int limit = min(space, max(call->peer->pmtud_jumbo, 1));
+
+ /* Order send_top before the contents of the new txbufs and
+ * txqueue pointers
+ */
+ send_top = smp_load_acquire(&call->send_top);
+ if (call->tx_top == send_top)
+ break;
- call->tx_top = txb->seq;
- list_add_tail(&txb->call_link, &call->tx_buffer);
+ trace_rxrpc_transmit(call, send_top, space);
- if (txb->wire.flags & RXRPC_LAST_PACKET)
- rxrpc_close_tx_phase(call);
+ tq = call->tx_qtail;
+ seq = call->tx_top;
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_decant);
- rxrpc_transmit_one(call, txb);
+ do {
+ int ix;
- if (!rxrpc_tx_window_has_space(call))
- break;
+ seq++;
+ ix = seq & RXRPC_TXQ_MASK;
+ if (!ix) {
+ tq = tq->next;
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_decant_advance);
+ }
+ if (!req.tq)
+ req.tq = tq;
+ txb = tq->bufs[ix];
+ req.n++;
+ if (!txb->jumboable)
+ break;
+ } while (req.n < limit && before(seq, send_top));
+
+ if (txb->flags & RXRPC_LAST_PACKET) {
+ rxrpc_close_tx_phase(call);
+ tq = NULL;
+ }
+ call->tx_qtail = tq;
+ call->tx_top = seq;
+
+ space -= req.n;
+ rxrpc_send_data_packet(call, &req);
}
}
-static void rxrpc_transmit_some_data(struct rxrpc_call *call)
+void rxrpc_transmit_some_data(struct rxrpc_call *call, unsigned int limit,
+ enum rxrpc_txdata_trace trace)
{
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_SERVER_ACK_REQUEST:
- if (list_empty(&call->tx_sendmsg))
+ if (call->tx_bottom == READ_ONCE(call->send_top))
return;
rxrpc_begin_service_reply(call);
fallthrough;
case RXRPC_CALL_SERVER_SEND_REPLY:
case RXRPC_CALL_CLIENT_SEND_REQUEST:
- if (!rxrpc_tx_window_has_space(call))
+ if (!rxrpc_tx_window_space(call))
return;
- if (list_empty(&call->tx_sendmsg)) {
+ if (call->tx_bottom == READ_ONCE(call->send_top)) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_underflow);
return;
}
- rxrpc_decant_prepared_tx(call);
+ rxrpc_transmit_fresh_data(call, limit, trace);
break;
default:
return;
@@ -360,8 +287,8 @@ static void rxrpc_transmit_some_data(struct rxrpc_call *call)
*/
static void rxrpc_send_initial_ping(struct rxrpc_call *call)
{
- if (call->peer->rtt_count < 3 ||
- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+ if (call->rtt_count < 3 ||
+ ktime_before(ktime_add_ms(call->rtt_last_req, 1000),
ktime_get_real()))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_params);
@@ -370,10 +297,11 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
/*
* Handle retransmission and deferred ACK/abort generation.
*/
-bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+bool rxrpc_input_call_event(struct rxrpc_call *call)
{
- unsigned long now, next, t;
- bool resend = false, expired = false;
+ struct sk_buff *skb;
+ ktime_t now, t;
+ bool did_receive = false, saw_ack = false;
s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
@@ -383,9 +311,6 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
call->events);
- if (__rxrpc_call_is_complete(call))
- goto out;
-
/* Handle abort request locklessly, vs rxrpc_propose_abort(). */
abort_code = smp_load_acquire(&call->send_abort);
if (abort_code) {
@@ -394,113 +319,116 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
goto out;
}
- if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
- goto out;
+ do {
+ skb = __skb_dequeue(&call->rx_queue);
+ if (skb) {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ if (__rxrpc_call_is_complete(call) ||
+ skb->mark == RXRPC_SKB_MARK_ERROR) {
+ rxrpc_free_skb(skb, rxrpc_skb_put_call_rx);
+ goto out;
+ }
+
+ saw_ack |= sp->hdr.type == RXRPC_PACKET_TYPE_ACK;
+
+ rxrpc_input_call_packet(call, skb);
+ rxrpc_free_skb(skb, rxrpc_skb_put_call_rx);
+ did_receive = true;
+ }
+
+ t = ktime_sub(call->rack_timo_at, ktime_get_real());
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t,
+ rxrpc_timer_trace_rack_off + call->rack_timer_mode);
+ call->rack_timo_at = KTIME_MAX;
+ rxrpc_rack_timer_expired(call, t);
+ }
+
+ } while (!skb_queue_empty(&call->rx_queue));
/* If we see our async-event poke, check for timeout trippage. */
- now = jiffies;
- t = READ_ONCE(call->expect_rx_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
- expired = true;
+ now = ktime_get_real();
+ t = ktime_sub(call->expect_rx_by, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_expect_rx);
+ goto expired;
}
- t = READ_ONCE(call->expect_req_by);
- if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
- time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
- expired = true;
+ t = ktime_sub(call->expect_req_by, now);
+ if (t <= 0) {
+ call->expect_req_by = KTIME_MAX;
+ if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_idle);
+ goto expired;
+ }
}
- t = READ_ONCE(call->expect_term_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
- expired = true;
+ t = ktime_sub(READ_ONCE(call->expect_term_by), now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_hard);
+ goto expired;
}
- t = READ_ONCE(call->delay_ack_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
- cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->delay_ack_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_delayed_ack);
+ call->delay_ack_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
- }
-
- t = READ_ONCE(call->ack_lost_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
- cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
- set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+ rxrpc_propose_ack_delayed_ack);
}
- t = READ_ONCE(call->keepalive_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
- cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ping_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_ping);
+ call->ping_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
- t = READ_ONCE(call->ping_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
- cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+ now = ktime_get_real();
+ t = ktime_sub(call->keepalive_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_keepalive);
+ call->keepalive_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
- t = READ_ONCE(call->resend_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
- cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
- resend = true;
- }
-
- if (skb)
- rxrpc_input_call_packet(call, skb);
+ if (test_and_clear_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events))
+ rxrpc_send_initial_ping(call);
- rxrpc_transmit_some_data(call);
+ rxrpc_transmit_some_data(call, UINT_MAX, rxrpc_txdata_new_data);
- if (skb) {
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ if (saw_ack)
+ rxrpc_congestion_degrade(call);
- if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK)
- rxrpc_congestion_degrade(call);
+ if (did_receive &&
+ (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+ __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SEND_REPLY)) {
+ t = ktime_sub(call->rack_timo_at, ktime_get_real());
+ trace_rxrpc_rack(call, t);
}
- if (test_and_clear_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events))
- rxrpc_send_initial_ping(call);
-
/* Process events */
- if (expired) {
- if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
- (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
- trace_rxrpc_call_reset(call);
- rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
- rxrpc_abort_call_reset);
- } else {
- rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
- rxrpc_abort_call_timeout);
- }
- goto out;
- }
-
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
- if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
- rxrpc_resend(call, NULL);
+ if (call->tx_nr_lost > 0 &&
+ __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY &&
+ !test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
+ rxrpc_resend(call);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
rxrpc_propose_ack_rx_idle);
if (call->ackr_nr_unacked > 2) {
- if (call->peer->rtt_count < 3)
+ if (call->rtt_count < 3)
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_rtt);
- else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+ else if (ktime_before(ktime_add_ms(call->rtt_last_req, 1000),
ktime_get_real()))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_old_rtt);
@@ -511,23 +439,32 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
- next = call->expect_rx_by;
+ ktime_t next = READ_ONCE(call->expect_term_by), delay;
-#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+#define set(T) { ktime_t _t = (T); if (ktime_before(_t, next)) next = _t; }
set(call->expect_req_by);
- set(call->expect_term_by);
+ set(call->expect_rx_by);
set(call->delay_ack_at);
- set(call->ack_lost_at);
- set(call->resend_at);
+ set(call->rack_timo_at);
set(call->keepalive_at);
set(call->ping_at);
- now = jiffies;
- if (time_after_eq(now, next))
+ now = ktime_get_real();
+ delay = ktime_sub(next, now);
+ if (delay <= 0) {
rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
-
- rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+ } else {
+ unsigned long nowj = jiffies, delayj, nextj;
+
+ delayj = umax(nsecs_to_jiffies(delay), 1);
+ nextj = nowj + delayj;
+ if (time_before(nextj, call->timer.expires) ||
+ !timer_pending(&call->timer)) {
+ trace_rxrpc_timer_restart(call, delay, delayj);
+ timer_reduce(&call->timer, nextj);
+ }
+ }
}
out:
@@ -537,9 +474,24 @@ out:
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);
+ } else {
+ if (did_receive &&
+ call->peer->ackr_adv_pmtud &&
+ call->peer->pmtud_pending)
+ rxrpc_send_probe_for_pmtud(call);
}
- if (call->acks_hard_ack != call->tx_bottom)
- rxrpc_shrink_call_tx_buffer(call);
_leave("");
return true;
+
+expired:
+ if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
+ (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
+ trace_rxrpc_call_reset(call);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_reset);
+ } else {
+ rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+ rxrpc_abort_call_timeout);
+ }
+ goto out;
}
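For reference, the new timer handling above keeps each per-event deadline as a ktime_t and collapses them into the single call timer at the end of rxrpc_input_call_event(). A minimal sketch of that pattern, with poke_call_now() as a placeholder standing in for rxrpc_poke_call() (illustrative only, not the exact patch code):

	ktime_t next = READ_ONCE(call->expect_term_by), delay;

	if (ktime_before(call->delay_ack_at, next))
		next = call->delay_ack_at;
	if (ktime_before(call->rack_timo_at, next))
		next = call->rack_timo_at;
	if (ktime_before(call->keepalive_at, next))
		next = call->keepalive_at;

	delay = ktime_sub(next, ktime_get_real());
	if (delay <= 0) {
		poke_call_now();		/* deadline already passed */
	} else {
		unsigned long nextj = jiffies + umax(nsecs_to_jiffies(delay), 1);

		if (time_before(nextj, call->timer.expires) ||
		    !timer_pending(&call->timer))
			timer_reduce(&call->timer, nextj);	/* only ever pulls the timer earlier */
	}
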
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9fc9a6c3f685..c4c8b46a68c6 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -22,7 +22,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
[RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
- [RXRPC_CALL_SERVER_SECURING] = "SvSecure",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
@@ -49,7 +48,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
bool busy;
if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
- spin_lock_bh(&local->lock);
+ spin_lock_irq(&local->lock);
busy = !list_empty(&call->attend_link);
trace_rxrpc_poke_call(call, busy, what);
if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
@@ -57,7 +56,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
if (!busy) {
list_add_tail(&call->attend_link, &local->call_attend_q);
}
- spin_unlock_bh(&local->lock);
+ spin_unlock_irq(&local->lock);
if (!busy)
rxrpc_wake_up_io_thread(local);
}
@@ -70,20 +69,11 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
if (!__rxrpc_call_is_complete(call)) {
- trace_rxrpc_timer_expired(call, jiffies);
+ trace_rxrpc_timer_expired(call);
rxrpc_poke_call(call, rxrpc_call_poke_timer);
}
}
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why)
-{
- trace_rxrpc_timer(call, why, now);
- timer_reduce(&call->timer, expire_at);
-}
-
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
static void rxrpc_destroy_call(struct work_struct *);
@@ -155,34 +145,37 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
INIT_LIST_HEAD(&call->recvmsg_link);
INIT_LIST_HEAD(&call->sock_link);
INIT_LIST_HEAD(&call->attend_link);
- INIT_LIST_HEAD(&call->tx_sendmsg);
- INIT_LIST_HEAD(&call->tx_buffer);
+ skb_queue_head_init(&call->rx_queue);
skb_queue_head_init(&call->recvmsg_queue);
skb_queue_head_init(&call->rx_oos_queue);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->notify_lock);
- spin_lock_init(&call->tx_lock);
refcount_set(&call->ref, 1);
- call->debug_id = debug_id;
- call->tx_total_len = -1;
- call->next_rx_timo = 20 * HZ;
- call->next_req_timo = 1 * HZ;
- call->ackr_window = 1;
- call->ackr_wtop = 1;
+ call->debug_id = debug_id;
+ call->tx_total_len = -1;
+ call->tx_jumbo_max = 1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
+ call->ackr_window = 1;
+ call->ackr_wtop = 1;
+ call->delay_ack_at = KTIME_MAX;
+ call->rack_timo_at = KTIME_MAX;
+ call->ping_at = KTIME_MAX;
+ call->keepalive_at = KTIME_MAX;
+ call->expect_rx_by = KTIME_MAX;
+ call->expect_req_by = KTIME_MAX;
+ call->expect_term_by = KTIME_MAX;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
call->rx_winsize = rxrpc_rx_window_size;
call->tx_winsize = 16;
- if (RXRPC_TX_SMSS > 2190)
- call->cong_cwnd = 2;
- else if (RXRPC_TX_SMSS > 1095)
- call->cong_cwnd = 3;
- else
- call->cong_cwnd = 4;
+ call->cong_cwnd = RXRPC_MIN_CWND;
call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
+ rxrpc_call_init_rtt(call);
+
call->rxnet = rxnet;
call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
@@ -226,11 +219,11 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
if (p->timeouts.normal)
- call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
+ call->next_rx_timo = umin(p->timeouts.normal, 1);
if (p->timeouts.idle)
- call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
+ call->next_req_timo = umin(p->timeouts.idle, 1);
if (p->timeouts.hard)
- call->hard_timo = p->timeouts.hard * HZ;
+ call->hard_timo = p->timeouts.hard;
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
@@ -253,18 +246,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
*/
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
- unsigned long j = now + MAX_JIFFY_OFFSET;
-
- call->delay_ack_at = j;
- call->ack_lost_at = j;
- call->resend_at = j;
- call->ping_at = j;
- call->keepalive_at = j;
- call->expect_rx_by = j;
- call->expect_req_by = j;
- call->expect_term_by = j + call->hard_timo;
- call->timer.expires = now;
+ if (call->hard_timo) {
+ ktime_t delay = ms_to_ktime(call->hard_timo * 1000);
+
+ call->expect_term_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ }
+ call->timer.expires = jiffies;
}
/*
@@ -313,9 +301,9 @@ static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
rxrpc_get_call(call, rxrpc_call_get_io_thread);
- spin_lock(&local->client_call_lock);
+ spin_lock_irq(&local->client_call_lock);
list_add_tail(&call->wait_link, &local->new_client_calls);
- spin_unlock(&local->client_call_lock);
+ spin_unlock_irq(&local->client_call_lock);
rxrpc_wake_up_io_thread(local);
return 0;
@@ -445,7 +433,7 @@ error_attached_to_socket:
/*
* Set up an incoming call. call->conn points to the connection.
- * This is called in BH context and isn't allowed to fail.
+ * This is called with interrupts disabled and isn't allowed to fail.
*/
void rxrpc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_call *call,
@@ -464,17 +452,16 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
call->cong_tstamp = skb->tstamp;
__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
spin_lock(&conn->state_lock);
switch (conn->state) {
case RXRPC_CONN_SERVICE_UNSECURED:
case RXRPC_CONN_SERVICE_CHALLENGING:
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+ __set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags);
break;
case RXRPC_CONN_SERVICE:
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
break;
case RXRPC_CONN_ABORTED:
@@ -542,11 +529,29 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
}
/*
- * Clean up the Rx skb ring.
+ * Clean up the transmission buffers.
+ */
+static void rxrpc_cleanup_tx_buffers(struct rxrpc_call *call)
+{
+ struct rxrpc_txqueue *tq, *next;
+
+ for (tq = call->tx_queue; tq; tq = next) {
+ next = tq->next;
+ for (int i = 0; i < RXRPC_NR_TXQUEUE; i++)
+ if (tq->bufs[i])
+ rxrpc_put_txbuf(tq->bufs[i], rxrpc_txbuf_put_cleaned);
+ trace_rxrpc_tq(call, tq, 0, rxrpc_tq_cleaned);
+ kfree(tq);
+ }
+}
+
+/*
+ * Clean up the receive buffers.
*/
-static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+static void rxrpc_cleanup_rx_buffers(struct rxrpc_call *call)
{
rxrpc_purge_queue(&call->recvmsg_queue);
+ rxrpc_purge_queue(&call->rx_queue);
rxrpc_purge_queue(&call->rx_oos_queue);
}
@@ -569,7 +574,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
rxrpc_put_call_slot(call);
/* Make sure we don't get any more notifications */
- spin_lock(&rx->recvmsg_lock);
+ spin_lock_irq(&rx->recvmsg_lock);
if (!list_empty(&call->recvmsg_link)) {
_debug("unlinking once-pending call %p { e=%lx f=%lx }",
@@ -582,7 +587,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
call->recvmsg_link.next = NULL;
call->recvmsg_link.prev = NULL;
- spin_unlock(&rx->recvmsg_lock);
+ spin_unlock_irq(&rx->recvmsg_lock);
if (put)
rxrpc_put_call(call, rxrpc_call_put_unnotify);
@@ -682,23 +687,11 @@ static void rxrpc_rcu_free_call(struct rcu_head *rcu)
static void rxrpc_destroy_call(struct work_struct *work)
{
struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
- struct rxrpc_txbuf *txb;
del_timer_sync(&call->timer);
- rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
- rxrpc_cleanup_ring(call);
- while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
- struct rxrpc_txbuf, call_link))) {
- list_del(&txb->call_link);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
- }
- while ((txb = list_first_entry_or_null(&call->tx_buffer,
- struct rxrpc_txbuf, call_link))) {
- list_del(&txb->call_link);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
- }
-
+ rxrpc_cleanup_tx_buffers(call);
+ rxrpc_cleanup_rx_buffers(call);
rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
rxrpc_deactivate_bundle(call->bundle);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3b9b267a4431..db0099197890 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -231,7 +231,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
distance = id - id_cursor;
if (distance < 0)
distance = -distance;
- limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
+ limit = umax(atomic_read(&rxnet->nr_conns) * 4, 1024);
if (distance > limit)
goto mark_dont_reuse;
@@ -437,9 +437,9 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
call->dest_srx.srx_service = conn->service_id;
call->cong_ssthresh = call->peer->cong_ssthresh;
if (call->cong_cwnd >= call->cong_ssthresh)
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
else
- call->cong_mode = RXRPC_CALL_SLOW_START;
+ call->cong_ca_state = RXRPC_CA_SLOW_START;
chan->call_id = call_id;
chan->call_debug_id = call->debug_id;
@@ -508,15 +508,18 @@ static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
struct rxrpc_call *call;
+ LIST_HEAD(new_client_calls);
- while ((call = list_first_entry_or_null(&local->new_client_calls,
- struct rxrpc_call, wait_link))
- ) {
+ spin_lock_irq(&local->client_call_lock);
+ list_splice_tail_init(&local->new_client_calls, &new_client_calls);
+ spin_unlock_irq(&local->client_call_lock);
+
+ while ((call = list_first_entry_or_null(&new_client_calls,
+ struct rxrpc_call, wait_link))) {
struct rxrpc_bundle *bundle = call->bundle;
- spin_lock(&local->client_call_lock);
list_move_tail(&call->wait_link, &bundle->waiting_calls);
- spin_unlock(&local->client_call_lock);
+ rxrpc_see_call(call, rxrpc_call_see_waiting_call);
if (rxrpc_bundle_has_space(bundle))
rxrpc_activate_channels(bundle);
@@ -544,9 +547,9 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
- spin_lock(&call->peer->lock);
+ spin_lock_irq(&call->peer->lock);
hlist_add_head(&call->error_link, &call->peer->error_targets);
- spin_unlock(&call->peer->lock);
+ spin_unlock_irq(&call->peer->lock);
}
}
@@ -586,7 +589,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
_debug("call is waiting");
ASSERTCMP(call->call_id, ==, 0);
ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
+ /* May still be on ->new_client_calls. */
+ spin_lock_irq(&local->client_call_lock);
list_del_init(&call->wait_link);
+ spin_unlock_irq(&local->client_call_lock);
return;
}
@@ -636,7 +642,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
unsigned long final_ack_at = jiffies + 2;
- WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ chan->final_ack_at = final_ack_at;
smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
rxrpc_reduce_conn_timer(conn, final_ack_at);
@@ -770,7 +776,7 @@ next:
conn_expires_at = conn->idle_timestamp + expiry;
- now = READ_ONCE(jiffies);
+ now = jiffies;
if (time_after(conn_expires_at, now))
goto not_yet_expired;
}
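One pattern worth noting in the conn_client.c changes: rxrpc_connect_client_calls() now takes client_call_lock once, splices the pending calls onto a local list, and walks that copy without the lock held. A minimal sketch of the splice-and-drain idiom (illustrative; the per-call processing body is elided):

	LIST_HEAD(todo);

	spin_lock_irq(&local->client_call_lock);
	list_splice_tail_init(&local->new_client_calls, &todo);
	spin_unlock_irq(&local->client_call_lock);

	while ((call = list_first_entry_or_null(&todo, struct rxrpc_call, wait_link))) {
		/* Move the call onto its bundle's queue; no shared lock held here. */
		list_move_tail(&call->wait_link, &call->bundle->waiting_calls);
	}
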
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 1f251d758cb9..4d9c5e21ba78 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -26,7 +26,7 @@ static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff
bool aborted = false;
if (conn->state != RXRPC_CONN_ABORTED) {
- spin_lock(&conn->state_lock);
+ spin_lock_irq(&conn->state_lock);
if (conn->state != RXRPC_CONN_ABORTED) {
conn->abort_code = abort_code;
conn->error = err;
@@ -37,7 +37,7 @@ static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff
set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
aborted = true;
}
- spin_unlock(&conn->state_lock);
+ spin_unlock_irq(&conn->state_lock);
}
return aborted;
@@ -63,11 +63,12 @@ int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
/*
* Mark a connection as being remotely aborted.
*/
-static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+static void rxrpc_input_conn_abort(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
- return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
- RXRPC_CALL_REMOTELY_ABORTED);
+ trace_rxrpc_rx_conn_abort(conn, skb);
+ rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+ RXRPC_CALL_REMOTELY_ABORTED);
}
/*
@@ -88,10 +89,10 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
- struct rxrpc_ackinfo ack_info;
+ struct rxrpc_acktrailer trailer;
size_t len;
int ret, ioc;
- u32 serial, mtu, call_id, padding;
+ u32 serial, max_mtu, if_mtu, call_id, padding;
_enter("%d", conn->debug_id);
@@ -122,8 +123,8 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
- iov[2].iov_base = &ack_info;
- iov[2].iov_len = sizeof(ack_info);
+ iov[2].iov_base = &trailer;
+ iov[2].iov_len = sizeof(trailer);
serial = rxrpc_get_next_serial(conn);
@@ -149,8 +150,13 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
break;
case RXRPC_PACKET_TYPE_ACK:
- mtu = conn->peer->if_mtu;
- mtu -= conn->peer->hdrsize;
+ if_mtu = conn->peer->if_mtu - conn->peer->hdrsize;
+ if (conn->peer->ackr_adv_pmtud) {
+ max_mtu = umax(conn->peer->max_data, rxrpc_rx_mtu);
+ } else {
+ if_mtu = umin(1444, if_mtu);
+ max_mtu = if_mtu;
+ }
pkt.ack.bufferSpace = 0;
pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
pkt.ack.firstPacket = htonl(chan->last_seq + 1);
@@ -158,20 +164,21 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
- ack_info.rxMTU = htonl(rxrpc_rx_mtu);
- ack_info.maxMTU = htonl(mtu);
- ack_info.rwind = htonl(rxrpc_rx_window_size);
- ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ trailer.maxMTU = htonl(max_mtu);
+ trailer.ifMTU = htonl(if_mtu);
+ trailer.rwind = htonl(rxrpc_rx_window_size);
+ trailer.jumbo_max = 0;
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
- len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+ len += sizeof(pkt.ack) + 3 + sizeof(trailer);
ioc = 3;
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
ntohl(pkt.ack.firstPacket),
ntohl(pkt.ack.serial),
- pkt.ack.reason, 0, rxrpc_rx_window_size);
+ pkt.ack.reason, 0, rxrpc_rx_window_size,
+ rxrpc_propose_ack_retransmit);
break;
default:
@@ -202,11 +209,14 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
for (i = 0; i < RXRPC_MAXCALLS; i++) {
call = conn->channels[i].call;
- if (call)
+ if (call) {
+ rxrpc_see_call(call, rxrpc_call_see_conn_abort);
rxrpc_set_call_completion(call,
conn->completion,
conn->abort_code,
conn->error);
+ rxrpc_poke_call(call, rxrpc_call_poke_conn_abort);
+ }
}
_leave("");
@@ -218,10 +228,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
- if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
rxrpc_notify_socket(call);
- }
}
/*
@@ -252,16 +260,17 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (ret < 0)
return ret;
- spin_lock(&conn->state_lock);
+ spin_lock_irq(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
conn->state = RXRPC_CONN_SERVICE;
- spin_unlock(&conn->state_lock);
+ spin_unlock_irq(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE) {
/* Offload call state flipping to the I/O thread. As
* we've already received the packet, put it on the
* front of the queue.
*/
+ sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured);
skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
skb_queue_head(&conn->local->rx_queue, skb);
@@ -427,14 +436,16 @@ void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
rxrpc_abort_calls(conn);
- switch (skb->mark) {
- case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
- if (conn->state != RXRPC_CONN_SERVICE)
- break;
+ if (skb) {
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+ if (conn->state != RXRPC_CONN_SERVICE)
+ break;
- for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
- rxrpc_call_is_secure(conn->channels[loop].call);
- break;
+ for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+ rxrpc_call_is_secure(conn->channels[loop].call);
+ break;
+ }
}
/* Process delayed ACKs whose time has come. */
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index df8a271948a1..2f1fd1e2e7e4 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -31,13 +31,13 @@ void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
if (WARN_ON_ONCE(!local))
return;
- spin_lock_bh(&local->lock);
+ spin_lock_irq(&local->lock);
busy = !list_empty(&conn->attend_link);
if (!busy) {
rxrpc_get_connection(conn, why);
list_add_tail(&conn->attend_link, &local->conn_attend_q);
}
- spin_unlock_bh(&local->lock);
+ spin_unlock_irq(&local->lock);
rxrpc_wake_up_io_thread(local);
}
@@ -67,7 +67,9 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
+ INIT_LIST_HEAD(&conn->attend_link);
mutex_init(&conn->security_lock);
+ mutex_init(&conn->tx_data_alloc_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
@@ -118,18 +120,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
switch (srx->transport.family) {
case AF_INET:
if (peer->srx.transport.sin.sin_port !=
- srx->transport.sin.sin_port ||
- peer->srx.transport.sin.sin_addr.s_addr !=
- srx->transport.sin.sin_addr.s_addr)
+ srx->transport.sin.sin_port)
goto not_found;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
if (peer->srx.transport.sin6.sin6_port !=
- srx->transport.sin6.sin6_port ||
- memcmp(&peer->srx.transport.sin6.sin6_addr,
- &srx->transport.sin6.sin6_addr,
- sizeof(struct in6_addr)) != 0)
+ srx->transport.sin6.sin6_port)
goto not_found;
break;
#endif
@@ -200,9 +197,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_ssthresh = call->cong_ssthresh;
if (!hlist_unhashed(&call->error_link)) {
- spin_lock(&call->peer->lock);
+ spin_lock_irq(&call->peer->lock);
hlist_del_init(&call->error_link);
- spin_unlock(&call->peer->lock);
+ spin_unlock_irq(&call->peer->lock);
}
if (rxrpc_is_client_call(call)) {
@@ -325,6 +322,12 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
list_del_init(&conn->proc_link);
write_unlock(&rxnet->conn_lock);
+ if (conn->pmtud_probe) {
+ trace_rxrpc_pmtud_lost(conn, 0);
+ conn->peer->pmtud_probing = false;
+ conn->peer->pmtud_pending = true;
+ }
+
rxrpc_purge_queue(&conn->rx_queue);
rxrpc_kill_client_conn(conn);
@@ -341,6 +344,7 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
*/
rxrpc_purge_queue(&conn->rx_queue);
+ page_frag_cache_drain(&conn->tx_data_alloc);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9691de00ade7..24aceb183c2c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -9,6 +9,17 @@
#include "ar-internal.h"
+/* Override priority when generating ACKs for received DATA */
+static const u8 rxrpc_ack_priority[RXRPC_ACK__INVALID] = {
+ [RXRPC_ACK_IDLE] = 1,
+ [RXRPC_ACK_DELAY] = 2,
+ [RXRPC_ACK_REQUESTED] = 3,
+ [RXRPC_ACK_DUPLICATE] = 4,
+ [RXRPC_ACK_EXCEEDS_WINDOW] = 5,
+ [RXRPC_ACK_NOSPACE] = 6,
+ [RXRPC_ACK_OUT_OF_SEQUENCE] = 7,
+};
+
static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
enum rxrpc_abort_reason why)
{
@@ -16,80 +27,68 @@ static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
}
/*
- * Do TCP-style congestion management [RFC 5681].
+ * Do TCP-style congestion management [RFC5681].
*/
static void rxrpc_congestion_management(struct rxrpc_call *call,
- struct sk_buff *skb,
- struct rxrpc_ack_summary *summary,
- rxrpc_serial_t acked_serial)
+ struct rxrpc_ack_summary *summary)
{
- enum rxrpc_congest_change change = rxrpc_cong_no_change;
- unsigned int cumulative_acks = call->cong_cumul_acks;
- unsigned int cwnd = call->cong_cwnd;
- bool resend = false;
-
- summary->flight_size =
- (call->tx_top - call->acks_hard_ack) - summary->nr_acks;
+ summary->change = rxrpc_cong_no_change;
+ summary->in_flight = rxrpc_tx_in_flight(call);
if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
summary->retrans_timeo = true;
- call->cong_ssthresh = max_t(unsigned int,
- summary->flight_size / 2, 2);
- cwnd = 1;
- if (cwnd >= call->cong_ssthresh &&
- call->cong_mode == RXRPC_CALL_SLOW_START) {
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
- call->cong_tstamp = skb->tstamp;
- cumulative_acks = 0;
+ call->cong_ssthresh = umax(summary->in_flight / 2, 2);
+ call->cong_cwnd = 1;
+ if (call->cong_cwnd >= call->cong_ssthresh &&
+ call->cong_ca_state == RXRPC_CA_SLOW_START) {
+ call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
+ call->cong_tstamp = call->acks_latest_ts;
+ call->cong_cumul_acks = 0;
}
}
- cumulative_acks += summary->nr_new_acks;
- if (cumulative_acks > 255)
- cumulative_acks = 255;
+ call->cong_cumul_acks += summary->nr_new_sacks;
+ call->cong_cumul_acks += summary->nr_new_hacks;
+ if (call->cong_cumul_acks > 255)
+ call->cong_cumul_acks = 255;
- summary->cwnd = call->cong_cwnd;
- summary->ssthresh = call->cong_ssthresh;
- summary->cumulative_acks = cumulative_acks;
- summary->dup_acks = call->cong_dup_acks;
-
- switch (call->cong_mode) {
- case RXRPC_CALL_SLOW_START:
- if (summary->saw_nacks)
+ switch (call->cong_ca_state) {
+ case RXRPC_CA_SLOW_START:
+ if (call->acks_nr_snacks > 0)
goto packet_loss_detected;
- if (summary->cumulative_acks > 0)
- cwnd += 1;
- if (cwnd >= call->cong_ssthresh) {
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
- call->cong_tstamp = skb->tstamp;
+ if (call->cong_cumul_acks > 0)
+ call->cong_cwnd += 1;
+ if (call->cong_cwnd >= call->cong_ssthresh) {
+ call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
+ call->cong_tstamp = call->acks_latest_ts;
}
goto out;
- case RXRPC_CALL_CONGEST_AVOIDANCE:
- if (summary->saw_nacks)
+ case RXRPC_CA_CONGEST_AVOIDANCE:
+ if (call->acks_nr_snacks > 0)
goto packet_loss_detected;
/* We analyse the number of packets that get ACK'd per RTT
* period and increase the window if we managed to fill it.
*/
- if (call->peer->rtt_count == 0)
+ if (call->rtt_count == 0)
goto out;
- if (ktime_before(skb->tstamp,
+ if (ktime_before(call->acks_latest_ts,
ktime_add_us(call->cong_tstamp,
- call->peer->srtt_us >> 3)))
+ call->srtt_us >> 3)))
goto out_no_clear_ca;
- change = rxrpc_cong_rtt_window_end;
- call->cong_tstamp = skb->tstamp;
- if (cumulative_acks >= cwnd)
- cwnd++;
+ summary->change = rxrpc_cong_rtt_window_end;
+ call->cong_tstamp = call->acks_latest_ts;
+ if (call->cong_cumul_acks >= call->cong_cwnd)
+ call->cong_cwnd++;
goto out;
- case RXRPC_CALL_PACKET_LOSS:
- if (!summary->saw_nacks)
+ case RXRPC_CA_PACKET_LOSS:
+ if (call->acks_nr_snacks == 0)
goto resume_normality;
- if (summary->new_low_nack) {
- change = rxrpc_cong_new_low_nack;
+ if (summary->new_low_snack) {
+ summary->change = rxrpc_cong_new_low_nack;
call->cong_dup_acks = 1;
if (call->cong_extra > 1)
call->cong_extra = 1;
@@ -100,31 +99,35 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
if (call->cong_dup_acks < 3)
goto send_extra_data;
- change = rxrpc_cong_begin_retransmission;
- call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
- call->cong_ssthresh = max_t(unsigned int,
- summary->flight_size / 2, 2);
- cwnd = call->cong_ssthresh + 3;
+ summary->change = rxrpc_cong_begin_retransmission;
+ call->cong_ca_state = RXRPC_CA_FAST_RETRANSMIT;
+ call->cong_ssthresh = umax(summary->in_flight / 2, 2);
+ call->cong_cwnd = call->cong_ssthresh + 3;
call->cong_extra = 0;
call->cong_dup_acks = 0;
- resend = true;
+ summary->need_retransmit = true;
+ summary->in_fast_or_rto_recovery = true;
goto out;
- case RXRPC_CALL_FAST_RETRANSMIT:
- if (!summary->new_low_nack) {
- if (summary->nr_new_acks == 0)
- cwnd += 1;
+ case RXRPC_CA_FAST_RETRANSMIT:
+ rxrpc_tlp_init(call);
+ summary->in_fast_or_rto_recovery = true;
+ if (!summary->new_low_snack) {
+ if (summary->nr_new_sacks == 0)
+ call->cong_cwnd += 1;
call->cong_dup_acks++;
if (call->cong_dup_acks == 2) {
- change = rxrpc_cong_retransmit_again;
+ summary->change = rxrpc_cong_retransmit_again;
call->cong_dup_acks = 0;
- resend = true;
+ summary->need_retransmit = true;
}
} else {
- change = rxrpc_cong_progress;
- cwnd = call->cong_ssthresh;
- if (!summary->saw_nacks)
+ summary->change = rxrpc_cong_progress;
+ call->cong_cwnd = call->cong_ssthresh;
+ if (call->acks_nr_snacks == 0) {
+ summary->exiting_fast_or_rto_recovery = true;
goto resume_normality;
+ }
}
goto out;
@@ -134,30 +137,25 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
}
resume_normality:
- change = rxrpc_cong_cleared_nacks;
+ summary->change = rxrpc_cong_cleared_nacks;
call->cong_dup_acks = 0;
call->cong_extra = 0;
- call->cong_tstamp = skb->tstamp;
- if (cwnd < call->cong_ssthresh)
- call->cong_mode = RXRPC_CALL_SLOW_START;
+ call->cong_tstamp = call->acks_latest_ts;
+ if (call->cong_cwnd < call->cong_ssthresh)
+ call->cong_ca_state = RXRPC_CA_SLOW_START;
else
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
out:
- cumulative_acks = 0;
+ call->cong_cumul_acks = 0;
out_no_clear_ca:
- if (cwnd >= RXRPC_TX_MAX_WINDOW)
- cwnd = RXRPC_TX_MAX_WINDOW;
- call->cong_cwnd = cwnd;
- call->cong_cumul_acks = cumulative_acks;
- summary->mode = call->cong_mode;
- trace_rxrpc_congest(call, summary, acked_serial, change);
- if (resend)
- rxrpc_resend(call, skb);
+ if (call->cong_cwnd >= RXRPC_TX_MAX_WINDOW)
+ call->cong_cwnd = RXRPC_TX_MAX_WINDOW;
+ trace_rxrpc_congest(call, summary);
return;
packet_loss_detected:
- change = rxrpc_cong_saw_nack;
- call->cong_mode = RXRPC_CALL_PACKET_LOSS;
+ summary->change = rxrpc_cong_saw_nack;
+ call->cong_ca_state = RXRPC_CA_PACKET_LOSS;
call->cong_dup_acks = 0;
goto send_extra_data;
@@ -166,7 +164,7 @@ send_extra_data:
* state.
*/
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
- summary->nr_acks != call->tx_top - call->acks_hard_ack) {
+ call->acks_nr_sacks != call->tx_top - call->tx_bottom) {
call->cong_extra++;
wake_up(&call->waitq);
}
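A compressed view of the window growth implemented in the rewritten rxrpc_congestion_management() above (slow start and congestion avoidance only; the loss and fast-retransmit paths are omitted), a sketch assuming the call fields shown in these hunks:

	if (call->cong_ca_state == RXRPC_CA_SLOW_START) {
		if (call->cong_cumul_acks > 0)
			call->cong_cwnd += 1;		/* grow by one per ACK batch */
		if (call->cong_cwnd >= call->cong_ssthresh)
			call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
	} else if (call->cong_ca_state == RXRPC_CA_CONGEST_AVOIDANCE) {
		/* Grow at most once per RTT, and only if the previous window filled. */
		if (!ktime_before(call->acks_latest_ts,
				  ktime_add_us(call->cong_tstamp, call->srtt_us >> 3)) &&
		    call->cong_cumul_acks >= call->cong_cwnd) {
			call->cong_cwnd++;
			call->cong_tstamp = call->acks_latest_ts;
		}
	}
	if (call->cong_cwnd >= RXRPC_TX_MAX_WINDOW)
		call->cong_cwnd = RXRPC_TX_MAX_WINDOW;
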
@@ -178,26 +176,42 @@ send_extra_data:
*/
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{
- ktime_t rtt, now;
+ ktime_t rtt, now, time_since;
- if (call->cong_mode != RXRPC_CALL_SLOW_START &&
- call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
+ if (call->cong_ca_state != RXRPC_CA_SLOW_START &&
+ call->cong_ca_state != RXRPC_CA_CONGEST_AVOIDANCE)
return;
if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
return;
- rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
+ rtt = ns_to_ktime(call->srtt_us * (NSEC_PER_USEC / 8));
now = ktime_get_real();
- if (!ktime_before(ktime_add(call->tx_last_sent, rtt), now))
+ time_since = ktime_sub(now, call->tx_last_sent);
+ if (ktime_before(time_since, rtt))
return;
- trace_rxrpc_reset_cwnd(call, now);
+ trace_rxrpc_reset_cwnd(call, time_since, rtt);
rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
call->tx_last_sent = now;
- call->cong_mode = RXRPC_CALL_SLOW_START;
- call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
- call->cong_cwnd * 3 / 4);
- call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
+ call->cong_ca_state = RXRPC_CA_SLOW_START;
+ call->cong_ssthresh = umax(call->cong_ssthresh, call->cong_cwnd * 3 / 4);
+ call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
+}
+
+/*
+ * Add an RTT sample derived from an ACK'd DATA packet.
+ */
+static void rxrpc_add_data_rtt_sample(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ int ix)
+{
+ ktime_t xmit_ts = ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);
+
+ rxrpc_call_add_rtt(call, rxrpc_rtt_rx_data_ack, -1,
+ summary->acked_serial, summary->ack_serial,
+ xmit_ts, call->acks_latest_ts);
+ __clear_bit(ix, &tq->rtt_samples); /* Prevent repeat RTT sample */
}
/*
@@ -206,37 +220,120 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
- struct rxrpc_txbuf *txb;
- bool rot_last = false;
+ struct rxrpc_txqueue *tq = call->tx_queue;
+ rxrpc_seq_t seq = call->tx_bottom + 1;
+ bool rot_last = false, trace = false;
- list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
- if (before_eq(txb->seq, call->acks_hard_ack))
- continue;
- if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+ _enter("%x,%x", call->tx_bottom, to);
+
+ trace_rxrpc_tx_rotate(call, seq, to);
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate);
+
+ if (call->acks_lowest_nak == call->tx_bottom) {
+ call->acks_lowest_nak = to;
+ } else if (after(to, call->acks_lowest_nak)) {
+ summary->new_low_snack = true;
+ call->acks_lowest_nak = to;
+ }
+
+ /* We may have a left over fully-consumed buffer at the front that we
+ * couldn't drop before (rotate_and_keep below).
+ */
+ if (seq == call->tx_qbase + RXRPC_NR_TXQUEUE) {
+ call->tx_qbase += RXRPC_NR_TXQUEUE;
+ call->tx_queue = tq->next;
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
+ kfree(tq);
+ tq = call->tx_queue;
+ }
+
+ do {
+ unsigned int ix = seq - call->tx_qbase;
+
+ _debug("tq=%x seq=%x i=%d f=%x", tq->qbase, seq, ix, tq->bufs[ix]->flags);
+ if (tq->bufs[ix]->flags & RXRPC_LAST_PACKET) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
- if (txb->seq == to)
- break;
- }
- if (rot_last)
- set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);
+ if (summary->acked_serial == tq->segment_serial[ix] &&
+ test_bit(ix, &tq->rtt_samples))
+ rxrpc_add_data_rtt_sample(call, summary, tq, ix);
+
+ if (ix == tq->nr_reported_acks) {
+ /* Packet directly hard ACK'd. */
+ tq->nr_reported_acks++;
+ rxrpc_input_rack_one(call, summary, tq, ix);
+ if (seq == call->tlp_seq)
+ summary->tlp_probe_acked = true;
+ summary->nr_new_hacks++;
+ __set_bit(ix, &tq->segment_acked);
+ trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_hack);
+ } else if (test_bit(ix, &tq->segment_acked)) {
+ /* Soft ACK -> hard ACK. */
+ call->acks_nr_sacks--;
+ trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_sack);
+ } else {
+ /* Soft NAK -> hard ACK. */
+ call->acks_nr_snacks--;
+ rxrpc_input_rack_one(call, summary, tq, ix);
+ if (seq == call->tlp_seq)
+ summary->tlp_probe_acked = true;
+ summary->nr_new_hacks++;
+ __set_bit(ix, &tq->segment_acked);
+ trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_snak);
+ }
- _enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);
+ call->tx_nr_sent--;
+ if (__test_and_clear_bit(ix, &tq->segment_lost))
+ call->tx_nr_lost--;
+ if (__test_and_clear_bit(ix, &tq->segment_retransmitted))
+ call->tx_nr_resent--;
+ __clear_bit(ix, &tq->ever_retransmitted);
- if (call->acks_lowest_nak == call->acks_hard_ack) {
- call->acks_lowest_nak = to;
- } else if (after(to, call->acks_lowest_nak)) {
- summary->new_low_nack = true;
- call->acks_lowest_nak = to;
+ rxrpc_put_txbuf(tq->bufs[ix], rxrpc_txbuf_put_rotated);
+ tq->bufs[ix] = NULL;
+
+ WRITE_ONCE(call->tx_bottom, seq);
+ trace_rxrpc_txqueue(call, (rot_last ?
+ rxrpc_txqueue_rotate_last :
+ rxrpc_txqueue_rotate));
+
+ seq++;
+ trace = true;
+ if (!(seq & RXRPC_TXQ_MASK)) {
+ trace_rxrpc_rack_update(call, summary);
+ trace = false;
+ prefetch(tq->next);
+ if (tq != call->tx_qtail) {
+ call->tx_qbase += RXRPC_NR_TXQUEUE;
+ call->tx_queue = tq->next;
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
+ kfree(tq);
+ tq = call->tx_queue;
+ } else {
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_keep);
+ tq = NULL;
+ break;
+ }
+ }
+
+ } while (before_eq(seq, to));
+
+ if (trace)
+ trace_rxrpc_rack_update(call, summary);
+
+ if (rot_last) {
+ set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);
+ if (tq) {
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
+ kfree(tq);
+ call->tx_queue = NULL;
+ }
}
- smp_store_release(&call->acks_hard_ack, to);
+ _debug("%x,%x,%x,%d", to, call->tx_bottom, call->tx_top, rot_last);
- trace_rxrpc_txqueue(call, (rot_last ?
- rxrpc_txqueue_rotate_last :
- rxrpc_txqueue_rotate));
wake_up(&call->waitq);
return rot_last;
}
@@ -252,10 +349,10 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
{
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
- if (unlikely(call->cong_last_nack)) {
- rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
- call->cong_last_nack = NULL;
- }
+ call->rack_timer_mode = RXRPC_CALL_RACKTIMER_OFF;
+ call->rack_timo_at = KTIME_MAX;
+ trace_rxrpc_rack_timer(call, 0, false);
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_rack_off + call->rack_timer_mode);
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
@@ -288,15 +385,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
- unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
- now = jiffies;
- timo = now + MAX_JIFFY_OFFSET;
-
- WRITE_ONCE(call->delay_ack_at, timo);
- trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
@@ -329,7 +422,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
- call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+ call->expect_req_by = KTIME_MAX;
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
break;
@@ -355,18 +448,26 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
+ spin_lock_irq(&call->recvmsg_queue.lock);
+
__skb_queue_tail(&call->recvmsg_queue, skb);
rxrpc_input_update_ack_window(call, window, wtop);
trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
if (last)
+ /* Change the state inside the lock so that recvmsg syncs
+ * correctly with it and using sendmsg() to send a reply
+ * doesn't race.
+ */
rxrpc_end_rx_phase(call, sp->hdr.serial);
+
+ spin_unlock_irq(&call->recvmsg_queue.lock);
}
/*
* Process a DATA packet.
*/
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
- bool *_notify)
+ bool *_notify, rxrpc_serial_t *_ack_serial, int *_ack_reason)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct sk_buff *oos;
@@ -419,8 +520,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
/* Send an immediate ACK if we fill in a hole */
else if (!skb_queue_empty(&call->rx_oos_queue))
ack_reason = RXRPC_ACK_DELAY;
- else
- call->ackr_nr_unacked++;
window++;
if (after(window, wtop)) {
@@ -434,7 +533,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);
- spin_lock(&call->recvmsg_queue.lock);
rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
*_notify = true;
@@ -456,8 +554,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_receive_queue_oos);
}
- spin_unlock(&call->recvmsg_queue.lock);
-
call->ackr_sack_base = sack;
} else {
unsigned int slot;
@@ -498,12 +594,16 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
}
send_ack:
- if (ack_reason >= 0)
- rxrpc_send_ACK(call, ack_reason, serial,
- rxrpc_propose_ack_input_data);
- else
- rxrpc_propose_delay_ACK(call, serial,
- rxrpc_propose_ack_input_data);
+ if (ack_reason >= 0) {
+ if (rxrpc_ack_priority[ack_reason] > rxrpc_ack_priority[*_ack_reason]) {
+ *_ack_serial = serial;
+ *_ack_reason = ack_reason;
+ } else if (rxrpc_ack_priority[ack_reason] == rxrpc_ack_priority[*_ack_reason] &&
+ ack_reason == RXRPC_ACK_REQUESTED) {
+ *_ack_serial = serial;
+ *_ack_reason = ack_reason;
+ }
+ }
}
/*
@@ -514,9 +614,11 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
struct rxrpc_jumbo_header jhdr;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
struct sk_buff *jskb;
+ rxrpc_serial_t ack_serial = 0;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len = skb->len - offset;
bool notify = false;
+ int ack_reason = 0, count = 1, stat_ix;
while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
if (len < RXRPC_JUMBO_SUBPKTLEN)
@@ -536,7 +638,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
jsp = rxrpc_skb(jskb);
jsp->offset = offset;
jsp->len = RXRPC_JUMBO_DATALEN;
- rxrpc_input_data_one(call, jskb, &notify);
+ rxrpc_input_data_one(call, jskb, &notify, &ack_serial, &ack_reason);
rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);
sp->hdr.flags = jhdr.flags;
@@ -545,12 +647,25 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
sp->hdr.serial++;
offset += RXRPC_JUMBO_SUBPKTLEN;
len -= RXRPC_JUMBO_SUBPKTLEN;
+ count++;
}
sp->offset = offset;
sp->len = len;
- rxrpc_input_data_one(call, skb, &notify);
- if (notify) {
+ rxrpc_input_data_one(call, skb, &notify, &ack_serial, &ack_reason);
+
+ stat_ix = umin(count, ARRAY_SIZE(call->rxnet->stat_rx_jumbo)) - 1;
+ atomic_inc(&call->rxnet->stat_rx_jumbo[stat_ix]);
+
+ if (ack_reason > 0) {
+ rxrpc_send_ACK(call, ack_reason, ack_serial,
+ rxrpc_propose_ack_input_data);
+ } else {
+ call->ackr_nr_unacked++;
+ rxrpc_propose_delay_ACK(call, sp->hdr.serial,
+ rxrpc_propose_ack_input_data);
+ }
+ if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags)) {
trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
rxrpc_notify_socket(call);
}
@@ -589,14 +704,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
- unsigned long now, expect_req_by;
if (timo) {
- now = jiffies;
- expect_req_by = now + timo;
- WRITE_ONCE(call->expect_req_by, expect_req_by);
- rxrpc_reduce_call_timer(call, expect_req_by, now,
- rxrpc_timer_set_for_idle);
+ ktime_t delay = ms_to_ktime(timo);
+
+ call->expect_req_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
}
break;
}
@@ -646,7 +759,7 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
smp_mb(); /* Read data before setting avail bit */
set_bit(i, &call->rtt_avail);
- rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+ rxrpc_call_add_rtt(call, type, i, acked_serial, ack_serial,
sent_at, resp_time);
matched = true;
}
@@ -656,7 +769,7 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
*/
if (after(acked_serial, orig_serial)) {
trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
- orig_serial, acked_serial, 0, 0);
+ orig_serial, acked_serial, 0, 0, 0);
clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
smp_wmb();
set_bit(i, &call->rtt_avail);
@@ -664,20 +777,23 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
}
if (!matched)
- trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0, 0);
}
/*
* Process the extra information that may be appended to an ACK packet
*/
-static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- struct rxrpc_ackinfo *ackinfo)
+static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_acktrailer *trailer)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_peer *peer;
- unsigned int mtu;
+ struct rxrpc_peer *peer = call->peer;
+ unsigned int max_data, capacity;
bool wake = false;
- u32 rwind = ntohl(ackinfo->rwind);
+ u32 max_mtu = ntohl(trailer->maxMTU);
+ //u32 if_mtu = ntohl(trailer->ifMTU);
+ u32 rwind = ntohl(trailer->rwind);
+ u32 jumbo_max = ntohl(trailer->jumbo_max);
if (rwind > RXRPC_TX_MAX_WINDOW)
rwind = RXRPC_TX_MAX_WINDOW;
@@ -688,58 +804,147 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
call->tx_winsize = rwind;
}
- if (call->cong_ssthresh > rwind)
- call->cong_ssthresh = rwind;
+ max_mtu = clamp(max_mtu, 500, 65535);
+ peer->ackr_max_data = max_mtu;
- mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+ if (max_mtu < peer->max_data) {
+ trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu,
+ rxrpc_pmtud_reduce_ack);
+ peer->max_data = max_mtu;
+ }
+
+ max_data = umin(max_mtu, peer->max_data);
+ capacity = max_data;
+ capacity += sizeof(struct rxrpc_jumbo_header); /* First subpacket has main hdr, not jumbo */
+ capacity /= sizeof(struct rxrpc_jumbo_header) + RXRPC_JUMBO_DATALEN;
- peer = call->peer;
- if (mtu < peer->maxdata) {
- spin_lock(&peer->lock);
- peer->maxdata = mtu;
- peer->mtu = mtu + peer->hdrsize;
- spin_unlock(&peer->lock);
+ if (jumbo_max == 0) {
+ /* The peer says it supports pmtu discovery */
+ peer->ackr_adv_pmtud = true;
+ } else {
+ peer->ackr_adv_pmtud = false;
+ capacity = clamp(capacity, 1, jumbo_max);
}
+ call->tx_jumbo_max = capacity;
+
if (wake)
wake_up(&call->waitq);
}
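For a rough sense of what the capacity calculation above yields, the sketch below recomputes it in userspace. The 4-byte jumbo subpacket header and the 1412-byte subpacket payload are assumed figures used for illustration, not values taken from this diff.

	#include <stdio.h>

	/* Assumed constants for illustration only. */
	#define JUMBO_SUBPKT_HDR	4	/* assumed sizeof(struct rxrpc_jumbo_header) */
	#define JUMBO_DATALEN		1412	/* assumed RXRPC_JUMBO_DATALEN */

	/* How many subpackets fit in max_data bytes of UDP payload, remembering
	 * that the first subpacket reuses the main wire header and so doesn't
	 * pay for a jumbo header of its own.
	 */
	static unsigned int jumbo_capacity(unsigned int max_data)
	{
		return (max_data + JUMBO_SUBPKT_HDR) /
		       (JUMBO_SUBPKT_HDR + JUMBO_DATALEN);
	}

	int main(void)
	{
		printf("%u\n", jumbo_capacity(1444));	/* -> 1 subpacket  */
		printf("%u\n", jumbo_capacity(65535));	/* -> 46 subpackets */
		return 0;
	}

With a 65535-byte advertised maximum this comes out at 46 subpackets, which lines up with the rxrpc_rx_jumbo_max default of 46 set later in this diff.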
+#if defined(CONFIG_X86) && __GNUC__ && !defined(__clang__)
+/* Clang doesn't support the %z constraint modifier */
+#define shiftr_adv_rotr(shift_from, rotate_into) ({ \
+ asm(" shr%z1 %1\n" \
+ " inc %0\n" \
+ " rcr%z2 %2\n" \
+ : "+d"(shift_from), "+m"(*(shift_from)), "+rm"(rotate_into) \
+ ); \
+ })
+#else
+#define shiftr_adv_rotr(shift_from, rotate_into) ({ \
+ typeof(rotate_into) __bit0 = *(shift_from) & 1; \
+ *(shift_from) >>= 1; \
+ shift_from++; \
+ rotate_into >>= 1; \
+ rotate_into |= __bit0 << (sizeof(rotate_into) * 8 - 1); \
+ })
+#endif
+
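The generic fallback can be exercised on its own; the userspace sketch below (illustrative names only, not part of the patch) feeds a few hypothetical SACK bytes through the same shift-and-rotate step. Each decanted bit lands in the top of the 64-bit accumulator and earlier bits shift down towards bit 0, so after a full word of decants the first byte examined sits in the lowest bit.

	#include <stdio.h>
	#include <stdint.h>

	/* Userspace rendition of the generic fallback: shift the current SACK
	 * byte right (mirroring the x86 shr), advance the byte pointer and
	 * rotate the shifted-out bit into the top of the accumulator.
	 */
	static void shiftr_adv_rotr_sketch(uint8_t **sack, uint64_t *acc)
	{
		uint64_t bit0 = **sack & 1;

		**sack >>= 1;
		(*sack)++;
		*acc >>= 1;
		*acc |= bit0 << 63;
	}

	int main(void)
	{
		uint8_t sacks[4] = { 1, 0, 1, 1 };	/* hypothetical ACK/NACK bytes */
		uint8_t *p = sacks;
		uint64_t extracted = ~0ULL;

		for (int i = 0; i < 4; i++)
			shiftr_adv_rotr_sketch(&p, &extracted);
		printf("%016llx\n", (unsigned long long)extracted); /* dfffffffffffffff */
		return 0;
	}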
/*
- * Determine how many nacks from the previous ACK have now been satisfied.
+ * Deal with RTT samples from soft ACKs.
*/
-static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
- struct rxrpc_ack_summary *summary,
- rxrpc_seq_t seq)
+static void rxrpc_input_soft_rtt(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq)
{
- struct sk_buff *skb = call->cong_last_nack;
- struct rxrpc_ackpacket ack;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- unsigned int i, new_acks = 0, retained_nacks = 0;
- rxrpc_seq_t old_seq = sp->first_ack;
- u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
-
- if (after_eq(seq, old_seq + sp->nr_acks)) {
- summary->nr_new_acks += sp->nr_nacks;
- summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
- summary->nr_retained_nacks = 0;
- } else if (seq == old_seq) {
- summary->nr_retained_nacks = sp->nr_nacks;
- } else {
- for (i = 0; i < sp->nr_acks; i++) {
- if (acks[i] == RXRPC_ACK_TYPE_NACK) {
- if (before(old_seq + i, seq))
- new_acks++;
- else
- retained_nacks++;
- }
+ for (int ix = 0; ix < RXRPC_NR_TXQUEUE; ix++)
+ if (summary->acked_serial == tq->segment_serial[ix])
+ return rxrpc_add_data_rtt_sample(call, summary, tq, ix);
+}
+
+/*
+ * Process a batch of soft ACKs specific to a transmission queue segment.
+ */
+static void rxrpc_input_soft_ack_tq(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned long extracted_acks,
+ int nr_reported,
+ rxrpc_seq_t seq,
+ rxrpc_seq_t *lowest_nak)
+{
+ unsigned long old_reported = 0, flipped, new_acks = 0;
+ unsigned long a_to_n, n_to_a = 0;
+ int new, a, n;
+
+ if (tq->nr_reported_acks > 0)
+ old_reported = ~0UL >> (RXRPC_NR_TXQUEUE - tq->nr_reported_acks);
+
+ _enter("{%x,%lx,%d},%lx,%d,%x",
+ tq->qbase, tq->segment_acked, tq->nr_reported_acks,
+ extracted_acks, nr_reported, seq);
+
+ _debug("[%x]", tq->qbase);
+ _debug("tq %16lx %u", tq->segment_acked, tq->nr_reported_acks);
+ _debug("sack %16lx %u", extracted_acks, nr_reported);
+
+ /* See how many previously logged ACKs/NAKs have flipped. */
+ flipped = (tq->segment_acked ^ extracted_acks) & old_reported;
+ if (flipped) {
+ n_to_a = ~tq->segment_acked & flipped; /* Old NAK -> ACK */
+ a_to_n = tq->segment_acked & flipped; /* Old ACK -> NAK */
+ a = hweight_long(n_to_a);
+ n = hweight_long(a_to_n);
+ _debug("flip %16lx", flipped);
+ _debug("ntoa %16lx %d", n_to_a, a);
+ _debug("aton %16lx %d", a_to_n, n);
+ call->acks_nr_sacks += a - n;
+ call->acks_nr_snacks += n - a;
+ summary->nr_new_sacks += a;
+ summary->nr_new_snacks += n;
+ }
+
+ /* See how many new ACKs/NAKs have been acquired. */
+ new = nr_reported - tq->nr_reported_acks;
+ if (new > 0) {
+ new_acks = extracted_acks & ~old_reported;
+ if (new_acks) {
+ a = hweight_long(new_acks);
+ n = new - a;
+ _debug("new_a %16lx new=%d a=%d n=%d", new_acks, new, a, n);
+ call->acks_nr_sacks += a;
+ call->acks_nr_snacks += n;
+ summary->nr_new_sacks += a;
+ summary->nr_new_snacks += n;
+ } else {
+ call->acks_nr_snacks += new;
+ summary->nr_new_snacks += new;
}
+ }
+
+ tq->nr_reported_acks = nr_reported;
+ tq->segment_acked = extracted_acks;
+ trace_rxrpc_apply_acks(call, tq);
+
+ if (extracted_acks != ~0UL) {
+ rxrpc_seq_t lowest = seq + ffz(extracted_acks);
- summary->nr_new_acks += new_acks;
- summary->nr_retained_nacks = retained_nacks;
+ if (before(lowest, *lowest_nak))
+ *lowest_nak = lowest;
}
- return old_seq + sp->nr_acks;
+ if (summary->acked_serial)
+ rxrpc_input_soft_rtt(call, summary, tq);
+
+ new_acks |= n_to_a;
+ if (new_acks)
+ rxrpc_input_rack(call, summary, tq, new_acks);
+
+ if (call->tlp_serial &&
+ rxrpc_seq_in_txq(tq, call->tlp_seq) &&
+ test_bit(call->tlp_seq - tq->qbase, &new_acks))
+ summary->tlp_probe_acked = true;
}
/*
@@ -753,39 +958,50 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
*/
static void rxrpc_input_soft_acks(struct rxrpc_call *call,
struct rxrpc_ack_summary *summary,
- struct sk_buff *skb,
- rxrpc_seq_t seq,
- rxrpc_seq_t since)
+ struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- unsigned int i, old_nacks = 0;
- rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
+ struct rxrpc_txqueue *tq = call->tx_queue;
+ unsigned long extracted = ~0UL;
+ unsigned int nr = 0;
+ rxrpc_seq_t seq = call->acks_hard_ack + 1;
+ rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- for (i = 0; i < sp->nr_acks; i++) {
- if (acks[i] == RXRPC_ACK_TYPE_ACK) {
- summary->nr_acks++;
- if (after_eq(seq, since))
- summary->nr_new_acks++;
- } else {
- summary->saw_nacks = true;
- if (before(seq, since)) {
- /* Overlap with previous ACK */
- old_nacks++;
- } else {
- summary->nr_new_nacks++;
- sp->nr_nacks++;
- }
+ _enter("%x,%x,%u", tq->qbase, seq, sp->ack.nr_acks);
+
+ while (after(seq, tq->qbase + RXRPC_NR_TXQUEUE - 1))
+ tq = tq->next;
- if (before(seq, lowest_nak))
- lowest_nak = seq;
+ for (unsigned int i = 0; i < sp->ack.nr_acks; i++) {
+ /* Decant ACKs until we hit a txqueue boundary. */
+ shiftr_adv_rotr(acks, extracted);
+ if (i == 256) {
+ acks -= i;
+ i = 0;
}
seq++;
+ nr++;
+ if ((seq & RXRPC_TXQ_MASK) != 0)
+ continue;
+
+ _debug("bound %16lx %u", extracted, nr);
+
+ rxrpc_input_soft_ack_tq(call, summary, tq, extracted, RXRPC_NR_TXQUEUE,
+ seq - RXRPC_NR_TXQUEUE, &lowest_nak);
+ extracted = ~0UL;
+ nr = 0;
+ tq = tq->next;
+ prefetch(tq);
}
- if (lowest_nak != call->acks_lowest_nak) {
- call->acks_lowest_nak = lowest_nak;
- summary->new_low_nack = true;
+ if (nr) {
+ unsigned int nr_reported = seq & RXRPC_TXQ_MASK;
+
+ extracted >>= RXRPC_NR_TXQUEUE - nr_reported;
+ _debug("tail %16lx %u", extracted, nr_reported);
+ rxrpc_input_soft_ack_tq(call, summary, tq, extracted, nr_reported,
+ seq & ~RXRPC_TXQ_MASK, &lowest_nak);
}
/* We *can* have more nacks than we did - the peer is permitted to drop
@@ -793,9 +1009,14 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
* possible for the nack distribution to change whilst the number of
* nacks stays the same or goes down.
*/
- if (old_nacks < summary->nr_retained_nacks)
- summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
- summary->nr_retained_nacks = old_nacks;
+ if (lowest_nak != call->acks_lowest_nak) {
+ call->acks_lowest_nak = lowest_nak;
+ summary->new_low_snack = true;
+ }
+
+ _debug("summary A=%d+%d N=%d+%d",
+ call->acks_nr_sacks, summary->nr_new_sacks,
+ call->acks_nr_snacks, summary->nr_new_snacks);
}
/*
@@ -803,21 +1024,21 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
* with respect to the ack state conveyed by preceding ACKs.
*/
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
- rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
+ rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt)
{
- rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);
+ rxrpc_seq_t base = READ_ONCE(call->acks_hard_ack);
- if (after(first_pkt, base))
+ if (after(hard_ack, base))
return true; /* The window advanced */
- if (before(first_pkt, base))
+ if (before(hard_ack, base))
return false; /* firstPacket regressed */
if (after_eq(prev_pkt, call->acks_prev_seq))
return true; /* previousPacket hasn't regressed. */
/* Some rx implementations put a serial number in previousPacket. */
- if (after_eq(prev_pkt, base + call->tx_winsize))
+ if (after(prev_pkt, base + call->tx_winsize))
return false;
return true;
}
@@ -835,59 +1056,34 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
- struct rxrpc_ackpacket ack;
+ struct rxrpc_acktrailer trailer;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_ackinfo info;
- rxrpc_serial_t ack_serial, acked_serial;
- rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
+ rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
_enter("");
- offset = sizeof(struct rxrpc_wire_header);
- if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
- offset += sizeof(ack);
-
- ack_serial = sp->hdr.serial;
- acked_serial = ntohl(ack.serial);
- first_soft_ack = ntohl(ack.firstPacket);
- prev_pkt = ntohl(ack.previousPacket);
- hard_ack = first_soft_ack - 1;
- nr_acks = ack.nAcks;
- sp->first_ack = first_soft_ack;
- sp->nr_acks = nr_acks;
- summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
- ack.reason : RXRPC_ACK__INVALID);
-
- trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
- first_soft_ack, prev_pkt,
- summary.ack_reason, nr_acks);
- rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
-
- if (acked_serial != 0) {
- switch (ack.reason) {
- case RXRPC_ACK_PING_RESPONSE:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_ping_response);
- break;
- case RXRPC_ACK_REQUESTED:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_requested_ack);
- break;
- default:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_other_ack);
- break;
- }
- }
+ offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+ summary.ack_serial = sp->hdr.serial;
+ first_soft_ack = sp->ack.first_ack;
+ prev_pkt = sp->ack.prev_ack;
+ nr_acks = sp->ack.nr_acks;
+ hard_ack = first_soft_ack - 1;
+ summary.acked_serial = sp->ack.acked_serial;
+ summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
+ sp->ack.reason : RXRPC_ACK__INVALID);
+
+ trace_rxrpc_rx_ack(call, sp);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
+ prefetch(call->tx_queue);
/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
- if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
- first_soft_ack == 1 &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ hard_ack == 0 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
@@ -899,10 +1095,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
- if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
- first_soft_ack == 1 &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ hard_ack == 0 &&
prev_pkt == 0 &&
- call->acks_hard_ack == 0 &&
+ call->tx_bottom == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
0, -ENETRESET);
@@ -910,50 +1106,44 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
- if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
- first_soft_ack, call->acks_first_seq,
- prev_pkt, call->acks_prev_seq);
- goto send_response;
+ if (!rxrpc_is_ack_valid(call, hard_ack, prev_pkt)) {
+ trace_rxrpc_rx_discard_ack(call, summary.ack_serial, hard_ack, prev_pkt);
+ goto send_response; /* Still respond if requested. */
}
- info.rxMTU = 0;
+ trailer.maxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(info) &&
- skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
+ if (skb->len >= ioffset + sizeof(trailer) &&
+ skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);
if (nr_acks > 0)
skb_condense(skb);
- if (call->cong_last_nack) {
- since = rxrpc_input_check_prev_ack(call, &summary, first_soft_ack);
- rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
- call->cong_last_nack = NULL;
- } else {
- summary.nr_new_acks = first_soft_ack - call->acks_first_seq;
- call->acks_lowest_nak = first_soft_ack + nr_acks;
- since = first_soft_ack;
- }
-
- call->acks_latest_ts = skb->tstamp;
- call->acks_first_seq = first_soft_ack;
+ call->acks_latest_ts = ktime_get_real();
+ call->acks_hard_ack = hard_ack;
call->acks_prev_seq = prev_pkt;
- switch (ack.reason) {
- case RXRPC_ACK_PING:
- break;
- default:
- if (acked_serial && after(acked_serial, call->acks_highest_serial))
- call->acks_highest_serial = acked_serial;
- break;
+ if (summary.acked_serial) {
+ switch (summary.ack_reason) {
+ case RXRPC_ACK_PING_RESPONSE:
+ rxrpc_complete_rtt_probe(call, call->acks_latest_ts,
+ summary.acked_serial, summary.ack_serial,
+ rxrpc_rtt_rx_ping_response);
+ break;
+ default:
+ if (after(summary.acked_serial, call->acks_highest_serial))
+ call->acks_highest_serial = summary.acked_serial;
+ summary.rtt_sample_avail = true;
+ break;
+ }
}
/* Parse rwind and mtu sizes if provided. */
- if (info.rxMTU)
- rxrpc_input_ackinfo(call, skb, &info);
+ if (trailer.maxMTU)
+ rxrpc_input_ack_trailer(call, skb, &trailer);
- if (first_soft_ack == 0)
+ if (hard_ack + 1 == 0)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
/* Ignore ACKs unless we are or have just been transmitting. */
@@ -967,13 +1157,13 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto send_response;
}
- if (before(hard_ack, call->acks_hard_ack) ||
+ if (before(hard_ack, call->tx_bottom) ||
after(hard_ack, call->tx_top))
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
if (nr_acks > call->tx_top - hard_ack)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
- if (after(hard_ack, call->acks_hard_ack)) {
+ if (after(hard_ack, call->tx_bottom)) {
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
goto send_response;
@@ -983,25 +1173,30 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
if (nr_acks > 0) {
if (offset > (int)skb->len - nr_acks)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
- rxrpc_input_soft_acks(call, &summary, skb, first_soft_ack, since);
- rxrpc_get_skb(skb, rxrpc_skb_get_last_nack);
- call->cong_last_nack = skb;
+ rxrpc_input_soft_acks(call, &summary, skb);
}
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
- summary.nr_acks == call->tx_top - hard_ack &&
+ call->acks_nr_sacks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ping(call, ack_serial,
+ rxrpc_propose_ping(call, summary.ack_serial,
rxrpc_propose_ack_ping_for_lost_reply);
- rxrpc_congestion_management(call, skb, &summary, acked_serial);
+ /* Drive the congestion management algorithm first and then RACK-TLP as
+ * the latter depends on the state/change in state in the former.
+ */
+ rxrpc_congestion_management(call, &summary);
+ rxrpc_rack_detect_loss_and_arm_timer(call, &summary);
+ rxrpc_tlp_process_ack(call, &summary);
+ if (call->tlp_serial && after_eq(summary.acked_serial, call->tlp_serial))
+ call->tlp_serial = 0;
send_response:
- if (ack.reason == RXRPC_ACK_PING)
- rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
+ if (summary.ack_reason == RXRPC_ACK_PING)
+ rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, summary.ack_serial,
rxrpc_propose_ack_respond_to_ping);
else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
- rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
+ rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, summary.ack_serial,
rxrpc_propose_ack_respond_to_ack);
}
@@ -1048,12 +1243,10 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
- unsigned long now = jiffies, expect_rx_by;
+ ktime_t delay = ms_to_ktime(timo);
- expect_rx_by = now + timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, now,
- rxrpc_timer_set_for_normal);
+ call->expect_rx_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
}
switch (sp->hdr.type) {
@@ -1102,5 +1295,5 @@ void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
break;
}
- rxrpc_input_call_event(call, skb);
+ rxrpc_input_call_event(call);
}
diff --git a/net/rxrpc/input_rack.c b/net/rxrpc/input_rack.c
new file mode 100644
index 000000000000..13c371261e0a
--- /dev/null
+++ b/net/rxrpc/input_rack.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* RACK-TLP [RFC8958] Implementation
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ar-internal.h"
+
+static bool rxrpc_rack_sent_after(ktime_t t1, rxrpc_seq_t seq1,
+ ktime_t t2, rxrpc_seq_t seq2)
+{
+ if (ktime_after(t1, t2))
+ return true;
+ return t1 == t2 && after(seq1, seq2);
+}
+
+/*
+ * Mark a packet lost.
+ */
+static void rxrpc_rack_mark_lost(struct rxrpc_call *call,
+ struct rxrpc_txqueue *tq, unsigned int ix)
+{
+ if (__test_and_set_bit(ix, &tq->segment_lost)) {
+ if (__test_and_clear_bit(ix, &tq->segment_retransmitted))
+ call->tx_nr_resent--;
+ } else {
+ call->tx_nr_lost++;
+ }
+ tq->segment_xmit_ts[ix] = UINT_MAX;
+}
+
+/*
+ * Get the transmission time of a packet in the Tx queue.
+ */
+static ktime_t rxrpc_get_xmit_ts(const struct rxrpc_txqueue *tq, unsigned int ix)
+{
+ if (tq->segment_xmit_ts[ix] == UINT_MAX)
+ return KTIME_MAX;
+ return ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);
+}
+
+/*
+ * Get a bitmask of nack bits for a queue segment and mask off any that aren't
+ * yet reported.
+ */
+static unsigned long rxrpc_tq_nacks(const struct rxrpc_txqueue *tq)
+{
+ unsigned long nacks = ~tq->segment_acked;
+
+ if (tq->nr_reported_acks < RXRPC_NR_TXQUEUE)
+ nacks &= (1UL << tq->nr_reported_acks) - 1;
+ return nacks;
+}
+
+/*
+ * Update the RACK state for the most recently sent packet that has been
+ * delivered [RFC8958 6.2 Step 2].
+ */
+static void rxrpc_rack_update(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned int ix)
+{
+ rxrpc_seq_t seq = tq->qbase + ix;
+ ktime_t xmit_ts = rxrpc_get_xmit_ts(tq, ix);
+ ktime_t rtt = ktime_sub(call->acks_latest_ts, xmit_ts);
+
+ if (__test_and_clear_bit(ix, &tq->segment_lost))
+ call->tx_nr_lost--;
+
+ if (test_bit(ix, &tq->segment_retransmitted)) {
+ /* Use Rx.serial instead of TCP.ACK.ts_option.echo_reply. */
+ if (before(call->acks_highest_serial, tq->segment_serial[ix]))
+ return;
+ if (rtt < minmax_get(&call->min_rtt))
+ return;
+ }
+
+ /* The RACK algorithm requires the segment ACKs to be traversed in
+ * order of segment transmission - but the only thing this seems to
+ * matter for is that RACK.rtt is set to the rtt of the most recently
+ * transmitted segment. We should be able to achieve the same by only
+ * setting RACK.rtt if the xmit time is greater.
+ */
+ if (ktime_after(xmit_ts, call->rack_rtt_ts)) {
+ call->rack_rtt = rtt;
+ call->rack_rtt_ts = xmit_ts;
+ }
+
+ if (rxrpc_rack_sent_after(xmit_ts, seq, call->rack_xmit_ts, call->rack_end_seq)) {
+ call->rack_rtt = rtt;
+ call->rack_xmit_ts = xmit_ts;
+ call->rack_end_seq = seq;
+ }
+}
+
+/*
+ * Detect data segment reordering [RFC8958 6.2 Step 3].
+ */
+static void rxrpc_rack_detect_reordering(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned int ix)
+{
+ rxrpc_seq_t seq = tq->qbase + ix;
+
+ /* Track the highest sequence number so far ACK'd. This is not
+ * necessarily the same as ack.firstPacket + ack.nAcks - 1 as the peer
+ * could put a NACK in the last SACK slot.
+ */
+ if (after(seq, call->rack_fack))
+ call->rack_fack = seq;
+ else if (before(seq, call->rack_fack) &&
+ test_bit(ix, &tq->segment_retransmitted))
+ call->rack_reordering_seen = true;
+}
+
+void rxrpc_input_rack_one(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned int ix)
+{
+ rxrpc_rack_update(call, summary, tq, ix);
+ rxrpc_rack_detect_reordering(call, summary, tq, ix);
+}
+
+void rxrpc_input_rack(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ unsigned long new_acks)
+{
+ while (new_acks) {
+ unsigned int ix = __ffs(new_acks);
+
+ __clear_bit(ix, &new_acks);
+ rxrpc_input_rack_one(call, summary, tq, ix);
+ }
+
+ trace_rxrpc_rack_update(call, summary);
+}
+
+/*
+ * Update the reordering window [RFC8958 6.2 Step 4]. Returns the updated
+ * duration of the reordering window.
+ *
+ * Note that the Rx protocol doesn't have a 'DSACK option' per se, but ACKs can
+ * be given a 'DUPLICATE' reason with the serial number referring to the
+ * duplicated DATA packet. Rx does not indicate whether this was a
+ * reception of the same packet twice or a retransmission of a packet we
+ * already received (though this could be determined by the transmitter based
+ * on the serial number).
+ */
+static ktime_t rxrpc_rack_update_reo_wnd(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary)
+{
+ rxrpc_seq_t snd_una = call->acks_lowest_nak; /* Lowest unack'd seq */
+ rxrpc_seq_t snd_nxt = call->tx_transmitted + 1; /* Next seq to be sent */
+ bool have_dsack_option = summary->ack_reason == RXRPC_ACK_DUPLICATE;
+ int dup_thresh = 3;
+
+ /* DSACK-based reordering window adaptation */
+ if (!call->rack_dsack_round_none &&
+ after_eq(snd_una, call->rack_dsack_round))
+ call->rack_dsack_round_none = true;
+
+ /* Grow the reordering window per round that sees DSACK. Reset the
+ * window after 16 DSACK-free recoveries.
+ */
+ if (call->rack_dsack_round_none && have_dsack_option) {
+ call->rack_dsack_round_none = false;
+ call->rack_dsack_round = snd_nxt;
+ call->rack_reo_wnd_mult++;
+ call->rack_reo_wnd_persist = 16;
+ } else if (summary->exiting_fast_or_rto_recovery) {
+ call->rack_reo_wnd_persist--;
+ if (call->rack_reo_wnd_persist <= 0)
+ call->rack_reo_wnd_mult = 1;
+ }
+
+ if (!call->rack_reordering_seen) {
+ if (summary->in_fast_or_rto_recovery)
+ return 0;
+ if (call->acks_nr_sacks >= dup_thresh)
+ return 0;
+ }
+
+ return us_to_ktime(umin(call->rack_reo_wnd_mult * minmax_get(&call->min_rtt) / 4,
+ call->srtt_us >> 3));
+}
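Putting sample numbers through the window formula above helps; the sketch assumes, as in the TCP-style RTT code, that srtt_us carries the smoothed RTT scaled by 8, so shifting right by 3 recovers microseconds (that scaling is an assumption here, not something this diff states).

	#include <stdio.h>

	int main(void)
	{
		unsigned int reo_wnd_mult = 2;
		unsigned int min_rtt_us   = 10000;		/* 10 ms windowed minimum RTT */
		unsigned int srtt_us      = 40000 << 3;		/* assumed 8x-scaled SRTT of 40 ms */
		unsigned int a = reo_wnd_mult * min_rtt_us / 4;	/* 5000 us */
		unsigned int b = srtt_us >> 3;			/* 40000 us */

		printf("reo_wnd = %u us\n", a < b ? a : b);	/* -> 5000 us */
		return 0;
	}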
+
+/*
+ * Detect losses [RFC8958 6.2 Step 5].
+ */
+static ktime_t rxrpc_rack_detect_loss(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary)
+{
+ struct rxrpc_txqueue *tq;
+ ktime_t timeout = 0, lost_after, now = ktime_get_real();
+
+ call->rack_reo_wnd = rxrpc_rack_update_reo_wnd(call, summary);
+ lost_after = ktime_add(call->rack_rtt, call->rack_reo_wnd);
+ trace_rxrpc_rack_scan_loss(call);
+
+ for (tq = call->tx_queue; tq; tq = tq->next) {
+ unsigned long nacks = rxrpc_tq_nacks(tq);
+
+ if (after(tq->qbase, call->tx_transmitted))
+ break;
+ trace_rxrpc_rack_scan_loss_tq(call, tq, nacks);
+
+ /* Skip ones marked lost but not yet retransmitted */
+ nacks &= ~tq->segment_lost | tq->segment_retransmitted;
+
+ while (nacks) {
+ unsigned int ix = __ffs(nacks);
+ rxrpc_seq_t seq = tq->qbase + ix;
+ ktime_t remaining;
+ ktime_t xmit_ts = rxrpc_get_xmit_ts(tq, ix);
+
+ __clear_bit(ix, &nacks);
+
+ if (rxrpc_rack_sent_after(call->rack_xmit_ts, call->rack_end_seq,
+ xmit_ts, seq)) {
+ remaining = ktime_sub(ktime_add(xmit_ts, lost_after), now);
+ if (remaining <= 0) {
+ rxrpc_rack_mark_lost(call, tq, ix);
+ trace_rxrpc_rack_detect_loss(call, summary, seq);
+ } else {
+ timeout = max(remaining, timeout);
+ }
+ }
+ }
+ }
+
+ return timeout;
+}
+
+/*
+ * Detect losses and set a timer to retry the detection [RFC8958 6.2 Step 5].
+ */
+void rxrpc_rack_detect_loss_and_arm_timer(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary)
+{
+ ktime_t timeout = rxrpc_rack_detect_loss(call, summary);
+
+ if (timeout) {
+ call->rack_timer_mode = RXRPC_CALL_RACKTIMER_RACK_REORDER;
+ call->rack_timo_at = ktime_add(ktime_get_real(), timeout);
+ trace_rxrpc_rack_timer(call, timeout, false);
+ trace_rxrpc_timer_set(call, timeout, rxrpc_timer_trace_rack_reo);
+ }
+}
+
+/*
+ * Handle RACK-TLP RTO expiration [RFC8958 6.3].
+ */
+static void rxrpc_rack_mark_losses_on_rto(struct rxrpc_call *call)
+{
+ struct rxrpc_txqueue *tq;
+ rxrpc_seq_t snd_una = call->acks_lowest_nak; /* Lowest unack'd seq */
+ ktime_t lost_after = ktime_add(call->rack_rtt, call->rack_reo_wnd);
+ ktime_t deadline = ktime_sub(ktime_get_real(), lost_after);
+
+ for (tq = call->tx_queue; tq; tq = tq->next) {
+ unsigned long unacked = ~tq->segment_acked;
+
+ trace_rxrpc_rack_mark_loss_tq(call, tq);
+ while (unacked) {
+ unsigned int ix = __ffs(unacked);
+ rxrpc_seq_t seq = tq->qbase + ix;
+ ktime_t xmit_ts = rxrpc_get_xmit_ts(tq, ix);
+
+ if (after(seq, call->tx_transmitted))
+ return;
+ __clear_bit(ix, &unacked);
+
+ if (seq == snd_una ||
+ ktime_before(xmit_ts, deadline))
+ rxrpc_rack_mark_lost(call, tq, ix);
+ }
+ }
+}
+
+/*
+ * Calculate the TLP loss probe timeout (PTO) [RFC8958 7.2].
+ */
+ktime_t rxrpc_tlp_calc_pto(struct rxrpc_call *call, ktime_t now)
+{
+ unsigned int flight_size = rxrpc_tx_in_flight(call);
+ ktime_t rto_at = ktime_add(call->tx_last_sent,
+ rxrpc_get_rto_backoff(call, false));
+ ktime_t pto;
+
+ if (call->rtt_count > 0) {
+ /* Use 2*SRTT as the timeout. */
+ pto = ns_to_ktime(call->srtt_us * NSEC_PER_USEC / 4);
+ if (flight_size)
+ pto = ktime_add(pto, call->tlp_max_ack_delay);
+ } else {
+ pto = NSEC_PER_SEC;
+ }
+
+ if (ktime_after(ktime_add(now, pto), rto_at))
+ pto = ktime_sub(rto_at, now);
+ return pto;
+}
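Under the same assumed scaling, the PTO arithmetic above becomes concrete: srtt_us / 4 gives twice the smoothed RTT, an ACK-delay allowance is added while data is in flight, and the result is clamped so the probe cannot fire after the pending RTO. A sketch with invented figures:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_USEC 1000ULL

	int main(void)
	{
		uint64_t srtt_us = 25000 << 3;			/* assumed 8x-scaled SRTT of 25 ms */
		uint64_t max_ack_delay_ns = 2 * 1000 * 1000;	/* assumed 2 ms allowance */
		uint64_t pto_ns = srtt_us * NSEC_PER_USEC / 4;	/* 2*SRTT = 50 ms */

		pto_ns += max_ack_delay_ns;			/* data in flight -> 52 ms */

		uint64_t now_ns = 0, rto_at_ns = 40 * 1000 * 1000; /* RTO due in 40 ms */
		if (now_ns + pto_ns > rto_at_ns)
			pto_ns = rto_at_ns - now_ns;		/* clamp to the RTO: 40 ms */

		printf("PTO = %llu ms\n", (unsigned long long)(pto_ns / 1000000));
		return 0;
	}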
+
+/*
+ * Send a TLP loss probe on PTO expiration [RFC8958 7.3].
+ */
+void rxrpc_tlp_send_probe(struct rxrpc_call *call)
+{
+ unsigned int in_flight = rxrpc_tx_in_flight(call);
+
+ if (after_eq(call->acks_hard_ack, call->tx_transmitted))
+ return; /* Everything we transmitted has been acked. */
+
+ /* There must be no other loss probe still in flight and we need to
+ * have taken a new RTT sample since the last probe or the start of the
+ * connection.
+ */
+ if (!call->tlp_serial &&
+ call->tlp_rtt_taken != call->rtt_taken) {
+ call->tlp_is_retrans = false;
+ if (after(call->send_top, call->tx_transmitted) &&
+ rxrpc_tx_window_space(call) > 0) {
+ /* Transmit the lowest-sequence unsent DATA */
+ call->tx_last_serial = 0;
+ rxrpc_transmit_some_data(call, 1, rxrpc_txdata_tlp_new_data);
+ call->tlp_serial = call->tx_last_serial;
+ call->tlp_seq = call->tx_transmitted;
+ trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_transmit_new);
+ in_flight = rxrpc_tx_in_flight(call);
+ } else {
+ /* Retransmit the highest-sequence DATA sent */
+ call->tx_last_serial = 0;
+ rxrpc_resend_tlp(call);
+ call->tlp_is_retrans = true;
+ trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_retransmit);
+ }
+ } else {
+ trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_busy);
+ }
+
+ if (in_flight != 0) {
+ ktime_t rto = rxrpc_get_rto_backoff(call, false);
+
+ call->rack_timer_mode = RXRPC_CALL_RACKTIMER_RTO;
+ call->rack_timo_at = ktime_add(ktime_get_real(), rto);
+ trace_rxrpc_rack_timer(call, rto, false);
+ trace_rxrpc_timer_set(call, rto, rxrpc_timer_trace_rack_rto);
+ }
+}
+
+/*
+ * Detect losses using the ACK of a TLP loss probe [RFC8958 7.4].
+ */
+void rxrpc_tlp_process_ack(struct rxrpc_call *call, struct rxrpc_ack_summary *summary)
+{
+ if (!call->tlp_serial || after(call->tlp_seq, call->acks_hard_ack))
+ return;
+
+ if (!call->tlp_is_retrans) {
+ /* TLP of new data delivered */
+ trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_new_data);
+ call->tlp_serial = 0;
+ } else if (summary->ack_reason == RXRPC_ACK_DUPLICATE &&
+ summary->acked_serial == call->tlp_serial) {
+ /* General Case: Detected packet losses using RACK [7.4.1] */
+ trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_dup_acked);
+ call->tlp_serial = 0;
+ } else if (after(call->acks_hard_ack, call->tlp_seq)) {
+ /* Repaired the single loss */
+ trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_hard_beyond);
+ call->tlp_serial = 0;
+ // TODO: Invoke congestion control to react to the loss
+ // event the probe has repaired
+ } else if (summary->tlp_probe_acked) {
+ trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_acked);
+ /* Special Case: Detected a single loss repaired by the loss
+ * probe [7.4.2]
+ */
+ call->tlp_serial = 0;
+ } else {
+ trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_incomplete);
+ }
+}
+
+/*
+ * Handle RACK timer expiration.
+ */
+void rxrpc_rack_timer_expired(struct rxrpc_call *call, ktime_t overran_by)
+{
+ struct rxrpc_ack_summary summary = {};
+ enum rxrpc_rack_timer_mode mode = call->rack_timer_mode;
+
+ trace_rxrpc_rack_timer(call, overran_by, true);
+ call->rack_timer_mode = RXRPC_CALL_RACKTIMER_OFF;
+
+ switch (mode) {
+ case RXRPC_CALL_RACKTIMER_RACK_REORDER:
+ rxrpc_rack_detect_loss_and_arm_timer(call, &summary);
+ break;
+ case RXRPC_CALL_RACKTIMER_TLP_PTO:
+ rxrpc_tlp_send_probe(call);
+ break;
+ case RXRPC_CALL_RACKTIMER_RTO:
+ // Might need to poke the congestion algo in some way
+ rxrpc_rack_mark_losses_on_rto(call);
+ break;
+ //case RXRPC_CALL_RACKTIMER_ZEROWIN:
+ default:
+ pr_warn("Unexpected rack timer %u", call->rack_timer_mode);
+ }
+}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 34353b6e584b..e068f9b79d02 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -15,18 +15,18 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
}
/*
- * Work out how much data we can put in an unsecured packet.
+ * Allocate an appropriately sized buffer for the amount of data remaining.
*/
-static int none_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- *_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- *_offset = 0;
- return 0;
+ return rxrpc_alloc_data_txbuf(call, umin(remain, RXRPC_JUMBO_DATALEN), 1, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
+ txb->pkt_len = txb->len;
+ if (txb->len == RXRPC_JUMBO_DATALEN)
+ txb->jumboable = true;
return 0;
}
@@ -79,7 +79,7 @@ const struct rxrpc_security rxrpc_no_security = {
.exit = none_exit,
.init_connection_security = none_init_connection_security,
.free_call_crypto = none_free_call_crypto,
- .how_much_data = none_how_much_data,
+ .alloc_txbuf = none_alloc_txbuf,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.respond_to_challenge = none_respond_to_challenge,
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 4a3a08a0e2cd..64f8d77b8731 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
struct sk_buff_head *rx_queue;
struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
+ struct task_struct *io_thread;
if (unlikely(!local)) {
kfree_skb(skb);
return 0;
}
+ io_thread = READ_ONCE(local->io_thread);
+ if (!io_thread) {
+ kfree_skb(skb);
+ return 0;
+ }
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
#endif
skb_queue_tail(rx_queue, skb);
- rxrpc_wake_up_io_thread(local);
+ wake_up_process(io_thread);
return 0;
}
@@ -124,6 +130,7 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
+ struct rxrpc_ackpacket ack;
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
@@ -141,6 +148,16 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_short_ack);
+ sp->ack.first_ack = ntohl(ack.firstPacket);
+ sp->ack.prev_ack = ntohl(ack.previousPacket);
+ sp->ack.acked_serial = ntohl(ack.serial);
+ sp->ack.reason = ack.reason;
+ sp->ack.nr_acks = ack.nAcks;
+ }
return true;
}
@@ -321,7 +338,6 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
struct rxrpc_channel *chan;
struct rxrpc_call *call = NULL;
unsigned int channel;
- bool ret;
if (sp->hdr.securityIndex != conn->security_ix)
return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
@@ -347,6 +363,12 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
if (sp->hdr.callNumber == 0)
return rxrpc_input_conn_packet(conn, skb);
+ /* Deal with path MTU discovery probing. */
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK &&
+ conn->pmtud_probe &&
+ after_eq(sp->ack.acked_serial, conn->pmtud_probe))
+ rxrpc_input_probe_for_pmtud(conn, sp->ack.acked_serial, false);
+
/* Call-bound packets are routed by connection channel. */
channel = sp->hdr.cid & RXRPC_CHANNELMASK;
chan = &conn->channels[channel];
@@ -402,9 +424,9 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
peer_srx, skb);
}
- ret = rxrpc_input_call_event(call, skb);
+ rxrpc_queue_rx_call_packet(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
- return ret;
+ return true;
}
/*
@@ -421,6 +443,8 @@ int rxrpc_io_thread(void *data)
ktime_t now;
#endif
bool should_stop;
+ LIST_HEAD(conn_attend_q);
+ LIST_HEAD(call_attend_q);
complete(&local->io_thread_ready);
@@ -431,43 +455,26 @@ int rxrpc_io_thread(void *data)
for (;;) {
rxrpc_inc_stat(local->rxnet, stat_io_loop);
- /* Deal with connections that want immediate attention. */
- conn = list_first_entry_or_null(&local->conn_attend_q,
- struct rxrpc_connection,
- attend_link);
- if (conn) {
- spin_lock_bh(&local->lock);
- list_del_init(&conn->attend_link);
- spin_unlock_bh(&local->lock);
-
- rxrpc_input_conn_event(conn, NULL);
- rxrpc_put_connection(conn, rxrpc_conn_put_poke);
- continue;
+ /* Inject a delay into packets if requested. */
+#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
+ now = ktime_get_real();
+ while ((skb = skb_peek(&local->rx_delay_queue))) {
+ if (ktime_before(now, skb->tstamp))
+ break;
+ skb = skb_dequeue(&local->rx_delay_queue);
+ skb_queue_tail(&local->rx_queue, skb);
}
+#endif
- if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
- &local->client_conn_flags))
- rxrpc_discard_expired_client_conns(local);
-
- /* Deal with calls that want immediate attention. */
- if ((call = list_first_entry_or_null(&local->call_attend_q,
- struct rxrpc_call,
- attend_link))) {
- spin_lock_bh(&local->lock);
- list_del_init(&call->attend_link);
- spin_unlock_bh(&local->lock);
-
- trace_rxrpc_call_poked(call);
- rxrpc_input_call_event(call, NULL);
- rxrpc_put_call(call, rxrpc_call_put_poke);
- continue;
+ if (!skb_queue_empty(&local->rx_queue)) {
+ spin_lock_irq(&local->rx_queue.lock);
+ skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
+ spin_unlock_irq(&local->rx_queue.lock);
+ trace_rxrpc_iothread_rx(local, skb_queue_len(&rx_queue));
}
- if (!list_empty(&local->new_client_calls))
- rxrpc_connect_client_calls(local);
-
- /* Process received packets and errors. */
- if ((skb = __skb_dequeue(&rx_queue))) {
+ /* Distribute packets and errors. */
+ while ((skb = __skb_dequeue(&rx_queue))) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
switch (skb->mark) {
case RXRPC_SKB_MARK_PACKET:
@@ -491,27 +498,46 @@ int rxrpc_io_thread(void *data)
rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
break;
}
- continue;
}
- /* Inject a delay into packets if requested. */
-#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
- now = ktime_get_real();
- while ((skb = skb_peek(&local->rx_delay_queue))) {
- if (ktime_before(now, skb->tstamp))
- break;
- skb = skb_dequeue(&local->rx_delay_queue);
- skb_queue_tail(&local->rx_queue, skb);
+ /* Deal with connections that want immediate attention. */
+ spin_lock_irq(&local->lock);
+ list_splice_tail_init(&local->conn_attend_q, &conn_attend_q);
+ spin_unlock_irq(&local->lock);
+
+ while ((conn = list_first_entry_or_null(&conn_attend_q,
+ struct rxrpc_connection,
+ attend_link))) {
+ spin_lock_irq(&local->lock);
+ list_del_init(&conn->attend_link);
+ spin_unlock_irq(&local->lock);
+ rxrpc_input_conn_event(conn, NULL);
+ rxrpc_put_connection(conn, rxrpc_conn_put_poke);
}
-#endif
- if (!skb_queue_empty(&local->rx_queue)) {
- spin_lock_irq(&local->rx_queue.lock);
- skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
- spin_unlock_irq(&local->rx_queue.lock);
- continue;
+ if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+ &local->client_conn_flags))
+ rxrpc_discard_expired_client_conns(local);
+
+ /* Deal with calls that want immediate attention. */
+ spin_lock_irq(&local->lock);
+ list_splice_tail_init(&local->call_attend_q, &call_attend_q);
+ spin_unlock_irq(&local->lock);
+
+ while ((call = list_first_entry_or_null(&call_attend_q,
+ struct rxrpc_call,
+ attend_link))) {
+ spin_lock_irq(&local->lock);
+ list_del_init(&call->attend_link);
+ spin_unlock_irq(&local->lock);
+ trace_rxrpc_call_poked(call);
+ rxrpc_input_call_event(call);
+ rxrpc_put_call(call, rxrpc_call_put_poke);
}
+ if (!list_empty(&local->new_client_calls))
+ rxrpc_connect_client_calls(local);
+
set_current_state(TASK_INTERRUPTIBLE);
should_stop = kthread_should_stop();
if (!skb_queue_empty(&local->rx_queue) ||
@@ -541,7 +567,7 @@ int rxrpc_io_thread(void *data)
}
timeout = nsecs_to_jiffies(delay_ns);
- timeout = max(timeout, 1UL);
+ timeout = umax(timeout, 1);
schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
continue;
@@ -554,7 +580,7 @@ int rxrpc_io_thread(void *data)
__set_current_state(TASK_RUNNING);
rxrpc_see_local(local, rxrpc_local_stop);
rxrpc_destroy_local(local);
- local->io_thread = NULL;
+ WRITE_ONCE(local->io_thread, NULL);
rxrpc_see_local(local, rxrpc_local_stopped);
return 0;
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 34d307368135..a74a4b43904f 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -215,9 +215,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
/* we want to set the don't fragment bit */
rxrpc_local_dont_fragment(local, true);
-
- /* We want receive timestamps. */
- sock_enable_timestamps(usk);
break;
default:
@@ -232,7 +229,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
wait_for_completion(&local->io_thread_ready);
- local->io_thread = io_thread;
+ WRITE_ONCE(local->io_thread, io_thread);
_leave(" = 0");
return 0;
@@ -452,6 +449,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
+ page_frag_cache_drain(&local->tx_alloc);
}
/*
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 825b81183046..8fcc8139d771 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -17,22 +17,22 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned long rxrpc_soft_ack_delay = HZ;
+unsigned long rxrpc_soft_ack_delay = 1000;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
* ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned long rxrpc_idle_ack_delay = HZ / 2;
+unsigned long rxrpc_idle_ack_delay = 500;
/*
* Receive window size in packets. This indicates the maximum number of
@@ -46,13 +46,13 @@ unsigned int rxrpc_rx_window_size = 255;
* Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
* made by gluing normal packets together that we're willing to handle.
*/
-unsigned int rxrpc_rx_mtu = 5692;
+unsigned int rxrpc_rx_mtu = RXRPC_JUMBO(46);
/*
* The maximum number of fragments in a received jumbo packet that we tell the
* sender that we're willing to handle.
*/
-unsigned int rxrpc_rx_jumbo_max = 4;
+unsigned int rxrpc_rx_jumbo_max = 46;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
/*
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 4a292f860ae3..95905b85a8d7 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -48,12 +48,10 @@ static const char rxrpc_keepalive_string[] = "";
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
if (ret < 0) {
- u16 tx_backoff = READ_ONCE(call->tx_backoff);
-
- if (tx_backoff < HZ)
- WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ if (call->tx_backoff < 1000)
+ call->tx_backoff += 100;
} else {
- WRITE_ONCE(call->tx_backoff, 0);
+ call->tx_backoff = 0;
}
}
@@ -65,84 +63,66 @@ static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
* Receiving a response to the ping will prevent the ->expect_rx_by timer from
* expiring.
*/
-static void rxrpc_set_keepalive(struct rxrpc_call *call)
+static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
- unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);
- keepalive_at += now;
- WRITE_ONCE(call->keepalive_at, keepalive_at);
- rxrpc_reduce_call_timer(call, keepalive_at, now,
- rxrpc_timer_set_for_keepalive);
+ call->keepalive_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}
/*
- * Fill out an ACK packet.
+ * Allocate transmission buffers for an ACK and attach them to local->kv[].
*/
-static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
- struct rxrpc_call *call,
- struct rxrpc_txbuf *txb,
- u16 *_rwind)
+static int rxrpc_alloc_ack(struct rxrpc_call *call, size_t sack_size)
{
- struct rxrpc_ackinfo ackinfo;
- unsigned int qsize, sack, wrap, to;
- rxrpc_seq_t window, wtop;
- int rsize;
- u32 mtu, jmax;
- u8 *ackp = txb->acks;
-
- call->ackr_nr_unacked = 0;
- atomic_set(&call->ackr_nr_consumed, 0);
- rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
- clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
-
- window = call->ackr_window;
- wtop = call->ackr_wtop;
- sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
- txb->ack.firstPacket = htonl(window);
- txb->ack.nAcks = wtop - window;
-
- if (after(wtop, window)) {
- wrap = RXRPC_SACK_SIZE - sack;
- to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE);
-
- if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
- memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks);
- } else {
- memcpy(txb->acks, call->ackr_sack_table + sack, wrap);
- memcpy(txb->acks + wrap, call->ackr_sack_table,
- to - wrap);
+ struct rxrpc_wire_header *whdr;
+ struct rxrpc_acktrailer *trailer;
+ struct rxrpc_ackpacket *ack;
+ struct kvec *kv = call->local->kvec;
+ gfp_t gfp = rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS;
+ void *buf, *buf2 = NULL;
+ u8 *filler;
+
+ buf = page_frag_alloc(&call->local->tx_alloc,
+ sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
+ if (!buf)
+ return -ENOMEM;
+
+ if (sack_size) {
+ buf2 = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
+ if (!buf2) {
+ page_frag_free(buf);
+ return -ENOMEM;
}
-
- ackp += to;
- } else if (before(wtop, window)) {
- pr_warn("ack window backward %x %x", window, wtop);
- } else if (txb->ack.reason == RXRPC_ACK_DELAY) {
- txb->ack.reason = RXRPC_ACK_IDLE;
}
- mtu = conn->peer->if_mtu;
- mtu -= conn->peer->hdrsize;
- jmax = rxrpc_rx_jumbo_max;
- qsize = (window - 1) - call->rx_consumed;
- rsize = max_t(int, call->rx_winsize - qsize, 0);
- *_rwind = rsize;
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rsize);
- ackinfo.jumbo_max = htonl(jmax);
-
- *ackp++ = 0;
- *ackp++ = 0;
- *ackp++ = 0;
- memcpy(ackp, &ackinfo, sizeof(ackinfo));
- return txb->ack.nAcks + 3 + sizeof(ackinfo);
+ whdr = buf;
+ ack = buf + sizeof(*whdr);
+ filler = buf + sizeof(*whdr) + sizeof(*ack) + 1;
+ trailer = buf + sizeof(*whdr) + sizeof(*ack) + 1 + 3;
+
+ kv[0].iov_base = whdr;
+ kv[0].iov_len = sizeof(*whdr) + sizeof(*ack);
+ kv[1].iov_base = buf2;
+ kv[1].iov_len = sack_size;
+ kv[2].iov_base = filler;
+ kv[2].iov_len = 3 + sizeof(*trailer);
+ return 3; /* Number of kvec[] used. */
+}
+
+static void rxrpc_free_ack(struct rxrpc_call *call)
+{
+ page_frag_free(call->local->kvec[0].iov_base);
+ if (call->local->kvec[1].iov_base)
+ page_frag_free(call->local->kvec[1].iov_base);
}
/*
* Record the beginning of an RTT probe.
*/
-static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
- enum rxrpc_rtt_tx_trace why)
+static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ ktime_t now, enum rxrpc_rtt_tx_trace why)
{
unsigned long avail = call->rtt_avail;
int rtt_slot = 9;
@@ -155,47 +135,126 @@ static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
goto no_slot;
call->rtt_serial[rtt_slot] = serial;
- call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ call->rtt_sent_at[rtt_slot] = now;
smp_wmb(); /* Write data before avail bit */
set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
- return rtt_slot;
+ return;
no_slot:
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
- return -1;
}
/*
- * Cancel an RTT probe.
+ * Fill out an ACK packet.
*/
-static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
- rxrpc_serial_t serial, int rtt_slot)
+static int rxrpc_fill_out_ack(struct rxrpc_call *call, int nr_kv, u8 ack_reason,
+ rxrpc_serial_t serial_to_ack, rxrpc_serial_t *_ack_serial)
{
- if (rtt_slot != -1) {
- clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
- smp_wmb(); /* Clear pending bit before setting slot */
- set_bit(rtt_slot, &call->rtt_avail);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+ struct kvec *kv = call->local->kvec;
+ struct rxrpc_wire_header *whdr = kv[0].iov_base;
+ struct rxrpc_acktrailer *trailer = kv[2].iov_base + 3;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
+ unsigned int qsize, sack, wrap, to, max_mtu, if_mtu;
+ rxrpc_seq_t window, wtop;
+ ktime_t now = ktime_get_real();
+ int rsize;
+ u8 *filler = kv[2].iov_base;
+ u8 *sackp = kv[1].iov_base;
+
+ rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
+
+ window = call->ackr_window;
+ wtop = call->ackr_wtop;
+ sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
+
+ *_ack_serial = rxrpc_get_next_serial(call->conn);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->serial = htonl(*_ack_serial);
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ whdr->flags = call->conn->out_clientflag | RXRPC_SLOW_START_OK;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ ack->bufferSpace = 0;
+ ack->maxSkew = 0;
+ ack->firstPacket = htonl(window);
+ ack->previousPacket = htonl(call->rx_highest_seq);
+ ack->serial = htonl(serial_to_ack);
+ ack->reason = ack_reason;
+ ack->nAcks = wtop - window;
+ filler[0] = 0;
+ filler[1] = 0;
+ filler[2] = 0;
+
+ if (ack_reason == RXRPC_ACK_PING)
+ whdr->flags |= RXRPC_REQUEST_ACK;
+
+ if (after(wtop, window)) {
+ kv[1].iov_len = ack->nAcks;
+
+ wrap = RXRPC_SACK_SIZE - sack;
+ to = umin(ack->nAcks, RXRPC_SACK_SIZE);
+
+ if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
+ } else {
+ memcpy(sackp, call->ackr_sack_table + sack, wrap);
+ memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
+ }
+ } else if (before(wtop, window)) {
+ pr_warn("ack window backward %x %x", window, wtop);
+ } else if (ack->reason == RXRPC_ACK_DELAY) {
+ ack->reason = RXRPC_ACK_IDLE;
+ }
+
+ qsize = (window - 1) - call->rx_consumed;
+ rsize = max_t(int, call->rx_winsize - qsize, 0);
+
+ if_mtu = call->peer->if_mtu - call->peer->hdrsize;
+ if (call->peer->ackr_adv_pmtud) {
+ max_mtu = umax(call->peer->max_data, rxrpc_rx_mtu);
+ } else {
+ if_mtu = umin(if_mtu, 1444);
+ max_mtu = if_mtu;
}
+
+ trailer->maxMTU = htonl(max_mtu);
+ trailer->ifMTU = htonl(if_mtu);
+ trailer->rwind = htonl(rsize);
+ trailer->jumbo_max = 0; /* Advertise pmtu discovery */
+
+ if (ack_reason == RXRPC_ACK_PING)
+ rxrpc_begin_rtt_probe(call, *_ack_serial, now, rxrpc_rtt_tx_ping);
+ if (whdr->flags & RXRPC_REQUEST_ACK)
+ call->rtt_last_req = now;
+ rxrpc_set_keepalive(call, now);
+ return nr_kv;
}
/*
* Transmit an ACK packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_send_ack_packet(struct rxrpc_call *call, int nr_kv, size_t len,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
+ struct kvec *kv = call->local->kvec;
+ struct rxrpc_wire_header *whdr = kv[0].iov_base;
+ struct rxrpc_acktrailer *trailer = kv[2].iov_base + 3;
struct rxrpc_connection *conn;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len, n;
- int ret, rtt_slot = -1;
- u16 rwind;
+ int ret;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return -ECONNRESET;
+ return;
conn = call->conn;
@@ -203,55 +262,100 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ msg.msg_flags = MSG_SPLICE_PAGES;
- if (txb->ack.reason == RXRPC_ACK_PING)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
-
- n = rxrpc_fill_out_ack(conn, call, txb, &rwind);
- if (n == 0)
- return 0;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
- len = iov[0].iov_len;
-
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
trace_rxrpc_tx_ack(call->debug_id, serial,
- ntohl(txb->ack.firstPacket),
- ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks,
- rwind);
-
- if (txb->ack.reason == RXRPC_ACK_PING)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
+ ntohl(ack->firstPacket),
+ ntohl(ack->serial), ack->reason, ack->nAcks,
+ ntohl(trailer->rwind), why);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);
- /* Grab the highest received seq as late as possible */
- txb->ack.previousPacket = htonl(call->rx_highest_seq);
+ iov_iter_kvec(&msg.msg_iter, WRITE, kv, nr_kv, len);
+ rxrpc_local_dont_fragment(conn->local, why == rxrpc_propose_ack_ping_for_mtu_probe);
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
call->peer->last_tx_at = ktime_get_seconds();
if (ret < 0) {
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_ack);
+ if (why == rxrpc_propose_ack_ping_for_mtu_probe &&
+ ret == -EMSGSIZE)
+ rxrpc_input_probe_for_pmtud(conn, serial, true);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
+ trace_rxrpc_tx_packet(call->debug_id, whdr,
rxrpc_tx_point_call_ack);
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- call->peer->rtt_last_req = ktime_get_real();
+ if (why == rxrpc_propose_ack_ping_for_mtu_probe) {
+ call->peer->pmtud_pending = false;
+ call->peer->pmtud_probing = true;
+ call->conn->pmtud_probe = serial;
+ call->conn->pmtud_call = call->debug_id;
+ trace_rxrpc_pmtud_tx(call);
+ }
}
rxrpc_tx_backoff(call, ret);
+}
- if (!__rxrpc_call_is_complete(call)) {
- if (ret < 0)
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- rxrpc_set_keepalive(call);
+/*
+ * Queue an ACK for immediate transmission.
+ */
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial_to_ack, enum rxrpc_propose_ack_trace why)
+{
+ struct kvec *kv = call->local->kvec;
+ rxrpc_serial_t ack_serial;
+ size_t len;
+ int nr_kv;
+
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ return;
+
+ rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+
+ nr_kv = rxrpc_alloc_ack(call, call->ackr_wtop - call->ackr_window);
+ if (nr_kv < 0) {
+ kleave(" = -ENOMEM");
+ return;
}
- return ret;
+ nr_kv = rxrpc_fill_out_ack(call, nr_kv, ack_reason, serial_to_ack, &ack_serial);
+ len = kv[0].iov_len;
+ len += kv[1].iov_len;
+ len += kv[2].iov_len;
+
+ /* Extend a path MTU probe ACK. */
+ if (why == rxrpc_propose_ack_ping_for_mtu_probe) {
+ size_t probe_mtu = call->peer->pmtud_trial + sizeof(struct rxrpc_wire_header);
+
+ if (len > probe_mtu)
+ goto skip;
+ while (len < probe_mtu) {
+ size_t part = umin(probe_mtu - len, PAGE_SIZE);
+
+ kv[nr_kv].iov_base = page_address(ZERO_PAGE(0));
+ kv[nr_kv].iov_len = part;
+ len += part;
+ nr_kv++;
+ }
+ }
+
+ call->ackr_nr_unacked = 0;
+ atomic_set(&call->ackr_nr_consumed, 0);
+ clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
+
+ trace_rxrpc_send_ack(call, why, ack_reason, ack_serial);
+ rxrpc_send_ack_packet(call, nr_kv, len, ack_serial, why);
+skip:
+ rxrpc_free_ack(call);
+}
+
+/*
+ * Send an ACK probe for path MTU discovery.
+ */
+void rxrpc_send_probe_for_pmtud(struct rxrpc_call *call)
+{
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_mtu_probe);
}
/*
@@ -319,38 +423,38 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
}
/*
- * send a packet through the transport endpoint
+ * Prepare a (sub)packet for transmission.
*/
-int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static size_t rxrpc_prepare_data_subpacket(struct rxrpc_call *call,
+ struct rxrpc_send_data_req *req,
+ struct rxrpc_txbuf *txb,
+ struct rxrpc_wire_header *whdr,
+ rxrpc_serial_t serial, int subpkt)
{
+ struct rxrpc_jumbo_header *jumbo = txb->data - sizeof(*jumbo);
enum rxrpc_req_ack_trace why;
struct rxrpc_connection *conn = call->conn;
- struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len;
- int ret, rtt_slot = -1;
+ struct kvec *kv = &call->local->kvec[1 + subpkt];
+ size_t len = txb->pkt_len;
+ bool last;
+ u8 flags;
- _enter("%x,{%d}", txb->seq, txb->len);
+ _enter("%x,%zd", txb->seq, len);
- /* Each transmission of a Tx packet needs a new serial number */
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
+ txb->serial = serial;
if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
txb->seq == 1)
- txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
+ whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + txb->len;
- len = iov[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
+ txb->flags &= ~RXRPC_REQUEST_ACK;
+ flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
+ last = txb->flags & RXRPC_LAST_PACKET;
- msg.msg_name = &call->peer->srx.transport;
- msg.msg_namelen = call->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ if (subpkt < req->n - 1) {
+ len = RXRPC_JUMBO_DATALEN;
+ goto dont_set_request_ack;
+ }
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
@@ -359,63 +463,245 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
* service call, lest OpenAFS incorrectly send us an ACK with some
* soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- why = rxrpc_reqack_already_on;
- else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
+ if (last && rxrpc_sending_to_client(txb))
why = rxrpc_reqack_no_srv_last;
else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
why = rxrpc_reqack_ack_lost;
- else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
+ else if (txb->flags & RXRPC_TXBUF_RESENT)
why = rxrpc_reqack_retrans;
- else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
+ else if (call->cong_ca_state == RXRPC_CA_SLOW_START && call->cong_cwnd <= RXRPC_MIN_CWND)
why = rxrpc_reqack_slow_start;
else if (call->tx_winsize <= 2)
why = rxrpc_reqack_small_txwin;
- else if (call->peer->rtt_count < 3 && txb->seq & 1)
+ else if (call->rtt_count < 3)
why = rxrpc_reqack_more_rtt;
- else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
+ else if (ktime_before(ktime_add_ms(call->rtt_last_req, 1000), ktime_get_real()))
why = rxrpc_reqack_old_rtt;
+ else if (!last && !after(READ_ONCE(call->send_top), txb->seq))
+ why = rxrpc_reqack_app_stall;
else
goto dont_set_request_ack;
rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
- if (why != rxrpc_reqack_no_srv_last)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
+ if (why != rxrpc_reqack_no_srv_last) {
+ flags |= RXRPC_REQUEST_ACK;
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, -1, serial);
+ call->rtt_last_req = req->now;
+ }
dont_set_request_ack:
- if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
- static int lose;
- if ((lose++ & 7) == 7) {
- ret = 0;
- trace_rxrpc_tx_data(call, txb->seq, serial,
- txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
- true);
- goto done;
+ /* There's a jumbo header prepended to the data if we need it. */
+ if (subpkt < req->n - 1)
+ flags |= RXRPC_JUMBO_PACKET;
+ else
+ flags &= ~RXRPC_JUMBO_PACKET;
+ if (subpkt == 0) {
+ whdr->flags = flags;
+ whdr->cksum = txb->cksum;
+ kv->iov_base = txb->data;
+ } else {
+ jumbo->flags = flags;
+ jumbo->pad = 0;
+ jumbo->cksum = txb->cksum;
+ kv->iov_base = jumbo;
+ len += sizeof(*jumbo);
+ }
+
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial, flags, req->trace);
+ kv->iov_len = len;
+ return len;
+}
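To make the kvec assembly above concrete, a three-subpacket jumbo ends up laid out as below (assuming, per the protocol.h change later in this patch, a 4-byte jumbo header and RXRPC_JUMBO_DATALEN = 1412):

    kv[0]  wire header (28 bytes), flags include RXRPC_JUMBO_PACKET
    kv[1]  subpacket 0 data, forced to 1412 bytes
    kv[2]  jumbo header (4 bytes) + subpacket 1 data (1412 bytes)
    kv[3]  jumbo header (4 bytes) + final subpacket data (may be short);
           this header has RXRPC_JUMBO_PACKET cleared

Only the final subpacket is eligible for the REQUEST_ACK decision made above; the earlier ones jump straight to dont_set_request_ack.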
+
+/*
+ * Prepare a transmission queue object for initial transmission. Returns the
+ * number of microseconds since the transmission queue base timestamp.
+ */
+static unsigned int rxrpc_prepare_txqueue(struct rxrpc_txqueue *tq,
+ struct rxrpc_send_data_req *req)
+{
+ if (!tq)
+ return 0;
+ if (tq->xmit_ts_base == KTIME_MIN) {
+ tq->xmit_ts_base = req->now;
+ return 0;
+ }
+ return ktime_to_us(ktime_sub(req->now, tq->xmit_ts_base));
+}
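The base timestamp lets each segment_xmit_ts[] slot hold a compact 32-bit microsecond offset instead of a full ktime_t. A consumer would recover the absolute transmit time roughly like this (a hypothetical helper for illustration, not code from this patch):

static ktime_t rxrpc_segment_xmit_time(const struct rxrpc_txqueue *tq, int ix)
{
	/* xmit_ts_base was set on the queue's first transmission;
	 * segment_xmit_ts[ix] is the microsecond offset recorded at send time.
	 */
	return ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);
}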
+
+/*
+ * Prepare a (jumbo) packet for transmission.
+ */
+static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call,
+ struct rxrpc_send_data_req *req,
+ struct rxrpc_wire_header *whdr)
+{
+ struct rxrpc_txqueue *tq = req->tq;
+ rxrpc_serial_t serial;
+ unsigned int xmit_ts;
+ rxrpc_seq_t seq = req->seq;
+ size_t len = 0;
+ bool start_tlp = false;
+
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_transmit);
+
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = rxrpc_get_next_serials(call->conn, req->n);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = htonl(seq);
+ whdr->serial = htonl(serial);
+ whdr->type = RXRPC_PACKET_TYPE_DATA;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->conn->service_id);
+
+ call->tx_last_serial = serial + req->n - 1;
+ call->tx_last_sent = req->now;
+ xmit_ts = rxrpc_prepare_txqueue(tq, req);
+ prefetch(tq->next);
+
+ for (int i = 0;;) {
+ int ix = seq & RXRPC_TXQ_MASK;
+ struct rxrpc_txbuf *txb = tq->bufs[seq & RXRPC_TXQ_MASK];
+
+ _debug("prep[%u] tq=%x q=%x", i, tq->qbase, seq);
+
+ /* Record (re-)transmission for RACK [RFC8985 6.1]. */
+ if (__test_and_clear_bit(ix, &tq->segment_lost))
+ call->tx_nr_lost--;
+ if (req->retrans) {
+ __set_bit(ix, &tq->ever_retransmitted);
+ __set_bit(ix, &tq->segment_retransmitted);
+ call->tx_nr_resent++;
+ } else {
+ call->tx_nr_sent++;
+ start_tlp = true;
+ }
+ tq->segment_xmit_ts[ix] = xmit_ts;
+ tq->segment_serial[ix] = serial;
+ if (i + 1 == req->n)
+ /* Only sample the last subpacket in a jumbo. */
+ __set_bit(ix, &tq->rtt_samples);
+ len += rxrpc_prepare_data_subpacket(call, req, txb, whdr, serial, i);
+ serial++;
+ seq++;
+ i++;
+ if (i >= req->n)
+ break;
+ if (!(seq & RXRPC_TXQ_MASK)) {
+ tq = tq->next;
+ trace_rxrpc_tq(call, tq, seq, rxrpc_tq_transmit_advance);
+ xmit_ts = rxrpc_prepare_txqueue(tq, req);
}
}
- trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
+ /* Set timeouts */
+ if (req->tlp_probe) {
+ /* Sending TLP loss probe [RFC8985 7.3]. */
+ call->tlp_serial = serial - 1;
+ call->tlp_seq = seq - 1;
+ } else if (start_tlp) {
+ /* Schedule TLP loss probe [RFC8985 7.2]. */
+ ktime_t pto;
+
+ if (!test_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ /* The first packet may take longer to elicit a response. */
+ pto = NSEC_PER_SEC;
+ else
+ pto = rxrpc_tlp_calc_pto(call, req->now);
+
+ call->rack_timer_mode = RXRPC_CALL_RACKTIMER_TLP_PTO;
+ call->rack_timo_at = ktime_add(req->now, pto);
+ trace_rxrpc_rack_timer(call, pto, false);
+ trace_rxrpc_timer_set(call, pto, rxrpc_timer_trace_rack_tlp_pto);
+ }
+
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));
+
+ call->expect_rx_by = ktime_add(req->now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
+ }
+
+ rxrpc_set_keepalive(call, req->now);
+ page_frag_free(whdr);
+ return len;
+}
+
+/*
+ * Send one or more packets through the transport endpoint
+ */
+void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req *req)
+{
+ struct rxrpc_wire_header *whdr;
+ struct rxrpc_connection *conn = call->conn;
+ enum rxrpc_tx_point frag;
+ struct rxrpc_txqueue *tq = req->tq;
+ struct rxrpc_txbuf *txb;
+ struct msghdr msg;
+ rxrpc_seq_t seq = req->seq;
+ size_t len = sizeof(*whdr);
+ bool new_call = test_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags);
+ int ret, stat_ix;
+
+ _enter("%x,%x-%x", tq->qbase, seq, seq + req->n - 1);
+
+ whdr = page_frag_alloc(&call->local->tx_alloc, sizeof(*whdr), GFP_NOFS);
+ if (!whdr)
+ return; /* Drop the packet if no memory. */
+
+ call->local->kvec[0].iov_base = whdr;
+ call->local->kvec[0].iov_len = sizeof(*whdr);
+
+ stat_ix = umin(req->n, ARRAY_SIZE(call->rxnet->stat_tx_jumbo)) - 1;
+ atomic_inc(&call->rxnet->stat_tx_jumbo[stat_ix]);
+
+ len += rxrpc_prepare_data_packet(call, req, whdr);
+ txb = tq->bufs[seq & RXRPC_TXQ_MASK];
+
+ iov_iter_kvec(&msg.msg_iter, WRITE, call->local->kvec, 1 + req->n, len);
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_SPLICE_PAGES;
+
+ /* Send the packet with the don't fragment bit set unless we think it's
+ * too big or this is a retransmission.
+ */
+ if (seq == call->tx_transmitted + 1 &&
+ len >= sizeof(struct rxrpc_wire_header) + call->peer->max_data) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ } else {
+ rxrpc_local_dont_fragment(conn->local, true);
+ frag = rxrpc_tx_point_call_data_nofrag;
+ }
/* Track what we've attempted to transmit at least once so that the
* retransmission algorithm doesn't try to resend what we haven't sent
- * yet. However, this can race as we can receive an ACK before we get
- * to this point. But, OTOH, if we won't get an ACK mentioning this
- * packet unless the far side received it (though it could have
- * discarded it anyway and NAK'd it).
+ * yet.
*/
- cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);
+ if (seq == call->tx_transmitted + 1)
+ call->tx_transmitted = seq + req->n - 1;
- /* send the packet with the don't fragment bit set if we currently
- * think it's small enough */
- if (txb->len >= call->peer->maxdata)
- goto send_fragmentable;
+ if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
+ static int lose;
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
+ if ((lose++ & 7) == 7) {
+ ret = 0;
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags,
+ rxrpc_txdata_inject_loss);
+ conn->peer->last_tx_at = ktime_get_seconds();
+ goto done;
+ }
+ }
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -427,96 +713,35 @@ dont_set_request_ack:
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
conn->peer->last_tx_at = ktime_get_seconds();
- if (ret < 0) {
+ if (ret == -EMSGSIZE) {
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send_msgsize);
+ trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
+ ret = 0;
+ } else if (ret < 0) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
}
rxrpc_tx_backoff(call, ret);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
-
-done:
- if (ret >= 0) {
- call->tx_last_sent = txb->last_sent;
- if (txb->wire.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = txb->last_sent;
- if (call->peer->rtt_count > 1) {
- unsigned long nowj = jiffies, ack_lost_at;
-
- ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
- ack_lost_at += nowj;
- WRITE_ONCE(call->ack_lost_at, ack_lost_at);
- rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
- rxrpc_timer_set_for_lost_ack);
- }
- }
-
- if (txb->seq == 1 &&
- !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
- &call->flags)) {
- unsigned long nowj = jiffies, expect_rx_by;
-
- expect_rx_by = nowj + call->next_rx_timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
- rxrpc_timer_set_for_normal);
- }
- rxrpc_set_keepalive(call);
- } else {
- /* Cancel the call if the initial transmission fails,
- * particularly if that's due to network routing issues that
- * aren't going away anytime soon. The layer above can arrange
- * the retransmission.
+ if (ret < 0) {
+ /* Cancel the call if the initial transmission fails, or if we hit an
+ * error due to network routing issues that aren't going away
+ * anytime soon. The layer above can arrange the
+ * retransmission.
*/
- if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ if (new_call ||
+ ret == -ENETUNREACH ||
+ ret == -EHOSTUNREACH ||
+ ret == -ECONNREFUSED)
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_USER_ABORT, ret);
}
- _leave(" = %d [%u]", ret, call->peer->maxdata);
- return ret;
-
-send_fragmentable:
- /* attempt to send this message with fragmentation enabled */
- _debug("send fragment");
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
-
- switch (conn->local->srx.transport.family) {
- case AF_INET6:
- case AF_INET:
- rxrpc_local_dont_fragment(conn->local, false);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- conn->peer->last_tx_at = ktime_get_seconds();
-
- rxrpc_local_dont_fragment(conn->local, true);
- break;
-
- default:
- BUG();
- }
-
- if (ret < 0) {
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_frag);
- } else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_frag);
- }
- rxrpc_tx_backoff(call, ret);
- goto done;
+done:
+ _leave(" = %d [%u]", ret, call->peer->max_data);
}
/*
@@ -691,43 +916,3 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
peer->last_tx_at = ktime_get_seconds();
_leave("");
}
-
-/*
- * Schedule an instant Tx resend.
- */
-static inline void rxrpc_instant_resend(struct rxrpc_call *call,
- struct rxrpc_txbuf *txb)
-{
- if (!__rxrpc_call_is_complete(call))
- kdebug("resend");
-}
-
-/*
- * Transmit one packet.
- */
-void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
-{
- int ret;
-
- ret = rxrpc_send_data_packet(call, txb);
- if (ret < 0) {
- switch (ret) {
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- 0, ret);
- break;
- default:
- _debug("need instant resend %d", ret);
- rxrpc_instant_resend(call, txb);
- }
- } else {
- unsigned long now = jiffies;
- unsigned long resend_at = now + call->peer->rto_j;
-
- WRITE_ONCE(call->resend_at, resend_at);
- rxrpc_reduce_call_timer(call, resend_at, now,
- rxrpc_timer_set_for_send);
- }
-}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 552ba84a255c..7f4729234957 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -102,6 +102,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
*/
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
+ unsigned int max_data;
+
/* wind down the local interface MTU */
if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
peer->if_mtu = mtu;
@@ -120,11 +122,15 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
}
}
- if (mtu < peer->mtu) {
- spin_lock(&peer->lock);
- peer->mtu = mtu;
- peer->maxdata = peer->mtu - peer->hdrsize;
- spin_unlock(&peer->lock);
+ max_data = max_t(int, mtu - peer->hdrsize, 500);
+ if (max_data < peer->max_data) {
+ if (peer->pmtud_good > max_data)
+ peer->pmtud_good = max_data;
+ if (peer->pmtud_bad > max_data + 1)
+ peer->pmtud_bad = max_data + 1;
+
+ trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp);
+ peer->max_data = max_data;
}
}
@@ -161,6 +167,13 @@ void rxrpc_input_error(struct rxrpc_local *local, struct sk_buff *skb)
goto out;
}
+ if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 &&
+ serr->ee.ee_type == ICMPV6_PKT_TOOBIG &&
+ serr->ee.ee_code == 0)) {
+ rxrpc_adjust_mtu(peer, serr->ee.ee_info);
+ goto out;
+ }
+
rxrpc_store_error(peer, skb);
out:
rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
@@ -205,23 +218,23 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
struct rxrpc_call *call;
HLIST_HEAD(error_targets);
- spin_lock(&peer->lock);
+ spin_lock_irq(&peer->lock);
hlist_move_list(&peer->error_targets, &error_targets);
while (!hlist_empty(&error_targets)) {
call = hlist_entry(error_targets.first,
struct rxrpc_call, error_link);
hlist_del_init(&call->error_link);
- spin_unlock(&peer->lock);
+ spin_unlock_irq(&peer->lock);
rxrpc_see_call(call, rxrpc_call_see_distribute_error);
rxrpc_set_call_completion(call, compl, 0, -err);
- rxrpc_input_call_event(call, skb);
+ rxrpc_input_call_event(call);
- spin_lock(&peer->lock);
+ spin_lock_irq(&peer->lock);
}
- spin_unlock(&peer->lock);
+ spin_unlock_irq(&peer->lock);
}
/*
@@ -238,7 +251,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
bool use;
int slot;
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
while (!list_empty(collector)) {
peer = list_entry(collector->next,
@@ -249,7 +262,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
continue;
use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (use) {
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
@@ -269,17 +282,17 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
*/
slot += cursor;
slot &= mask;
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
}
rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
}
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
}
/*
@@ -309,7 +322,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
* second; the bucket at cursor + 1 goes at now + 1s and so
* on...
*/
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
list_splice_init(&rxnet->peer_keepalive_new, &collector);
stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
@@ -321,7 +334,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
}
base = now;
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor;
@@ -347,3 +360,84 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
_leave("");
}
+
+/*
+ * Do path MTU probing.
+ */
+void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t acked_serial,
+ bool sendmsg_fail)
+{
+ struct rxrpc_peer *peer = conn->peer;
+ unsigned int max_data = peer->max_data;
+ int good, trial, bad, jumbo;
+
+ good = peer->pmtud_good;
+ trial = peer->pmtud_trial;
+ bad = peer->pmtud_bad;
+ if (good >= bad - 1) {
+ conn->pmtud_probe = 0;
+ peer->pmtud_lost = false;
+ return;
+ }
+
+ if (!peer->pmtud_probing)
+ goto send_probe;
+
+ if (sendmsg_fail || after(acked_serial, conn->pmtud_probe)) {
+ /* Retry a lost probe. */
+ if (!peer->pmtud_lost) {
+ trace_rxrpc_pmtud_lost(conn, acked_serial);
+ conn->pmtud_probe = 0;
+ peer->pmtud_lost = true;
+ goto send_probe;
+ }
+
+ /* The probed size didn't seem to get through. */
+ bad = trial;
+ peer->pmtud_bad = bad;
+ if (bad <= max_data)
+ max_data = bad - 1;
+ } else {
+ /* It did get through. */
+ good = trial;
+ peer->pmtud_good = good;
+ if (good > max_data)
+ max_data = good;
+ }
+
+ max_data = umin(max_data, peer->ackr_max_data);
+ if (max_data != peer->max_data)
+ peer->max_data = max_data;
+
+ jumbo = max_data + sizeof(struct rxrpc_jumbo_header);
+ jumbo /= RXRPC_JUMBO_SUBPKTLEN;
+ peer->pmtud_jumbo = jumbo;
+
+ trace_rxrpc_pmtud_rx(conn, acked_serial);
+ conn->pmtud_probe = 0;
+ peer->pmtud_lost = false;
+
+ if (good < RXRPC_JUMBO(2) && bad > RXRPC_JUMBO(2))
+ trial = RXRPC_JUMBO(2);
+ else if (good < RXRPC_JUMBO(4) && bad > RXRPC_JUMBO(4))
+ trial = RXRPC_JUMBO(4);
+ else if (good < RXRPC_JUMBO(3) && bad > RXRPC_JUMBO(3))
+ trial = RXRPC_JUMBO(3);
+ else if (good < RXRPC_JUMBO(6) && bad > RXRPC_JUMBO(6))
+ trial = RXRPC_JUMBO(6);
+ else if (good < RXRPC_JUMBO(5) && bad > RXRPC_JUMBO(5))
+ trial = RXRPC_JUMBO(5);
+ else if (good < RXRPC_JUMBO(8) && bad > RXRPC_JUMBO(8))
+ trial = RXRPC_JUMBO(8);
+ else if (good < RXRPC_JUMBO(7) && bad > RXRPC_JUMBO(7))
+ trial = RXRPC_JUMBO(7);
+ else
+ trial = (good + bad) / 2;
+ peer->pmtud_trial = trial;
+
+ if (good >= bad)
+ return;
+
+send_probe:
+ peer->pmtud_pending = true;
+}
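A worked run of the search above, assuming an IPv4 path with a 9000-byte interface MTU (so hdrsize = 56, pmtud_good starts at 500 and pmtud_bad at 8945, as set up in rxrpc_assess_MTU_size() below). Using the RXRPC_JUMBO() macro added to protocol.h later in this patch, RXRPC_JUMBO(N) = 28 + 1412 + (N - 1) * 1416:

    trial = RXRPC_JUMBO(2) = 2856  -> probe ACKed:  good = 2856
    trial = RXRPC_JUMBO(4) = 5688  -> probe ACKed:  good = 5688
    trial = RXRPC_JUMBO(6) = 8520  -> probe ACKed:  good = 8520
    RXRPC_JUMBO(7) = 9936 and RXRPC_JUMBO(8) = 11352 both exceed bad,
    so the search bisects: trial = (8520 + 8945) / 2 = 8732, and so on
    until good >= bad - 1.

Each acknowledged probe also raises peer->max_data (capped by ackr_max_data) and recomputes pmtud_jumbo; a probe that goes missing twice in a row pulls bad down to the trial size instead.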
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 49dcda67a0d5..56e09d161a97 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -162,6 +162,11 @@ static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
#endif
peer->if_mtu = 1500;
+ if (peer->max_data < peer->if_mtu - peer->hdrsize) {
+ trace_rxrpc_pmtud_reduce(peer, 0, peer->if_mtu - peer->hdrsize,
+ rxrpc_pmtud_reduce_route);
+ peer->max_data = peer->if_mtu - peer->hdrsize;
+ }
memset(&fl, 0, sizeof(fl));
switch (peer->srx.transport.family) {
@@ -199,8 +204,16 @@ static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
}
peer->if_mtu = dst_mtu(dst);
+ peer->hdrsize += dst->header_len + dst->trailer_len;
+ peer->tx_seg_max = dst->dev->gso_max_segs;
dst_release(dst);
+ peer->max_data = umin(RXRPC_JUMBO(1), peer->if_mtu - peer->hdrsize);
+ peer->pmtud_good = 500;
+ peer->pmtud_bad = peer->if_mtu - peer->hdrsize + 1;
+ peer->pmtud_trial = umin(peer->max_data, peer->pmtud_bad - 1);
+ peer->pmtud_pending = true;
+
_leave(" [if_mtu %u]", peer->if_mtu);
}
@@ -222,11 +235,8 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
- spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
-
- rxrpc_peer_init_rtt(peer);
-
+ peer->recent_srtt_us = UINT_MAX;
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
trace_rxrpc_peer(peer->debug_id, 1, why);
}
@@ -242,9 +252,7 @@ static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
unsigned long hash_key)
{
peer->hash_key = hash_key;
- rxrpc_assess_MTU_size(local, peer);
- peer->mtu = peer->if_mtu;
- peer->rtt_last_req = ktime_get_real();
+
switch (peer->srx.transport.family) {
case AF_INET:
@@ -268,7 +276,9 @@ static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
}
peer->hdrsize += sizeof(struct rxrpc_wire_header);
- peer->maxdata = peer->mtu - peer->hdrsize;
+ peer->max_data = peer->if_mtu - peer->hdrsize;
+
+ rxrpc_assess_MTU_size(local, peer);
}
/*
@@ -304,6 +314,7 @@ static void rxrpc_free_peer(struct rxrpc_peer *peer)
* Set up a new incoming peer. There shouldn't be any other matching peers
* since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers.
+ * Called with interrupts disabled.
*/
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
@@ -348,7 +359,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
return NULL;
}
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
@@ -361,7 +372,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
&rxnet->peer_keepalive_new);
}
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
rxrpc_free_peer(candidate);
@@ -411,10 +422,10 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxrpc_free_peer(peer);
}
@@ -479,7 +490,7 @@ EXPORT_SYMBOL(rxrpc_kernel_get_call_peer);
*/
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer)
{
- return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX;
+ return READ_ONCE(peer->recent_srtt_us);
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 26dc2f26d92d..d803562ca0ac 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -52,9 +52,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
- unsigned long timeout = 0;
- rxrpc_seq_t acks_hard_ack;
+ rxrpc_seq_t tx_bottom;
char lbuff[50], rbuff[50];
+ long timeout = 0;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -76,12 +76,10 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
state = rxrpc_call_state(call);
- if (state != RXRPC_CALL_SERVER_PREALLOC) {
- timeout = READ_ONCE(call->expect_rx_by);
- timeout -= jiffies;
- }
+ if (state != RXRPC_CALL_SERVER_PREALLOC)
+ timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());
- acks_hard_ack = READ_ONCE(call->acks_hard_ack);
+ tx_bottom = READ_ONCE(call->tx_bottom);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
@@ -95,7 +93,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
rxrpc_call_states[state],
call->abort_code,
call->debug_id,
- acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
+ tx_bottom, READ_ONCE(call->tx_top) - tx_bottom,
call->ackr_window, call->ackr_wtop - call->ackr_window,
call->rx_serial,
call->cong_cwnd,
@@ -285,9 +283,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
- "Proto Local "
- " Remote "
- " Use SST MTU LastUse RTT RTO\n"
+ "Proto Local Remote Use SST Maxd LastUse RTT RTO\n"
);
return 0;
}
@@ -300,16 +296,15 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
now = ktime_get_seconds();
seq_printf(seq,
- "UDP %-47.47s %-47.47s %3u"
- " %3u %5u %6llus %8u %8u\n",
+ "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n",
lbuff,
rbuff,
refcount_read(&peer->ref),
peer->cong_ssthresh,
- peer->mtu,
+ peer->max_data,
now - peer->last_tx_at,
- peer->srtt_us >> 3,
- jiffies_to_usecs(peer->rto_j));
+ READ_ONCE(peer->recent_srtt_us),
+ READ_ONCE(peer->recent_rto_us));
return 0;
}
@@ -478,10 +473,11 @@ int rxrpc_stats_show(struct seq_file *seq, void *v)
struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));
seq_printf(seq,
- "Data : send=%u sendf=%u fail=%u\n",
+ "Data : send=%u sendf=%u fail=%u emsz=%u\n",
atomic_read(&rxnet->stat_tx_data_send),
atomic_read(&rxnet->stat_tx_data_send_frag),
- atomic_read(&rxnet->stat_tx_data_send_fail));
+ atomic_read(&rxnet->stat_tx_data_send_fail),
+ atomic_read(&rxnet->stat_tx_data_send_msgsize));
seq_printf(seq,
"Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n",
atomic_read(&rxnet->stat_tx_data),
@@ -510,7 +506,7 @@ int rxrpc_stats_show(struct seq_file *seq, void *v)
atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]),
atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE]));
seq_printf(seq,
- "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
+ "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u z=%u\n",
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]),
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]),
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
@@ -519,13 +515,14 @@ int rxrpc_stats_show(struct seq_file *seq, void *v)
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]),
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]),
atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]),
- atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]));
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]),
+ atomic_read(&rxnet->stat_rx_acks[0]));
seq_printf(seq,
- "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
+ "Why-Req-A: acklost=%u mrtt=%u ortt=%u stall=%u\n",
atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]),
- atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]),
atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]),
- atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]));
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_app_stall]));
seq_printf(seq,
"Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
@@ -533,6 +530,30 @@ int rxrpc_stats_show(struct seq_file *seq, void *v)
atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]),
atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin]));
seq_printf(seq,
+ "Jumbo-Tx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n",
+ atomic_read(&rxnet->stat_tx_jumbo[0]),
+ atomic_read(&rxnet->stat_tx_jumbo[1]),
+ atomic_read(&rxnet->stat_tx_jumbo[2]),
+ atomic_read(&rxnet->stat_tx_jumbo[3]),
+ atomic_read(&rxnet->stat_tx_jumbo[4]),
+ atomic_read(&rxnet->stat_tx_jumbo[5]),
+ atomic_read(&rxnet->stat_tx_jumbo[6]),
+ atomic_read(&rxnet->stat_tx_jumbo[7]),
+ atomic_read(&rxnet->stat_tx_jumbo[8]),
+ atomic_read(&rxnet->stat_tx_jumbo[9]));
+ seq_printf(seq,
+ "Jumbo-Rx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n",
+ atomic_read(&rxnet->stat_rx_jumbo[0]),
+ atomic_read(&rxnet->stat_rx_jumbo[1]),
+ atomic_read(&rxnet->stat_rx_jumbo[2]),
+ atomic_read(&rxnet->stat_rx_jumbo[3]),
+ atomic_read(&rxnet->stat_rx_jumbo[4]),
+ atomic_read(&rxnet->stat_rx_jumbo[5]),
+ atomic_read(&rxnet->stat_rx_jumbo[6]),
+ atomic_read(&rxnet->stat_rx_jumbo[7]),
+ atomic_read(&rxnet->stat_rx_jumbo[8]),
+ atomic_read(&rxnet->stat_rx_jumbo[9]));
+ seq_printf(seq,
"Buffers : txb=%u rxb=%u\n",
atomic_read(&rxrpc_nr_txbuf),
atomic_read(&rxrpc_n_rx_skbs));
@@ -569,6 +590,8 @@ int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
atomic_set(&rxnet->stat_tx_ack_skip, 0);
memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks));
memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks));
+ memset(&rxnet->stat_tx_jumbo, 0, sizeof(rxnet->stat_tx_jumbo));
+ memset(&rxnet->stat_rx_jumbo, 0, sizeof(rxnet->stat_rx_jumbo));
memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack));
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index e8ee4af43ca8..42f70e4636f8 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -92,11 +92,16 @@ struct rxrpc_jumbo_header {
/*
* The maximum number of subpackets that can possibly fit in a UDP packet is:
*
- * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
- * = ((65535 - 28 - 28) / 1416) + 1
- * = 46 non-terminal packets and 1 terminal packet.
+ * (max_UDP - wirehdr + jumbohdr) / (jumbohdr + 1412)
+ * = ((65535 - 28 + 4) / 1416)
+ * = 45 non-terminal packets and 1 terminal packet.
*/
-#define RXRPC_MAX_NR_JUMBO 47
+#define RXRPC_MAX_NR_JUMBO 46
+
+/* Size of a jumbo packet with N subpackets, excluding UDP+IP */
+#define RXRPC_JUMBO(N) ((int)sizeof(struct rxrpc_wire_header) + \
+ RXRPC_JUMBO_DATALEN + \
+ ((N) - 1) * RXRPC_JUMBO_SUBPKTLEN)
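A quick check of the revised limit using this macro (65507 being the largest UDP/IPv4 payload, 65535 minus the 28-byte IP + UDP headers):

    RXRPC_JUMBO(46) = 28 + 1412 + 45 * 1416 = 65160  <= 65507
    RXRPC_JUMBO(47) = 65160 + 1416          = 66576  >  65507

so 46 subpackets (45 non-terminal plus the terminal one) is indeed the most that fits.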
/*****************************************************************************/
/*
@@ -135,9 +140,9 @@ struct rxrpc_ackpacket {
/*
* ACK packets can have a further piece of information tagged on the end
*/
-struct rxrpc_ackinfo {
- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
+struct rxrpc_acktrailer {
+ __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
+ __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
__be32 rwind; /* Rx window size (packets) [AFS 3.4] */
__be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
};
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a482f88c5fc5..32cd5f1d541d 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -36,16 +36,16 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
sk = &rx->sk;
if (rx && sk->sk_state < RXRPC_CLOSE) {
if (call->notify_rx) {
- spin_lock(&call->notify_lock);
+ spin_lock_irq(&call->notify_lock);
call->notify_rx(sk, call, call->user_call_ID);
- spin_unlock(&call->notify_lock);
+ spin_unlock_irq(&call->notify_lock);
} else {
- spin_lock(&rx->recvmsg_lock);
+ spin_lock_irq(&rx->recvmsg_lock);
if (list_empty(&call->recvmsg_link)) {
rxrpc_get_call(call, rxrpc_call_get_notify_socket);
list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
}
- spin_unlock(&rx->recvmsg_lock);
+ spin_unlock_irq(&rx->recvmsg_lock);
if (!sock_flag(sk, SOCK_DEAD)) {
_debug("call %ps", sk->sk_data_ready);
@@ -337,14 +337,14 @@ try_again:
* We also want to weed out calls that got requeued whilst we were
* shovelling data out.
*/
- spin_lock(&rx->recvmsg_lock);
+ spin_lock_irq(&rx->recvmsg_lock);
l = rx->recvmsg_q.next;
call = list_entry(l, struct rxrpc_call, recvmsg_link);
if (!rxrpc_call_is_complete(call) &&
skb_queue_empty(&call->recvmsg_queue)) {
list_del_init(&call->recvmsg_link);
- spin_unlock(&rx->recvmsg_lock);
+ spin_unlock_irq(&rx->recvmsg_lock);
release_sock(&rx->sk);
trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
@@ -355,7 +355,7 @@ try_again:
list_del_init(&call->recvmsg_link);
else
rxrpc_get_call(call, rxrpc_call_get_recvmsg);
- spin_unlock(&rx->recvmsg_lock);
+ spin_unlock_irq(&rx->recvmsg_lock);
call_debug_id = call->debug_id;
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
@@ -445,9 +445,9 @@ error_unlock_call:
error_requeue_call:
if (!(flags & MSG_PEEK)) {
- spin_lock(&rx->recvmsg_lock);
+ spin_lock_irq(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
- spin_unlock(&rx->recvmsg_lock);
+ spin_unlock_irq(&rx->recvmsg_lock);
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index be61d6f5be8d..7474f88d7b18 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -11,23 +11,23 @@
#include <linux/net.h>
#include "ar-internal.h"
-#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
-#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define RXRPC_RTO_MAX (120 * USEC_PER_SEC)
+#define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * USEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies) /* As tcp_jiffies32 */
-static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
+static u32 rxrpc_rto_min_us(struct rxrpc_call *call)
{
return 200;
}
-static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
+static u32 __rxrpc_set_rto(const struct rxrpc_call *call)
{
- return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+ return (call->srtt_us >> 3) + call->rttvar_us;
}
static u32 rxrpc_bound_rto(u32 rto)
{
- return min(rto, RXRPC_RTO_MAX);
+ return clamp(200000, rto + 100000, RXRPC_RTO_MAX);
}
/*
@@ -40,10 +40,10 @@ static u32 rxrpc_bound_rto(u32 rto)
* To save cycles in the RFC 1323 implementation it was better to break
* it up into three procedures. -- erics
*/
-static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
+static void rxrpc_rtt_estimator(struct rxrpc_call *call, long sample_rtt_us)
{
long m = sample_rtt_us; /* RTT */
- u32 srtt = peer->srtt_us;
+ u32 srtt = call->srtt_us;
/* The following amusing code comes from Jacobson's
* article in SIGCOMM '88. Note that rtt and mdev
@@ -66,7 +66,7 @@ static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
srtt += m; /* rtt = 7/8 rtt + 1/8 new */
if (m < 0) {
m = -m; /* m is now abs(error) */
- m -= (peer->mdev_us >> 2); /* similar update on mdev */
+ m -= (call->mdev_us >> 2); /* similar update on mdev */
/* This is similar to one of Eifel findings.
* Eifel blocks mdev updates when rtt decreases.
* This solution is a bit different: we use finer gain
@@ -78,31 +78,31 @@ static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
if (m > 0)
m >>= 3;
} else {
- m -= (peer->mdev_us >> 2); /* similar update on mdev */
+ m -= (call->mdev_us >> 2); /* similar update on mdev */
}
- peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
- if (peer->mdev_us > peer->mdev_max_us) {
- peer->mdev_max_us = peer->mdev_us;
- if (peer->mdev_max_us > peer->rttvar_us)
- peer->rttvar_us = peer->mdev_max_us;
+ call->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
+ if (call->mdev_us > call->mdev_max_us) {
+ call->mdev_max_us = call->mdev_us;
+ if (call->mdev_max_us > call->rttvar_us)
+ call->rttvar_us = call->mdev_max_us;
}
} else {
/* no previous measure. */
srtt = m << 3; /* take the measured time to be rtt */
- peer->mdev_us = m << 1; /* make sure rto = 3*rtt */
- peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
- peer->mdev_max_us = peer->rttvar_us;
+ call->mdev_us = m << 1; /* make sure rto = 3*rtt */
+ call->rttvar_us = umax(call->mdev_us, rxrpc_rto_min_us(call));
+ call->mdev_max_us = call->rttvar_us;
}
- peer->srtt_us = max(1U, srtt);
+ call->srtt_us = umax(srtt, 1);
}
/*
* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
-static void rxrpc_set_rto(struct rxrpc_peer *peer)
+static void rxrpc_set_rto(struct rxrpc_call *call)
{
u32 rto;
@@ -113,7 +113,7 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
* is invisible. Actually, Linux-2.4 also generates erratic
* ACKs in some circumstances.
*/
- rto = __rxrpc_set_rto(peer);
+ rto = __rxrpc_set_rto(call);
/* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them,
@@ -124,72 +124,85 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
- peer->rto_j = rxrpc_bound_rto(rto);
+ call->rto_us = rxrpc_bound_rto(rto);
}
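To make the fixed-point bookkeeping above concrete: srtt_us holds eight times the smoothed RTT and mdev_us a scaled mean deviation, both in microseconds. With illustrative samples:

    first sample m = 500 us:
        srtt_us   = m << 3                  = 4000   (srtt = 500 us)
        mdev_us   = m << 1                  = 1000
        rttvar_us = max(1000, 200)          = 1000   (200 us is rxrpc_rto_min_us())
        rto       = srtt_us / 8 + rttvar_us = 500 + 1000 = 1500 us

    second sample m = 700 us:
        m -= srtt_us >> 3                   -> 700 - 500 = 200
        srtt_us += m                        -> 4200   (srtt = 525 us)
        m -= mdev_us >> 2                   -> 200 - 250 = -50
        mdev_us += m                        -> 950
        rto       = 525 + 1000              = 1525 us

rxrpc_set_rto() then stores the result in call->rto_us, subject to the bounds applied by rxrpc_bound_rto().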
-static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
+static void rxrpc_update_rtt_min(struct rxrpc_call *call, ktime_t resp_time, long rtt_us)
+{
+ /* Window size 5mins in approx usec (ipv4.sysctl_tcp_min_rtt_wlen) */
+ u32 wlen_us = 5ULL * NSEC_PER_SEC / 1024;
+
+ minmax_running_min(&call->min_rtt, wlen_us, resp_time / 1024,
+ (u32)rtt_us ? : jiffies_to_usecs(1));
+}
+
+static void rxrpc_ack_update_rtt(struct rxrpc_call *call, ktime_t resp_time, long rtt_us)
{
if (rtt_us < 0)
return;
- //rxrpc_update_rtt_min(peer, rtt_us);
- rxrpc_rtt_estimator(peer, rtt_us);
- rxrpc_set_rto(peer);
+ /* Update RACK min RTT [RFC8985 6.1 Step 1]. */
+ rxrpc_update_rtt_min(call, resp_time, rtt_us);
+
+ rxrpc_rtt_estimator(call, rtt_us);
+ rxrpc_set_rto(call);
- /* RFC6298: only reset backoff on valid RTT measurement. */
- peer->backoff = 0;
+ /* Only reset backoff on valid RTT measurement [RFC6298]. */
+ call->backoff = 0;
}
/*
* Add RTT information to cache. This is called in softirq mode and has
- * exclusive access to the peer RTT data.
+ * exclusive access to the call RTT data.
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+void rxrpc_call_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
int rtt_slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
ktime_t send_time, ktime_t resp_time)
{
- struct rxrpc_peer *peer = call->peer;
s64 rtt_us;
rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
if (rtt_us < 0)
return;
- spin_lock(&peer->rtt_input_lock);
- rxrpc_ack_update_rtt(peer, rtt_us);
- if (peer->rtt_count < 3)
- peer->rtt_count++;
- spin_unlock(&peer->rtt_input_lock);
+ rxrpc_ack_update_rtt(call, resp_time, rtt_us);
+ if (call->rtt_count < 3)
+ call->rtt_count++;
+ call->rtt_taken++;
+
+ WRITE_ONCE(call->peer->recent_srtt_us, call->srtt_us / 8);
+ WRITE_ONCE(call->peer->recent_rto_us, call->rto_us);
trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
- peer->srtt_us >> 3, peer->rto_j);
+ rtt_us, call->srtt_us, call->rto_us);
}
/*
- * Get the retransmission timeout to set in jiffies, backing it off each time
- * we retransmit.
+ * Get the retransmission timeout to set in nanoseconds, backing it off each
+ * time we retransmit.
*/
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_call *call, bool retrans)
{
- u64 timo_j;
- u8 backoff = READ_ONCE(peer->backoff);
+ u64 timo_us;
+ u32 backoff = READ_ONCE(call->backoff);
- timo_j = peer->rto_j;
- timo_j <<= backoff;
- if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
- WRITE_ONCE(peer->backoff, backoff + 1);
+ timo_us = call->rto_us;
+ timo_us <<= backoff;
+ if (retrans && timo_us * 2 <= RXRPC_RTO_MAX)
+ WRITE_ONCE(call->backoff, backoff + 1);
- if (timo_j < 1)
- timo_j = 1;
+ if (timo_us < 1)
+ timo_us = 1;
- return timo_j;
+ return ns_to_ktime(timo_us * NSEC_PER_USEC);
}
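With the initial 1-second RTO (RXRPC_TIMEOUT_INIT) the left shift above gives, for example:

    backoff 0 -> timeout 1 s
    backoff 1 -> timeout 2 s
    backoff 2 -> timeout 4 s
    ...
    backoff 6 -> timeout 64 s   (another doubling would exceed RXRPC_RTO_MAX,
                                 120 s, so the counter stops advancing)

A valid RTT sample resets the backoff to zero in rxrpc_ack_update_rtt() above.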
-void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
+void rxrpc_call_init_rtt(struct rxrpc_call *call)
{
- peer->rto_j = RXRPC_TIMEOUT_INIT;
- peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
- peer->backoff = 0;
- //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
+ call->rtt_last_req = KTIME_MIN;
+ call->rto_us = RXRPC_TIMEOUT_INIT;
+ call->mdev_us = RXRPC_TIMEOUT_INIT;
+ call->backoff = 0;
+ //minmax_reset(&call->rtt_min, rxrpc_jiffies32, ~0U);
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b32d61d4cdc..6cb37b0eb77f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -145,16 +145,17 @@ error:
/*
* Work out how much data we can put in a packet.
*/
-static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- size_t shdr, buf_size, chunk;
+ struct rxrpc_txbuf *txb;
+ size_t shdr, alloc, limit, part;
+
+ remain = umin(remain, 65535 - sizeof(struct rxrpc_wire_header));
switch (call->conn->security_level) {
default:
- buf_size = chunk = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- shdr = 0;
- goto out;
+ alloc = umin(remain, RXRPC_JUMBO_DATALEN);
+ return rxrpc_alloc_data_txbuf(call, alloc, 1, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
@@ -163,17 +164,22 @@ static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
break;
}
- buf_size = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN);
+ limit = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN) - shdr;
+ if (remain < limit) {
+ part = remain;
+ alloc = round_up(shdr + part, RXKAD_ALIGN);
+ } else {
+ part = limit;
+ alloc = RXRPC_JUMBO_DATALEN;
+ }
- chunk = buf_size - shdr;
- if (remain < chunk)
- buf_size = round_up(shdr + remain, RXKAD_ALIGN);
+ txb = rxrpc_alloc_data_txbuf(call, alloc, RXKAD_ALIGN, gfp);
+ if (!txb)
+ return NULL;
-out:
- *_buf_size = buf_size;
- *_data_size = chunk;
- *_offset = shdr;
- return 0;
+ txb->offset += shdr;
+ txb->space = part;
+ return txb;
}
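A worked example of the sizing above for RXRPC_SECURITY_AUTH, where shdr is the 4-byte rxkad_level1_hdr and RXKAD_ALIGN is 8:

    limit = round_down(1412, 8) - 4 = 1408 - 4 = 1404

    remain = 100 (short tail of a message):
        part  = 100
        alloc = round_up(4 + 100, 8) = 104
        so txb->offset is advanced past the 4-byte header and txb->space = 100

    remain >= 1404 (bulk data):
        part  = 1404
        alloc = RXRPC_JUMBO_DATALEN = 1412
        which is what later lets rxkad_secure_packet() mark the buffer jumboable

For RXRPC_SECURITY_ENCRYPT the same arithmetic applies with the 8-byte rxkad_level2_hdr, giving a per-packet payload limit of 1400 bytes.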
/*
@@ -251,7 +257,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
- struct rxkad_level1_hdr *hdr = (void *)txb->data;
+ struct rxkad_level1_hdr *hdr = txb->data;
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -259,22 +265,22 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
hdr->data_size = htonl((u32)check << 16 | txb->len);
- txb->len += sizeof(struct rxkad_level1_hdr);
- pad = txb->len;
+ txb->pkt_len = sizeof(struct rxkad_level1_hdr) + txb->len;
+ pad = txb->pkt_len;
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
memset(txb->data + txb->offset, 0, pad);
- txb->len += pad;
+ txb->pkt_len += pad;
}
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, txb->data, 8);
+ sg_init_one(&sg, hdr, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
@@ -293,37 +299,34 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
- struct rxkad_level2_hdr *rxkhdr = (void *)txb->data;
+ struct rxkad_level2_hdr *rxkhdr = txb->data;
struct rxrpc_crypt iv;
struct scatterlist sg;
- size_t pad;
+ size_t content, pad;
u16 check;
int ret;
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
rxkhdr->checksum = 0;
- txb->len += sizeof(struct rxkad_level2_hdr);
- pad = txb->len;
- pad = RXKAD_ALIGN - pad;
- pad &= RXKAD_ALIGN - 1;
- if (pad) {
+ content = sizeof(struct rxkad_level2_hdr) + txb->len;
+ txb->pkt_len = round_up(content, RXKAD_ALIGN);
+ pad = txb->pkt_len - content;
+ if (pad)
memset(txb->data + txb->offset, 0, pad);
- txb->len += pad;
- }
/* encrypt from the session key */
token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg, txb->data, txb->len);
+ sg_init_one(&sg, rxkhdr, txb->pkt_len);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, txb->len, iv.x);
+ skcipher_request_set_crypt(req, &sg, &sg, txb->pkt_len, iv.x);
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
return ret;
@@ -362,9 +365,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= txb->seq & 0x3fffffff;
- crypto.buf[0] = txb->wire.callNumber;
+ crypto.buf[0] = htonl(call->call_id);
crypto.buf[1] = htonl(x);
sg_init_one(&sg, crypto.buf, 8);
@@ -378,23 +381,36 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- txb->wire.cksum = htons(y);
+ txb->cksum = htons(y);
switch (call->conn->security_level) {
case RXRPC_SECURITY_PLAIN:
+ txb->pkt_len = txb->len;
ret = 0;
break;
case RXRPC_SECURITY_AUTH:
ret = rxkad_secure_packet_auth(call, txb, req);
+ if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
+ txb->jumboable = true;
break;
case RXRPC_SECURITY_ENCRYPT:
ret = rxkad_secure_packet_encrypt(call, txb, req);
+ if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
+ txb->jumboable = true;
break;
default:
ret = -EPERM;
break;
}
+ /* Clear excess space in the packet */
+ if (txb->pkt_len < txb->alloc_size) {
+ size_t gap = txb->alloc_size - txb->pkt_len;
+ void *p = txb->data;
+
+ memset(p + txb->pkt_len, 0, gap);
+ }
+
skcipher_request_free(req);
_leave(" = %d [set %x]", ret, y);
return ret;
@@ -726,7 +742,6 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
rxrpc_local_dont_fragment(conn->local, false);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
- rxrpc_local_dont_fragment(conn->local, true);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_point_rxkad_response);
@@ -1256,7 +1271,7 @@ const struct rxrpc_security rxkad = {
.free_preparse_server_key = rxkad_free_preparse_server_key,
.destroy_server_key = rxkad_destroy_server_key,
.init_connection_security = rxkad_init_connection_security,
- .how_much_data = rxkad_how_much_data,
+ .alloc_txbuf = rxkad_alloc_txbuf,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 085e7892d310..e848a4777b8c 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
call->unmarshal++;
fallthrough;
case 2:
+ ret = rxperf_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ /* Deal with the terminal magic cookie. */
+ call->iov_len = 4;
+ call->kvec[0].iov_len = call->iov_len;
+ call->kvec[0].iov_base = call->tmp;
+ iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
+ call->unmarshal++;
+ fallthrough;
+ case 3:
ret = rxperf_extract_data(call, false);
if (ret < 0)
return ret;
@@ -503,7 +515,7 @@ static int rxperf_process_call(struct rxperf_call *call)
reply_len + sizeof(rxperf_magic_cookie));
while (reply_len > 0) {
- len = min_t(size_t, reply_len, PAGE_SIZE);
+ len = umin(reply_len, PAGE_SIZE);
bvec_set_page(&bv, ZERO_PAGE(0), len, 0);
iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
msg.msg_flags = MSG_MORE;
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index cb8dd1d3b1d4..9784adc8f275 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -114,10 +114,10 @@ found:
if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
ret = conn->security->init_connection_security(conn, token);
if (ret == 0) {
- spin_lock(&conn->state_lock);
+ spin_lock_irq(&conn->state_lock);
if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
conn->state = RXRPC_CONN_CLIENT;
- spin_unlock(&conn->state_lock);
+ spin_unlock_irq(&conn->state_lock);
}
}
mutex_unlock(&conn->security_lock);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5677d5690a02..84dc6c94f23b 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -29,6 +29,7 @@ bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
call->send_abort_why = why;
call->send_abort_err = error;
call->send_abort_seq = 0;
+ trace_rxrpc_abort_call(call, abort_code);
/* Request abort locklessly vs rxrpc_input_call_event(). */
smp_store_release(&call->send_abort, abort_code);
rxrpc_poke_call(call, rxrpc_call_poke_abort);
@@ -93,9 +94,11 @@ no_wait:
*/
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
+ rxrpc_seq_t tx_bottom = READ_ONCE(call->tx_bottom);
+
if (_tx_win)
- *_tx_win = call->tx_bottom;
- return call->tx_prepared - call->tx_bottom < 256;
+ *_tx_win = tx_bottom;
+ return call->send_top - tx_bottom < 256;
}
/*
@@ -131,13 +134,13 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
rxrpc_seq_t tx_start, tx_win;
signed long rtt, timeout;
- rtt = READ_ONCE(call->peer->srtt_us) >> 3;
+ rtt = READ_ONCE(call->srtt_us) >> 3;
rtt = usecs_to_jiffies(rtt) * 2;
if (rtt < 2)
rtt = 2;
timeout = rtt;
- tx_start = smp_load_acquire(&call->acks_hard_ack);
+ tx_start = READ_ONCE(call->tx_bottom);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -194,8 +197,8 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
DECLARE_WAITQUEUE(myself, current);
int ret;
- _enter(",{%u,%u,%u,%u}",
- call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);
+ _enter(",{%u,%u,%u}",
+ call->tx_bottom, call->tx_top, call->tx_winsize);
add_wait_queue(&call->waitq, &myself);
@@ -239,37 +242,77 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
rxrpc_notify_end_tx_t notify_end_tx)
{
+ struct rxrpc_txqueue *sq = call->send_queue;
rxrpc_seq_t seq = txb->seq;
- bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
-
+ bool poke, last = txb->flags & RXRPC_LAST_PACKET;
+ int ix = seq & RXRPC_TXQ_MASK;
rxrpc_inc_stat(call->rxnet, stat_tx_data);
- ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);
-
- /* We have to set the timestamp before queueing as the retransmit
- * algorithm can see the packet as soon as we queue it.
- */
- txb->last_sent = ktime_get_real();
+ ASSERTCMP(txb->seq, ==, call->send_top + 1);
if (last)
trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
else
trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);
+ if (WARN_ON_ONCE(sq->bufs[ix]))
+ trace_rxrpc_tq(call, sq, seq, rxrpc_tq_queue_dup);
+ else
+ trace_rxrpc_tq(call, sq, seq, rxrpc_tq_queue);
+
/* Add the packet to the call's output buffer */
- spin_lock(&call->tx_lock);
- poke = list_empty(&call->tx_sendmsg);
- list_add_tail(&txb->call_link, &call->tx_sendmsg);
- call->tx_prepared = seq;
- if (last)
+ poke = (READ_ONCE(call->tx_bottom) == call->send_top);
+ sq->bufs[ix] = txb;
+ /* Order send_top after the queue->next pointer and txb content. */
+ smp_store_release(&call->send_top, seq);
+ if (last) {
+ set_bit(RXRPC_CALL_TX_NO_MORE, &call->flags);
rxrpc_notify_end_tx(rx, call, notify_end_tx);
- spin_unlock(&call->tx_lock);
+ call->send_queue = NULL;
+ }
if (poke)
rxrpc_poke_call(call, rxrpc_call_poke_start);
}
/*
+ * Allocate a new txqueue unit and add it to the transmission queue.
+ */
+static int rxrpc_alloc_txqueue(struct sock *sk, struct rxrpc_call *call)
+{
+ struct rxrpc_txqueue *tq;
+
+ tq = kzalloc(sizeof(*tq), sk->sk_allocation);
+ if (!tq)
+ return -ENOMEM;
+
+ tq->xmit_ts_base = KTIME_MIN;
+ for (int i = 0; i < RXRPC_NR_TXQUEUE; i++)
+ tq->segment_xmit_ts[i] = UINT_MAX;
+
+ if (call->send_queue) {
+ tq->qbase = call->send_top + 1;
+ call->send_queue->next = tq;
+ call->send_queue = tq;
+ } else if (WARN_ON(call->tx_queue)) {
+ kfree(tq);
+ return -ENOMEM;
+ } else {
+ /* We start at seq 1, so pretend seq 0 is hard-acked. */
+ tq->nr_reported_acks = 1;
+ tq->segment_acked = 1UL;
+ tq->qbase = 0;
+ call->tx_qbase = 0;
+ call->send_queue = tq;
+ call->tx_qtail = tq;
+ call->tx_queue = tq;
+ }
+
+ trace_rxrpc_tq(call, tq, call->send_top, rxrpc_tq_alloc);
+ return 0;
+}
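The transmission queue is a linked list of fixed-size segments indexed by the low bits of the sequence number: each rxrpc_txqueue covers sequence numbers [qbase, qbase + RXRPC_NR_TXQUEUE) and slot selection is just seq & RXRPC_TXQ_MASK. A lookup therefore amounts to something like this (a hypothetical helper for illustration, not code from this patch):

static struct rxrpc_txbuf *rxrpc_find_txbuf(struct rxrpc_call *call, rxrpc_seq_t seq)
{
	struct rxrpc_txqueue *tq;

	for (tq = call->tx_queue; tq; tq = tq->next)
		if (seq - tq->qbase < RXRPC_NR_TXQUEUE)
			return tq->bufs[seq & RXRPC_TXQ_MASK];
	return NULL;
}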
+
+/*
* send data through a socket
* - must be called in process context
* - The caller holds the call user access mutex, but not the socket lock.
@@ -287,6 +330,13 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
bool more = msg->msg_flags & MSG_MORE;
int ret, copied = 0;
+ if (test_bit(RXRPC_CALL_TX_NO_MORE, &call->flags)) {
+ trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
+ call->cid, call->call_id, call->rx_consumed,
+ 0, -EPROTO);
+ return -EPROTO;
+ }
+
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
ret = rxrpc_wait_to_be_connected(call, &timeo);
@@ -303,6 +353,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
reload:
+ txb = call->tx_pending;
+ call->tx_pending = NULL;
+ if (txb)
+ rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
+
ret = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto maybe_error;
@@ -329,53 +384,43 @@ reload:
goto maybe_error;
}
- txb = call->tx_pending;
- call->tx_pending = NULL;
- if (txb)
- rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
-
do {
if (!txb) {
- size_t remain, bufsize, chunk, offset;
+ size_t remain;
_debug("alloc");
if (!rxrpc_check_tx_space(call, NULL))
goto wait_for_space;
+ /* See if we need to begin/extend the Tx queue. */
+ if (!call->send_queue || !((call->send_top + 1) & RXRPC_TXQ_MASK)) {
+ ret = rxrpc_alloc_txqueue(sk, call);
+ if (ret < 0)
+ goto maybe_error;
+ }
+
/* Work out the maximum size of a packet. Assume that
* the security header is going to be in the padded
* region (enc blocksize), but the trailer is not.
*/
remain = more ? INT_MAX : msg_data_left(msg);
- ret = call->conn->security->how_much_data(call, remain,
- &bufsize, &chunk, &offset);
- if (ret < 0)
+ txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
+ if (!txb) {
+ ret = -ENOMEM;
goto maybe_error;
-
- _debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
-
- /* create a buffer that we can retain until it's ACK'd */
- ret = -ENOMEM;
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
- GFP_KERNEL);
- if (!txb)
- goto maybe_error;
-
- txb->offset = offset;
- txb->space -= offset;
- txb->space = min_t(size_t, chunk, txb->space);
+ }
}
_debug("append");
/* append next segment of data to the current buffer */
if (msg_data_left(msg) > 0) {
- size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
+ size_t copy = umin(txb->space, msg_data_left(msg));
_debug("add %zu", copy);
- if (!copy_from_iter_full(txb->data + txb->offset, copy,
- &msg->msg_iter))
+ if (!copy_from_iter_full(txb->data + txb->offset,
+ copy, &msg->msg_iter))
goto efault;
_debug("added");
txb->space -= copy;
@@ -394,18 +439,12 @@ reload:
/* add the packet to the send queue if it's now full */
if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
- if (msg_data_left(msg) == 0 && !more) {
- txb->wire.flags |= RXRPC_LAST_PACKET;
- __set_bit(RXRPC_TXBUF_LAST, &txb->flags);
- }
- else if (call->tx_top - call->acks_hard_ack <
- call->tx_winsize)
- txb->wire.flags |= RXRPC_MORE_PACKETS;
+ if (msg_data_left(msg) == 0 && !more)
+ txb->flags |= RXRPC_LAST_PACKET;
ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
-
rxrpc_queue_packet(rx, call, txb, notify_end_tx);
txb = NULL;
}
@@ -621,7 +660,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
- unsigned long now, j;
bool dropped_lock = false;
int ret;
@@ -667,7 +705,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
} else {
switch (rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_AWAIT_CONN:
- case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
if (p.command == RXRPC_CMD_SEND_ABORT)
break;
fallthrough;
@@ -699,25 +737,21 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
switch (p.call.nr_timeouts) {
case 3:
- j = msecs_to_jiffies(p.call.timeouts.normal);
- if (p.call.timeouts.normal > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_rx_timo, j);
+ WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
fallthrough;
case 2:
- j = msecs_to_jiffies(p.call.timeouts.idle);
- if (p.call.timeouts.idle > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_req_timo, j);
+ WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
- j = p.call.timeouts.hard * HZ;
- now = jiffies;
- j += now;
- WRITE_ONCE(call->expect_term_by, j);
- rxrpc_reduce_call_timer(call, j, now,
- rxrpc_timer_set_for_hard);
+ ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
+
+ WRITE_ONCE(call->expect_term_by,
+				   ktime_add(ktime_get_real(),
+					     delay));
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
+
}
break;
}
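
The hard timeout arrives from userspace as a count of seconds; it has to be converted into a ktime delay first and then added to the current real time to form the absolute expiry (adding the raw second count to a ktime would move the deadline by nanoseconds only). A small userspace model of that arithmetic, using int64 nanoseconds as a stand-in for ktime_t:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000LL
#define MSEC_PER_SEC  1000LL

typedef int64_t ktime_ns;               /* stand-in for ktime_t */

static ktime_ns ktime_get_real_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static ktime_ns ms_to_ktime(int64_t ms)
{
	return ms * NSEC_PER_MSEC;
}

int main(void)
{
	uint32_t hard_secs = 30;        /* user-supplied hard timeout, seconds */

	/* Convert to a delay first, then add it to "now" to get the absolute
	 * expiry; adding the bare second count would only push the deadline
	 * out by 30 nanoseconds.
	 */
	ktime_ns delay = ms_to_ktime((int64_t)hard_secs * MSEC_PER_SEC);
	ktime_ns expect_term_by = ktime_get_real_ns() + delay;

	printf("delay=%lld ns, expiry=%lld ns since epoch\n",
	       (long long)delay, (long long)expect_term_by);
	return 0;
}
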
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index ecaeb4ecfb58..46a20cf4c402 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -11,10 +11,14 @@
#include "ar-internal.h"
static struct ctl_table_header *rxrpc_sysctl_reg_table;
+static const unsigned int rxrpc_rx_mtu_min = 500;
+static const unsigned int rxrpc_jumbo_max = RXRPC_MAX_NR_JUMBO;
static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
+static const unsigned long one_ms = 1;
+static const unsigned long max_ms = 1000;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
@@ -28,24 +32,24 @@ static const unsigned long max_500 = 500;
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds but used in jiffies */
+ /* Values measured in milliseconds */
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_conn_expiry",
@@ -113,7 +117,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (void *)SYSCTL_ONE,
+ .extra1 = (void *)&rxrpc_rx_mtu_min,
.extra2 = (void *)&n_65535,
},
{
@@ -123,9 +127,8 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
- .extra2 = (void *)&four,
+ .extra2 = (void *)&rxrpc_jumbo_max,
},
- { }
};
int __init rxrpc_sysctl_init(void)
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index d43be8512386..c550991d48fa 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -14,45 +14,49 @@ static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;
/*
- * Allocate and partially initialise an I/O request structure.
+ * Allocate and partially initialise a data transmission buffer.
*/
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp)
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp)
{
struct rxrpc_txbuf *txb;
-
- txb = kmalloc(sizeof(*txb), gfp);
- if (txb) {
- INIT_LIST_HEAD(&txb->call_link);
- INIT_LIST_HEAD(&txb->tx_link);
- refcount_set(&txb->ref, 1);
- txb->call_debug_id = call->debug_id;
- txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
- txb->space = sizeof(txb->data);
- txb->len = 0;
- txb->offset = 0;
- txb->flags = 0;
- txb->ack_why = 0;
- txb->seq = call->tx_prepared + 1;
- txb->wire.epoch = htonl(call->conn->proto.epoch);
- txb->wire.cid = htonl(call->cid);
- txb->wire.callNumber = htonl(call->call_id);
- txb->wire.seq = htonl(txb->seq);
- txb->wire.type = packet_type;
- txb->wire.flags = call->conn->out_clientflag;
- txb->wire.userStatus = 0;
- txb->wire.securityIndex = call->security_ix;
- txb->wire._rsvd = 0;
- txb->wire.serviceId = htons(call->dest_srx.srx_service);
-
- trace_rxrpc_txbuf(txb->debug_id,
- txb->call_debug_id, txb->seq, 1,
- packet_type == RXRPC_PACKET_TYPE_DATA ?
- rxrpc_txbuf_alloc_data :
- rxrpc_txbuf_alloc_ack);
- atomic_inc(&rxrpc_nr_txbuf);
+ size_t total, doff, jsize = sizeof(struct rxrpc_jumbo_header);
+ void *buf;
+
+ txb = kzalloc(sizeof(*txb), gfp);
+ if (!txb)
+ return NULL;
+
+ /* We put a jumbo header in the buffer, but not a full wire header to
+ * avoid delayed-corruption problems with zerocopy.
+ */
+ doff = round_up(jsize, data_align);
+ total = doff + data_size;
+
+ data_align = umax(data_align, L1_CACHE_BYTES);
+ mutex_lock(&call->conn->tx_data_alloc_lock);
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ data_align);
+ mutex_unlock(&call->conn->tx_data_alloc_lock);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
}
+ refcount_set(&txb->ref, 1);
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->alloc_size = data_size;
+ txb->space = data_size;
+ txb->offset = 0;
+ txb->flags = call->conn->out_clientflag;
+ txb->seq = call->send_top + 1;
+ txb->data = buf + doff;
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_data);
+
+ atomic_inc(&rxrpc_nr_txbuf);
return txb;
}
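
The new allocation places a jumbo header in front of the payload and rounds its size up to the requested alignment so that the data itself starts aligned, then over-aligns the whole fragment to at least a cache line. A minimal userspace sketch of the offset arithmetic (the helper macros and the header layout are simplified stand-ins for the kernel ones):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_CACHE_BYTES 64
#define round_up(x, a) ((((x) + (a) - 1) / (a)) * (a))
#define umax(a, b)     ((a) > (b) ? (a) : (b))

struct jumbo_header {           /* placeholder for struct rxrpc_jumbo_header */
	uint32_t flags;
	uint32_t cksum;
};

int main(void)
{
	size_t data_size = 1412, data_align = 16;
	size_t jsize = sizeof(struct jumbo_header);

	/* Reserve room for the jumbo header in front of the data, rounded up
	 * so the data itself starts on the requested alignment.
	 */
	size_t doff = round_up(jsize, data_align);
	size_t total = doff + data_size;

	data_align = umax(data_align, L1_CACHE_BYTES);
	unsigned char *buf = aligned_alloc(data_align, round_up(total, data_align));
	if (!buf)
		return 1;

	unsigned char *data = buf + doff;       /* txb->data */
	printf("header at %p, data at %p (offset %zu of %zu bytes)\n",
	       (void *)buf, (void *)data, doff, total);
	free(buf);
	return 0;
}
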
@@ -71,12 +75,12 @@ void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}
-static void rxrpc_free_txbuf(struct rcu_head *rcu)
+static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
{
- struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
-
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
rxrpc_txbuf_free);
+ if (txb->data)
+ page_frag_free(txb->data);
kfree(txb);
atomic_dec(&rxrpc_nr_txbuf);
}
@@ -95,40 +99,6 @@ void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
dead = __refcount_dec_and_test(&txb->ref, &r);
trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
if (dead)
- call_rcu(&txb->rcu, rxrpc_free_txbuf);
- }
-}
-
-/*
- * Shrink the transmit buffer.
- */
-void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
-{
- struct rxrpc_txbuf *txb;
- rxrpc_seq_t hard_ack = smp_load_acquire(&call->acks_hard_ack);
- bool wake = false;
-
- _enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
-
- while ((txb = list_first_entry_or_null(&call->tx_buffer,
- struct rxrpc_txbuf, call_link))) {
- hard_ack = smp_load_acquire(&call->acks_hard_ack);
- if (before(hard_ack, txb->seq))
- break;
-
- if (txb->seq != call->tx_bottom + 1)
- rxrpc_see_txbuf(txb, rxrpc_txbuf_see_out_of_step);
- ASSERTCMP(txb->seq, ==, call->tx_bottom + 1);
- smp_store_release(&call->tx_bottom, call->tx_bottom + 1);
- list_del_rcu(&txb->call_link);
-
- trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
-
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
- if (after(call->acks_hard_ack, call->tx_bottom + 128))
- wake = true;
+ rxrpc_free_txbuf(txb);
}
-
- if (wake)
- wake_up(&call->waitq);
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 470c70deffe2..8180d0c12fce 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -737,16 +737,6 @@ config NET_ACT_SAMPLE
To compile this code as a module, choose M here: the
module will be called act_sample.
-config NET_ACT_IPT
- tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
- help
- Say Y here to be able to invoke iptables targets after successful
- classification.
-
- To compile this code as a module, choose M here: the
- module will be called act_ipt.
-
config NET_ACT_NAT
tristate "Stateless NAT"
depends on NET_CLS_ACT
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3e30d7260493..839790043256 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -62,7 +62,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
{
struct tc_cookie *old;
- old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
+ old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
if (old)
call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
@@ -504,6 +504,50 @@ nla_put_failure:
return -1;
}
+static int
+tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct nlattr *nest;
+ int err = -EINVAL;
+ u32 flags;
+
+ if (tcf_action_dump_terse(skb, a, false))
+ goto nla_put_failure;
+
+ if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
+ nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
+ a->hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ if (a->used_hw_stats_valid &&
+ nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
+ a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
+ if (flags &&
+ nla_put_bitfield32(skb, TCA_ACT_FLAGS,
+ flags, flags))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
+ goto nla_put_failure;
+
+ nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
+ nla_nest_end(skb, nest);
+ return err;
+ }
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -830,7 +874,6 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
u32 max;
if (*index) {
-again:
rcu_read_lock();
p = idr_find(&idrinfo->action_idr, *index);
@@ -839,7 +882,7 @@ again:
* index but did not assign the pointer yet.
*/
rcu_read_unlock();
- goto again;
+ return -EAGAIN;
}
if (!p) {
@@ -1191,51 +1234,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return a->ops->dump(skb, a, bind, ref);
}
-int
-tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
-{
- int err = -EINVAL;
- unsigned char *b = skb_tail_pointer(skb);
- struct nlattr *nest;
- u32 flags;
-
- if (tcf_action_dump_terse(skb, a, false))
- goto nla_put_failure;
-
- if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
- nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
- a->hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- if (a->used_hw_stats_valid &&
- nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
- a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
- if (flags &&
- nla_put_bitfield32(skb, TCA_ACT_FLAGS,
- flags, flags))
- goto nla_put_failure;
-
- if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
- goto nla_put_failure;
-
- nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- err = tcf_action_dump_old(skb, a, bind, ref);
- if (err > 0) {
- nla_nest_end(skb, nest);
- return err;
- }
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-EXPORT_SYMBOL(tcf_action_dump_1);
-
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
int bind, int ref, bool terse)
{
@@ -1363,7 +1361,7 @@ struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
if (rtnl_held)
rtnl_unlock();
- request_module("act_%s", act_name);
+ request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
if (rtnl_held)
rtnl_lock();
@@ -1499,8 +1497,29 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
bool skip_sw = tc_skip_sw(fl_flags);
bool skip_hw = tc_skip_hw(fl_flags);
- if (tc_act_bind(act->tcfa_flags))
+ if (tc_act_bind(act->tcfa_flags)) {
+ /* Action is created by classifier and is not
+ * standalone. Check that the user did not set
+ * any action flags different than the
+ * classifier flags, and inherit the flags from
+ * the classifier for the compatibility case
+ * where no flags were specified at all.
+ */
+ if ((tc_act_skip_sw(act->tcfa_flags) && !skip_sw) ||
+ (tc_act_skip_hw(act->tcfa_flags) && !skip_hw)) {
+ NL_SET_ERR_MSG(extack,
+ "Mismatch between action and filter offload flags");
+ err = -EINVAL;
+ goto err;
+ }
+ if (skip_sw)
+ act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_SW;
+ if (skip_hw)
+ act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_HW;
continue;
+ }
+
+ /* Action is standalone */
if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
NL_SET_ERR_MSG(extack,
@@ -2244,13 +2263,16 @@ out_module_put:
return skb->len;
}
+static const struct rtnl_msg_handler tc_action_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_DELACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_GETACTION, .doit = tc_ctl_action,
+ .dumpit = tc_dump_action},
+};
+
static int __init tc_action_init(void)
{
- rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
- 0);
-
+ rtnl_register_many(tc_action_rtnl_msg_handlers);
return 0;
}
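
The repeated per-msgtype register calls are replaced by a const table walked by a single helper. A generic userspace sketch of the table-driven shape (the handler struct and names here are illustrative, not the rtnetlink API):

#include <stdio.h>

enum { RTM_NEWACTION = 48, RTM_DELACTION = 49, RTM_GETACTION = 50 };

typedef int (*doit_fn)(void);
typedef int (*dumpit_fn)(void);

struct msg_handler {
	int msgtype;
	doit_fn doit;
	dumpit_fn dumpit;
};

static int ctl_action(void)  { return 0; }
static int dump_action(void) { return 0; }

static const struct msg_handler handlers[] = {
	{ .msgtype = RTM_NEWACTION, .doit = ctl_action },
	{ .msgtype = RTM_DELACTION, .doit = ctl_action },
	{ .msgtype = RTM_GETACTION, .doit = ctl_action, .dumpit = dump_action },
};

/* One loop replaces N individual register calls. */
static void register_many(const struct msg_handler *h, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("registering msgtype %d (dumpit present: %d)\n",
		       h[i].msgtype, h[i].dumpit != NULL);
}

int main(void)
{
	register_many(handlers, sizeof(handlers) / sizeof(handlers[0]));
	return 0;
}
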
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 6cfee6658103..396b576390d0 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
}
- if (unlikely(!skb->tstamp && skb->mono_delivery_time))
- skb->mono_delivery_time = 0;
+ if (unlikely(!skb->tstamp && skb->tstamp_type))
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
skb_orphan(skb);
@@ -401,6 +401,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.size = sizeof(struct tcf_bpf),
};
+MODULE_ALIAS_NET_ACT("bpf");
static __net_init int bpf_init_net(struct net *net)
{
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index f8762756657d..0fce631e7c91 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -242,6 +242,7 @@ static struct tc_action_ops act_connmark_ops = {
.cleanup = tcf_connmark_cleanup,
.size = sizeof(struct tcf_connmark_info),
};
+MODULE_ALIAS_NET_ACT("connmark");
static __net_init int connmark_init_net(struct net *net)
{
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 7f8b1f2f2ed9..5cc8e407e791 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -709,6 +709,7 @@ static struct tc_action_ops act_csum_ops = {
.offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
+MODULE_ALIAS_NET_ACT("csum");
static __net_init int csum_init_net(struct net *net)
{
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6124d8b128d1..c02f39efc6ef 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -41,21 +41,26 @@ static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);
+struct zones_ht_key {
+ struct net *net;
+ u16 zone;
+};
+
struct tcf_ct_flow_table {
struct rhash_head node; /* In zones tables */
struct rcu_work rwork;
struct nf_flowtable nf_ft;
refcount_t ref;
- u16 zone;
+ struct zones_ht_key key;
bool dying;
};
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
- .key_offset = offsetof(struct tcf_ct_flow_table, zone),
- .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .key_offset = offsetof(struct tcf_ct_flow_table, key),
+ .key_len = offsetofend(struct zones_ht_key, zone),
.automatic_shrinking = true,
};
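
Keying the flow-table hash on (net, zone) rather than the zone alone is what makes the table safe across network namespaces; offsetofend() keeps any trailing structure padding out of the hashed bytes. A compact userspace illustration of that key-length calculation:

#include <stddef.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct net;                             /* opaque, one instance per netns */

struct zones_ht_key {
	struct net *net;
	unsigned short zone;
	/* padding may follow here - it must not be part of the hash key */
};

int main(void)
{
	/* Hash/compare only the bytes up to and including 'zone', so any
	 * trailing padding never leaks into the lookup key.
	 */
	size_t key_len = offsetofend(struct zones_ht_key, zone);

	printf("sizeof(key)=%zu, hashed key_len=%zu\n",
	       sizeof(struct zones_ht_key), key_len);
	return 0;
}
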
@@ -316,11 +321,12 @@ static struct nf_flowtable_type flowtable_ct = {
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
+ struct zones_ht_key key = { .net = net, .zone = params->zone };
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
mutex_lock(&zones_mutex);
- ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
goto out_unlock;
@@ -329,7 +335,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
goto err_alloc;
refcount_set(&ct_ft->ref, 1);
- ct_ft->zone = params->zone;
+ ct_ft->key = key;
err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
if (err)
goto err_insert;
@@ -938,6 +944,8 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
action |= BIT(NF_NAT_MANIP_DST);
err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
+ if (err != NF_ACCEPT)
+ return err & NF_VERDICT_MASK;
if (action & BIT(NF_NAT_MANIP_SRC))
tc_skb_cb(skb)->post_ct_snat = 1;
@@ -1029,7 +1037,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
state.pf = family;
err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
- goto out_push;
+ goto nf_error;
}
do_nat:
@@ -1041,7 +1049,7 @@ do_nat:
err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
if (err != NF_ACCEPT)
- goto drop;
+ goto nf_error;
if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
@@ -1055,8 +1063,9 @@ do_nat:
}
if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
- if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
- goto drop;
+ err = nf_ct_helper(skb, ct, ctinfo, family);
+ if (err != NF_ACCEPT)
+ goto nf_error;
}
if (commit) {
@@ -1069,8 +1078,17 @@ do_nat:
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
- if (nf_conntrack_confirm(skb) != NF_ACCEPT)
- goto drop;
+ err = nf_conntrack_confirm(skb);
+ if (err != NF_ACCEPT)
+ goto nf_error;
+
+ /* The ct may be dropped if a clash has been resolved,
+ * so it's necessary to retrieve it from skb again to
+ * prevent UAF.
+ */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ skip_add = true;
}
if (!skip_add)
@@ -1094,6 +1112,21 @@ out_frag:
drop:
tcf_action_inc_drop_qstats(&c->common);
return TC_ACT_SHOT;
+
+nf_error:
+ /* some verdicts store extra data in upper bits, such
+ * as errno or queue number.
+ */
+ switch (err & NF_VERDICT_MASK) {
+ case NF_DROP:
+ goto drop;
+ case NF_STOLEN:
+ tcf_action_inc_drop_qstats(&c->common);
+ return TC_ACT_CONSUMED;
+ default:
+ DEBUG_NET_WARN_ON_ONCE(1);
+ goto drop;
+ }
}
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
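
The new nf_error label exists because netfilter verdicts can carry extra data (an errno or a queue number) in their upper bits, so only the masked low bits decide between drop and consume. A standalone sketch of that decoding, with stand-in constants rather than the real uapi values:

#include <stdio.h>

enum { NF_DROP = 0, NF_ACCEPT = 1, NF_STOLEN = 2 };
#define NF_VERDICT_MASK  0x000000ffu    /* low byte carries the verdict     */
#define NF_VERDICT_QBITS 16             /* upper bits may carry errno/queue */

enum tc_result { TC_ACT_OK, TC_ACT_SHOT, TC_ACT_CONSUMED };

/* Map a (possibly annotated) netfilter verdict to a tc action result. */
static enum tc_result handle_verdict(unsigned int err)
{
	switch (err & NF_VERDICT_MASK) {
	case NF_ACCEPT:
		return TC_ACT_OK;
	case NF_STOLEN:             /* skb consumed by netfilter */
		return TC_ACT_CONSUMED;
	case NF_DROP:
	default:
		return TC_ACT_SHOT;
	}
}

int main(void)
{
	/* e.g. NF_DROP with an errno packed into the upper bits */
	unsigned int verdict = NF_DROP | (22u << NF_VERDICT_QBITS);

	printf("result=%d\n", handle_verdict(verdict));
	return 0;
}
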
@@ -1150,9 +1183,8 @@ static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
range->min_addr.ip =
nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
- range->max_addr.ip = max_attr ?
- nla_get_in_addr(max_attr) :
- range->min_addr.ip;
+ range->max_addr.ip =
+ nla_get_in_addr_default(max_attr, range->min_addr.ip);
} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
@@ -1281,8 +1313,9 @@ static int tcf_ct_fill_params(struct net *net,
err = -EINVAL;
goto err;
}
- family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
- proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
+ family = nla_get_u8_default(tb[TCA_CT_HELPER_FAMILY], AF_INET);
+ proto = nla_get_u8_default(tb[TCA_CT_HELPER_PROTO],
+ IPPROTO_TCP);
err = nf_ct_add_helper(tmpl, name, family, proto,
p->ct_action & TCA_CT_ACT_NAT, &p->helper);
if (err) {
@@ -1600,6 +1633,7 @@ static struct tc_action_ops act_ct_ops = {
.offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
+MODULE_ALIAS_NET_ACT("ct");
static __net_init int ct_init_net(struct net *net)
{
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index e620f9a84afe..5b1241ddc758 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -197,8 +197,9 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
"dscp mask must be 6 contiguous bits");
return -EINVAL;
}
- dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
- nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
+ dscpstatemask =
+ nla_get_u32_default(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
+ 0);
/* mask & statemask must not overlap */
if (dscpmask & dscpstatemask) {
NL_SET_ERR_MSG_ATTR(extack,
@@ -243,8 +244,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
}
cp_new->net = net;
- cp_new->zone = tb[TCA_CTINFO_ZONE] ?
- nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
+ cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0);
if (dscpmask) {
cp_new->dscpmask = dscpmask;
cp_new->dscpmaskshift = dscpmaskshift;
@@ -363,6 +363,7 @@ static struct tc_action_ops act_ctinfo_ops = {
.cleanup= tcf_ctinfo_cleanup,
.size = sizeof(struct tcf_ctinfo),
};
+MODULE_ALIAS_NET_ACT("ctinfo");
static __net_init int ctinfo_init_net(struct net *net)
{
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 4af3b7ec249f..e949280eb800 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -296,6 +296,7 @@ static struct tc_action_ops act_gact_ops = {
.offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
+MODULE_ALIAS_NET_ACT("gact");
static __net_init int gact_init_net(struct net *net)
{
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c681cd011afd..91c0ec729823 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -190,15 +190,10 @@ static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
entry->interval = interval;
- if (tb[TCA_GATE_ENTRY_IPV])
- entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
- else
- entry->ipv = -1;
+ entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);
- if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
- entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
- else
- entry->maxoctets = -1;
+ entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
+ -1);
return 0;
}
@@ -645,6 +640,7 @@ static struct tc_action_ops act_gate_ops = {
.offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
+MODULE_ALIAS_NET_ACT("gate");
static __net_init int gate_init_net(struct net *net)
{
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 0e867d13beb5..107c6d83dc5c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -889,6 +889,7 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.size = sizeof(struct tcf_ife_info),
};
+MODULE_ALIAS_NET_ACT("ife");
static __net_init int ife_init_net(struct net *net)
{
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6faa7d00da09..5b3814365924 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -635,6 +635,7 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
+MODULE_ALIAS_NET_ACT("mirred");
static __net_init int mirred_init_net(struct net *net)
{
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 34b8edb6cc77..9f86f4e666d3 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -288,16 +288,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
}
p->tcfm_action = parm->m_action;
- p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
- ACT_MPLS_LABEL_NOT_SET;
- p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
- ACT_MPLS_TC_NOT_SET;
- p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
- mpls_ttl;
- p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
- ACT_MPLS_BOS_NOT_SET;
- p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
- htons(ETH_P_MPLS_UC);
+ p->tcfm_label = nla_get_u32_default(tb[TCA_MPLS_LABEL],
+ ACT_MPLS_LABEL_NOT_SET);
+ p->tcfm_tc = nla_get_u8_default(tb[TCA_MPLS_TC], ACT_MPLS_TC_NOT_SET);
+ p->tcfm_ttl = nla_get_u8_default(tb[TCA_MPLS_TTL], mpls_ttl);
+ p->tcfm_bos = nla_get_u8_default(tb[TCA_MPLS_BOS],
+ ACT_MPLS_BOS_NOT_SET);
+ p->tcfm_proto = nla_get_be16_default(tb[TCA_MPLS_PROTO],
+ htons(ETH_P_MPLS_UC));
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
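
Several hunks in this series collapse the open-coded "attribute present ? value : fallback" pattern into nla_get_*_default() helpers. A trivial userspace model of what such a helper does, with the attribute type simplified to a bare value:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a netlink attribute: just a typed value. */
struct nlattr {
	uint32_t value;
};

/* Model of a *_default getter: the attribute's value if present, otherwise
 * the caller-supplied default.
 */
static uint32_t get_u32_default(const struct nlattr *attr, uint32_t defvalue)
{
	return attr ? attr->value : defvalue;
}

int main(void)
{
	struct nlattr label = { .value = 42 };

	printf("%u %u\n",
	       get_u32_default(&label, 7),   /* attribute present -> 42 */
	       get_u32_default(NULL, 7));    /* attribute absent  -> 7  */
	return 0;
}
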
@@ -452,6 +450,7 @@ static struct tc_action_ops act_mpls_ops = {
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
+MODULE_ALIAS_NET_ACT("mpls");
static __net_init int mpls_init_net(struct net *net)
{
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index a180e724634e..d541f553805f 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -324,6 +324,7 @@ static struct tc_action_ops act_nat_ops = {
.cleanup = tcf_nat_cleanup,
.size = sizeof(struct tcf_nat),
};
+MODULE_ALIAS_NET_ACT("nat");
static __net_init int nat_init_net(struct net *net)
{
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 2ef22969f274..fc0a35a7b62a 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -515,11 +515,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
spin_unlock_bh(&p->tcf_lock);
return -ENOBUFS;
}
+ opt->nkeys = parms->tcfp_nkeys;
memcpy(opt->keys, parms->tcfp_keys,
flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
- opt->nkeys = parms->tcfp_nkeys;
opt->flags = parms->tcfp_flags;
opt->action = p->tcf_action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
@@ -620,6 +620,7 @@ static struct tc_action_ops act_pedit_ops = {
.offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
+MODULE_ALIAS_NET_ACT("pedit");
static __net_init int pedit_init_net(struct net *net)
{
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e119b4a3db9f..a214ed681142 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -167,8 +167,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (R_tab) {
new->rate_present = true;
- rate64 = tb[TCA_POLICE_RATE64] ?
- nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_POLICE_RATE64], 0);
psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
qdisc_put_rtab(R_tab);
} else {
@@ -176,8 +175,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (P_tab) {
new->peak_present = true;
- prate64 = tb[TCA_POLICE_PEAKRATE64] ?
- nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
+ prate64 = nla_get_u64_default(tb[TCA_POLICE_PEAKRATE64], 0);
psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
qdisc_put_rtab(P_tab);
} else {
@@ -502,6 +500,7 @@ static struct tc_action_ops act_police_ops = {
.offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
+MODULE_ALIAS_NET_ACT("police");
static __net_init int police_init_net(struct net *net)
{
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index c5c61efe6db4..2ceb4d141b71 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -167,7 +167,9 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
{
struct tcf_sample *s = to_sample(a);
struct psample_group *psample_group;
+ u8 cookie_data[TC_COOKIE_MAX_SIZE];
struct psample_metadata md = {};
+ struct tc_cookie *user_cookie;
int retval;
tcf_lastuse_update(&s->tcf_tm);
@@ -189,6 +191,16 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
skb_push(skb, skb->mac_len);
+ rcu_read_lock();
+ user_cookie = rcu_dereference(a->user_cookie);
+ if (user_cookie) {
+ memcpy(cookie_data, user_cookie->data,
+ user_cookie->len);
+ md.user_cookie = cookie_data;
+ md.user_cookie_len = user_cookie->len;
+ }
+ rcu_read_unlock();
+
md.trunc_size = s->truncate ? s->trunc_size : skb->len;
psample_sample_packet(psample_group, skb, s->rate, &md);
@@ -316,6 +328,7 @@ static struct tc_action_ops act_sample_ops = {
.offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
+MODULE_ALIAS_NET_ACT("sample");
static __net_init int sample_init_net(struct net *net)
{
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 0a3e92888295..f3abe0545989 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -209,6 +209,7 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.size = sizeof(struct tcf_defact),
};
+MODULE_ALIAS_NET_ACT("simple");
static __net_init int simp_init_net(struct net *net)
{
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 754f78b35bb8..1f1d9ce3e968 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -426,6 +426,7 @@ static struct tc_action_ops act_skbedit_ops = {
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
+MODULE_ALIAS_NET_ACT("skbedit");
static __net_init int skbedit_init_net(struct net *net)
{
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index bcb673ab0008..dc0229693461 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -241,13 +241,13 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_skbmod *d = to_skbmod(a);
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbmod_params *p;
- struct tc_skbmod opt = {
- .index = d->tcf_index,
- .refcnt = refcount_read(&d->tcf_refcnt) - ref,
- .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
- };
+ struct tc_skbmod opt;
struct tcf_t t;
+ memset(&opt, 0, sizeof(opt));
+ opt.index = d->tcf_index;
+ opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
+ opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
spin_lock_bh(&d->tcf_lock);
opt.action = d->tcf_action;
p = rcu_dereference_protected(d->skbmod_p,
@@ -287,6 +287,7 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.size = sizeof(struct tcf_skbmod),
};
+MODULE_ALIAS_NET_ACT("skbmod");
static __net_init int skbmod_init_net(struct net *net)
{
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 300b08aa8283..af7c99845948 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -230,7 +230,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
nla_for_each_attr(attr, head, len, rem) {
switch (nla_type(attr)) {
case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
- if (type && type != TUNNEL_GENEVE_OPT) {
+ if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
return -EINVAL;
}
@@ -247,7 +247,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
dst_len -= opt_len;
dst += opt_len;
}
- type = TUNNEL_GENEVE_OPT;
+ type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
if (type) {
@@ -259,7 +259,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
- type = TUNNEL_VXLAN_OPT;
+ type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
if (type) {
@@ -271,7 +271,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
- type = TUNNEL_ERSPAN_OPT;
+ type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
}
}
@@ -302,7 +302,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
switch (nla_type(nla_data(nla))) {
case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
- info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@@ -310,7 +310,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
#endif
case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
- info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@@ -318,7 +318,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
#endif
case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
- info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@@ -363,6 +363,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
bool bind = act_flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
struct tcf_tunnel_key_params *params_new;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *metadata = NULL;
struct tcf_chain *goto_ch = NULL;
struct tc_tunnel_key *parm;
@@ -371,7 +372,6 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
__be16 dst_port = 0;
__be64 key_id = 0;
int opts_len = 0;
- __be16 flags = 0;
u8 tos, ttl;
int ret = 0;
u32 index;
@@ -412,16 +412,16 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
key_id = key32_to_tunnel_id(key32);
- flags = TUNNEL_KEY;
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
}
- flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, flags);
if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
- flags &= ~TUNNEL_CSUM;
+ __clear_bit(IP_TUNNEL_CSUM_BIT, flags);
if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG]))
- flags |= TUNNEL_DONT_FRAGMENT;
+ __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, flags);
if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
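
The tunnel flags move from a packed __be16 word to a per-bit bitmap manipulated with __set_bit/__clear_bit/test_bit, which removes the 16-flag ceiling. A minimal userspace model of the same shape as the tunnel_key_init() hunk (the bitmap helpers and bit numbers below are stand-ins, not the kernel definitions):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int test_bit(unsigned int nr, const unsigned long *map)
{
	return !!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

/* Illustrative bit numbers; the real IP_TUNNEL_*_BIT values live in uapi. */
enum { IP_TUNNEL_KEY_BIT, IP_TUNNEL_CSUM_BIT, IP_TUNNEL_DONT_FRAGMENT_BIT,
       __IP_TUNNEL_FLAG_NUM };

int main(void)
{
	DECLARE_BITMAP(flags, __IP_TUNNEL_FLAG_NUM) = { 0 };

	/* Default to checksumming, then clear it again if no-csum was asked
	 * for - the same sequence as the hunk above.
	 */
	set_bit(IP_TUNNEL_KEY_BIT, flags);
	set_bit(IP_TUNNEL_CSUM_BIT, flags);
	clear_bit(IP_TUNNEL_CSUM_BIT, flags);

	printf("key=%d csum=%d df=%d\n",
	       test_bit(IP_TUNNEL_KEY_BIT, flags),
	       test_bit(IP_TUNNEL_CSUM_BIT, flags),
	       test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, flags));
	return 0;
}
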
@@ -663,15 +663,15 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
if (!start)
return -EMSGSIZE;
- if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
goto err_out;
- } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_vxlan_opts_dump(skb, info);
if (err)
goto err_out;
- } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+ } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_erspan_opts_dump(skb, info);
if (err)
goto err_out;
@@ -741,7 +741,7 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
struct ip_tunnel_key *key = &info->key;
__be32 key_id = tunnel_id_to_key32(key->tun_id);
- if (((key->tun_flags & TUNNEL_KEY) &&
+ if ((test_bit(IP_TUNNEL_KEY_BIT, key->tun_flags) &&
nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
tunnel_key_dump_addresses(skb,
&params->tcft_enc_metadata->u.tun_info) ||
@@ -749,8 +749,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
key->tp_dst)) ||
nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
- !(key->tun_flags & TUNNEL_CSUM)) ||
- ((key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ !test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags)) ||
+ (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) &&
nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) ||
tunnel_key_opts_dump(skb, info))
goto nla_put_failure;
@@ -842,6 +842,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
+MODULE_ALIAS_NET_ACT("tunnel_key");
static __net_init int tunnel_key_init_net(struct net *net)
{
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 836183011a7c..383bf18b6862 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -96,6 +96,7 @@ out:
if (skb_at_tc_ingress(skb))
skb_pull_rcsum(skb, skb->mac_len);
+ skb_reset_mac_len(skb);
return action;
drop:
@@ -427,6 +428,7 @@ static struct tc_action_ops act_vlan_ops = {
.offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
+MODULE_ALIAS_NET_ACT("vlan");
static __net_init int vlan_init_net(struct net *net)
{
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ff3d396a65aa..4f648af8cfaa 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -97,7 +97,7 @@ tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
n, xa_limit_32b, &next, GFP_KERNEL);
- if (err)
+ if (err < 0)
goto err_xa_alloc;
exts->miss_cookie_node = n;
@@ -257,7 +257,7 @@ tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
#ifdef CONFIG_MODULES
if (rtnl_held)
rtnl_unlock();
- request_module("cls_%s", kind);
+ request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
if (rtnl_held)
rtnl_lock();
ops = __tcf_proto_lookup_ops(kind);
@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->protocol = protocol;
tp->prio = prio;
tp->chain = chain;
+ tp->usesw = !tp->ops->reoffload;
spin_lock_init(&tp->lock);
refcount_set(&tp->refcnt, 1);
@@ -410,12 +411,40 @@ static void tcf_proto_get(struct tcf_proto *tp)
refcount_inc(&tp->refcnt);
}
+static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_block *block = tp->chain->block;
+ bool counted = false;
+
+ if (!add) {
+ if (tp->usesw && tp->counted) {
+ if (!atomic_dec_return(&block->useswcnt))
+ static_branch_dec(&tcf_sw_enabled_key);
+ tp->counted = false;
+ }
+ return;
+ }
+
+ spin_lock(&tp->lock);
+ if (tp->usesw && !tp->counted) {
+ counted = true;
+ tp->counted = true;
+ }
+ spin_unlock(&tp->lock);
+
+ if (counted && atomic_inc_return(&block->useswcnt) == 1)
+ static_branch_inc(&tcf_sw_enabled_key);
+#endif
+}
+
static void tcf_chain_put(struct tcf_chain *chain);
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
bool sig_destroy, struct netlink_ext_ack *extack)
{
tp->ops->destroy(tp, rtnl_held, extack);
+ tcf_proto_count_usesw(tp, false);
if (sig_destroy)
tcf_proto_signal_destroyed(tp->chain, tp);
tcf_chain_put(tp->chain);
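
tcf_proto_count_usesw() gates a global static branch on a per-block user count: the first software-path filter enables the key, the last one to go away disables it. A userspace sketch of that first-on/last-off counting (an atomic flag stands in for the static branch, which in the kernel patches code at runtime):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sw_enabled_key;
static atomic_int  useswcnt;

static void count_usesw(bool add)
{
	if (add) {
		/* first software-path user enables the key */
		if (atomic_fetch_add(&useswcnt, 1) == 0)
			atomic_store(&sw_enabled_key, true);
	} else {
		/* last software-path user disables it again */
		if (atomic_fetch_sub(&useswcnt, 1) == 1)
			atomic_store(&sw_enabled_key, false);
	}
}

int main(void)
{
	count_usesw(true);
	count_usesw(true);
	count_usesw(false);
	printf("sw path enabled: %d (users=%d)\n",
	       atomic_load(&sw_enabled_key), atomic_load(&useswcnt));
	count_usesw(false);
	printf("sw path enabled: %d (users=%d)\n",
	       atomic_load(&sw_enabled_key), atomic_load(&useswcnt));
	return 0;
}
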
@@ -1482,6 +1511,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
return 0;
err_dev_insert:
+ tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
@@ -1896,7 +1926,8 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate);
+ bool prio_allocate,
+ struct netlink_ext_ack *extack);
/* Try to insert new proto.
* If proto with specified priority already exists, free new proto
@@ -1920,8 +1951,7 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
return ERR_PTR(-EAGAIN);
}
- tp = tcf_chain_tp_find(chain, &chain_info,
- protocol, prio, false);
+ tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
if (!tp)
err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
mutex_unlock(&chain->filter_chain_lock);
@@ -1981,7 +2011,8 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate)
+ bool prio_allocate,
+ struct netlink_ext_ack *extack)
{
struct tcf_proto **pprev;
struct tcf_proto *tp;
@@ -1992,9 +2023,14 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
pprev = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (prio_allocate ||
- (tp->protocol != protocol && protocol))
+ if (prio_allocate) {
+ NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
+ return ERR_PTR(-ENOSPC);
+ }
+ if (tp->protocol != protocol && protocol) {
+ NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
return ERR_PTR(-EINVAL);
+ }
} else {
tp = NULL;
}
@@ -2260,7 +2296,7 @@ replay:
}
block->classid = parent;
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2275,9 +2311,8 @@ replay:
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, prio_allocate);
+ prio, prio_allocate, extack);
if (IS_ERR(tp)) {
- NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = PTR_ERR(tp);
goto errout_locked;
}
@@ -2367,6 +2402,7 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held, extack);
tfilter_put(tp, fh);
+ tcf_proto_count_usesw(tp, true);
/* q pointer is NULL for shared blocks */
if (q)
q->flags &= ~TCQ_F_CAN_BYPASS;
@@ -2471,7 +2507,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2501,10 +2537,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
- if (!tp || IS_ERR(tp)) {
+ prio, false, extack);
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout_locked;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout_locked;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -2626,7 +2665,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2641,11 +2680,14 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
+ prio, false, extack);
mutex_unlock(&chain->filter_chain_lock);
- if (!tp || IS_ERR(tp)) {
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -3066,7 +3108,7 @@ replay:
if (IS_ERR(block))
return PTR_ERR(block);
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -4014,6 +4056,19 @@ static struct pernet_operations tcf_net_ops = {
.size = sizeof(struct tcf_net),
};
+static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
+ .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
+ .dumpit = tc_dump_chain},
+};
+
static int __init tc_filter_init(void)
{
int err;
@@ -4027,17 +4082,7 @@ static int __init tc_filter_init(void)
goto err_register_pernet_subsys;
xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
-
- rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
- tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
- tc_dump_chain, 0);
+ rtnl_register_many(tc_filter_rtnl_msg_handlers);
return 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index a1f56931330c..ecfaa4f9a04e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -328,6 +328,7 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = {
.bind_class = basic_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("basic");
static int __init init_basic(void)
{
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 382c7a71f81f..7fbe42f0e5c2 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(prog->filter, skb);
}
- if (unlikely(!skb->tstamp && skb->mono_delivery_time))
- skb->mono_delivery_time = 0;
+ if (unlikely(!skb->tstamp && skb->tstamp_type))
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (prog->exts_integrated) {
res->class = 0;
@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, prog->gen_flags);
+
if (oldprog) {
idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
@@ -693,6 +695,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.dump = cls_bpf_dump,
.bind_class = cls_bpf_bind_class,
};
+MODULE_ALIAS_NET_CLS("bpf");
static int __init cls_bpf_init_mod(void)
{
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7ee8dbf49ed0..424252982d6a 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -209,6 +209,7 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.dump = cls_cgroup_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("cgroup");
static int __init init_cgroup_cls(void)
{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6ab317b48d6c..5c2580a07530 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_KEYS] = { .type = NLA_U32 },
[TCA_FLOW_MODE] = { .type = NLA_U32 },
[TCA_FLOW_BASECLASS] = { .type = NLA_U32 },
- [TCA_FLOW_RSHIFT] = { .type = NLA_U32 },
+ [TCA_FLOW_RSHIFT] = NLA_POLICY_MAX(NLA_U32,
+ 31 /* BITS_PER_U32 - 1 */),
[TCA_FLOW_ADDEND] = { .type = NLA_U32 },
[TCA_FLOW_MASK] = { .type = NLA_U32 },
[TCA_FLOW_XOR] = { .type = NLA_U32 },
@@ -702,6 +703,7 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = {
.walk = flow_walk,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("flow");
static int __init cls_flow_init(void)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6ee7064c82fc..03505673d523 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -28,6 +28,7 @@
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
+#include <net/pfcp.h>
#include <net/tc_wrapper.h>
#include <net/dst.h>
@@ -40,6 +41,16 @@
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
+#define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
+ (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
+ TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)
+
+#define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
+ (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
+
struct fl_flow_key {
struct flow_dissector_key_meta meta;
struct flow_dissector_key_control control;
@@ -668,8 +679,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
- [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
- [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
+ [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
@@ -731,6 +744,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 },
[TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
[TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
+ [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
};
static const struct nla_policy
@@ -741,6 +758,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED },
};
static const struct nla_policy
@@ -771,6 +789,12 @@ gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
};
static const struct nla_policy
+pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 },
+};
+
+static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
@@ -1147,19 +1171,29 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
}
}
-static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
- u32 *flags_mask, struct netlink_ext_ack *extack)
+static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
+ bool encap, u32 *flags_key, u32 *flags_mask,
+ struct netlink_ext_ack *extack)
{
+ int fl_key, fl_mask;
u32 key, mask;
+ if (encap) {
+ fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+ } else {
+ fl_key = TCA_FLOWER_KEY_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+ }
+
/* mask is mandatory for flags */
- if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
+ if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
NL_SET_ERR_MSG(extack, "Missing flags mask");
return -EINVAL;
}
- key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
- mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
+ key = be32_to_cpu(nla_get_be32(tb[fl_key]));
+ mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));
*flags_key = 0;
*flags_mask = 0;
@@ -1170,6 +1204,21 @@ static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
FLOW_DIS_FIRST_FRAG);
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_CSUM);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
return 0;
}
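
fl_set_key_flags() now serves both the outer and the encapsulation flag attributes, but the translation step is unchanged: the mask is mandatory, and each supported flag is copied from the netlink encoding into the flow-dissector encoding only where the mask covers it. A small userspace sketch of that key/mask translation, with made-up bit values in place of the real TCA_FLOWER_KEY_FLAGS_*/FLOW_DIS_* constants:

#include <stdint.h>
#include <stdio.h>

#define ATTR_FRAG        (1u << 0)
#define ATTR_FIRSTFRAG   (1u << 1)
#define DIS_IS_FRAGMENT  (1u << 0)
#define DIS_FIRST_FRAG   (1u << 1)

/* Same shape as fl_set_key_flag(): only bits covered by the mask are
 * translated from the netlink encoding to the flow-dissector encoding.
 */
static void set_key_flag(uint32_t key, uint32_t mask,
			 uint32_t *out_key, uint32_t *out_mask,
			 uint32_t attr_bit, uint32_t dis_bit)
{
	if (mask & attr_bit) {
		*out_mask |= dis_bit;
		if (key & attr_bit)
			*out_key |= dis_bit;
	}
}

int main(void)
{
	uint32_t key = ATTR_FRAG, mask = ATTR_FRAG | ATTR_FIRSTFRAG;
	uint32_t out_key = 0, out_mask = 0;

	/* mask is mandatory: without it there is nothing to match on */
	if (!mask)
		return 1;

	set_key_flag(key, mask, &out_key, &out_mask, ATTR_FRAG, DIS_IS_FRAGMENT);
	set_key_flag(key, mask, &out_key, &out_mask, ATTR_FIRSTFRAG, DIS_FIRST_FRAG);

	printf("key=%#x mask=%#x\n", out_key, out_mask);
	return 0;
}
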
@@ -1320,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
int err;
md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
- memset(md, 0xff, sizeof(*md));
md->version = 1;
if (!depth)
@@ -1349,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
return -EINVAL;
}
+ memset(&md->u.index, 0xff, sizeof(md->u.index));
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
- memset(&md->u, 0x00, sizeof(md->u));
md->u.index = nla_get_be32(nla);
}
} else if (md->version == 2) {
@@ -1360,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
return -EINVAL;
}
+ md->u.md2.dir = 1;
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
md->u.md2.dir = nla_get_u8(nla);
}
+ set_hwid(&md->u.md2, 0xff);
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
set_hwid(&md->u.md2, nla_get_u8(nla));
@@ -1419,6 +1469,44 @@ static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
return sizeof(*sinfo);
}
+static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ int depth, int option_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
+ struct pfcp_metadata *md;
+ int err;
+
+ md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
+ memset(md, 0xff, sizeof(*md));
+
+ if (!depth)
+ return sizeof(*md);
+
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
+ NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
+ pfcp_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
+ md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
+ md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);
+
+ return sizeof(*md);
+}
+
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct fl_flow_key *mask,
struct netlink_ext_ack *extack)
@@ -1454,12 +1542,13 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
switch (nla_type(nla_opt_key)) {
case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
if (key->enc_opts.dst_opt_type &&
- key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
+ key->enc_opts.dst_opt_type !=
+ IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
return -EINVAL;
}
option_len = 0;
- key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+ key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
option_len = fl_set_geneve_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@@ -1470,7 +1559,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
- mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+ mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
option_len = fl_set_geneve_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@@ -1489,7 +1578,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
- key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+ key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
option_len = fl_set_vxlan_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@@ -1500,7 +1589,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
- mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+ mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@@ -1519,7 +1608,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
- key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+ key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
option_len = fl_set_erspan_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@@ -1530,7 +1619,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
- mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+ mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
option_len = fl_set_erspan_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@@ -1550,7 +1639,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
- key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+ key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
option_len = fl_set_gtp_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@@ -1561,7 +1650,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
- mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+ mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
option_len = fl_set_gtp_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@@ -1575,6 +1664,36 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
break;
+ case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
+ if (key->enc_opts.dst_opt_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
+ return -EINVAL;
+ }
+ option_len = 0;
+ key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
+ option_len = fl_set_pfcp_opt(nla_opt_key, key,
+ key_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ key->enc_opts.len += option_len;
+ /* At the same time we need to parse through the mask
+ * in order to verify exact and mask attribute lengths.
+ */
+ mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
+ option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
+ msk_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ mask->enc_opts.len += option_len;
+ if (key->enc_opts.len != mask->enc_opts.len) {
+ NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+ break;
default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL;
@@ -1748,9 +1867,9 @@ static int fl_set_key_cfm(struct nlattr **tb,
return 0;
}
-static int fl_set_key(struct net *net, struct nlattr **tb,
- struct fl_flow_key *key, struct fl_flow_key *mask,
- struct netlink_ext_ack *extack)
+static int fl_set_key(struct net *net, struct nlattr *tca_opts,
+ struct nlattr **tb, struct fl_flow_key *key,
+ struct fl_flow_key *mask, struct netlink_ext_ack *extack)
{
__be16 ethertype;
int ret = 0;
@@ -1982,9 +2101,18 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (ret)
return ret;
- if (tb[TCA_FLOWER_KEY_FLAGS])
- ret = fl_set_key_flags(tb, &key->control.flags,
+ if (tb[TCA_FLOWER_KEY_FLAGS]) {
+ ret = fl_set_key_flags(tca_opts, tb, false,
+ &key->control.flags,
&mask->control.flags, extack);
+ if (ret)
+ return ret;
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
+ ret = fl_set_key_flags(tca_opts, tb, true,
+ &key->enc_control.flags,
+ &mask->enc_control.flags, extack);
return ret;
}
@@ -2075,7 +2203,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
- FL_KEY_IS_MASKED(mask, enc_ipv6))
+ FL_KEY_IS_MASKED(mask, enc_ipv6) ||
+ FL_KEY_IS_MASKED(mask, enc_control))
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
enc_control);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -2233,6 +2362,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
{
struct cls_fl_head *head = fl_head_dereference(tp);
bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
+ struct nlattr *tca_opts = tca[TCA_OPTIONS];
struct cls_fl_filter *fold = *arg;
bool bound_to_filter = false;
struct cls_fl_filter *fnew;
@@ -2241,7 +2371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
bool in_ht;
int err;
- if (!tca[TCA_OPTIONS]) {
+ if (!tca_opts) {
err = -EINVAL;
goto errout_fold;
}
@@ -2259,7 +2389,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
- tca[TCA_OPTIONS], fl_policy, NULL);
+ tca_opts, fl_policy, NULL);
if (err < 0)
goto errout_tb;
@@ -2335,7 +2465,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
bound_to_filter = true;
}
- err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
+ err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
if (err)
goto unbind_filter;
@@ -2373,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, fnew->flags);
+
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
@@ -2675,18 +2807,19 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
struct nlattr **tca,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tca_opts = tca[TCA_OPTIONS];
struct fl_flow_tmplt *tmplt;
struct nlattr **tb;
int err;
- if (!tca[TCA_OPTIONS])
+ if (!tca_opts)
return ERR_PTR(-EINVAL);
tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
if (!tb)
return ERR_PTR(-ENOBUFS);
err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
- tca[TCA_OPTIONS], fl_policy, NULL);
+ tca_opts, fl_policy, NULL);
if (err)
goto errout_tb;
@@ -2696,7 +2829,8 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
goto errout_tb;
}
tmplt->chain = chain;
- err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
+ err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
+ &tmplt->mask, extack);
if (err)
goto errout_tmplt;
@@ -2972,12 +3106,22 @@ static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
}
}
-static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
+static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
+ u32 flags_key, u32 flags_mask)
{
- u32 key, mask;
+ int fl_key, fl_mask;
__be32 _key, _mask;
+ u32 key, mask;
int err;
+ if (encap) {
+ fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+ } else {
+ fl_key = TCA_FLOWER_KEY_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+ }
+
if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
return 0;
@@ -2990,14 +3134,29 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
FLOW_DIS_FIRST_FRAG);
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_CSUM);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
_key = cpu_to_be32(key);
_mask = cpu_to_be32(mask);
- err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
+ err = nla_put(skb, fl_key, 4, &_key);
if (err)
return err;
- return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
+ return nla_put(skb, fl_mask, 4, &_mask);
}
static int fl_dump_key_geneve_opt(struct sk_buff *skb,
@@ -3117,6 +3276,32 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
+ struct flow_dissector_key_enc_opts *enc_opts)
+{
+ struct pfcp_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
+ if (!nest)
+ goto nla_put_failure;
+
+ md = (struct pfcp_metadata *)&enc_opts->data[0];
+ if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
+ goto nla_put_failure;
+
+ if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
+ md->seid, 0))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int fl_dump_key_ct(struct sk_buff *skb,
struct flow_dissector_key_ct *key,
struct flow_dissector_key_ct *mask)
@@ -3202,26 +3387,31 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
goto nla_put_failure;
switch (enc_opts->dst_opt_type) {
- case TUNNEL_GENEVE_OPT:
+ case IP_TUNNEL_GENEVE_OPT_BIT:
err = fl_dump_key_geneve_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
- case TUNNEL_VXLAN_OPT:
+ case IP_TUNNEL_VXLAN_OPT_BIT:
err = fl_dump_key_vxlan_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
- case TUNNEL_ERSPAN_OPT:
+ case IP_TUNNEL_ERSPAN_OPT_BIT:
err = fl_dump_key_erspan_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
- case TUNNEL_GTP_OPT:
+ case IP_TUNNEL_GTP_OPT_BIT:
err = fl_dump_key_gtp_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
+ case IP_TUNNEL_PFCP_OPT_BIT:
+ err = fl_dump_key_pfcp_opt(skb, enc_opts);
+ if (err)
+ goto nla_put_failure;
+ break;
default:
goto nla_put_failure;
}
@@ -3473,7 +3663,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
goto nla_put_failure;
- if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
+ if (fl_dump_key_flags(skb, false, key->control.flags,
+ mask->control.flags))
goto nla_put_failure;
if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
@@ -3484,6 +3675,10 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
goto nla_put_failure;
+ if (fl_dump_key_flags(skb, true, key->enc_control.flags,
+ mask->enc_control.flags))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -3659,6 +3854,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
+MODULE_ALIAS_NET_CLS("flower");
static int __init cls_fl_init(void)
{
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index afc534ee0a18..cdddc8695228 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -433,6 +433,7 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
.bind_class = fw_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("fw");
static int __init init_fw(void)
{
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index c4ed11df6254..f03bf5da39ee 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
*arg = head;
rcu_assign_pointer(tp->root, new);
return 0;
@@ -398,6 +400,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("matchall");
static int __init cls_mall_init(void)
{
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 12a505db4183..b9c58c040c30 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -671,6 +671,7 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.bind_class = route4_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("route");
static int __init init_route4(void)
{
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 289e1755c26b..2a1c00048fd6 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -92,6 +92,16 @@ struct tc_u_common {
long knodes;
};
+static u32 handle2id(u32 h)
+{
+ return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h);
+}
+
+static u32 id2handle(u32 id)
+{
+ return (id | 0x800U) << 20;
+}
+
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
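The two new helpers above only centralise the existing handle layout: a root hash-table handle keeps bit 31 set and carries the 11-bit IDR id in bits 20-30. A small userspace sketch of the round trip (the constants mirror the helpers; nothing here is new kernel API):

#include <stdint.h>
#include <stdio.h>

static uint32_t id2handle(uint32_t id)  { return (id | 0x800U) << 20; }
static uint32_t handle2id(uint32_t h)   { return (h & 0x80000000) ? ((h >> 20) & 0x7FF) : h; }

int main(void)
{
	/* id 0 is the implicit root table: 0x800 << 20 == 0x80000000 */
	printf("id2handle(0) = 0x%08x\n", id2handle(0));
	/* an IDR-allocated id such as 5 lands in bits 20-30 and comes back intact */
	printf("id2handle(5) = 0x%08x, back to id %u\n",
	       id2handle(5), handle2id(id2handle(5)));
	return 0;
}

That round trip is why the hunks below pass handle2id(handle) to idr_remove(): the IDR stores the small id, not the shifted handle.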
@@ -310,7 +320,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
if (id < 0)
return 0;
- return (id | 0x800U) << 20;
+ return id2handle(id);
}
static struct hlist_head *tc_u_common_hash;
@@ -360,7 +370,7 @@ static int u32_init(struct tcf_proto *tp)
return -ENOBUFS;
refcount_set(&root_ht->refcnt, 1);
- root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
+ root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0);
root_ht->prio = tp->prio;
root_ht->is_root = true;
idr_init(&root_ht->handle_idr);
@@ -612,7 +622,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
- idr_remove(&tp_c->handle_idr, ht->handle);
+ idr_remove(&tp_c->handle_idr, handle2id(ht->handle));
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
@@ -941,6 +951,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
tcf_exts_get_net(&n->exts);
@@ -989,7 +1001,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_replace_hw_hnode(tp, ht, userflags, extack);
if (err) {
- idr_remove(&tp_c->handle_idr, handle);
+ idr_remove(&tp_c->handle_idr, handle2id(handle));
kfree(ht);
return err;
}
@@ -1154,6 +1166,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(n->flags))
n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, n->flags);
+
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))
@@ -1453,6 +1467,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("u32");
static int __init init_u32(void)
{
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index c90ad7ea26b4..64b637f18bc7 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tc_ematch/tc_em_cmp.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/pkt_cls.h>
static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 36b025cc4fd2..6c625dcd0651 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -228,7 +228,7 @@ int qdisc_set_default(const char *name)
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
@@ -593,7 +593,6 @@ out:
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
-EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
@@ -792,7 +791,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
drops = max_t(int, n, 0);
rcu_read_lock();
while ((parentid = sch->parent)) {
- if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+ if (parentid == TC_H_ROOT)
break;
if (sch->flags & TCQ_F_NOPARENT)
@@ -809,7 +808,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
!qdisc_is_offloaded);
/* TODO: perform the search on a per txq basis */
- sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
+ sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
WARN_ON_ONCE(parentid != TC_H_ROOT);
break;
@@ -1201,6 +1200,12 @@ skip:
return -EINVAL;
}
+ if (new &&
+ !(parent->flags & TCQ_F_MQROOT) &&
+ rcu_access_pointer(new->stab)) {
+ NL_SET_ERR_MSG(extack, "STAB not supported on a non root");
+ return -EINVAL;
+ }
err = cops->graft(parent, cl, new, &old, extack);
if (err)
return err;
@@ -1275,7 +1280,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
* go away in the mean time.
*/
rtnl_unlock();
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
rtnl_lock();
ops = qdisc_lookup_ops(kind);
if (ops != NULL) {
@@ -1334,7 +1339,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
* before again attaching a qdisc.
*/
if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
- dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN);
netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
}
@@ -1389,6 +1394,7 @@ err_out4:
ops->destroy(sch);
qdisc_put_stab(rtnl_dereference(sch->stab));
err_out3:
+ lockdep_unregister_key(&sch->root_lock_key);
netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
@@ -1554,7 +1560,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
@@ -1658,13 +1664,17 @@ replay:
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
+ if (q->parent != tcm->tcm_parent) {
+ NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
+ return -EINVAL;
+ }
if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
return -EEXIST;
}
if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
if (q->flags & TCQ_F_INGRESS) {
@@ -1740,7 +1750,7 @@ replay:
return -EEXIST;
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
err = qdisc_change(q, tca, extack);
@@ -2244,6 +2254,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
return -EOPNOTSUPP;
}
+ /* Prevent creation of traffic classes with classid TC_H_ROOT */
+ if (clid == TC_H_ROOT) {
+ NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT");
+ return -EINVAL;
+ }
+
new_cl = cl;
err = -EOPNOTSUPP;
if (cops->change)
@@ -2410,10 +2426,21 @@ static struct pernet_operations psched_net_ops = {
.exit = psched_net_exit,
};
-#if IS_ENABLED(CONFIG_RETPOLINE)
+#if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif
+static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc},
+ {.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc},
+ {.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc,
+ .dumpit = tc_dump_qdisc},
+ {.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass,
+ .dumpit = tc_dump_tclass},
+};
+
static int __init pktsched_init(void)
{
int err;
@@ -2432,14 +2459,7 @@ static int __init pktsched_init(void)
register_qdisc(&mq_qdisc_ops);
register_qdisc(&noqueue_qdisc_ops);
- rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
- 0);
- rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
- 0);
+ rtnl_register_many(psched_rtnl_msg_handlers);
tc_wrapper_init();
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9cff99558694..48dd8c88903f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -361,8 +361,24 @@ static const u8 besteffort[] = {
static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};
+/* There is a big difference in timing between the accurate values placed in the
+ * cache and the approximations given by a single Newton step for small count
+ * values, particularly when stepping from count 1 to 2 or vice versa. Hence,
+ * these values are calculated using eight Newton steps, using the
+ * implementation below. Above 16, a single Newton step gives sufficient
+ * accuracy in either direction, given the precision stored.
+ *
+ * The magnitude of the error when stepping up to count 2 is such as to give the
+ * value that *should* have been produced at count 4.
+ */
+
#define REC_INV_SQRT_CACHE (16)
-static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
+static const u32 inv_sqrt_cache[REC_INV_SQRT_CACHE] = {
+ ~0, ~0, 3037000500, 2479700525,
+ 2147483647, 1920767767, 1753413056, 1623345051,
+ 1518500250, 1431655765, 1358187914, 1294981364,
+ 1239850263, 1191209601, 1147878294, 1108955788
+};
/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
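As a rough cross-check of the table above (not part of the patch): rec_inv_sqrt holds 1/sqrt(count) in unsigned Q0.32 fixed point, so each cached entry should sit within a couple of LSBs of 2^32/sqrt(count); the small residual comes from the table being generated by the fixed-point Newton iteration rather than by exact rounding. A minimal userspace sketch:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* entries 0 and 1 saturate at ~0U; the rest approximate 2^32/sqrt(count) */
	for (unsigned int count = 2; count < 16; count++) {
		double v = 4294967296.0 / sqrt((double)count);
		printf("count=%2u  ~%10.0f\n", count, v);
	}
	return 0;
}

For count 2 this prints ~3037000500, matching the cached value exactly; a few other entries differ by one or two LSBs, as expected from the iteration.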
@@ -388,47 +404,14 @@ static void cobalt_newton_step(struct cobalt_vars *vars)
static void cobalt_invsqrt(struct cobalt_vars *vars)
{
if (vars->count < REC_INV_SQRT_CACHE)
- vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
+ vars->rec_inv_sqrt = inv_sqrt_cache[vars->count];
else
cobalt_newton_step(vars);
}
-/* There is a big difference in timing between the accurate values placed in
- * the cache and the approximations given by a single Newton step for small
- * count values, particularly when stepping from count 1 to 2 or vice versa.
- * Above 16, a single Newton step gives sufficient accuracy in either
- * direction, given the precision stored.
- *
- * The magnitude of the error when stepping up to count 2 is such as to give
- * the value that *should* have been produced at count 4.
- */
-
-static void cobalt_cache_init(void)
-{
- struct cobalt_vars v;
-
- memset(&v, 0, sizeof(v));
- v.rec_inv_sqrt = ~0U;
- cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
-
- for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
-
- cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
- }
-}
-
static void cobalt_vars_init(struct cobalt_vars *vars)
{
memset(vars, 0, sizeof(*vars));
-
- if (!cobalt_rec_inv_sqrt_cache[0]) {
- cobalt_cache_init();
- cobalt_rec_inv_sqrt_cache[0] = ~0;
- }
}
/* CoDel control_law is t + interval/sqrt(count)
@@ -501,13 +484,14 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
/* Call this with a freshly dequeued packet for possible congestion marking.
* Returns true as an instruction to drop the packet, false for delivery.
*/
-static bool cobalt_should_drop(struct cobalt_vars *vars,
- struct cobalt_params *p,
- ktime_t now,
- struct sk_buff *skb,
- u32 bulk_flows)
-{
- bool next_due, over_target, drop = false;
+static enum skb_drop_reason cobalt_should_drop(struct cobalt_vars *vars,
+ struct cobalt_params *p,
+ ktime_t now,
+ struct sk_buff *skb,
+ u32 bulk_flows)
+{
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
+ bool next_due, over_target;
ktime_t schedule;
u64 sojourn;
@@ -550,7 +534,8 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
if (next_due && vars->dropping) {
/* Use ECN mark if possible, otherwise drop */
- drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
+ if (!(vars->ecn_marked = INET_ECN_set_ce(skb)))
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
vars->count++;
if (!vars->count)
@@ -573,16 +558,17 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
}
/* Simple BLUE implementation. Lack of ECN is deliberate. */
- if (vars->p_drop)
- drop |= (get_random_u32() < vars->p_drop);
+ if (vars->p_drop && reason == SKB_NOT_DROPPED_YET &&
+ get_random_u32() < vars->p_drop)
+ reason = SKB_DROP_REASON_CAKE_FLOOD;
/* Overload the drop_next field as an activity timeout */
if (!vars->count)
vars->drop_next = ktime_add_ns(now, p->interval);
- else if (ktime_to_ns(schedule) > 0 && !drop)
+ else if (ktime_to_ns(schedule) > 0 && reason == SKB_NOT_DROPPED_YET)
vars->drop_next = now;
- return drop;
+ return reason;
}
static bool cake_update_flowkeys(struct flow_keys *keys,
@@ -644,6 +630,63 @@ static bool cake_ddst(int flow_mode)
return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count))
+ q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ u16 host_load = 1;
+
+ if (cake_dsrc(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+ if (cake_ddst(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+ /* The get_random_u16() is a way to apply dithering to avoid
+ * accumulating roundoff errors
+ */
+ return (q->flow_quantum * quantum_div[host_load] +
+ get_random_u16()) >> 16;
+}
+
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
int flow_mode, u16 flow_override, u16 host_override)
{
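The dithering comment in cake_get_flow_quantum() above is worth unpacking: quantum_div[] holds roughly 2^16 / host_load, so the product is a Q16 fraction of the flow quantum, and adding a uniform 16-bit random value before the >> 16 turns plain truncation into stochastic rounding whose long-run average is the exact quotient. A small sketch of the effect, using rand() as a crude stand-in for get_random_u16():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const uint32_t quantum = 1514, host_load = 3;
	const uint32_t qdiv = 65535 / host_load;	/* ~2^16 / host_load */
	const int rounds = 1000000;
	uint64_t sum = 0;

	srand(1);
	for (int i = 0; i < rounds; i++)
		sum += (quantum * qdiv + (rand() & 0xffff)) >> 16;

	printf("truncated: %u, dithered average: %.2f, exact: %.2f\n",
	       (quantum * qdiv) >> 16, (double)sum / rounds,
	       (double)quantum / host_load);
	return 0;
}

The truncated result is 504, while the dithered average converges on ~504.7, the true per-host share of the quantum.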
@@ -786,12 +829,13 @@ skip_hash:
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
- if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
- }
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
+
+ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+ cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ }
found:
/* reserve queue for future packets in same flow */
reduced_hash = outer_hash + k;
@@ -815,9 +859,10 @@ found:
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
srchost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[srchost_idx].srchost_bulk_flow_count++;
q->flows[reduced_hash].srchost = srchost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
if (allocate_dst) {
@@ -838,9 +883,10 @@ found_src:
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
dsthost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q->flows[reduced_hash].dsthost = dsthost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
}
@@ -1512,7 +1558,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
if (!q->overflow_timeout) {
int i;
/* Build fresh max-heap */
- for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
+ for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--)
cake_heapify(q, i);
}
q->overflow_timeout = 65535;
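The loop-bound change above follows from the zero-based binary-heap layout: with n = CAKE_MAX_TINS * CAKE_QUEUES slots (8 * 1024 in the current code), node i's children sit at 2i+1 and 2i+2, so the highest index that can have a child is n/2 - 1 and the bottom-up rebuild need not start any higher. A tiny sketch of the arithmetic (constants only; the heapify itself is unchanged):

#include <stdio.h>

int main(void)
{
	const int n = 8 * 1024;	/* CAKE_MAX_TINS * CAKE_QUEUES */

	printf("node %d: left child %d -> still a parent\n",
	       n / 2 - 1, 2 * (n / 2 - 1) + 1);		/* 8191 < 8192 */
	printf("node %d: left child %d -> already a leaf\n",
	       n / 2, 2 * (n / 2) + 1);			/* 8193 >= 8192 */
	return 0;
}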
@@ -1539,17 +1585,16 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
b->backlogs[idx] -= len;
b->tin_backlog -= len;
sch->qstats.backlog -= len;
- qdisc_tree_reduce_backlog(sch, 1, len);
flow->dropped++;
b->tin_dropped++;
- sch->qstats.drops++;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
- __qdisc_drop(skb, to_free);
+ qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
@@ -1853,10 +1898,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
- u16 host_load = 1;
-
if (!flow->set) {
list_add_tail(&flow->flowchain, &b->new_flows);
} else {
@@ -1866,18 +1907,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow->set = CAKE_SET_SPARSE;
b->sparse_flow_count++;
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- flow->deficit = (b->flow_quantum *
- quantum_div[host_load]) >> 16;
+ flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
*/
@@ -1885,12 +1916,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
-
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
}
if (q->buffer_used > q->buffer_max_used)
@@ -1940,20 +1967,19 @@ static void cake_clear_tin(struct Qdisc *sch, u16 tin)
q->cur_tin = tin;
for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
while (!!(skb = cake_dequeue_one(sch)))
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QUEUE_PURGE);
}
static struct sk_buff *cake_dequeue(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[q->cur_tin];
- struct cake_host *srchost, *dsthost;
+ enum skb_drop_reason reason;
ktime_t now = ktime_get();
struct cake_flow *flow;
struct list_head *head;
bool first_flow = true;
struct sk_buff *skb;
- u16 host_load;
u64 delay;
u32 len;
@@ -2053,11 +2079,6 @@ retry:
q->cur_flow = flow - b->flows;
first_flow = false;
- /* triple isolation (modified DRR++) */
- srchost = &b->hosts[flow->srchost];
- dsthost = &b->hosts[flow->dsthost];
- host_load = 1;
-
/* flow isolation (DRR++) */
if (flow->deficit <= 0) {
/* Keep all flows with deficits out of the sparse and decaying
@@ -2069,11 +2090,8 @@ retry:
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
flow->set = CAKE_SET_BULK;
} else {
@@ -2085,19 +2103,7 @@ retry:
}
}
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- WARN_ON(host_load > CAKE_QUEUES);
-
- /* The get_random_u16() is a way to apply dithering to avoid
- * accumulating roundoff errors
- */
- flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- get_random_u16()) >> 16;
+ flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
@@ -2121,11 +2127,8 @@ retry:
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2143,12 +2146,8 @@ retry:
else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
-
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
} else
b->decaying_flow_count--;
@@ -2157,12 +2156,12 @@ retry:
goto begin;
}
+ reason = cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+ (b->bulk_flow_count *
+ !!(q->rate_flags &
+ CAKE_FLAG_INGRESS)));
/* Last packet in queue may be marked, shouldn't be dropped */
- if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
- (b->bulk_flow_count *
- !!(q->rate_flags &
- CAKE_FLAG_INGRESS))) ||
- !flow->head)
+ if (reason == SKB_NOT_DROPPED_YET || !flow->head)
break;
/* drop this packet, get another one */
@@ -2176,7 +2175,7 @@ retry:
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
@@ -2572,6 +2571,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
{
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CAKE_MAX + 1];
+ u16 rate_flags;
+ u8 flow_mode;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
@@ -2579,10 +2580,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
+ flow_mode = q->flow_mode;
if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
- q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+ flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+ flow_mode |= CAKE_FLOW_NAT_FLAG *
!!nla_get_u32(tb[TCA_CAKE_NAT]);
#else
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
@@ -2592,29 +2594,34 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_CAKE_BASE_RATE64])
- q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
+ WRITE_ONCE(q->rate_bps,
+ nla_get_u64(tb[TCA_CAKE_BASE_RATE64]));
if (tb[TCA_CAKE_DIFFSERV_MODE])
- q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
+ WRITE_ONCE(q->tin_mode,
+ nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]));
+ rate_flags = q->rate_flags;
if (tb[TCA_CAKE_WASH]) {
if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
- q->rate_flags |= CAKE_FLAG_WASH;
+ rate_flags |= CAKE_FLAG_WASH;
else
- q->rate_flags &= ~CAKE_FLAG_WASH;
+ rate_flags &= ~CAKE_FLAG_WASH;
}
if (tb[TCA_CAKE_FLOW_MODE])
- q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+ flow_mode = ((flow_mode & CAKE_FLOW_NAT_FLAG) |
(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK));
if (tb[TCA_CAKE_ATM])
- q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
+ WRITE_ONCE(q->atm_mode,
+ nla_get_u32(tb[TCA_CAKE_ATM]));
if (tb[TCA_CAKE_OVERHEAD]) {
- q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
- q->rate_flags |= CAKE_FLAG_OVERHEAD;
+ WRITE_ONCE(q->rate_overhead,
+ nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
+ rate_flags |= CAKE_FLAG_OVERHEAD;
q->max_netlen = 0;
q->max_adjlen = 0;
@@ -2623,7 +2630,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_CAKE_RAW]) {
- q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
+ rate_flags &= ~CAKE_FLAG_OVERHEAD;
q->max_netlen = 0;
q->max_adjlen = 0;
@@ -2632,54 +2639,58 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_CAKE_MPU])
- q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
+ WRITE_ONCE(q->rate_mpu,
+ nla_get_u32(tb[TCA_CAKE_MPU]));
if (tb[TCA_CAKE_RTT]) {
- q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
+ u32 interval = nla_get_u32(tb[TCA_CAKE_RTT]);
- if (!q->interval)
- q->interval = 1;
+ WRITE_ONCE(q->interval, max(interval, 1U));
}
if (tb[TCA_CAKE_TARGET]) {
- q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
+ u32 target = nla_get_u32(tb[TCA_CAKE_TARGET]);
- if (!q->target)
- q->target = 1;
+ WRITE_ONCE(q->target, max(target, 1U));
}
if (tb[TCA_CAKE_AUTORATE]) {
if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
- q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+ rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
else
- q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+ rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
}
if (tb[TCA_CAKE_INGRESS]) {
if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
- q->rate_flags |= CAKE_FLAG_INGRESS;
+ rate_flags |= CAKE_FLAG_INGRESS;
else
- q->rate_flags &= ~CAKE_FLAG_INGRESS;
+ rate_flags &= ~CAKE_FLAG_INGRESS;
}
if (tb[TCA_CAKE_ACK_FILTER])
- q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+ WRITE_ONCE(q->ack_filter,
+ nla_get_u32(tb[TCA_CAKE_ACK_FILTER]));
if (tb[TCA_CAKE_MEMORY])
- q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
+ WRITE_ONCE(q->buffer_config_limit,
+ nla_get_u32(tb[TCA_CAKE_MEMORY]));
if (tb[TCA_CAKE_SPLIT_GSO]) {
if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
- q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+ rate_flags |= CAKE_FLAG_SPLIT_GSO;
else
- q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+ rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
}
if (tb[TCA_CAKE_FWMARK]) {
- q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
- q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+ WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK]));
+ WRITE_ONCE(q->fwmark_shft,
+ q->fwmark_mask ? __ffs(q->fwmark_mask) : 0);
}
+ WRITE_ONCE(q->rate_flags, rate_flags);
+ WRITE_ONCE(q->flow_mode, flow_mode);
if (q->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
@@ -2774,68 +2785,72 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *opts;
+ u16 rate_flags;
+ u8 flow_mode;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!opts)
goto nla_put_failure;
- if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
- TCA_CAKE_PAD))
+ if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64,
+ READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
- q->flow_mode & CAKE_FLOW_MASK))
+ flow_mode = READ_ONCE(q->flow_mode);
+ if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, flow_mode & CAKE_FLOW_MASK))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
+ if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
+ if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
+ if (nla_put_u32(skb, TCA_CAKE_MEMORY,
+ READ_ONCE(q->buffer_config_limit)))
goto nla_put_failure;
+ rate_flags = READ_ONCE(q->rate_flags);
if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
- !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+ !!(rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_INGRESS,
- !!(q->rate_flags & CAKE_FLAG_INGRESS)))
+ !!(rate_flags & CAKE_FLAG_INGRESS)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+ if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_NAT,
- !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+ !!(flow_mode & CAKE_FLOW_NAT_FLAG)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
+ if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_WASH,
- !!(q->rate_flags & CAKE_FLAG_WASH)))
+ !!(rate_flags & CAKE_FLAG_WASH)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
+ if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead)))
goto nla_put_failure;
- if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
+ if (!(rate_flags & CAKE_FLAG_OVERHEAD))
if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
+ if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
+ if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
- !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+ !!(rate_flags & CAKE_FLAG_SPLIT_GSO)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+ if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -3103,6 +3118,7 @@ static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
.dump_stats = cake_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cake");
static int __init cake_module_init(void)
{
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index beece8e82c23..8c9a0400c862 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
- int port_rate;
+ s64 port_rate;
int err;
err = __ethtool_get_link_ksettings(dev, &ecmd);
@@ -389,11 +389,11 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
}
/* Everything went OK, save the parameters used. */
- q->hicredit = qopt->hicredit;
- q->locredit = qopt->locredit;
- q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
- q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
- q->offload = qopt->offload;
+ WRITE_ONCE(q->hicredit, qopt->hicredit);
+ WRITE_ONCE(q->locredit, qopt->locredit);
+ WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT);
+ WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT);
+ WRITE_ONCE(q->offload, qopt->offload);
return 0;
}
@@ -459,11 +459,11 @@ static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
if (!nest)
goto nla_put_failure;
- opt.hicredit = q->hicredit;
- opt.locredit = q->locredit;
- opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
- opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
- opt.offload = q->offload;
+ opt.hicredit = READ_ONCE(q->hicredit);
+ opt.locredit = READ_ONCE(q->locredit);
+ opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT);
+ opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT);
+ opt.offload = READ_ONCE(q->offload);
if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
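The cbs_change()/cbs_dump() pairing above is the same annotation pattern applied throughout this series (cake, choke, codel, ets, fifo, fq, ...): parameters that a dump may now read without taking the qdisc tree lock are written with WRITE_ONCE() and read with READ_ONCE(), so a concurrent reader sees either the old or the new value but never a torn one. A stripped-down, kernel-style fragment of the idea, with made-up names standing in for a qdisc's private data:

/* illustrative only; "cfg" is a stand-in for qdisc_priv(sch) */
struct cfg {
	int hicredit;
};

static void change(struct cfg *c, int new_hicredit)
{
	/* writer (change path), still serialised by RTNL */
	WRITE_ONCE(c->hicredit, new_hicredit);
}

static int dump(const struct cfg *c)
{
	/* reader (dump path), may run without the qdisc tree lock */
	return READ_ONCE(c->hicredit);
}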
@@ -546,6 +546,7 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
.dump = cbs_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cbs");
static struct notifier_block cbs_device_notifier = {
.notifier_call = cbs_dev_notifier,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ae1da08e268f..59e7bdf5063e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
if (idx == q->tail)
choke_zap_tail_holes(q);
+ --sch->q.qlen;
qdisc_qstats_backlog_dec(sch, skb);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch, to_free);
- --sch->q.qlen;
}
struct choke_skb_cb {
@@ -356,7 +356,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
tb[TCA_CHOKE_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_CHOKE_MAX_P], 0);
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
stab = nla_data(tb[TCA_CHOKE_STAB]);
@@ -405,8 +405,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
} else
sch_tree_lock(sch);
- q->flags = ctl->flags;
- q->limit = ctl->limit;
+ WRITE_ONCE(q->flags, ctl->flags);
+ WRITE_ONCE(q->limit, ctl->limit);
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
@@ -431,15 +431,16 @@ static int choke_init(struct Qdisc *sch, struct nlattr *opt,
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct choke_sched_data *q = qdisc_priv(sch);
+ u8 Wlog = READ_ONCE(q->parms.Wlog);
struct nlattr *opts = NULL;
struct tc_red_qopt opt = {
- .limit = q->limit,
- .flags = q->flags,
- .qth_min = q->parms.qth_min >> q->parms.Wlog,
- .qth_max = q->parms.qth_max >> q->parms.Wlog,
- .Wlog = q->parms.Wlog,
- .Plog = q->parms.Plog,
- .Scell_log = q->parms.Scell_log,
+ .limit = READ_ONCE(q->limit),
+ .flags = READ_ONCE(q->flags),
+ .qth_min = READ_ONCE(q->parms.qth_min) >> Wlog,
+ .qth_max = READ_ONCE(q->parms.qth_max) >> Wlog,
+ .Wlog = Wlog,
+ .Plog = READ_ONCE(q->parms.Plog),
+ .Scell_log = READ_ONCE(q->parms.Scell_log),
};
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -447,7 +448,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
- nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+ nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -498,6 +499,7 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.dump_stats = choke_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("choke");
static int __init choke_module_init(void)
{
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index d7a4874543de..81189d02fee7 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Codel - The Controlled-Delay Active Queue Management algorithm
*
@@ -7,37 +8,6 @@
* Implemented on linux by :
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
* Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the authors may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, provided that this notice is retained in full, this
- * software may be distributed under the terms of the GNU General
- * Public License ("GPL") version 2, in which case the provisions of the
- * GPL apply INSTEAD OF those given above.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
*/
#include <linux/module.h>
@@ -82,7 +52,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -119,7 +89,8 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
q = qdisc_priv(sch);
q->drop_overlimit++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_OVERLIMIT);
}
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
@@ -148,26 +119,31 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_CODEL_TARGET]) {
u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
- q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->params.target,
+ ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_CODEL_CE_THRESHOLD]) {
u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
- q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->params.ce_threshold,
+ (val * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_CODEL_INTERVAL]) {
u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
- q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->params.interval,
+ ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_CODEL_LIMIT])
- sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+ WRITE_ONCE(sch->limit,
+ nla_get_u32(tb[TCA_CODEL_LIMIT]));
if (tb[TCA_CODEL_ECN])
- q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+ WRITE_ONCE(q->params.ecn,
+ !!nla_get_u32(tb[TCA_CODEL_ECN]));
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
@@ -213,6 +189,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt,
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct codel_sched_data *q = qdisc_priv(sch);
+ codel_time_t ce_threshold;
struct nlattr *opts;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -220,17 +197,18 @@ static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CODEL_TARGET,
- codel_time_to_us(q->params.target)) ||
+ codel_time_to_us(READ_ONCE(q->params.target))) ||
nla_put_u32(skb, TCA_CODEL_LIMIT,
- sch->limit) ||
+ READ_ONCE(sch->limit)) ||
nla_put_u32(skb, TCA_CODEL_INTERVAL,
- codel_time_to_us(q->params.interval)) ||
+ codel_time_to_us(READ_ONCE(q->params.interval))) ||
nla_put_u32(skb, TCA_CODEL_ECN,
- q->params.ecn))
+ READ_ONCE(q->params.ecn)))
goto nla_put_failure;
- if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+ ce_threshold = READ_ONCE(q->params.ce_threshold);
+ if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
- codel_time_to_us(q->params.ce_threshold)))
+ codel_time_to_us(ce_threshold)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -287,6 +265,7 @@ static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
.dump_stats = codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("codel");
static int __init codel_module_init(void)
{
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 097740a9afea..c69b999fae17 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -481,6 +481,7 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.destroy = drr_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("drr");
static int __init drr_init(void)
{
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 4808159a5466..c74d778c32a1 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -467,15 +467,15 @@ static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
if (!nest)
goto nla_put_failure;
- opt.delta = q->delta;
- opt.clockid = q->clockid;
- if (q->offload)
+ opt.delta = READ_ONCE(q->delta);
+ opt.clockid = READ_ONCE(q->clockid);
+ if (READ_ONCE(q->offload))
opt.flags |= TC_ETF_OFFLOAD_ON;
- if (q->deadline_mode)
+ if (READ_ONCE(q->deadline_mode))
opt.flags |= TC_ETF_DEADLINE_MODE_ON;
- if (q->skip_sock_check)
+ if (READ_ONCE(q->skip_sock_check))
opt.flags |= TC_ETF_SKIP_SOCK_CHECK;
if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
@@ -500,6 +500,7 @@ static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
.dump = etf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("etf");
static int __init etf_module_init(void)
{
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f7c88495946b..516038a44163 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -91,6 +91,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
struct ets_sched *q = qdisc_priv(sch);
+ if (arg == 0 || arg > q->nbands)
+ return NULL;
return &q->classes[arg - 1];
}
@@ -646,7 +648,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
- q->nbands = nbands;
+ WRITE_ONCE(q->nbands, nbands);
for (i = nstrict; i < q->nstrict; i++) {
if (q->classes[i].qdisc->q.qlen) {
list_add_tail(&q->classes[i].alist, &q->active);
@@ -658,11 +660,11 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
list_del(&q->classes[i].alist);
qdisc_tree_flush_backlog(q->classes[i].qdisc);
}
- q->nstrict = nstrict;
+ WRITE_ONCE(q->nstrict, nstrict);
memcpy(q->prio2band, priomap, sizeof(priomap));
for (i = 0; i < q->nbands; i++)
- q->classes[i].quantum = quanta[i];
+ WRITE_ONCE(q->classes[i].quantum, quanta[i]);
for (i = oldbands; i < q->nbands; i++) {
q->classes[i].qdisc = queues[i];
@@ -676,7 +678,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
for (i = q->nbands; i < oldbands; i++) {
qdisc_put(q->classes[i].qdisc);
q->classes[i].qdisc = NULL;
- q->classes[i].quantum = 0;
+ WRITE_ONCE(q->classes[i].quantum, 0);
q->classes[i].deficit = 0;
gnet_stats_basic_sync_init(&q->classes[i].bstats);
memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
@@ -733,6 +735,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
struct ets_sched *q = qdisc_priv(sch);
struct nlattr *opts;
struct nlattr *nest;
+ u8 nbands, nstrict;
int band;
int prio;
int err;
@@ -745,21 +748,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
if (!opts)
goto nla_err;
- if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
+ nbands = READ_ONCE(q->nbands);
+ if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands))
goto nla_err;
- if (q->nstrict &&
- nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
+ nstrict = READ_ONCE(q->nstrict);
+ if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict))
goto nla_err;
- if (q->nbands > q->nstrict) {
+ if (nbands > nstrict) {
nest = nla_nest_start(skb, TCA_ETS_QUANTA);
if (!nest)
goto nla_err;
- for (band = q->nstrict; band < q->nbands; band++) {
+ for (band = nstrict; band < nbands; band++) {
if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
- q->classes[band].quantum))
+ READ_ONCE(q->classes[band].quantum)))
goto nla_err;
}
@@ -771,7 +775,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_err;
for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
- if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
+ if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND,
+ READ_ONCE(q->prio2band[prio])))
goto nla_err;
}
@@ -812,6 +817,7 @@ static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
.dump = ets_qdisc_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ets");
static int __init ets_init(void)
{
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 450f5c67ac49..e6bfd39ff339 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,7 +19,8 @@
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
+ if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
+ READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
@@ -28,7 +29,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- if (likely(sch->q.qlen < sch->limit))
+ if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
@@ -39,7 +40,10 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
- if (likely(sch->q.qlen < sch->limit))
+ if (unlikely(READ_ONCE(sch->limit) == 0))
+ return qdisc_drop(skb, sch, to_free);
+
+ if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch);
prev_backlog = sch->qstats.backlog;
@@ -105,14 +109,14 @@ static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
- sch->limit = limit;
+ WRITE_ONCE(sch->limit, limit);
} else {
struct tc_fifo_qopt *ctl = nla_data(opt);
if (nla_len(opt) < sizeof(*ctl))
return -EINVAL;
- sch->limit = ctl->limit;
+ WRITE_ONCE(sch->limit, ctl->limit);
}
if (is_bfifo)
@@ -154,7 +158,7 @@ static void fifo_destroy(struct Qdisc *sch)
static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct tc_fifo_qopt opt = { .limit = sch->limit };
+ struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3a31c47fea9b..2ca5332cfcc5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -106,9 +106,12 @@ struct fq_perband_flows {
int quantum; /* based on band nr : 576KB, 192KB, 64KB */
};
+#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)
+
struct fq_sched_data {
/* Read mostly cache line */
+ u64 offload_horizon;
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
@@ -122,7 +125,7 @@ struct fq_sched_data {
u8 rate_enable;
u8 fq_trees_log;
u8 horizon_drop;
- u8 prio2band[(TC_PRIO_MAX + 1) >> 2];
+ u8 prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
u32 timer_slack; /* hrtimer slack in ns */
/* Read/Write fields. */
@@ -159,7 +162,7 @@ struct fq_sched_data {
/* return the i-th 2-bit value ("crumb") */
static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
{
- return (prio2band[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
+ return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
}
/*
@@ -297,7 +300,7 @@ static void fq_gc(struct fq_sched_data *q,
}
/* Fast path can be used if :
- * 1) Packet tstamp is in the past.
+ * 1) Packet tstamp is in the past, or within the pacing offload horizon.
* 2) FQ qlen == 0 OR
* (no flow is currently eligible for transmit,
* AND fast path queue has less than 8 packets)
@@ -312,7 +315,7 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
const struct fq_sched_data *q = qdisc_priv(sch);
const struct sock *sk;
- if (fq_skb_cb(skb)->time_to_send > now)
+ if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
return false;
if (sch->q.qlen != 0) {
@@ -329,6 +332,12 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
*/
if (q->internal.qlen >= 8)
return false;
+
+ /* Ordering invariants fall apart if some delayed flows
+ * are ready but we haven't serviced them, yet.
+ */
+ if (q->time_next_delayed_flow <= now + q->offload_horizon)
+ return false;
}
sk = skb->sk;
@@ -359,8 +368,9 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
* 3) We do not want to rate limit them (eg SYNFLOOD attack),
* especially if the listener set SO_MAX_PACING_RATE
* 4) We pretend they are orphaned
+ * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
*/
- if (!sk || sk_listener(sk)) {
+ if (!sk || sk_listener_or_tw(sk)) {
unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
/* By forcing low order bit to 1, we make sure to not
@@ -527,6 +537,8 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
}
+#define FQDR(reason) SKB_DROP_REASON_FQ_##reason
+
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -538,7 +550,8 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
q->stat_band_drops[band]++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(BAND_LIMIT));
}
now = ktime_get_ns();
@@ -548,8 +561,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Check if packet timestamp is too far in the future. */
if (fq_packet_beyond_horizon(skb, q, now)) {
if (q->horizon_drop) {
- q->stat_horizon_drops++;
- return qdisc_drop(skb, sch, to_free);
+ q->stat_horizon_drops++;
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(HORIZON_LIMIT));
}
q->stat_horizon_caps++;
skb->tstamp = now + q->horizon;
@@ -562,7 +576,8 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (f != &q->internal) {
if (unlikely(f->qlen >= q->flow_plimit)) {
q->stat_flows_plimit++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(FLOW_LIMIT));
}
if (fq_flow_is_detached(f)) {
@@ -587,21 +602,25 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_SUCCESS;
}
+#undef FQDR
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
unsigned long sample;
struct rb_node *p;
- if (q->time_next_delayed_flow > now)
+ if (q->time_next_delayed_flow > now + q->offload_horizon)
return;
/* Update unthrottle latency EWMA.
* This is cheap and can help diagnosing timer/latency problems.
*/
sample = (unsigned long)(now - q->time_next_delayed_flow);
- q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
- q->unthrottle_latency_ns += sample >> 3;
+ if ((long)sample > 0) {
+ q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
+ q->unthrottle_latency_ns += sample >> 3;
+ }
+ now += q->offload_horizon;
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
@@ -661,7 +680,9 @@ begin:
pband = &q->band_flows[q->band_nr];
pband->credit = min(pband->credit + pband->quantum,
pband->quantum);
- goto begin;
+ if (pband->credit > 0)
+ goto begin;
+ retry = 0;
}
if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_range_ns(&q->watchdog,
@@ -683,7 +704,7 @@ begin:
u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
f->time_next_packet);
- if (now < time_next_packet) {
+ if (now + q->offload_horizon < time_next_packet) {
head->first = f->next;
f->time_next_packet = time_next_packet;
fq_flow_set_throttled(q, f);
@@ -888,7 +909,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
q->fq_root = array;
- q->fq_trees_log = log;
+ WRITE_ONCE(q->fq_trees_log, log);
sch_tree_unlock(sch);
@@ -921,17 +942,22 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
[TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
[TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
+ [TCA_FQ_OFFLOAD_HORIZON] = { .type = NLA_U32 },
};
/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
{
const int num_elems = TC_PRIO_MAX + 1;
+ u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
int i;
- memset(out, 0, num_elems / 4);
+ memset(tmp, 0, sizeof(tmp));
for (i = 0; i < num_elems; i++)
- out[i / 4] |= in[i] << (2 * (i & 0x3));
+ tmp[i / 4] |= in[i] << (2 * (i & 0x3));
+
+ for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
+ WRITE_ONCE(out[i], tmp[i]);
}
static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
@@ -958,7 +984,7 @@ static int fq_load_weights(struct fq_sched_data *q,
}
}
for (i = 0; i < FQ_BANDS; i++)
- q->band_flows[i].quantum = weights[i];
+ WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
return 0;
}
@@ -1011,16 +1037,18 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
err = -EINVAL;
}
if (tb[TCA_FQ_PLIMIT])
- sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
+ WRITE_ONCE(sch->limit,
+ nla_get_u32(tb[TCA_FQ_PLIMIT]));
if (tb[TCA_FQ_FLOW_PLIMIT])
- q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
+ WRITE_ONCE(q->flow_plimit,
+ nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
if (quantum > 0 && quantum <= (1 << 20)) {
- q->quantum = quantum;
+ WRITE_ONCE(q->quantum, quantum);
} else {
NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
err = -EINVAL;
@@ -1028,7 +1056,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_FQ_INITIAL_QUANTUM])
- q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+ WRITE_ONCE(q->initial_quantum,
+ nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));
if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
@@ -1037,17 +1066,19 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_FLOW_MAX_RATE]) {
u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
- q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
+ WRITE_ONCE(q->flow_max_rate,
+ (rate == ~0U) ? ~0UL : rate);
}
if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
- q->low_rate_threshold =
- nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
+ WRITE_ONCE(q->low_rate_threshold,
+ nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));
if (tb[TCA_FQ_RATE_ENABLE]) {
u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
if (enable <= 1)
- q->rate_enable = enable;
+ WRITE_ONCE(q->rate_enable,
+ enable);
else
err = -EINVAL;
}
@@ -1055,7 +1086,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
- q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+ WRITE_ONCE(q->flow_refill_delay,
+ usecs_to_jiffies(usecs_delay));
}
if (!err && tb[TCA_FQ_PRIOMAP])
@@ -1065,22 +1097,38 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
if (tb[TCA_FQ_ORPHAN_MASK])
- q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+ WRITE_ONCE(q->orphan_mask,
+ nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));
if (tb[TCA_FQ_CE_THRESHOLD])
- q->ce_threshold = (u64)NSEC_PER_USEC *
- nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+ WRITE_ONCE(q->ce_threshold,
+ (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));
if (tb[TCA_FQ_TIMER_SLACK])
- q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);
+ WRITE_ONCE(q->timer_slack,
+ nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));
if (tb[TCA_FQ_HORIZON])
- q->horizon = (u64)NSEC_PER_USEC *
- nla_get_u32(tb[TCA_FQ_HORIZON]);
+ WRITE_ONCE(q->horizon,
+ (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_HORIZON]));
if (tb[TCA_FQ_HORIZON_DROP])
- q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]);
+ WRITE_ONCE(q->horizon_drop,
+ nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
+
+ if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
+ u64 offload_horizon = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);
+ if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
+ WRITE_ONCE(q->offload_horizon, offload_horizon);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
+ err = -EINVAL;
+ }
+ }
if (!err) {
sch_tree_unlock(sch);
@@ -1160,13 +1208,14 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 ce_threshold = q->ce_threshold;
struct tc_prio_qopt prio = {
.bands = FQ_BANDS,
};
- u64 horizon = q->horizon;
struct nlattr *opts;
+ u64 offload_horizon;
+ u64 ce_threshold;
s32 weights[3];
+ u64 horizon;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (opts == NULL)
@@ -1174,35 +1223,52 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+ ce_threshold = READ_ONCE(q->ce_threshold);
do_div(ce_threshold, NSEC_PER_USEC);
+
+ horizon = READ_ONCE(q->horizon);
do_div(horizon, NSEC_PER_USEC);
- if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
- nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
- nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
- nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
- nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
+ offload_horizon = READ_ONCE(q->offload_horizon);
+ do_div(offload_horizon, NSEC_PER_USEC);
+
+ if (nla_put_u32(skb, TCA_FQ_PLIMIT,
+ READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
+ READ_ONCE(q->flow_plimit)) ||
+ nla_put_u32(skb, TCA_FQ_QUANTUM,
+ READ_ONCE(q->quantum)) ||
+ nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
+ READ_ONCE(q->initial_quantum)) ||
+ nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
+ READ_ONCE(q->rate_enable)) ||
nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
- min_t(unsigned long, q->flow_max_rate, ~0U)) ||
+ min_t(unsigned long,
+ READ_ONCE(q->flow_max_rate), ~0U)) ||
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
- jiffies_to_usecs(q->flow_refill_delay)) ||
- nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
+ jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
+ nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
+ READ_ONCE(q->orphan_mask)) ||
nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
- q->low_rate_threshold) ||
+ READ_ONCE(q->low_rate_threshold)) ||
nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
- nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
- nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) ||
+ nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
+ READ_ONCE(q->fq_trees_log)) ||
+ nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
+ READ_ONCE(q->timer_slack)) ||
nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
- nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
+ nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
+ nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
+ READ_ONCE(q->horizon_drop)))
goto nla_put_failure;
fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
goto nla_put_failure;
- weights[0] = q->band_flows[0].quantum;
- weights[1] = q->band_flows[1].quantum;
- weights[2] = q->band_flows[2].quantum;
+ weights[0] = READ_ONCE(q->band_flows[0].quantum);
+ weights[1] = READ_ONCE(q->band_flows[1].quantum);
+ weights[2] = READ_ONCE(q->band_flows[2].quantum);
if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
goto nla_put_failure;
@@ -1264,6 +1330,7 @@ static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
.dump_stats = fq_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq");
static int __init fq_module_init(void)
{
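sch_fq keeps its 16-entry priority-to-band map packed as 2-bit "crumbs", four per byte, which is what FQ_PRIO2BAND_CRUMB_SIZE sizes; the patch builds the packed array in a temporary and publishes it byte by byte with WRITE_ONCE() so fq_prio2band() readers never see a half-updated map. A standalone sketch of the same packing arithmetic, with plain assignments instead of the kernel's annotated stores and an example priomap:

#include <stdio.h>
#include <string.h>

#define TC_PRIO_MAX	15
#define CRUMB_BYTES	((TC_PRIO_MAX + 1) >> 2)	/* 16 prios / 4 crumbs per byte = 4 */

/* pack: in[i] is 0..3 and lands at bit offset 2*(i%4) of out[i/4] */
static void crumb_compress(const unsigned char *in, unsigned char *out)
{
	memset(out, 0, CRUMB_BYTES);
	for (int i = 0; i <= TC_PRIO_MAX; i++)
		out[i / 4] |= (in[i] & 0x3) << (2 * (i & 0x3));
}

/* unpack the i-th 2-bit value, mirroring fq_prio2band() */
static unsigned char crumb_get(const unsigned char *packed, unsigned int prio)
{
	return (packed[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
}

int main(void)
{
	unsigned char map[TC_PRIO_MAX + 1] = { 1, 2, 2, 2, 1, 2, 0, 0,
					       1, 1, 1, 1, 1, 1, 1, 1 };
	unsigned char packed[CRUMB_BYTES];

	crumb_compress(map, packed);
	for (unsigned int p = 0; p <= TC_PRIO_MAX; p++)
		printf("prio %2u -> band %u\n", p, crumb_get(packed, p));
	return 0;
}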
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 8c4fee063436..799f5397ad4c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -168,6 +168,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
skb = dequeue_head(flow);
len += qdisc_pkt_len(skb);
mem += get_codel_cb(skb)->mem_usage;
+ tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
__qdisc_drop(skb, to_free);
} while (++i < max_packets && len < threshold);
@@ -274,7 +275,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -396,40 +397,49 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_CODEL_TARGET]) {
u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
- q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->cparams.target,
+ (target * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
- q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->cparams.ce_threshold,
+ (val * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
- q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
+ WRITE_ONCE(q->cparams.ce_threshold_selector,
+ nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
- q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);
+ WRITE_ONCE(q->cparams.ce_threshold_mask,
+ nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));
if (tb[TCA_FQ_CODEL_INTERVAL]) {
u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
- q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ WRITE_ONCE(q->cparams.interval,
+ (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
}
if (tb[TCA_FQ_CODEL_LIMIT])
- sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+ WRITE_ONCE(sch->limit,
+ nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));
if (tb[TCA_FQ_CODEL_ECN])
- q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+ WRITE_ONCE(q->cparams.ecn,
+ !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));
if (quantum)
- q->quantum = quantum;
+ WRITE_ONCE(q->quantum, quantum);
if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
- q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+ WRITE_ONCE(q->drop_batch_size,
+ max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));
if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
- q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+ WRITE_ONCE(q->memory_limit,
+ min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));
while (sch->q.qlen > sch->limit ||
q->memory_usage > q->memory_limit) {
@@ -522,6 +532,7 @@ init_failure:
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
+ codel_time_t ce_threshold;
struct nlattr *opts;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -529,30 +540,33 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
- codel_time_to_us(q->cparams.target)) ||
+ codel_time_to_us(READ_ONCE(q->cparams.target))) ||
nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
- sch->limit) ||
+ READ_ONCE(sch->limit)) ||
nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
- codel_time_to_us(q->cparams.interval)) ||
+ codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
nla_put_u32(skb, TCA_FQ_CODEL_ECN,
- q->cparams.ecn) ||
+ READ_ONCE(q->cparams.ecn)) ||
nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
- q->quantum) ||
+ READ_ONCE(q->quantum)) ||
nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
- q->drop_batch_size) ||
+ READ_ONCE(q->drop_batch_size)) ||
nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
- q->memory_limit) ||
+ READ_ONCE(q->memory_limit)) ||
nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
- q->flows_cnt))
+ READ_ONCE(q->flows_cnt)))
goto nla_put_failure;
- if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
+ ce_threshold = READ_ONCE(q->cparams.ce_threshold);
+ if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
- codel_time_to_us(q->cparams.ce_threshold)))
+ codel_time_to_us(ce_threshold)))
goto nla_put_failure;
- if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
+ if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+ READ_ONCE(q->cparams.ce_threshold_selector)))
goto nla_put_failure;
- if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
+ if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
+ READ_ONCE(q->cparams.ce_threshold_mask)))
goto nla_put_failure;
}
@@ -717,6 +731,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq_codel");
static int __init fq_codel_module_init(void)
{
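The fq_codel hunks tag the two drop sites differently: fq_codel_drop(), which sheds packets because the queue or memory limit was exceeded, reports QDISC_OVERLIMIT, while drop_func(), invoked when CoDel itself decides to drop, reports QDISC_CONGESTED. A tiny sketch of that classification; the enum here is an illustrative subset, the real reasons live in the kernel's drop reason core:

#include <stdbool.h>
#include <stdio.h>

enum demo_drop_reason {
	DEMO_QDISC_OVERLIMIT,	/* queue/memory limit exceeded, forced drop */
	DEMO_QDISC_CONGESTED,	/* AQM (CoDel, PIE, RED) dropped early */
};

/* Pick a reason the way the patched enqueue paths do: assume OVERLIMIT
 * until the AQM stage is reached, then report CONGESTED. */
static enum demo_drop_reason classify_drop(bool queue_full)
{
	return queue_full ? DEMO_QDISC_OVERLIMIT : DEMO_QDISC_CONGESTED;
}

int main(void)
{
	printf("full queue -> %d\n", classify_drop(true));	/* overlimit */
	printf("early drop -> %d\n", classify_drop(false));	/* congested */
	return 0;
}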
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 5b595773e59b..93c36afbf576 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -10,6 +10,7 @@
*/
#include <linux/jhash.h>
+#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
@@ -129,6 +130,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow,
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct fq_pie_sched_data *q = qdisc_priv(sch);
struct fq_pie_flow *sel_flow;
int ret;
@@ -160,6 +162,8 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->overmemory++;
}
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
+
if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
sel_flow->backlog, skb->len)) {
enqueue = true;
@@ -197,8 +201,7 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
sel_flow->vars.accu_prob = 0;
- __qdisc_drop(skb, to_free);
- qdisc_qstats_drop(sch);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
}
@@ -298,8 +301,8 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_PIE_LIMIT]) {
u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);
- q->p_params.limit = limit;
- sch->limit = limit;
+ WRITE_ONCE(q->p_params.limit, limit);
+ WRITE_ONCE(sch->limit, limit);
}
if (tb[TCA_FQ_PIE_FLOWS]) {
if (q->flows) {
@@ -321,39 +324,45 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);
/* convert to pschedtime */
- q->p_params.target =
- PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
+ WRITE_ONCE(q->p_params.target,
+ PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
}
/* tupdate is in jiffies */
if (tb[TCA_FQ_PIE_TUPDATE])
- q->p_params.tupdate =
- usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));
+ WRITE_ONCE(q->p_params.tupdate,
+ usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));
if (tb[TCA_FQ_PIE_ALPHA])
- q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);
+ WRITE_ONCE(q->p_params.alpha,
+ nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));
if (tb[TCA_FQ_PIE_BETA])
- q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);
+ WRITE_ONCE(q->p_params.beta,
+ nla_get_u32(tb[TCA_FQ_PIE_BETA]));
if (tb[TCA_FQ_PIE_QUANTUM])
- q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);
+ WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));
if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
- q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);
+ WRITE_ONCE(q->memory_limit,
+ nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));
if (tb[TCA_FQ_PIE_ECN_PROB])
- q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);
+ WRITE_ONCE(q->ecn_prob,
+ nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));
if (tb[TCA_FQ_PIE_ECN])
- q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);
+ WRITE_ONCE(q->p_params.ecn,
+ nla_get_u32(tb[TCA_FQ_PIE_ECN]));
if (tb[TCA_FQ_PIE_BYTEMODE])
- q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);
+ WRITE_ONCE(q->p_params.bytemode,
+ nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));
if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
- q->p_params.dq_rate_estimator =
- nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);
+ WRITE_ONCE(q->p_params.dq_rate_estimator,
+ nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));
/* Drop excess packets if new limit is lower */
while (sch->q.qlen > sch->limit) {
@@ -470,22 +479,23 @@ static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
return -EMSGSIZE;
/* convert target from pschedtime to us */
- if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
- nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
+ if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
nla_put_u32(skb, TCA_FQ_PIE_TARGET,
- ((u32)PSCHED_TICKS2NS(q->p_params.target)) /
+ ((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
NSEC_PER_USEC) ||
nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
- jiffies_to_usecs(q->p_params.tupdate)) ||
- nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
- nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
- nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
- nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
- nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
- nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
- nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
+ jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
+ nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
+ READ_ONCE(q->memory_limit)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
+ nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
- q->p_params.dq_rate_estimator))
+ READ_ONCE(q->p_params.dq_rate_estimator)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -563,6 +573,7 @@ static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
.dump_stats = fq_pie_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq_pie");
static int __init fq_pie_module_init(void)
{
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9b3e9262040b..14ab2f4c190a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
@@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = READ_ONCE(dev_tx_weight);
+ int quota = READ_ONCE(net_hotdata.dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
@@ -505,19 +506,28 @@ static void dev_watchdog(struct timer_list *t)
unsigned int timedout_ms = 0;
unsigned int i;
unsigned long trans_start;
+ unsigned long oldest_start = jiffies;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
+ if (!netif_xmit_stopped(txq))
+ continue;
+
+ /* Paired with WRITE_ONCE() + smp_mb...() in
+ * netdev_tx_sent_queue() and netif_tx_stop_queue().
+ */
+ smp_mb();
trans_start = READ_ONCE(txq->trans_start);
- if (netif_xmit_stopped(txq) &&
- time_after(jiffies, (trans_start +
- dev->watchdog_timeo))) {
+
+ if (time_after(jiffies, trans_start + dev->watchdog_timeo)) {
timedout_ms = jiffies_to_msecs(jiffies - trans_start);
atomic_long_inc(&txq->trans_timeout);
break;
}
+ if (time_after(oldest_start, trans_start))
+ oldest_start = trans_start;
}
if (unlikely(timedout_ms)) {
@@ -530,7 +540,7 @@ static void dev_watchdog(struct timer_list *t)
netif_unfreeze_queues(dev);
}
if (!mod_timer(&dev->watchdog_timer,
- round_jiffies(jiffies +
+ round_jiffies(oldest_start +
dev->watchdog_timeo)))
release = false;
}
@@ -541,25 +551,20 @@ static void dev_watchdog(struct timer_list *t)
netdev_put(dev, &dev->watchdog_dev_tracker);
}
-void __netdev_watchdog_up(struct net_device *dev)
-{
- if (dev->netdev_ops->ndo_tx_timeout) {
- if (dev->watchdog_timeo <= 0)
- dev->watchdog_timeo = 5*HZ;
- if (!mod_timer(&dev->watchdog_timer,
- round_jiffies(jiffies + dev->watchdog_timeo)))
- netdev_hold(dev, &dev->watchdog_dev_tracker,
- GFP_ATOMIC);
- }
-}
-EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
-
-static void dev_watchdog_up(struct net_device *dev)
+void netdev_watchdog_up(struct net_device *dev)
{
- __netdev_watchdog_up(dev);
+ if (!dev->netdev_ops->ndo_tx_timeout)
+ return;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = 5*HZ;
+ if (!mod_timer(&dev->watchdog_timer,
+ round_jiffies(jiffies + dev->watchdog_timeo)))
+ netdev_hold(dev, &dev->watchdog_dev_tracker,
+ GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(netdev_watchdog_up);
-static void dev_watchdog_down(struct net_device *dev)
+static void netdev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
@@ -581,7 +586,7 @@ void netif_carrier_on(struct net_device *dev)
atomic_inc(&dev->carrier_up_count);
linkwatch_fire_event(dev);
if (netif_running(dev))
- __netdev_watchdog_up(dev);
+ netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_carrier_on);
@@ -629,6 +634,7 @@ EXPORT_SYMBOL_GPL(netif_carrier_event);
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
+ dev_core_stats_tx_dropped_inc(skb->dev);
__qdisc_drop(skb, to_free);
return NET_XMIT_CN;
}
@@ -672,6 +678,7 @@ struct Qdisc noop_qdisc = {
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
},
+ .owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);
@@ -899,8 +906,8 @@ static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
bands[prio] = q;
}
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
+ return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
+ GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
@@ -944,7 +951,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
gnet_stats_basic_sync_init(&sch->bstats);
+ lockdep_register_key(&sch->root_lock_key);
spin_lock_init(&sch->q.lock);
+ lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats =
@@ -973,11 +982,13 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
+ sch->owner = -1;
netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
errout1:
+ lockdep_unregister_key(&sch->root_lock_key);
kfree(sch);
errout:
return ERR_PTR(err);
@@ -1066,6 +1077,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
if (ops->destroy)
ops->destroy(qdisc);
+ lockdep_unregister_key(&qdisc->root_lock_key);
module_put(ops->owner);
netdev_put(dev, &qdisc->dev_tracker);
@@ -1250,7 +1262,7 @@ void dev_activate(struct net_device *dev)
if (need_watchdog) {
netif_trans_update(dev);
- dev_watchdog_up(dev);
+ netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(dev_activate);
@@ -1265,15 +1277,17 @@ static void qdisc_deactivate(struct Qdisc *qdisc)
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
- void *_qdisc_default)
+ void *_sync_needed)
{
- struct Qdisc *qdisc_default = _qdisc_default;
+ bool *sync_needed = _sync_needed;
struct Qdisc *qdisc;
qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
+ if (qdisc->enqueue)
+ *sync_needed = true;
qdisc_deactivate(qdisc);
- rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
}
}
@@ -1340,24 +1354,22 @@ static bool some_qdisc_is_busy(struct net_device *dev)
*/
void dev_deactivate_many(struct list_head *head)
{
+ bool sync_needed = false;
struct net_device *dev;
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
- &noop_qdisc);
+ &sync_needed);
if (dev_ingress_queue(dev))
dev_deactivate_queue(dev, dev_ingress_queue(dev),
- &noop_qdisc);
+ &sync_needed);
- dev_watchdog_down(dev);
+ netdev_watchdog_down(dev);
}
- /* Wait for outstanding qdisc-less dev_queue_xmit calls or
- * outstanding qdisc enqueuing calls.
- * This is avoided if all devices are in dismantle phase :
- * Caller will call synchronize_net() for us
- */
- synchronize_net();
+ /* Wait for outstanding qdisc enqueuing calls. */
+ if (sync_needed)
+ synchronize_net();
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
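The dev_watchdog() rework above only inspects stopped queues, adds a barrier paired with the producer side, and re-arms the timer from the oldest stalled trans_start instead of from jiffies, so a queue that stalled just after the previous scan is not granted an extra full watchdog_timeo. A userspace sketch of that scan; time_after() is re-implemented with the kernel's signed-difference idiom and the queue layout is illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe comparison: true if a is after b. */
#define demo_time_after(a, b)	((long)((b) - (a)) < 0)

struct demo_txq {
	bool stopped;
	unsigned long trans_start;	/* last transmit, in jiffies */
};

/* Returns jiffies of stall for the first timed-out queue (0 if none) and
 * the point to re-arm from: the oldest trans_start among stopped queues. */
static unsigned long watchdog_scan(const struct demo_txq *txq, int n,
				   unsigned long now, unsigned long timeo,
				   unsigned long *oldest_start)
{
	*oldest_start = now;

	for (int i = 0; i < n; i++) {
		if (!txq[i].stopped)
			continue;	/* only stopped queues can be stuck */
		if (demo_time_after(now, txq[i].trans_start + timeo))
			return now - txq[i].trans_start;	/* timed out */
		if (demo_time_after(*oldest_start, txq[i].trans_start))
			*oldest_start = txq[i].trans_start;
	}
	return 0;
}

int main(void)
{
	struct demo_txq q[] = {
		{ .stopped = false, .trans_start = 100 },
		{ .stopped = true,  .trans_start = 950 },	/* stalled 50 ago */
	};
	unsigned long oldest, stalled;

	stalled = watchdog_scan(q, 2, 1000, 500, &oldest);
	/* no timeout yet; next check fires at oldest + timeo = 1450, not 1500 */
	printf("stalled for %lu, re-arm from %lu\n", stalled, oldest);
	return 0;
}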
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8c61eb3dc943..532fde548b88 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -251,10 +251,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->stats.pdrop++;
drop:
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
congestion_drop:
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_CONGESTED);
return NET_XMIT_CN;
}
@@ -668,7 +668,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
}
- max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_GRED_MAX_P], 0);
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
@@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch)
for (i = 0; i < table->DPs; i++)
gred_destroy_vq(table->tab[i]);
- gred_offload(sch, TC_GRED_DESTROY);
+ if (table->opt)
+ gred_offload(sch, TC_GRED_DESTROY);
kfree(table->opt);
}
@@ -930,6 +931,7 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.dump = gred_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("gred");
static int __init gred_module_init(void)
{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 16c45da4036a..c287bf8423b4 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1174,7 +1174,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
}
/* classification failed, try default class */
- cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
+ cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle),
+ READ_ONCE(q->defcls)), sch);
if (cl == NULL || cl->level > 0)
return NULL;
@@ -1443,9 +1444,7 @@ hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
qopt = nla_data(opt);
- sch_tree_lock(sch);
- q->defcls = qopt->defcls;
- sch_tree_unlock(sch);
+ WRITE_ONCE(q->defcls, qopt->defcls);
return 0;
}
@@ -1525,7 +1524,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
- qopt.defcls = q->defcls;
+ qopt.defcls = READ_ONCE(q->defcls);
if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
goto nla_put_failure;
return skb->len;
@@ -1679,6 +1678,7 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
};
+MODULE_ALIAS_NET_SCH("hfsc");
static int __init
hfsc_init(void)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d26cd436cbe3..44d9efe1a96a 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -534,27 +534,31 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (tb[TCA_HHF_BACKLOG_LIMIT])
- sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
+ WRITE_ONCE(sch->limit, nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]));
- q->quantum = new_quantum;
- q->hhf_non_hh_weight = new_hhf_non_hh_weight;
+ WRITE_ONCE(q->quantum, new_quantum);
+ WRITE_ONCE(q->hhf_non_hh_weight, new_hhf_non_hh_weight);
if (tb[TCA_HHF_HH_FLOWS_LIMIT])
- q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);
+ WRITE_ONCE(q->hh_flows_limit,
+ nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]));
if (tb[TCA_HHF_RESET_TIMEOUT]) {
u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);
- q->hhf_reset_timeout = usecs_to_jiffies(us);
+ WRITE_ONCE(q->hhf_reset_timeout,
+ usecs_to_jiffies(us));
}
if (tb[TCA_HHF_ADMIT_BYTES])
- q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);
+ WRITE_ONCE(q->hhf_admit_bytes,
+ nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]));
if (tb[TCA_HHF_EVICT_TIMEOUT]) {
u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);
- q->hhf_evict_timeout = usecs_to_jiffies(us);
+ WRITE_ONCE(q->hhf_evict_timeout,
+ usecs_to_jiffies(us));
}
qlen = sch->q.qlen;
@@ -657,15 +661,18 @@ static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
- nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
- nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
+ if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_HHF_QUANTUM, READ_ONCE(q->quantum)) ||
+ nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT,
+ READ_ONCE(q->hh_flows_limit)) ||
nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
- jiffies_to_usecs(q->hhf_reset_timeout)) ||
- nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
+ jiffies_to_usecs(READ_ONCE(q->hhf_reset_timeout))) ||
+ nla_put_u32(skb, TCA_HHF_ADMIT_BYTES,
+ READ_ONCE(q->hhf_admit_bytes)) ||
nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
- jiffies_to_usecs(q->hhf_evict_timeout)) ||
- nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
+ jiffies_to_usecs(READ_ONCE(q->hhf_evict_timeout))) ||
+ nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT,
+ READ_ONCE(q->hhf_non_hh_weight)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -702,6 +709,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.dump_stats = hhf_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("hhf");
static int __init hhf_module_init(void)
{
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 7349233eaa9b..c31bc5489bdd 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1039,13 +1039,6 @@ static void htb_work_func(struct work_struct *work)
rcu_read_unlock();
}
-static void htb_set_lockdep_class_child(struct Qdisc *q)
-{
- static struct lock_class_key child_key;
-
- lockdep_set_class(qdisc_lock(q), &child_key);
-}
-
static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
@@ -1132,7 +1125,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
return -ENOMEM;
}
- htb_set_lockdep_class_child(qdisc);
q->direct_qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -1468,7 +1460,6 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
}
if (q->offload) {
- htb_set_lockdep_class_child(new);
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
qdisc_refcount_inc(new);
old_q = htb_graft_helper(dev_queue, new);
@@ -1733,11 +1724,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
cl->parent->common.classid,
NULL);
- if (q->offload) {
- if (new_q)
- htb_set_lockdep_class_child(new_q);
+ if (q->offload)
htb_parent_to_leaf_offload(sch, dev_queue, new_q);
- }
}
sch_tree_lock(sch);
@@ -1822,8 +1810,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
NULL));
- rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
- ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_HTB_RATE64], 0);
+ ceil64 = nla_get_u64_default(tb[TCA_HTB_CEIL64], 0);
if (!cl) { /* new class */
struct net_device *dev = qdisc_dev(sch);
@@ -1947,13 +1935,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
classid, NULL);
if (q->offload) {
- if (new_q) {
- htb_set_lockdep_class_child(new_q);
- /* One ref for cl->leaf.q, the other for
- * dev_queue->qdisc.
- */
+ /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+ if (new_q)
qdisc_refcount_inc(new_q);
- }
old_q = htb_graft_helper(dev_queue, new_q);
/* No qdisc_put needed. */
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
@@ -2166,6 +2150,7 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.dump = htb_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("htb");
static int __init htb_module_init(void)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5fa9eaa79bfc..cc6051d4f2ef 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, true, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, true);
@@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch)
tcf_block_put_ext(q->block, sch, &q->block_info);
if (entry) {
- tcx_miniq_set_active(entry, false);
+ tcx_miniq_dec(entry);
if (!tcx_entry_is_active(entry)) {
tcx_entry_update(dev, NULL, true);
tcx_entry_free(entry);
@@ -168,6 +168,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.ingress_block_get = ingress_ingress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ingress");
struct clsact_sched_data {
struct tcf_block *ingress_block;
@@ -256,7 +257,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, true, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, true);
@@ -275,7 +276,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, false, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, false);
@@ -301,7 +302,7 @@ static void clsact_destroy(struct Qdisc *sch)
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
if (ingress_entry) {
- tcx_miniq_set_active(ingress_entry, false);
+ tcx_miniq_dec(ingress_entry);
if (!tcx_entry_is_active(ingress_entry)) {
tcx_entry_update(dev, NULL, true);
tcx_entry_free(ingress_entry);
@@ -309,7 +310,7 @@ static void clsact_destroy(struct Qdisc *sch)
}
if (egress_entry) {
- tcx_miniq_set_active(egress_entry, false);
+ tcx_miniq_dec(egress_entry);
if (!tcx_entry_is_active(egress_entry)) {
tcx_entry_update(dev, NULL, false);
tcx_entry_free(egress_entry);
@@ -344,6 +345,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.egress_block_get = clsact_egress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("clsact");
static int __init ingress_module_init(void)
{
@@ -368,6 +370,5 @@ static void __exit ingress_module_exit(void)
module_init(ingress_module_init);
module_exit(ingress_module_exit);
-MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
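ingress/clsact now bump and drop a user count on the shared tcx entry (tcx_miniq_inc/dec) instead of flipping a boolean, so overlapping attach/detach, for instance during a qdisc replace, cannot leave the entry marked inactive while another instance still uses it. A toy sketch of why a counter is the right shape here; the demo_* names are illustrative:

#include <stdio.h>

struct demo_entry {
	int miniq_users;	/* what tcx_miniq_inc()/tcx_miniq_dec() maintain */
};

static void demo_attach(struct demo_entry *e)     { e->miniq_users++; }
static void demo_detach(struct demo_entry *e)     { e->miniq_users--; }
static int  demo_is_active(const struct demo_entry *e) { return e->miniq_users > 0; }

int main(void)
{
	struct demo_entry e = { 0 };

	demo_attach(&e);	/* new qdisc attaches first */
	demo_attach(&e);	/* old one is still attached */
	demo_detach(&e);	/* old one is torn down */
	/* a boolean would now read "inactive"; the count still says 1 */
	printf("still active: %d\n", demo_is_active(&e));
	return 0;
}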
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 43e53ee00a56..51d4013b6121 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -215,10 +215,8 @@ static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
fp[tc] = priv->fp[tc];
- nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
- if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
- continue;
-
+ nla_for_each_attr_type(n, TCA_MQPRIO_TC_ENTRY, nlattr_opt,
+ nlattr_opt_len, rem) {
err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
if (err)
goto out;
@@ -774,6 +772,7 @@ static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
.dump = mqprio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("mqprio");
static int __init mqprio_module_init(void)
{
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index d66d5f0ec080..06e03f5cd7ce 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
- removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+ removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
GFP_KERNEL);
if (!removed)
return -ENOMEM;
@@ -395,6 +395,7 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.dump = multiq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("multiq");
static int __init multiq_module_init(void)
{
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fa678eb88528..fdd79d3ccd8c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>
@@ -78,6 +79,8 @@ struct netem_sched_data {
struct sk_buff *t_head;
struct sk_buff *t_tail;
+ u32 t_len;
+
/* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
@@ -382,6 +385,7 @@ static void tfifo_reset(struct Qdisc *sch)
rtnl_kfree_skbs(q->t_head, q->t_tail);
q->t_head = NULL;
q->t_tail = NULL;
+ q->t_len = 0;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
@@ -411,6 +415,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
rb_link_node(&nskb->rbnode, parent, p);
rb_insert_color(&nskb->rbnode, &q->t_root);
}
+ q->t_len++;
sch->q.qlen++;
}
@@ -446,12 +451,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct netem_sched_data *q = qdisc_priv(sch);
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
- struct sk_buff *skb2;
+ struct sk_buff *skb2 = NULL;
struct sk_buff *segs = NULL;
unsigned int prev_len = qdisc_pkt_len(skb);
int count = 1;
- int rc = NET_XMIT_SUCCESS;
- int rc_drop = NET_XMIT_DROP;
/* Do not fool qdisc_drop_all() */
skb->prev = NULL;
@@ -480,19 +483,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb_orphan_partial(skb);
/*
- * If we need to duplicate packet, then re-insert at top of the
- * qdisc tree, since parent queuer expects that only one
- * skb will be queued.
+ * If we need to duplicate packet, then clone it before
+ * original is modified.
*/
- if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- struct Qdisc *rootq = qdisc_root_bh(sch);
- u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
-
- q->duplicate = 0;
- rootq->enqueue(skb2, rootq, to_free);
- q->duplicate = dupsave;
- rc_drop = NET_XMIT_SUCCESS;
- }
+ if (count > 1)
+ skb2 = skb_clone(skb, GFP_ATOMIC);
/*
* Randomized packet corruption.
@@ -504,7 +499,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb)) {
skb = netem_segment(skb, sch, to_free);
if (!skb)
- return rc_drop;
+ goto finish_segs;
+
segs = skb->next;
skb_mark_not_on_list(skb);
qdisc_skb_cb(skb)->pkt_len = skb->len;
@@ -526,11 +522,28 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<get_random_u32_below(8);
}
- if (unlikely(sch->q.qlen >= sch->limit)) {
+ if (unlikely(q->t_len >= sch->limit)) {
/* re-link segs, so that qdisc_drop_all() frees them all */
skb->next = segs;
qdisc_drop_all(skb, sch, to_free);
- return rc_drop;
+ if (skb2)
+ __qdisc_drop(skb2, to_free);
+ return NET_XMIT_DROP;
+ }
+
+ /*
+ * If doing duplication then re-insert at top of the
+ * qdisc tree, since parent queuer expects that only one
+ * skb will be queued.
+ */
+ if (skb2) {
+ struct Qdisc *rootq = qdisc_root_bh(sch);
+ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+
+ q->duplicate = 0;
+ rootq->enqueue(skb2, rootq, to_free);
+ q->duplicate = dupsave;
+ skb2 = NULL;
}
qdisc_qstats_backlog_inc(sch, skb);
@@ -601,9 +614,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
finish_segs:
+ if (skb2)
+ __qdisc_drop(skb2, to_free);
+
if (segs) {
unsigned int len, last_len;
- int nb;
+ int rc, nb;
len = skb ? skb->len : 0;
nb = skb ? 1 : 0;
@@ -690,8 +706,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
tfifo_dequeue:
skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
- qdisc_qstats_backlog_dec(sch, skb);
deliver:
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -707,8 +723,7 @@ deliver:
if (time_to_send <= now && q->slot.slot_next <= now) {
netem_erase_head(q, skb);
- sch->q.qlen--;
- qdisc_qstats_backlog_dec(sch, skb);
+ q->t_len--;
skb->next = NULL;
skb->prev = NULL;
/* skb->dev shares skb->rbnode area,
@@ -731,21 +746,25 @@ deliver:
err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
- if (err != NET_XMIT_SUCCESS &&
- net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- pkt_len);
+ if (err != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= pkt_len;
+ sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
+ sch->q.qlen--;
goto deliver;
}
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
qdisc_watchdog_schedule_ns(&q->watchdog,
@@ -755,8 +774,10 @@ deliver:
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
return NULL;
}
@@ -1293,6 +1314,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.dump = netem_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("netem");
static int __init netem_module_init(void)
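netem_enqueue() now clones the duplicate up front, frees it together with the original when the limit check (now against the new tfifo counter t_len) fails, and only re-injects it at the root qdisc afterwards, still zeroing q->duplicate around the re-injection so a duplicate is never duplicated again. A toy model of that control flow under those assumptions; the demo_* names are illustrative, not the kernel code:

#include <stdio.h>

struct demo_netem {
	unsigned int duplicate;		/* duplication enabled if non-zero */
	unsigned int qlen, limit;
	unsigned int enqueued, dropped;
};

static int demo_enqueue(struct demo_netem *q)
{
	int have_clone = q->duplicate != 0;	/* "skb2 = skb_clone(skb, ...)" */

	if (q->qlen >= q->limit) {
		q->dropped += 1 + have_clone;	/* original and clone both freed */
		return -1;
	}
	q->qlen++;
	q->enqueued++;

	if (have_clone) {
		unsigned int dupsave = q->duplicate;

		q->duplicate = 0;	/* prevent duplicating a dup... */
		demo_enqueue(q);	/* re-inject the clone at the root */
		q->duplicate = dupsave;
	}
	return 0;
}

int main(void)
{
	struct demo_netem q = { .duplicate = 1, .limit = 10 };

	demo_enqueue(&q);
	printf("enqueued %u dropped %u\n", q.enqueued, q.dropped);	/* 2 0 */
	return 0;
}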
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2da6250ec346..bb1fa9aa530b 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -85,6 +85,7 @@ EXPORT_SYMBOL_GPL(pie_drop_early);
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct pie_sched_data *q = qdisc_priv(sch);
bool enqueue = false;
@@ -93,6 +94,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto out;
}
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
+
if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
skb->len)) {
enqueue = true;
@@ -121,7 +124,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
q->vars.accu_prob = 0;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free, reason);
}
static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
@@ -156,36 +159,38 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
/* convert to pschedtime */
- q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
+ WRITE_ONCE(q->params.target,
+ PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
}
/* tupdate is in jiffies */
if (tb[TCA_PIE_TUPDATE])
- q->params.tupdate =
- usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
+ WRITE_ONCE(q->params.tupdate,
+ usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));
if (tb[TCA_PIE_LIMIT]) {
u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
- q->params.limit = limit;
- sch->limit = limit;
+ WRITE_ONCE(q->params.limit, limit);
+ WRITE_ONCE(sch->limit, limit);
}
if (tb[TCA_PIE_ALPHA])
- q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);
+ WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));
if (tb[TCA_PIE_BETA])
- q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);
+ WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));
if (tb[TCA_PIE_ECN])
- q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);
+ WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));
if (tb[TCA_PIE_BYTEMODE])
- q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
+ WRITE_ONCE(q->params.bytemode,
+ nla_get_u32(tb[TCA_PIE_BYTEMODE]));
if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
- q->params.dq_rate_estimator =
- nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);
+ WRITE_ONCE(q->params.dq_rate_estimator,
+ nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
@@ -469,17 +474,18 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
/* convert target from pschedtime to us */
if (nla_put_u32(skb, TCA_PIE_TARGET,
- ((u32)PSCHED_TICKS2NS(q->params.target)) /
+ ((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
NSEC_PER_USEC) ||
- nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
nla_put_u32(skb, TCA_PIE_TUPDATE,
- jiffies_to_usecs(q->params.tupdate)) ||
- nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
- nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
+ jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
+ nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
+ nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
- nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
+ nla_put_u32(skb, TCA_PIE_BYTEMODE,
+ READ_ONCE(q->params.bytemode)) ||
nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
- q->params.dq_rate_estimator))
+ READ_ONCE(q->params.dq_rate_estimator)))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -556,6 +562,7 @@ static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
.dump_stats = pie_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("pie");
static int __init pie_module_init(void)
{
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 992f0c8d7988..cefb65201e17 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -213,6 +213,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.reset = qdisc_reset_queue,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("plug");
static int __init plug_module_init(void)
{
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8ecdd3ef6f8e..cc30f7a32f1a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -418,6 +418,7 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.dump = prio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("prio");
static int __init prio_module_init(void)
{
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 48a604c320c7..6a07cdbdb9e1 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -421,10 +421,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (err < 0)
return err;
- if (tb[TCA_QFQ_WEIGHT])
- weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
- else
- weight = 1;
+ weight = nla_get_u32_default(tb[TCA_QFQ_WEIGHT], 1);
if (tb[TCA_QFQ_LMAX]) {
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
@@ -1521,6 +1518,7 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.destroy = qfq_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("qfq");
static int __init qfq_init(void)
{
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 607b6c8b3a9b..ef8a2afed26b 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -70,6 +70,7 @@ static int red_use_nodrop(struct red_sched_data *q)
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_CONGESTED;
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
unsigned int len;
@@ -107,6 +108,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break;
case RED_HARD_MARK:
+ reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
qdisc_qstats_overlimit(sch);
if (red_use_harddrop(q) || !red_use_ecn(q)) {
q->stats.forced_drop++;
@@ -143,7 +145,7 @@ congestion_drop:
if (!skb)
return NET_XMIT_CN | ret;
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
}
@@ -248,7 +250,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
tb[TCA_RED_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_RED_MAX_P], 0);
ctl = nla_data(tb[TCA_RED_PARMS]);
stab = nla_data(tb[TCA_RED_STAB]);
@@ -548,6 +550,7 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.dump_stats = red_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("red");
static int __init red_module_init(void)
{
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 1871a1c0224d..d2835f1168e1 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -280,6 +280,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct sfb_sched_data *q = qdisc_priv(sch);
unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *child = q->qdisc;
@@ -380,6 +381,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
r = get_random_u16() & SFB_MAX_PROB;
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
if (unlikely(r < p_min)) {
if (unlikely(p_min > SFB_MAX_PROB / 2)) {
@@ -414,7 +416,7 @@ enqueue:
return ret;
drop:
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
other_drop:
if (ret & __NET_XMIT_BYPASS)
@@ -709,6 +711,7 @@ static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
.dump_stats = sfb_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfb");
static int __init sfb_module_init(void)
{
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index eb77558fa367..65d5b59da583 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -77,12 +77,6 @@
#define SFQ_EMPTY_SLOT 0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
-/* We use 16 bits to store allot, and want to handle packets up to 64K
- * Scale allot by 8 (1<<3) so that no overflow occurs.
- */
-#define SFQ_ALLOT_SHIFT 3
-#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
-
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;
@@ -104,7 +98,7 @@ struct sfq_slot {
sfq_index next; /* next slot in sfq RR chain */
struct sfq_head dep; /* anchor in dep[] chains */
unsigned short hash; /* hash value (index in ht[]) */
- short allot; /* credit for this slot */
+ int allot; /* credit for this slot */
unsigned int backlog;
struct red_vars vars;
@@ -120,7 +114,6 @@ struct sfq_sched_data {
siphash_key_t perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
- unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto __rcu *filter_list;
struct tcf_block *block;
sfq_index *ht; /* Hash table ('divisor' slots) */
@@ -456,7 +449,7 @@ enqueue:
*/
q->tail = slot;
/* We could use a bigger initial quantum for new flows */
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
@@ -493,7 +486,7 @@ next_slot:
slot = &q->slots[a];
if (slot->allot <= 0) {
q->tail = slot;
- slot->allot += q->scaled_quantum;
+ slot->allot += q->quantum;
goto next_slot;
}
skb = slot_dequeue_head(slot);
@@ -512,7 +505,7 @@ next_slot:
}
q->tail->next = next_a;
} else {
- slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
+ slot->allot -= qdisc_pkt_len(skb);
}
return skb;
}
@@ -595,7 +588,7 @@ drop:
q->tail->next = x;
}
q->tail = slot;
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
}
sch->q.qlen -= dropped;
@@ -608,6 +601,7 @@ static void sfq_perturbation(struct timer_list *t)
struct Qdisc *sch = q->sch;
spinlock_t *root_lock;
siphash_key_t nkey;
+ int period;
get_random_bytes(&nkey, sizeof(nkey));
rcu_read_lock();
@@ -618,12 +612,17 @@ static void sfq_perturbation(struct timer_list *t)
sfq_rehash(sch);
spin_unlock(root_lock);
- if (q->perturb_period)
- mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+ /* q->perturb_period can change under us from
+ * sfq_change() and sfq_destroy().
+ */
+ period = READ_ONCE(q->perturb_period);
+ if (period)
+ mod_timer(&q->perturb_timer, jiffies + period);
rcu_read_unlock();
}
-static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
@@ -641,14 +640,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
- /* slot->allot is a short, make sure quantum is not too big. */
- if (ctl->quantum) {
- unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
-
- if (scaled <= 0 || scaled > SHRT_MAX)
- return -EINVAL;
+ if ((int)ctl->quantum < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
+ return -EINVAL;
}
-
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
return -EINVAL;
@@ -657,12 +652,14 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (!p)
return -ENOMEM;
}
+ if (ctl->limit == 1) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid limit");
+ return -EINVAL;
+ }
sch_tree_lock(sch);
- if (ctl->quantum) {
+ if (ctl->quantum)
q->quantum = ctl->quantum;
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
- }
- q->perturb_period = ctl->perturb_period * HZ;
+ WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
if (ctl->flows)
q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
if (ctl->divisor) {
@@ -724,7 +721,7 @@ static void sfq_destroy(struct Qdisc *sch)
struct sfq_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
- q->perturb_period = 0;
+ WRITE_ONCE(q->perturb_period, 0);
del_timer_sync(&q->perturb_timer);
sfq_free(q->ht);
sfq_free(q->slots);
@@ -757,12 +754,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
q->maxflows = SFQ_DEFAULT_FLOWS;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
if (opt) {
- int err = sfq_change(sch, opt);
+ int err = sfq_change(sch, opt, extack);
if (err)
return err;
}
@@ -873,7 +869,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (idx != SFQ_EMPTY_SLOT) {
const struct sfq_slot *slot = &q->slots[idx];
- xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+ xstats.allot = slot->allot;
qs.qlen = slot->qlen;
qs.backlog = slot->backlog;
}
@@ -925,6 +921,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.dump = sfq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfq");
static int __init sfq_module_init(void)
{
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 28beb11762d8..20ff7386b74b 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -79,7 +79,9 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
prio = min(skb->priority, max_priority);
qdisc = &q->qdiscs[prio];
- if (sch->q.qlen < sch->limit) {
+
+ /* sch->limit can change under us from skbprio_change() */
+ if (sch->q.qlen < READ_ONCE(sch->limit)) {
__skb_queue_tail(qdisc, skb);
qdisc_qstats_backlog_inc(sch, skb);
q->qstats[prio].backlog += qdisc_pkt_len(skb);
@@ -172,7 +174,7 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
return -EINVAL;
- sch->limit = ctl->limit;
+ WRITE_ONCE(sch->limit, ctl->limit);
return 0;
}
@@ -200,7 +202,7 @@ static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct tc_skbprio_qopt opt;
- opt.limit = sch->limit;
+ opt.limit = READ_ONCE(sch->limit);
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
return -1;
@@ -292,6 +294,7 @@ static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
.destroy = skbprio_destroy,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("skbprio");
static int __init skbprio_module_init(void)
{
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..a68e17891b0b 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -40,6 +40,8 @@ static struct static_key_false taprio_have_working_mqprio;
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_SUPPORTED_FLAGS \
+ (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
@@ -408,19 +410,6 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
return entry;
}
-static bool taprio_flags_valid(u32 flags)
-{
- /* Make sure no other flag bits are set. */
- if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
- TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- /* txtime-assist and full offload are mutually exclusive */
- if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
- (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- return true;
-}
-
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
@@ -1008,7 +997,8 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
};
static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
- [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
+ [TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
+ TC_QOPT_MAX_QUEUE),
[TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
[TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
@@ -1031,7 +1021,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
- [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_FLAGS] =
+ NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
@@ -1160,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
list_for_each_entry(entry, &new->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- if (!cycle) {
- NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
- return -EINVAL;
- }
-
if (cycle < 0 || cycle > INT_MAX) {
NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
return -EINVAL;
@@ -1173,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
new->cycle_time = cycle;
}
+ if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
+ NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
+ return -EINVAL;
+ }
+
taprio_calculate_gate_durations(q, new);
return 0;
@@ -1185,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
{
bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
- if (!qopt && !dev->num_tc) {
- NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
- return -EINVAL;
- }
-
- /* If num_tc is already set, it means that the user already
- * configured the mqprio part
- */
- if (dev->num_tc)
+ if (!qopt) {
+ if (!dev->num_tc) {
+ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+ return -EINVAL;
+ }
return 0;
+ }
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
@@ -1622,7 +1610,7 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct ethtool_ts_info info = {
+ struct kernel_ethtool_ts_info info = {
.cmd = ETHTOOL_GET_TS_INFO,
.phc_index = -1,
};
@@ -1761,10 +1749,7 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
fp[tc] = q->fp[tc];
}
- nla_for_each_nested(n, opt, rem) {
- if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
- continue;
-
+ nla_for_each_nested_type(n, TCA_TAPRIO_ATTR_TC_ENTRY, opt, rem) {
err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
extack);
if (err)
@@ -1815,33 +1800,6 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
return 0;
}
-/* The semantics of the 'flags' argument in relation to 'change()'
- * requests, are interpreted following two rules (which are applied in
- * this order): (1) an omitted 'flags' argument is interpreted as
- * zero; (2) the 'flags' of a "running" taprio instance cannot be
- * changed.
- */
-static int taprio_new_flags(const struct nlattr *attr, u32 old,
- struct netlink_ext_ack *extack)
-{
- u32 new = 0;
-
- if (attr)
- new = nla_get_u32(attr);
-
- if (old != TAPRIO_FLAGS_INVALID && old != new) {
- NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!taprio_flags_valid(new)) {
- NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
- return -EINVAL;
- }
-
- return new;
-}
-
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1852,6 +1810,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
unsigned long flags;
+ u32 taprio_flags;
ktime_t start;
int i, err;
@@ -1863,12 +1822,31 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
- err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
- q->flags, extack);
- if (err < 0)
- return err;
+ /* The semantics of the 'flags' argument in relation to 'change()'
+ * requests are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+ taprio_flags = nla_get_u32_default(tb[TCA_TAPRIO_ATTR_FLAGS], 0);
- q->flags = err;
+ /* txtime-assist and full offload are mutually exclusive */
+ if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
+ (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
+ "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
+ return -EINVAL;
+ }
+
+ if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing 'flags' of a running schedule is not supported");
+ return -EOPNOTSUPP;
+ }
+ q->flags = taprio_flags;
+
+ /* Needed for length_to_duration() during netlink attribute parsing */
+ taprio_set_picos_per_byte(dev, q);
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
@@ -1929,7 +1907,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
goto free_sched;
- taprio_set_picos_per_byte(dev, q);
taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
@@ -1975,7 +1952,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto unlock;
}
- rcu_assign_pointer(q->admin_sched, new_admin);
+ /* Not going to race against advance_sched(), but still */
+ admin = rcu_replace_pointer(q->admin_sched, new_admin,
+ lockdep_rtnl_is_held());
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
} else {
@@ -1986,7 +1965,8 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
taprio_start_sched(sch, start, new_admin);
- rcu_assign_pointer(q->admin_sched, new_admin);
+ admin = rcu_replace_pointer(q->admin_sched, new_admin,
+ lockdep_rtnl_is_held());
if (admin)
call_rcu(&admin->rcu, taprio_free_sched_cb);
@@ -2394,9 +2374,6 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_mqprio_qopt opt = { 0 };
struct nlattr *nest, *sched_nest;
- oper = rtnl_dereference(q->oper_sched);
- admin = rtnl_dereference(q->admin_sched);
-
mqprio_qopt_reconstruct(dev, &opt);
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -2417,18 +2394,23 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
+ rcu_read_lock();
+
+ oper = rtnl_dereference(q->oper_sched);
+ admin = rtnl_dereference(q->admin_sched);
+
if (oper && taprio_dump_tc_entries(skb, q, oper))
- goto options_error;
+ goto options_error_rcu;
if (oper && dump_schedule(skb, oper))
- goto options_error;
+ goto options_error_rcu;
if (!admin)
goto done;
sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
if (!sched_nest)
- goto options_error;
+ goto options_error_rcu;
if (dump_schedule(skb, admin))
goto admin_error;
@@ -2436,11 +2418,15 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_nest_end(skb, sched_nest);
done:
+ rcu_read_unlock();
return nla_nest_end(skb, nest);
admin_error:
nla_nest_cancel(skb, sched_nest);
+options_error_rcu:
+ rcu_read_unlock();
+
options_error:
nla_nest_cancel(skb, nest);
@@ -2548,6 +2534,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.dump_stats = taprio_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("taprio");
static struct notifier_block taprio_device_notifier = {
.notifier_call = taprio_dev_notifier,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index dd6b1a723bf7..dc26b22d53c7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
nb = 0;
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- len += segs->len;
+ seg_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_len = seg_len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
} else {
nb++;
+ len += seg_len;
}
}
sch->q.qlen += nb;
- if (nb > 1)
+ sch->qstats.backlog += len;
+ if (nb > 0) {
qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
- consume_skb(skb);
- return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ consume_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -608,6 +614,7 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.dump = tbf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("tbf");
static int __init tbf_module_init(void)
{
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 59304611dc00..8badec6d82a2 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,7 +78,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);
- if (q->q.qlen < dev->tx_queue_len) {
+ if (q->q.qlen < READ_ONCE(dev->tx_queue_len)) {
__skb_queue_tail(&q->q, skb);
return NET_XMIT_SUCCESS;
}
@@ -424,7 +424,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
} while ((q = NEXT_SLAVE(q)) != m->slaves);
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index c45c192b7878..0b0794f164cf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+ (unsigned long)sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index eb05131ff1dd..23359e522273 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -507,6 +507,7 @@ done:
}
static const struct inet_diag_handler sctp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = sctp_diag_dump,
.dump_one = sctp_diag_dump_one,
.idiag_get_info = sctp_diag_get_info,
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 17fcaa9b0df9..a8a254a5008e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -735,15 +735,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk = ep->base.sk;
struct net *net = sock_net(sk);
struct sctp_hashbucket *head;
+ int err = 0;
ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock);
if (sk->sk_reuseport) {
bool any = sctp_is_ep_boundall(sk);
struct sctp_endpoint *ep2;
struct list_head *list;
- int cnt = 0, err = 1;
+ int cnt = 0;
+
+ err = 1;
list_for_each(list, &ep->base.bind_addr.address_list)
cnt++;
@@ -761,24 +765,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
if (!err) {
err = reuseport_add_sock(sk, sk2, any);
if (err)
- return err;
+ goto out;
break;
} else if (err < 0) {
- return err;
+ goto out;
}
}
if (err) {
err = reuseport_alloc(sk, any);
if (err)
- return err;
+ goto out;
}
}
- write_lock(&head->lock);
hlist_add_head(&ep->node, &head->chain);
+out:
write_unlock(&head->lock);
- return 0;
+ return err;
}
/* Add an endpoint to the hash. Local BH-safe. */
@@ -803,10 +807,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
-
- write_lock(&head->lock);
hlist_del_init(&ep->node);
write_unlock(&head->lock);
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 24368f755ab1..a9ed2ccab1bd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -103,10 +103,10 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
ipv6_addr_equal(&addr->a.v6.sin6_addr,
&ifa->addr) &&
addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
- sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
break;
}
}
@@ -415,7 +415,7 @@ out:
if (!IS_ERR_OR_NULL(dst)) {
struct rt6_info *rt;
- rt = (struct rt6_info *)dst;
+ rt = dst_rt6_info(dst);
t->dst_cookie = rt6_get_cookie(rt);
pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
@@ -683,7 +683,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
struct sock *sk = &sp->inet.sk;
struct net *net = sock_net(sk);
struct net_device *dev = NULL;
- int type;
+ int type, res, bound_dev_if;
type = ipv6_addr_type(in6);
if (IPV6_ADDR_ANY == type)
@@ -697,14 +697,21 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
if (!(type & IPV6_ADDR_UNICAST))
return 0;
- if (sk->sk_bound_dev_if) {
- dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ rcu_read_lock();
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if) {
+ res = 0;
+ dev = dev_get_by_index_rcu(net, bound_dev_if);
if (!dev)
- return 0;
+ goto out;
}
- return ipv6_can_nonlocal_bind(net, &sp->inet) ||
- ipv6_chk_addr(net, in6, dev, 0);
+ res = ipv6_can_nonlocal_bind(net, &sp->inet) ||
+ ipv6_chk_addr(net, in6, dev, 0);
+
+out:
+ rcu_read_unlock();
+ return res;
}
/* This function checks if the address is a valid address to be used for
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 94c6dd53cd62..29727ed1008e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -43,7 +43,9 @@
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>
+#include <net/inet_sock.h>
#include <net/udp_tunnel.h>
+#include <net/inet_dscp.h>
#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
@@ -426,16 +428,19 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct dst_entry *dst = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
- u8 tos = READ_ONCE(inet_sk(sk)->tos);
+ dscp_t dscp;
if (t->dscp & SCTP_DSCP_SET_MASK)
- tos = t->dscp & SCTP_DSCP_VAL_MASK;
+ dscp = inet_dsfield_to_dscp(t->dscp);
+ else
+ dscp = inet_sk_dscp(inet_sk(sk));
+
memset(&_fl, 0x0, sizeof(_fl));
fl4->daddr = daddr->v4.sin_addr.s_addr;
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
if (asoc) {
- fl4->flowi4_tos = RT_TOS(tos);
+ fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
fl4->flowi4_scope = ip_sock_rt_scope(asoc->base.sk);
fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
fl4->fl4_sport = htons(asoc->base.bind_addr.port);
@@ -552,7 +557,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
struct flowi *fl)
{
union sctp_addr *saddr = &t->saddr;
- struct rtable *rt = (struct rtable *)t->dst;
+ struct rtable *rt = dst_rtable(t->dst);
if (rt) {
saddr->v4.sin_family = AF_INET;
@@ -737,6 +742,20 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
+
+ /* Avoid searching the queue or modifying it if there are no consumers,
+ * as it can lead to performance degradation if addresses are modified
+ * en masse.
+ *
+ * If the queue already contains some events, update it anyway to avoid
+ * ugly races between new sessions and new address events.
+ */
+ if (list_empty(&net->sctp.auto_asconf_splist) &&
+ list_empty(&net->sctp.addr_waitq)) {
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+ return;
+ }
+
/* Offsets existing events in addr_wq */
addrw = sctp_addr_wq_lookup(net, addr);
if (addrw) {
@@ -807,10 +826,10 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
if (addr->a.sa.sa_family == AF_INET &&
addr->a.v4.sin_addr.s_addr ==
ifa->ifa_local) {
- sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
break;
}
}
@@ -1085,7 +1104,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_reset_inner_mac_header(skb);
skb_reset_inner_transport_header(skb);
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
- udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
+ udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
sctp_sk(sk)->udp_port, t->encap_port, false, false);
return 0;
@@ -1495,17 +1514,11 @@ static __init int sctp_init(void)
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
- sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
- sizeof(struct sctp_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_bucket_cachep = KMEM_CACHE(sctp_bind_bucket, SLAB_HWCACHE_ALIGN);
if (!sctp_bucket_cachep)
goto out;
- sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
- sizeof(struct sctp_chunk),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_chunk_cachep = KMEM_CACHE(sctp_chunk, SLAB_HWCACHE_ALIGN);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 08fdf1251f46..a0524ba8d787 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -38,6 +38,7 @@
#include <linux/inet.h>
#include <linux/slab.h>
#include <net/sock.h>
+#include <net/proto_memory.h>
#include <net/inet_ecn.h>
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
@@ -2259,12 +2260,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
}
}
- /* Update socket peer label if first association. */
- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
/* Set temp so that it won't be added into hashtable */
new_asoc->temp = 1;
@@ -2273,6 +2268,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
*/
action = sctp_tietags_compare(new_asoc, asoc);
+ /* In cases C and E the association doesn't enter the ESTABLISHED
+ * state, so there is no need to call security_sctp_assoc_request().
+ */
+ switch (action) {
+ case 'A': /* Association restart. */
+ case 'B': /* Collision case B. */
+ case 'D': /* Collision case D. */
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request((struct sctp_association *)asoc,
+ chunk->head_skb ?: chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ break;
+ }
+
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
@@ -3740,7 +3751,7 @@ enum sctp_disposition sctp_sf_ootb(struct net *net,
}
ch = (struct sctp_chunkhdr *)ch_end;
- } while (ch_end < skb_tail_pointer(skb));
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
if (ootb_shut_ack)
return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6b9fcdb0952a..36ee34f483d7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+#include <net/rps.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
@@ -4833,10 +4834,14 @@ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
return sctp_connect(sock->sk, uaddr, addr_len, flags);
}
-/* FIXME: Write comments. */
+/* Only called when shutting down a listening SCTP socket. */
static int sctp_disconnect(struct sock *sk, int flags)
{
- return -EOPNOTSUPP; /* STUB */
+ if (!sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ return 0;
}
/* 4.1.4 accept() - TCP Style Syntax
@@ -4846,7 +4851,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
+static struct sock *sctp_accept(struct sock *sk, struct proto_accept_arg *arg)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
@@ -4865,12 +4870,13 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
goto out;
}
- if (!sctp_sstate(sk, LISTENING)) {
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN)) {
error = -EINVAL;
goto out;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
@@ -4881,7 +4887,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
*/
asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
- newsk = sp->pf->create_accept_sk(sk, asoc, kern);
+ newsk = sp->pf->create_accept_sk(sk, asoc, arg->kern);
if (!newsk) {
error = -ENOMEM;
goto out;
@@ -4898,7 +4904,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
out:
release_sock(sk);
- *err = error;
+ arg->err = error;
return newsk;
}
@@ -7118,6 +7124,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_assoc_ids *ids;
+ size_t ids_size;
u32 num = 0;
if (sctp_style(sk, TCP))
@@ -7130,11 +7137,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
num++;
}
- if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
+ ids_size = struct_size(ids, gaids_assoc_id, num);
+ if (len < ids_size)
return -EINVAL;
- len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
-
+ len = ids_size;
ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
if (unlikely(!ids))
return -ENOMEM;
@@ -8524,6 +8531,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
struct sctp_endpoint *ep = sp->ep;
struct crypto_shash *tfm = NULL;
char alg[32];
+ int err;
/* Allocate HMAC for generating cookie. */
if (!sp->hmac && sp->sctp_hmac_alg) {
@@ -8550,17 +8558,26 @@ static int sctp_listen_start(struct sock *sk, int backlog)
*/
inet_sk_set_state(sk, SCTP_SS_LISTENING);
if (!ep->base.bind_addr.port) {
- if (sctp_autobind(sk))
- return -EAGAIN;
+ if (sctp_autobind(sk)) {
+ err = -EAGAIN;
+ goto err;
+ }
} else {
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
- inet_sk_set_state(sk, SCTP_SS_CLOSED);
- return -EADDRINUSE;
+ err = -EADDRINUSE;
+ goto err;
}
}
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
- return sctp_hash_endpoint(ep);
+ err = sctp_hash_endpoint(ep);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ inet_sk_set_state(sk, SCTP_SS_CLOSED);
+ return err;
}
/*
@@ -9275,7 +9292,7 @@ void sctp_data_ready(struct sock *sk)
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
EPOLLRDNORM | EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
@@ -9391,7 +9408,8 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
}
err = -EINVAL;
- if (!sctp_sstate(sk, LISTENING))
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
break;
err = 0;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index c241cc552e8d..bfcff6d6a438 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -735,7 +735,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
* value SHOULD be the smallest TSN not acknowledged by the
* receiver of the request plus 2^31.
*/
- init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
+ init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1U << 31);
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
init_tsn, GFP_ATOMIC);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index f65d6f92afcb..8e1e97be4df7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -43,19 +43,19 @@ static unsigned long max_autoclose_max =
(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer,
+static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer,
+static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table sctp_table[] = {
@@ -80,8 +80,6 @@ static struct ctl_table sctp_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
-
- { /* sentinel */ }
};
/* The following index defines are used in sctp_sysctl_net_register().
@@ -384,14 +382,13 @@ static struct ctl_table sctp_net_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = &pf_expose_max,
},
-
- { /* sentinel */ }
};
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net,
+ sctp.sctp_hmac_alg);
struct ctl_table tbl;
bool changed = false;
char *none = "none";
@@ -433,10 +430,10 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.rto_min);
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
struct ctl_table tbl;
@@ -461,10 +458,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.rto_max);
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
struct ctl_table tbl;
@@ -489,7 +486,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (write)
@@ -499,10 +496,10 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
}
-static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.auth_enable);
struct ctl_table tbl;
int new_value, ret;
@@ -528,10 +525,10 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
+static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.udp_port);
unsigned int min = *(unsigned int *)ctl->extra1;
unsigned int max = *(unsigned int *)ctl->extra2;
struct ctl_table tbl;
@@ -569,10 +566,11 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
return ret;
}
-static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net,
+ sctp.probe_interval);
struct ctl_table tbl;
int ret, new_value;
@@ -597,6 +595,7 @@ static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
int sctp_sysctl_net_register(struct net *net)
{
+ size_t table_size = ARRAY_SIZE(sctp_net_table);
struct ctl_table *table;
int i;
@@ -604,7 +603,7 @@ int sctp_sysctl_net_register(struct net *net)
if (!table)
return -ENOMEM;
- for (i = 0; table[i].data; i++)
+ for (i = 0; i < table_size; i++)
table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
@@ -613,8 +612,7 @@ int sctp_sysctl_net_register(struct net *net)
table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
net->sctp.sysctl_header = register_net_sysctl_sz(net, "net/sctp",
- table,
- ARRAY_SIZE(sctp_net_table));
+ table, table_size);
if (net->sctp.sysctl_header == NULL) {
kfree(table);
return -ENOMEM;
@@ -624,7 +622,7 @@ int sctp_sysctl_net_register(struct net *net)
void sctp_sysctl_net_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->sctp.sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->sctp.sysctl_header);
diff --git a/net/shaper/Makefile b/net/shaper/Makefile
new file mode 100644
index 000000000000..54af7169a331
--- /dev/null
+++ b/net/shaper/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the net shaper infrastructure.
+#
+# Copyright (c) 2024, Red Hat, Inc.
+#
+
+obj-y += shaper.o shaper_nl_gen.o
diff --git a/net/shaper/shaper.c b/net/shaper/shaper.c
new file mode 100644
index 000000000000..7101a48bce54
--- /dev/null
+++ b/net/shaper/shaper.c
@@ -0,0 +1,1438 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include <net/net_shaper.h>
+
+#include "shaper_nl_gen.h"
+
+#include "../core/dev.h"
+
+#define NET_SHAPER_SCOPE_SHIFT 26
+#define NET_SHAPER_ID_MASK GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
+#define NET_SHAPER_SCOPE_MASK GENMASK(31, NET_SHAPER_SCOPE_SHIFT)
+
+#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
+
+struct net_shaper_hierarchy {
+ struct xarray shapers;
+};
+
+struct net_shaper_nl_ctx {
+ struct net_shaper_binding binding;
+ netdevice_tracker dev_tracker;
+ unsigned long start_index;
+};
+
+static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
+{
+ return &((struct net_shaper_nl_ctx *)ctx)->binding;
+}
+
+static void net_shaper_lock(struct net_shaper_binding *binding)
+{
+ switch (binding->type) {
+ case NET_SHAPER_BINDING_TYPE_NETDEV:
+ netdev_lock(binding->netdev);
+ break;
+ }
+}
+
+static void net_shaper_unlock(struct net_shaper_binding *binding)
+{
+ switch (binding->type) {
+ case NET_SHAPER_BINDING_TYPE_NETDEV:
+ netdev_unlock(binding->netdev);
+ break;
+ }
+}
+
+static struct net_shaper_hierarchy *
+net_shaper_hierarchy(struct net_shaper_binding *binding)
+{
+ /* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
+ if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
+ return READ_ONCE(binding->netdev->net_shaper_hierarchy);
+
+ /* No other type supported yet. */
+ return NULL;
+}
+
+static const struct net_shaper_ops *
+net_shaper_ops(struct net_shaper_binding *binding)
+{
+ if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
+ return binding->netdev->netdev_ops->net_shaper_ops;
+
+ /* No other type supported yet. */
+ return NULL;
+}
+
+/* Count the number of [multi] attributes of the given type. */
+static int net_shaper_list_len(struct genl_info *info, int type)
+{
+ struct nlattr *attr;
+ int rem, cnt = 0;
+
+ nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem)
+ cnt++;
+ return cnt;
+}
+
+static int net_shaper_handle_size(void)
+{
+ return nla_total_size(nla_total_size(sizeof(u32)) +
+ nla_total_size(sizeof(u32)));
+}
+
+static int net_shaper_fill_binding(struct sk_buff *msg,
+ const struct net_shaper_binding *binding,
+ u32 type)
+{
+ /* Should never happen, as currently only NETDEV is supported. */
+ if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
+ return -EINVAL;
+
+ if (nla_put_u32(msg, type, binding->netdev->ifindex))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int net_shaper_fill_handle(struct sk_buff *msg,
+ const struct net_shaper_handle *handle,
+ u32 type)
+{
+ struct nlattr *handle_attr;
+
+ if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
+ return 0;
+
+ handle_attr = nla_nest_start(msg, type);
+ if (!handle_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
+ (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
+ nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
+ goto handle_nest_cancel;
+
+ nla_nest_end(msg, handle_attr);
+ return 0;
+
+handle_nest_cancel:
+ nla_nest_cancel(msg, handle_attr);
+ return -EMSGSIZE;
+}
+
+static int
+net_shaper_fill_one(struct sk_buff *msg,
+ const struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ const struct genl_info *info)
+{
+ void *hdr;
+
+ hdr = genlmsg_iput(msg, info);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
+ net_shaper_fill_handle(msg, &shaper->parent,
+ NET_SHAPER_A_PARENT) ||
+ net_shaper_fill_handle(msg, &shaper->handle,
+ NET_SHAPER_A_HANDLE) ||
+ ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
+ nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
+ (shaper->bw_min &&
+ nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
+ (shaper->bw_max &&
+ nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
+ (shaper->burst &&
+ nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
+ (shaper->priority &&
+ nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
+ (shaper->weight &&
+ nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/* Initialize the context fetching the relevant device and
+ * acquiring a reference to it.
+ */
+static int net_shaper_ctx_setup(const struct genl_info *info, int type,
+ struct net_shaper_nl_ctx *ctx)
+{
+ struct net *ns = genl_info_net(info);
+ struct net_device *dev;
+ int ifindex;
+
+ if (GENL_REQ_ATTR_CHECK(info, type))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[type]);
+ dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
+ if (!dev) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
+ return -ENOENT;
+ }
+
+ if (!dev->netdev_ops->net_shaper_ops) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
+ netdev_put(dev, &ctx->dev_tracker);
+ return -EOPNOTSUPP;
+ }
+
+ ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
+ ctx->binding.netdev = dev;
+ return 0;
+}
+
+static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
+{
+ if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
+ netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
+}
+
+static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
+{
+ return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
+ FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
+}
+
+static void net_shaper_index_to_handle(u32 index,
+ struct net_shaper_handle *handle)
+{
+ handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
+ handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
+}
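+
+/* Editorial illustration (not part of the original patch): with
+ * NET_SHAPER_SCOPE_SHIFT == 26, a handle { .scope = S, .id = N } maps to the
+ * xarray index (S << 26) | (N & NET_SHAPER_ID_MASK), and
+ * net_shaper_index_to_handle() recovers the same scope/id pair from it.
+ */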
+
+static void net_shaper_default_parent(const struct net_shaper_handle *handle,
+ struct net_shaper_handle *parent)
+{
+ switch (handle->scope) {
+ case NET_SHAPER_SCOPE_UNSPEC:
+ case NET_SHAPER_SCOPE_NETDEV:
+ case __NET_SHAPER_SCOPE_MAX:
+ parent->scope = NET_SHAPER_SCOPE_UNSPEC;
+ break;
+
+ case NET_SHAPER_SCOPE_QUEUE:
+ case NET_SHAPER_SCOPE_NODE:
+ parent->scope = NET_SHAPER_SCOPE_NETDEV;
+ break;
+ }
+ parent->id = 0;
+}
+
+/*
+ * MARK_0 is already in use due to XA_FLAGS_ALLOC; that mark can't be reused
+ * here, as it's cleared by xa_store().
+ */
+#define NET_SHAPER_NOT_VALID XA_MARK_1
+
+static struct net_shaper *
+net_shaper_lookup(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ u32 index = net_shaper_handle_to_index(handle);
+
+ if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
+ NET_SHAPER_NOT_VALID))
+ return NULL;
+
+ return xa_load(&hierarchy->shapers, index);
+}
+
+/* Allocate the per-device shaper hierarchy container on demand.
+ * Called under the net shaper lock.
+ */
+static struct net_shaper_hierarchy *
+net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+
+ if (hierarchy)
+ return hierarchy;
+
+ hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL);
+ if (!hierarchy)
+ return NULL;
+
+ /* The flag is required for ID allocation */
+ xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);
+
+ switch (binding->type) {
+ case NET_SHAPER_BINDING_TYPE_NETDEV:
+ /* Pairs with READ_ONCE in net_shaper_hierarchy. */
+ WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
+ break;
+ }
+ return hierarchy;
+}
+
+/* Prepare the hierarchy container to actually insert the given shaper, doing
+ * the needed allocations in advance.
+ */
+static int net_shaper_pre_insert(struct net_shaper_binding *binding,
+ struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper *prev, *cur;
+ bool id_allocated = false;
+ int ret, index;
+
+ if (!hierarchy)
+ return -ENOMEM;
+
+ index = net_shaper_handle_to_index(handle);
+ cur = xa_load(&hierarchy->shapers, index);
+ if (cur)
+ return 0;
+
+ /* Allocate a new id, if needed. */
+ if (handle->scope == NET_SHAPER_SCOPE_NODE &&
+ handle->id == NET_SHAPER_ID_UNSPEC) {
+ u32 min, max;
+
+ handle->id = NET_SHAPER_ID_MASK - 1;
+ max = net_shaper_handle_to_index(handle);
+ handle->id = 0;
+ min = net_shaper_handle_to_index(handle);
+
+ ret = xa_alloc(&hierarchy->shapers, &index, NULL,
+ XA_LIMIT(min, max), GFP_KERNEL);
+ if (ret < 0) {
+ NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
+ return ret;
+ }
+
+ net_shaper_index_to_handle(index, handle);
+ id_allocated = true;
+ }
+
+ cur = kzalloc(sizeof(*cur), GFP_KERNEL);
+ if (!cur) {
+ ret = -ENOMEM;
+ goto free_id;
+ }
+
+ /* Mark 'tentative' shaper inside the hierarchy container.
+ * xa_set_mark is a no-op if the previous store fails.
+ */
+ xa_lock(&hierarchy->shapers);
+ prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
+ __xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
+ xa_unlock(&hierarchy->shapers);
+ if (xa_err(prev)) {
+ NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
+ kfree_rcu(cur, rcu);
+ ret = xa_err(prev);
+ goto free_id;
+ }
+ return 0;
+
+free_id:
+ if (id_allocated)
+ xa_erase(&hierarchy->shapers, index);
+ return ret;
+}
+
+/* Commit the tentative insert with the actual values.
+ * Must be called only after a successful net_shaper_pre_insert().
+ */
+static void net_shaper_commit(struct net_shaper_binding *binding,
+ int nr_shapers, const struct net_shaper *shapers)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper *cur;
+ int index;
+ int i;
+
+ xa_lock(&hierarchy->shapers);
+ for (i = 0; i < nr_shapers; ++i) {
+ index = net_shaper_handle_to_index(&shapers[i].handle);
+
+ cur = xa_load(&hierarchy->shapers, index);
+ if (WARN_ON_ONCE(!cur))
+ continue;
+
+ /* Successful update: drop the tentative mark
+ * and update the hierarchy container.
+ */
+ __xa_clear_mark(&hierarchy->shapers, index,
+ NET_SHAPER_NOT_VALID);
+ *cur = shapers[i];
+ }
+ xa_unlock(&hierarchy->shapers);
+}
+
+/* Rollback all the tentative inserts from the hierarchy. */
+static void net_shaper_rollback(struct net_shaper_binding *binding)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper *cur;
+ unsigned long index;
+
+ if (!hierarchy)
+ return;
+
+ xa_lock(&hierarchy->shapers);
+ xa_for_each_marked(&hierarchy->shapers, index, cur,
+ NET_SHAPER_NOT_VALID) {
+ __xa_erase(&hierarchy->shapers, index);
+ kfree(cur);
+ }
+ xa_unlock(&hierarchy->shapers);
+}
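+
+/* Editorial illustration (not part of the original patch): the helpers above
+ * form a two-phase protocol, used e.g. by net_shaper_nl_set_doit() below:
+ *
+ *   net_shaper_pre_insert(binding, &handle, extack);   reserve a slot
+ *   ret = ops->set(binding, &shaper, extack);          driver callback
+ *   if (ret)
+ *           net_shaper_rollback(binding);              drop tentative entries
+ *   else
+ *           net_shaper_commit(binding, 1, &shaper);    publish the new values
+ */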
+
+static int net_shaper_parse_handle(const struct nlattr *attr,
+ const struct genl_info *info,
+ struct net_shaper_handle *handle)
+{
+ struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
+ struct nlattr *id_attr;
+ u32 id = 0;
+ int ret;
+
+ ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
+ net_shaper_handle_nl_policy, info->extack);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
+ NET_SHAPER_A_HANDLE_SCOPE))
+ return -EINVAL;
+
+ handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);
+
+ /* The default id for NODE scope shapers is an invalid one
+ * to help the 'group' operation discriminate between new
+ * NODE shaper creation (ID_UNSPEC) and reuse of existing
+ * shaper (any other value).
+ */
+ id_attr = tb[NET_SHAPER_A_HANDLE_ID];
+ if (id_attr)
+ id = nla_get_u32(id_attr);
+ else if (handle->scope == NET_SHAPER_SCOPE_NODE)
+ id = NET_SHAPER_ID_UNSPEC;
+
+ handle->id = id;
+ return 0;
+}
+
+static int net_shaper_validate_caps(struct net_shaper_binding *binding,
+ struct nlattr **tb,
+ const struct genl_info *info,
+ struct net_shaper *shaper)
+{
+ const struct net_shaper_ops *ops = net_shaper_ops(binding);
+ struct nlattr *bad = NULL;
+ unsigned long caps = 0;
+
+ ops->capabilities(binding, shaper->handle.scope, &caps);
+
+ if (tb[NET_SHAPER_A_PRIORITY] &&
+ !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
+ bad = tb[NET_SHAPER_A_PRIORITY];
+ if (tb[NET_SHAPER_A_WEIGHT] &&
+ !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
+ bad = tb[NET_SHAPER_A_WEIGHT];
+ if (tb[NET_SHAPER_A_BW_MIN] &&
+ !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
+ bad = tb[NET_SHAPER_A_BW_MIN];
+ if (tb[NET_SHAPER_A_BW_MAX] &&
+ !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
+ bad = tb[NET_SHAPER_A_BW_MAX];
+ if (tb[NET_SHAPER_A_BURST] &&
+ !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
+ bad = tb[NET_SHAPER_A_BURST];
+
+ if (!caps)
+ bad = tb[NET_SHAPER_A_HANDLE];
+
+ if (bad) {
+ NL_SET_BAD_ATTR(info->extack, bad);
+ return -EOPNOTSUPP;
+ }
+
+ if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
+ binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
+ shaper->handle.id >= binding->netdev->real_num_tx_queues) {
+ NL_SET_ERR_MSG_FMT(info->extack,
+ "Not existing queue id %d max %d",
+ shaper->handle.id,
+ binding->netdev->real_num_tx_queues);
+ return -ENOENT;
+ }
+
+ /* The metric is really used only if there is *any* rate-related
+ * setting, either in the current attribute set or in pre-existing
+ * values.
+ */
+ if (shaper->burst || shaper->bw_min || shaper->bw_max) {
+ u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
+ shaper->metric;
+
+ /* The metric test can fail even when the user did not
+ * specify the METRIC attribute. Pointing to a rate-related
+ * attribute would be confusing, as the attribute itself
+ * could indeed be supported, just with a different metric.
+ * Be more specific.
+ */
+ if (!(caps & BIT(metric_cap))) {
+ NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
+ shaper->metric);
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+static int net_shaper_parse_info(struct net_shaper_binding *binding,
+ struct nlattr **tb,
+ const struct genl_info *info,
+ struct net_shaper *shaper,
+ bool *exists)
+{
+ struct net_shaper *old;
+ int ret;
+
+ /* The shaper handle is the only mandatory attribute. */
+ if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
+ return -EINVAL;
+
+ ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
+ &shaper->handle);
+ if (ret)
+ return ret;
+
+ if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
+ NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+ return -EINVAL;
+ }
+
+ /* Fetch the existing shaper, if any, so that user-provided info will
+ * incrementally update the existing shaper configuration.
+ */
+ old = net_shaper_lookup(binding, &shaper->handle);
+ if (old)
+ *shaper = *old;
+ *exists = !!old;
+
+ if (tb[NET_SHAPER_A_METRIC])
+ shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);
+
+ if (tb[NET_SHAPER_A_BW_MIN])
+ shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);
+
+ if (tb[NET_SHAPER_A_BW_MAX])
+ shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);
+
+ if (tb[NET_SHAPER_A_BURST])
+ shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);
+
+ if (tb[NET_SHAPER_A_PRIORITY])
+ shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);
+
+ if (tb[NET_SHAPER_A_WEIGHT])
+ shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);
+
+ ret = net_shaper_validate_caps(binding, tb, info, shaper);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ const struct net_shaper_ops *ops = net_shaper_ops(binding);
+ unsigned long caps = 0;
+
+ ops->capabilities(binding, shaper->handle.scope, &caps);
+ if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Nesting not supported for scope %d",
+ shaper->handle.scope);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/* Fetch the existing leaf and update it with the user-provided
+ * attributes.
+ */
+static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
+ const struct nlattr *attr,
+ const struct genl_info *info,
+ const struct net_shaper *node,
+ struct net_shaper *shaper)
+{
+ struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
+ bool exists;
+ int ret;
+
+ ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
+ net_shaper_leaf_info_nl_policy, info->extack);
+ if (ret < 0)
+ return ret;
+
+ ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
+ if (ret < 0)
+ return ret;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
+ NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+ return -EINVAL;
+ }
+
+ if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
+ ret = net_shaper_validate_nesting(binding, shaper,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!exists)
+ net_shaper_default_parent(&shaper->handle, &shaper->parent);
+ return 0;
+}
+
+/* Like net_shaper_parse_info(), but additionally allows the user to specify
+ * the shaper's parent handle.
+ */
+static int net_shaper_parse_node(struct net_shaper_binding *binding,
+ struct nlattr **tb,
+ const struct genl_info *info,
+ struct net_shaper *shaper)
+{
+ bool exists;
+ int ret;
+
+ ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
+ if (ret)
+ return ret;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
+ shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+ return -EINVAL;
+ }
+
+ if (tb[NET_SHAPER_A_PARENT]) {
+ ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
+ &shaper->parent);
+ if (ret)
+ return ret;
+
+ if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
+ shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int net_shaper_generic_pre(struct genl_info *info, int type)
+{
+ struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;
+
+ BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));
+
+ return net_shaper_ctx_setup(info, type, ctx);
+}
+
+int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
+}
+
+static void net_shaper_generic_post(struct genl_info *info)
+{
+ net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
+}
+
+void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ net_shaper_generic_post(info);
+}
+
+int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
+{
+ struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
+ const struct genl_info *info = genl_info_dump(cb);
+
+ return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
+}
+
+int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
+{
+ net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
+ return 0;
+}
+
+int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX);
+}
+
+void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ net_shaper_generic_post(info);
+}
+
+int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
+{
+ struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
+
+ return net_shaper_ctx_setup(genl_info_dump(cb),
+ NET_SHAPER_A_CAPS_IFINDEX, ctx);
+}
+
+int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
+{
+ struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
+
+ net_shaper_ctx_cleanup(ctx);
+ return 0;
+}
+
+int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_shaper_binding *binding;
+ struct net_shaper_handle handle;
+ struct net_shaper *shaper;
+ struct sk_buff *msg;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
+ return -EINVAL;
+
+ binding = net_shaper_binding_from_ctx(info->ctx);
+ ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
+ &handle);
+ if (ret < 0)
+ return ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ shaper = net_shaper_lookup(binding, &handle);
+ if (!shaper) {
+ NL_SET_BAD_ATTR(info->extack,
+ info->attrs[NET_SHAPER_A_HANDLE]);
+ rcu_read_unlock();
+ ret = -ENOENT;
+ goto free_msg;
+ }
+
+ ret = net_shaper_fill_one(msg, binding, shaper, info);
+ rcu_read_unlock();
+ if (ret)
+ goto free_msg;
+
+ ret = genlmsg_reply(msg, info);
+ if (ret)
+ goto free_msg;
+
+ return 0;
+
+free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+int net_shaper_nl_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net_shaper_hierarchy *hierarchy;
+ struct net_shaper_binding *binding;
+ struct net_shaper *shaper;
+ int ret = 0;
+
+ /* Don't error out dumps performed before any set operation. */
+ binding = net_shaper_binding_from_ctx(ctx);
+ hierarchy = net_shaper_hierarchy(binding);
+ if (!hierarchy)
+ return 0;
+
+ rcu_read_lock();
+ for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
+ U32_MAX, XA_PRESENT)); ctx->start_index++) {
+ ret = net_shaper_fill_one(skb, binding, shaper, info);
+ if (ret)
+ break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_shaper_hierarchy *hierarchy;
+ struct net_shaper_binding *binding;
+ const struct net_shaper_ops *ops;
+ struct net_shaper_handle handle;
+ struct net_shaper shaper = {};
+ bool exists;
+ int ret;
+
+ binding = net_shaper_binding_from_ctx(info->ctx);
+
+ net_shaper_lock(binding);
+ ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
+ &exists);
+ if (ret)
+ goto unlock;
+
+ if (!exists)
+ net_shaper_default_parent(&shaper.handle, &shaper.parent);
+
+ hierarchy = net_shaper_hierarchy_setup(binding);
+ if (!hierarchy) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* The 'set' operation can't create node-scope shapers. */
+ handle = shaper.handle;
+ if (handle.scope == NET_SHAPER_SCOPE_NODE &&
+ !net_shaper_lookup(binding, &handle)) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = net_shaper_pre_insert(binding, &handle, info->extack);
+ if (ret)
+ goto unlock;
+
+ ops = net_shaper_ops(binding);
+ ret = ops->set(binding, &shaper, info->extack);
+ if (ret) {
+ net_shaper_rollback(binding);
+ goto unlock;
+ }
+
+ net_shaper_commit(binding, 1, &shaper);
+
+unlock:
+ net_shaper_unlock(binding);
+ return ret;
+}
+
+static int __net_shaper_delete(struct net_shaper_binding *binding,
+ struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper_handle parent_handle, handle = shaper->handle;
+ const struct net_shaper_ops *ops = net_shaper_ops(binding);
+ int ret;
+
+again:
+ parent_handle = shaper->parent;
+
+ ret = ops->delete(binding, &handle, extack);
+ if (ret < 0)
+ return ret;
+
+ xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
+ kfree_rcu(shaper, rcu);
+
+ /* Also delete the parent, if it is left with no leaves. */
+ if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
+ shaper = net_shaper_lookup(binding, &parent_handle);
+ if (shaper && !--shaper->leaves) {
+ handle = parent_handle;
+ goto again;
+ }
+ }
+ return 0;
+}
+
+static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
+ const struct net_shaper_handle *b)
+{
+ /* Must avoid holes in struct net_shaper_handle. */
+ BUILD_BUG_ON(sizeof(*a) != 8);
+
+ return memcmp(a, b, sizeof(*a));
+}
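A quick aside on the memcmp()-based comparison above: it is only safe because the handle layout has no padding holes, which the BUILD_BUG_ON() enforces. A minimal, self-contained sketch (the struct below is hypothetical, not from this patch) of how an unchecked hole would break such a comparison:

#include <string.h>

/* Hypothetical layout with a hole: on most ABIs three padding bytes
 * follow 'scope', and memcmp() compares them too.
 */
struct padded_handle {
	unsigned char scope;
	unsigned int id;
};

static int padded_handle_cmp(const struct padded_handle *a,
			     const struct padded_handle *b)
{
	/* Two logically equal handles can compare as different if the
	 * padding bytes were never zeroed; hence the BUILD_BUG_ON()
	 * above, which rejects any layout whose size is not exactly 8.
	 */
	return memcmp(a, b, sizeof(*a));
}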
+
+static int net_shaper_parent_from_leaves(int leaves_count,
+ const struct net_shaper *leaves,
+ struct net_shaper *node,
+ struct netlink_ext_ack *extack)
+{
+ struct net_shaper_handle parent = leaves[0].parent;
+ int i;
+
+ for (i = 1; i < leaves_count; ++i) {
+ if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
+ NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
+ return -EINVAL;
+ }
+ }
+
+ node->parent = parent;
+ return 0;
+}
+
+static int __net_shaper_group(struct net_shaper_binding *binding,
+ bool update_node, int leaves_count,
+ struct net_shaper *leaves,
+ struct net_shaper *node,
+ struct netlink_ext_ack *extack)
+{
+ const struct net_shaper_ops *ops = net_shaper_ops(binding);
+ struct net_shaper_handle leaf_handle;
+ struct net_shaper *parent = NULL;
+ bool new_node = false;
+ int i, ret;
+
+ if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
+ new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;
+
+ if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
+ /* The related attribute is not available when
+ * reaching here from the delete() op.
+ */
+ NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
+ node->handle.scope, node->handle.id);
+ return -ENOENT;
+ }
+
+ /* When unspecified, the node parent scope is inherited from
+ * the leaves.
+ */
+ if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
+ ret = net_shaper_parent_from_leaves(leaves_count,
+ leaves, node,
+ extack);
+ if (ret)
+ return ret;
+ }
+
+ } else {
+ net_shaper_default_parent(&node->handle, &node->parent);
+ }
+
+ if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
+ parent = net_shaper_lookup(binding, &node->parent);
+ if (!parent) {
+ NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
+ node->parent.scope, node->parent.id);
+ return -ENOENT;
+ }
+
+ ret = net_shaper_validate_nesting(binding, node, extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (update_node) {
+ /* For a newly created node-scope shaper, the following will
+ * update the handle due to ID allocation.
+ */
+ ret = net_shaper_pre_insert(binding, &node->handle, extack);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < leaves_count; ++i) {
+ leaf_handle = leaves[i].handle;
+
+ ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
+ if (ret)
+ goto rollback;
+
+ if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
+ continue;
+
+ /* The leaf shapers will be nested under the node; update the
+ * linking accordingly.
+ */
+ leaves[i].parent = node->handle;
+ node->leaves++;
+ }
+
+ ret = ops->group(binding, leaves_count, leaves, node, extack);
+ if (ret < 0)
+ goto rollback;
+
+ /* The node's parent gains a new leaf only when the node itself
+ * is created by this group operation.
+ */
+ if (new_node && parent)
+ parent->leaves++;
+ if (update_node)
+ net_shaper_commit(binding, 1, node);
+ net_shaper_commit(binding, leaves_count, leaves);
+ return 0;
+
+rollback:
+ net_shaper_rollback(binding);
+ return ret;
+}
+
+static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper *cur, *leaves, node = {};
+ int ret, leaves_count = 0;
+ unsigned long index;
+ bool update_node;
+
+ if (!shaper->leaves)
+ return 0;
+
+ /* Fetch the new node information. */
+ node.handle = shaper->parent;
+ cur = net_shaper_lookup(binding, &node.handle);
+ if (cur) {
+ node = *cur;
+ } else {
+ /* A NODE-scope shaper can be nested only under the NETDEV-scope
+ * shaper without creating the latter; this check can fail only
+ * if the data is in an inconsistent state.
+ */
+ if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
+ return -EINVAL;
+ }
+
+ leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper),
+ GFP_KERNEL);
+ if (!leaves)
+ return -ENOMEM;
+
+ /* Build the leaves array. */
+ xa_for_each(&hierarchy->shapers, index, cur) {
+ if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
+ continue;
+
+ if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ leaves[leaves_count++] = *cur;
+ }
+
+ /* When re-linking to the netdev shaper, avoid the implicit creation
+ * of a new node, which would be surprising since the user is
+ * doing a delete operation.
+ */
+ update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
+ ret = __net_shaper_group(binding, update_node, leaves_count,
+ leaves, &node, extack);
+
+free:
+ kfree(leaves);
+ return ret;
+}
+
+int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_shaper_hierarchy *hierarchy;
+ struct net_shaper_binding *binding;
+ struct net_shaper_handle handle;
+ struct net_shaper *shaper;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
+ return -EINVAL;
+
+ binding = net_shaper_binding_from_ctx(info->ctx);
+
+ net_shaper_lock(binding);
+ ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
+ &handle);
+ if (ret)
+ goto unlock;
+
+ hierarchy = net_shaper_hierarchy(binding);
+ if (!hierarchy) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ shaper = net_shaper_lookup(binding, &handle);
+ if (!shaper) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (handle.scope == NET_SHAPER_SCOPE_NODE) {
+ ret = net_shaper_pre_del_node(binding, shaper, info->extack);
+ if (ret)
+ goto unlock;
+ }
+
+ ret = __net_shaper_delete(binding, shaper, info->extack);
+
+unlock:
+ net_shaper_unlock(binding);
+ return ret;
+}
+
+static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct genl_info *info,
+ struct sk_buff *msg)
+{
+ void *hdr;
+
+ hdr = genlmsg_iput(msg, info);
+ if (!hdr)
+ goto free_msg;
+
+ if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
+ net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
+ goto free_msg;
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+free_msg:
+ /* Should never happen as msg is pre-allocated with enough space. */
+ WARN_ONCE(true, "calculated message payload length (%d)",
+ net_shaper_handle_size());
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_shaper **old_nodes, *leaves, node = {};
+ struct net_shaper_hierarchy *hierarchy;
+ struct net_shaper_binding *binding;
+ int i, ret, rem, leaves_count;
+ int old_nodes_count = 0;
+ struct sk_buff *msg;
+ struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
+ return -EINVAL;
+
+ binding = net_shaper_binding_from_ctx(info->ctx);
+
+ /* The group operation is optional. */
+ if (!net_shaper_ops(binding)->group)
+ return -EOPNOTSUPP;
+
+ net_shaper_lock(binding);
+ leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
+ if (!leaves_count) {
+ NL_SET_BAD_ATTR(info->extack,
+ info->attrs[NET_SHAPER_A_LEAVES]);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
+ sizeof(struct net_shaper *), GFP_KERNEL);
+ if (!leaves) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ old_nodes = (void *)&leaves[leaves_count];
+
+ ret = net_shaper_parse_node(binding, info->attrs, info, &node);
+ if (ret)
+ goto free_leaves;
+
+ i = 0;
+ nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ if (WARN_ON_ONCE(i >= leaves_count))
+ goto free_leaves;
+
+ ret = net_shaper_parse_leaf(binding, attr, info,
+ &node, &leaves[i]);
+ if (ret)
+ goto free_leaves;
+ i++;
+ }
+
+ /* Prepare the msg reply in advance, to avoid device operation
+ * rollback on allocation failure.
+ */
+ msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto free_leaves;
+ }
+
+ hierarchy = net_shaper_hierarchy_setup(binding);
+ if (!hierarchy) {
+ ret = -ENOMEM;
+ goto free_msg;
+ }
+
+ /* Record the node shapers that this group() operation can make
+ * childless for later cleanup.
+ */
+ for (i = 0; i < leaves_count; i++) {
+ if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
+ net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
+ struct net_shaper *tmp;
+
+ tmp = net_shaper_lookup(binding, &leaves[i].parent);
+ if (!tmp)
+ continue;
+
+ old_nodes[old_nodes_count++] = tmp;
+ }
+ }
+
+ ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
+ info->extack);
+ if (ret)
+ goto free_msg;
+
+ /* Check if we need to delete any node left childless by the new
+ * leaf linkage.
+ */
+ for (i = 0; i < old_nodes_count; ++i) {
+ struct net_shaper *tmp = old_nodes[i];
+
+ if (--tmp->leaves > 0)
+ continue;
+
+ /* Errors here are not fatal: the grouping operation is
+ * completed, and user-space can still explicitly clean-up
+ * left-over nodes.
+ */
+ __net_shaper_delete(binding, tmp, info->extack);
+ }
+
+ ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
+ if (ret)
+ GENL_SET_ERR_MSG_FMT(info, "Can't send reply");
+
+free_leaves:
+ kfree(leaves);
+
+unlock:
+ net_shaper_unlock(binding);
+ return ret;
+
+free_msg:
+ kfree_skb(msg);
+ goto free_leaves;
+}
+
+static int
+net_shaper_cap_fill_one(struct sk_buff *msg,
+ struct net_shaper_binding *binding,
+ enum net_shaper_scope scope, unsigned long flags,
+ const struct genl_info *info)
+{
+ unsigned long cur;
+ void *hdr;
+
+ hdr = genlmsg_iput(msg, info);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) ||
+ nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope))
+ goto nla_put_failure;
+
+ for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
+ cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
+ if (flags & BIT(cur) && nla_put_flag(msg, cur))
+ goto nla_put_failure;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_shaper_binding *binding;
+ const struct net_shaper_ops *ops;
+ enum net_shaper_scope scope;
+ unsigned long flags = 0;
+ struct sk_buff *msg;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE))
+ return -EINVAL;
+
+ binding = net_shaper_binding_from_ctx(info->ctx);
+ scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]);
+ ops = net_shaper_ops(binding);
+ ops->capabilities(binding, scope, &flags);
+ if (!flags)
+ return -EOPNOTSUPP;
+
+ msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info);
+ if (ret)
+ goto free_msg;
+
+ ret = genlmsg_reply(msg, info);
+ if (ret)
+ goto free_msg;
+ return 0;
+
+free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net_shaper_binding *binding;
+ const struct net_shaper_ops *ops;
+ enum net_shaper_scope scope;
+ int ret;
+
+ binding = net_shaper_binding_from_ctx(cb->ctx);
+ ops = net_shaper_ops(binding);
+ for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) {
+ unsigned long flags = 0;
+
+ ops->capabilities(binding, scope, &flags);
+ if (!flags)
+ continue;
+
+ ret = net_shaper_cap_fill_one(skb, binding, scope, flags,
+ info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void net_shaper_flush(struct net_shaper_binding *binding)
+{
+ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
+ struct net_shaper *cur;
+ unsigned long index;
+
+ if (!hierarchy)
+ return;
+
+ net_shaper_lock(binding);
+ xa_lock(&hierarchy->shapers);
+ xa_for_each(&hierarchy->shapers, index, cur) {
+ __xa_erase(&hierarchy->shapers, index);
+ kfree(cur);
+ }
+ xa_unlock(&hierarchy->shapers);
+ net_shaper_unlock(binding);
+
+ kfree(hierarchy);
+}
+
+void net_shaper_flush_netdev(struct net_device *dev)
+{
+ struct net_shaper_binding binding = {
+ .type = NET_SHAPER_BINDING_TYPE_NETDEV,
+ .netdev = dev,
+ };
+
+ net_shaper_flush(&binding);
+}
+
+void net_shaper_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq)
+{
+ struct net_shaper_hierarchy *hierarchy;
+ struct net_shaper_binding binding;
+ int i;
+
+ binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
+ binding.netdev = dev;
+ hierarchy = net_shaper_hierarchy(&binding);
+ if (!hierarchy)
+ return;
+
+ /* Only drivers implementing shaper support ensure
+ * the lock is acquired in advance.
+ */
+ netdev_assert_locked(dev);
+
+ /* Take action only when decreasing the tx queue number. */
+ for (i = txq; i < dev->real_num_tx_queues; ++i) {
+ struct net_shaper_handle handle, parent_handle;
+ struct net_shaper *shaper;
+ u32 index;
+
+ handle.scope = NET_SHAPER_SCOPE_QUEUE;
+ handle.id = i;
+ shaper = net_shaper_lookup(&binding, &handle);
+ if (!shaper)
+ continue;
+
+ /* Don't touch the H/W for the queue shaper; the driver has already
+ * deleted the queue and related resources.
+ */
+ parent_handle = shaper->parent;
+ index = net_shaper_handle_to_index(&handle);
+ xa_erase(&hierarchy->shapers, index);
+ kfree_rcu(shaper, rcu);
+
+ /* The recursion on parent does the full job. */
+ if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
+ continue;
+
+ shaper = net_shaper_lookup(&binding, &parent_handle);
+ if (shaper && !--shaper->leaves)
+ __net_shaper_delete(&binding, shaper, NULL);
+ }
+}
+
+static int __init shaper_init(void)
+{
+ return genl_register_family(&net_shaper_nl_family);
+}
+
+subsys_initcall(shaper_init);
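For orientation, here is a minimal driver-side sketch of the ops invoked above (ops->set, ops->delete, ops->group, ops->capabilities). Every name prefixed with foo_ is invented for illustration; the exact prototypes live in include/net/net_shaper.h, and only the callback shapes are inferred from the call sites in this file.

static int foo_shaper_set(struct net_shaper_binding *binding,
			  const struct net_shaper *shaper,
			  struct netlink_ext_ack *extack)
{
	/* Program the H/W rate limiter for shaper->handle here. */
	return 0;
}

static int foo_shaper_delete(struct net_shaper_binding *binding,
			     const struct net_shaper_handle *handle,
			     struct netlink_ext_ack *extack)
{
	/* Tear down the H/W configuration for this handle. */
	return 0;
}

static void foo_shaper_cap(struct net_shaper_binding *binding,
			   enum net_shaper_scope scope,
			   unsigned long *flags)
{
	/* Report one flag bit per supported capability attribute. */
	if (scope == NET_SHAPER_SCOPE_QUEUE)
		*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
}

static const struct net_shaper_ops foo_shaper_ops = {
	.set		= foo_shaper_set,
	.delete		= foo_shaper_delete,
	.capabilities	= foo_shaper_cap,
	/* .group is optional; the core returns -EOPNOTSUPP when absent. */
};

A driver exposing such an ops table (typically hooked up through its netdev ops) makes the set/delete/group/cap-get netlink commands above reachable for that device.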
diff --git a/net/shaper/shaper_nl_gen.c b/net/shaper/shaper_nl_gen.c
new file mode 100644
index 000000000000..204c8ae8c7b1
--- /dev/null
+++ b/net/shaper/shaper_nl_gen.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/net_shaper.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "shaper_nl_gen.h"
+
+#include <uapi/linux/net_shaper.h>
+
+/* Common nested types */
+const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1] = {
+ [NET_SHAPER_A_HANDLE_SCOPE] = NLA_POLICY_MAX(NLA_U32, 3),
+ [NET_SHAPER_A_HANDLE_ID] = { .type = NLA_U32, },
+};
+
+const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1] = {
+ [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+ [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, },
+ [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_GET - do */
+static const struct nla_policy net_shaper_get_do_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+ [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+ [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+};
+
+/* NET_SHAPER_CMD_GET - dump */
+static const struct nla_policy net_shaper_get_dump_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+ [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_SET - do */
+static const struct nla_policy net_shaper_set_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+ [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+ [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+ [NET_SHAPER_A_METRIC] = NLA_POLICY_MAX(NLA_U32, 1),
+ [NET_SHAPER_A_BW_MIN] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_BW_MAX] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_BURST] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, },
+ [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, },
+};
+
+/* NET_SHAPER_CMD_DELETE - do */
+static const struct nla_policy net_shaper_delete_nl_policy[NET_SHAPER_A_IFINDEX + 1] = {
+ [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+ [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+};
+
+/* NET_SHAPER_CMD_GROUP - do */
+static const struct nla_policy net_shaper_group_nl_policy[NET_SHAPER_A_LEAVES + 1] = {
+ [NET_SHAPER_A_IFINDEX] = { .type = NLA_U32, },
+ [NET_SHAPER_A_PARENT] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+ [NET_SHAPER_A_HANDLE] = NLA_POLICY_NESTED(net_shaper_handle_nl_policy),
+ [NET_SHAPER_A_METRIC] = NLA_POLICY_MAX(NLA_U32, 1),
+ [NET_SHAPER_A_BW_MIN] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_BW_MAX] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_BURST] = { .type = NLA_UINT, },
+ [NET_SHAPER_A_PRIORITY] = { .type = NLA_U32, },
+ [NET_SHAPER_A_WEIGHT] = { .type = NLA_U32, },
+ [NET_SHAPER_A_LEAVES] = NLA_POLICY_NESTED(net_shaper_leaf_info_nl_policy),
+};
+
+/* NET_SHAPER_CMD_CAP_GET - do */
+static const struct nla_policy net_shaper_cap_get_do_nl_policy[NET_SHAPER_A_CAPS_SCOPE + 1] = {
+ [NET_SHAPER_A_CAPS_IFINDEX] = { .type = NLA_U32, },
+ [NET_SHAPER_A_CAPS_SCOPE] = NLA_POLICY_MAX(NLA_U32, 3),
+};
+
+/* NET_SHAPER_CMD_CAP_GET - dump */
+static const struct nla_policy net_shaper_cap_get_dump_nl_policy[NET_SHAPER_A_CAPS_IFINDEX + 1] = {
+ [NET_SHAPER_A_CAPS_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* Ops table for net_shaper */
+static const struct genl_split_ops net_shaper_nl_ops[] = {
+ {
+ .cmd = NET_SHAPER_CMD_GET,
+ .pre_doit = net_shaper_nl_pre_doit,
+ .doit = net_shaper_nl_get_doit,
+ .post_doit = net_shaper_nl_post_doit,
+ .policy = net_shaper_get_do_nl_policy,
+ .maxattr = NET_SHAPER_A_IFINDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_GET,
+ .start = net_shaper_nl_pre_dumpit,
+ .dumpit = net_shaper_nl_get_dumpit,
+ .done = net_shaper_nl_post_dumpit,
+ .policy = net_shaper_get_dump_nl_policy,
+ .maxattr = NET_SHAPER_A_IFINDEX,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_SET,
+ .pre_doit = net_shaper_nl_pre_doit,
+ .doit = net_shaper_nl_set_doit,
+ .post_doit = net_shaper_nl_post_doit,
+ .policy = net_shaper_set_nl_policy,
+ .maxattr = NET_SHAPER_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_DELETE,
+ .pre_doit = net_shaper_nl_pre_doit,
+ .doit = net_shaper_nl_delete_doit,
+ .post_doit = net_shaper_nl_post_doit,
+ .policy = net_shaper_delete_nl_policy,
+ .maxattr = NET_SHAPER_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_GROUP,
+ .pre_doit = net_shaper_nl_pre_doit,
+ .doit = net_shaper_nl_group_doit,
+ .post_doit = net_shaper_nl_post_doit,
+ .policy = net_shaper_group_nl_policy,
+ .maxattr = NET_SHAPER_A_LEAVES,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_CAP_GET,
+ .pre_doit = net_shaper_nl_cap_pre_doit,
+ .doit = net_shaper_nl_cap_get_doit,
+ .post_doit = net_shaper_nl_cap_post_doit,
+ .policy = net_shaper_cap_get_do_nl_policy,
+ .maxattr = NET_SHAPER_A_CAPS_SCOPE,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NET_SHAPER_CMD_CAP_GET,
+ .start = net_shaper_nl_cap_pre_dumpit,
+ .dumpit = net_shaper_nl_cap_get_dumpit,
+ .done = net_shaper_nl_cap_post_dumpit,
+ .policy = net_shaper_cap_get_dump_nl_policy,
+ .maxattr = NET_SHAPER_A_CAPS_IFINDEX,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+};
+
+struct genl_family net_shaper_nl_family __ro_after_init = {
+ .name = NET_SHAPER_FAMILY_NAME,
+ .version = NET_SHAPER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = net_shaper_nl_ops,
+ .n_split_ops = ARRAY_SIZE(net_shaper_nl_ops),
+};
diff --git a/net/shaper/shaper_nl_gen.h b/net/shaper/shaper_nl_gen.h
new file mode 100644
index 000000000000..cb7f9026fc23
--- /dev/null
+++ b/net/shaper/shaper_nl_gen.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/net_shaper.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_NET_SHAPER_GEN_H
+#define _LINUX_NET_SHAPER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/net_shaper.h>
+
+/* Common nested types */
+extern const struct nla_policy net_shaper_handle_nl_policy[NET_SHAPER_A_HANDLE_ID + 1];
+extern const struct nla_policy net_shaper_leaf_info_nl_policy[NET_SHAPER_A_WEIGHT + 1];
+
+int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+void
+net_shaper_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_pre_dumpit(struct netlink_callback *cb);
+int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb);
+int net_shaper_nl_post_dumpit(struct netlink_callback *cb);
+int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb);
+
+int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info);
+int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+extern struct genl_family net_shaper_nl_family;
+
+#endif /* _LINUX_NET_SHAPER_GEN_H */
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index 746be3996768..ba5e6a2dd2fd 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -20,3 +20,16 @@ config SMC_DIAG
smcss.
if unsure, say Y.
+
+config SMC_LO
+ bool "SMC intra-OS shortcut with loopback-ism"
+ depends on SMC
+ default n
+ help
+ SMC_LO enables the creation of an Emulated-ISM device named
+ loopback-ism in SMC and makes use of it for transferring data
+ when communication occurs within the same OS. This helps in
+ convenient testing of SMC-D since loopback-ism is independent
+ of architecture or hardware.
+
+ if unsure, say N.
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 875efcd126a2..60f1c87d5212 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
-smc-y += smc_tracepoint.o
+smc-y += smc_tracepoint.o smc_inet.o
smc-$(CONFIG_SYSCTL) += smc_sysctl.o
+smc-$(CONFIG_SMC_LO) += smc_loopback.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 0f53a5c6fd9d..3e6cb35baf25 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -53,6 +53,8 @@
#include "smc_stats.h"
#include "smc_tracepoint.h"
#include "smc_sysctl.h"
+#include "smc_loopback.h"
+#include "smc_inet.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -169,11 +171,11 @@ static bool smc_hs_congested(const struct sock *sk)
return false;
}
-static struct smc_hashinfo smc_v4_hashinfo = {
+struct smc_hashinfo smc_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
-static struct smc_hashinfo smc_v6_hashinfo = {
+struct smc_hashinfo smc_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
@@ -191,7 +193,6 @@ int smc_hash_sk(struct sock *sk)
return 0;
}
-EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
@@ -202,13 +203,12 @@ void smc_unhash_sk(struct sock *sk)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&h->lock);
}
-EXPORT_SYMBOL_GPL(smc_unhash_sk);
/* This will be called before the user really releases the sock_lock, so do
* the work which we didn't do because the user held the sock_lock in the
* BH context
*/
-static void smc_release_cb(struct sock *sk)
+void smc_release_cb(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
@@ -308,7 +308,7 @@ static int __smc_release(struct smc_sock *smc)
return rc;
}
-static int smc_release(struct socket *sock)
+int smc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -362,25 +362,15 @@ static void smc_destruct(struct sock *sk)
return;
}
-static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
- int protocol)
+void smc_sk_init(struct net *net, struct sock *sk, int protocol)
{
- struct smc_sock *smc;
- struct proto *prot;
- struct sock *sk;
-
- prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
- sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
- if (!sk)
- return NULL;
+ struct smc_sock *smc = smc_sk(sk);
- sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
sk->sk_state = SMC_INIT;
sk->sk_destruct = smc_destruct;
sk->sk_protocol = protocol;
WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
- smc = smc_sk(sk);
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
INIT_WORK(&smc->connect_work, smc_connect_work);
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
@@ -390,12 +380,31 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
sk->sk_prot->hash(sk);
mutex_init(&smc->clcsock_release_lock);
smc_init_saved_callbacks(smc);
+ smc->limit_smc_hs = net->smc.limit_smc_hs;
+ smc->use_fallback = false; /* assume rdma capability first */
+ smc->fallback_rsn = 0;
+ smc_close_init(smc);
+}
+
+static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
+ int protocol)
+{
+ struct proto *prot;
+ struct sock *sk;
+
+ prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
+ sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
+ smc_sk_init(net, sk, protocol);
return sk;
}
-static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
+int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk = sock->sk;
@@ -460,29 +469,11 @@ out:
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
unsigned long mask)
{
- struct net *nnet = sock_net(nsk);
-
nsk->sk_userlocks = osk->sk_userlocks;
- if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
nsk->sk_sndbuf = osk->sk_sndbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_sndbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
- else
- WRITE_ONCE(nsk->sk_sndbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_wmem));
- }
- if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
nsk->sk_rcvbuf = osk->sk_rcvbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_rcvbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
- else
- WRITE_ONCE(nsk->sk_rcvbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_rmem));
- }
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
@@ -1046,7 +1037,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
int rc = SMC_CLC_DECL_NOSMCDDEV;
struct smcd_dev *smcd;
int i = 1, entry = 1;
- bool is_virtual;
+ bool is_emulated;
u16 chid;
if (smcd_indicated(ini->smc_type_v1))
@@ -1058,12 +1049,12 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
chid = smc_ism_get_chid(smcd);
if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
continue;
- is_virtual = __smc_ism_is_virtual(chid);
+ is_emulated = __smc_ism_is_emulated(chid);
if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
- if (is_virtual && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
+ if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
/* It's the last GID-CHID entry left in CLC
- * Proposal SMC-Dv2 extension, but a virtual
+ * Proposal SMC-Dv2 extension, but an Emulated-
* ISM device will take two entries. So give
* it up and try the next potential ISM device.
*/
@@ -1073,7 +1064,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
ini->is_smcd = true;
rc = 0;
i++;
- entry = is_virtual ? entry + 2 : entry + 1;
+ entry = is_emulated ? entry + 2 : entry + 1;
if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
break;
}
@@ -1126,7 +1117,10 @@ static int smc_find_proposal_devices(struct smc_sock *smc,
ini->check_smcrv2 = true;
ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
if (!(ini->smcr_version & SMC_V2) ||
- smc->clcsock->sk->sk_family != AF_INET ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (smc->clcsock->sk->sk_family == AF_INET6 &&
+ !ipv6_addr_v4mapped(&smc->clcsock->sk->sk_v6_rcv_saddr)) ||
+#endif
!smc_clc_ueid_count() ||
smc_find_rdma_device(smc, ini))
ini->smcr_version &= ~SMC_V2;
@@ -1309,7 +1303,6 @@ static int smc_connect_rdma(struct smc_sock *smc,
goto connect_abort;
}
- smc_close_init(smc);
smc_rx_init(smc);
if (ini->first_contact_local) {
@@ -1414,10 +1407,10 @@ static int smc_connect_ism(struct smc_sock *smc,
if (rc)
return rc;
- if (__smc_ism_is_virtual(ini->ism_chid[ini->ism_selected]))
+ if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
ini->ism_peer_gid[ini->ism_selected].gid_ext =
ntohll(aclc->d1.gid_ext);
- /* for non-virtual ISM devices, peer gid_ext remains 0. */
+ /* for non-Emulated-ISM devices, peer gid_ext remains 0. */
}
ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
@@ -1437,7 +1430,14 @@ static int smc_connect_ism(struct smc_sock *smc,
}
smc_conn_save_peer_info(smc, aclc);
- smc_close_init(smc);
+
+ if (smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd)) {
+ rc = smcd_buf_attach(smc);
+ if (rc) {
+ rc = SMC_CLC_DECL_MEM; /* try to fallback */
+ goto connect_abort;
+ }
+ }
smc_rx_init(smc);
smc_tx_init(smc);
@@ -1468,10 +1468,6 @@ connect_abort:
static int smc_connect_check_aclc(struct smc_init_info *ini,
struct smc_clc_msg_accept_confirm *aclc)
{
- if (aclc->hdr.typev1 != SMC_TYPE_R &&
- aclc->hdr.typev1 != SMC_TYPE_D)
- return SMC_CLC_DECL_MODEUNSUPP;
-
if (aclc->hdr.version >= SMC_V2) {
if ((aclc->hdr.typev1 == SMC_TYPE_R &&
!smcr_indicated(ini->smc_type_v2)) ||
@@ -1525,10 +1521,6 @@ static int __smc_connect(struct smc_sock *smc)
ini->smcd_version &= ~SMC_V1;
ini->smcr_version = 0;
ini->smc_type_v1 = SMC_TYPE_N;
- if (!ini->smcd_version) {
- rc = SMC_CLC_DECL_GETVLANERR;
- goto fallback;
- }
}
rc = smc_find_proposal_devices(smc, ini);
@@ -1634,8 +1626,8 @@ out:
release_sock(&smc->sk);
}
-static int smc_connect(struct socket *sock, struct sockaddr *addr,
- int alen, int flags)
+int smc_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -1911,6 +1903,7 @@ static void smc_listen_out(struct smc_sock *new_smc)
if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
atomic_dec(&lsmc->queued_smc_hs);
+ release_sock(newsmcsk); /* lock in smc_listen_work() */
if (lsmc->sk.sk_state == SMC_LISTEN) {
lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
smc_accept_enqueue(&lsmc->sk, newsmcsk);
@@ -2042,6 +2035,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
if (pclc->hdr.typev1 == SMC_TYPE_N)
return 0;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx)
+ return -EPROTO;
if (smc_clc_prfx_match(newclcsock, pclc_prfx))
return SMC_CLC_DECL_DIFFPREFIX;
@@ -2118,10 +2113,10 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini,
if (smc_ism_get_chid(smcd) == proposed_chid &&
!smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
- if (__smc_ism_is_virtual(proposed_chid))
+ if (__smc_ism_is_emulated(proposed_chid))
ini->ism_peer_gid[*matches].gid_ext =
proposed_gid->gid_ext;
- /* non-virtual ISM's peer gid_ext remains 0. */
+ /* non-Emulated-ISM's peer gid_ext remains 0. */
ini->ism_dev[*matches] = smcd;
(*matches)++;
break;
@@ -2155,6 +2150,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
+ if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
+ goto not_found;
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid) {
@@ -2171,10 +2168,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
smcd_gid.gid_ext = 0;
chid = ntohs(smcd_v2_ext->gidchid[i].chid);
- if (__smc_ism_is_virtual(chid)) {
+ if (__smc_ism_is_emulated(chid)) {
if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
- /* each virtual ISM device takes two GID-CHID
+ /* each Emulated-ISM device takes two GID-CHID
* entries and CHID of the second entry repeats
* that of the first entry.
*
@@ -2231,7 +2228,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
int rc = 0;
/* check if ISM V1 is available */
- if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
+ if (!(ini->smcd_version & SMC_V1) ||
+ !smcd_indicated(ini->smc_type_v1) ||
+ !pclc_smcd)
goto not_found;
ini->is_smcd = true; /* prepare ISM check */
ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
@@ -2282,7 +2281,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
goto not_found;
smc_v2_ext = smc_get_clc_v2_ext(pclc);
- if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ if (!smc_v2_ext ||
+ !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
goto not_found;
/* prepare RDMA check */
@@ -2432,6 +2432,7 @@ static void smc_listen_work(struct work_struct *work)
u8 accept_version;
int rc = 0;
+ lock_sock(&new_smc->sk); /* release in smc_listen_out() */
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
return smc_listen_out_err(new_smc);
@@ -2489,7 +2490,6 @@ static void smc_listen_work(struct work_struct *work)
goto out_decl;
mutex_lock(&smc_server_lgr_pending);
- smc_close_init(new_smc);
smc_rx_init(new_smc);
smc_tx_init(new_smc);
@@ -2541,6 +2541,14 @@ static void smc_listen_work(struct work_struct *work)
mutex_unlock(&smc_server_lgr_pending);
}
smc_conn_save_peer_info(new_smc, cclc);
+
+ if (ini->is_smcd &&
+ smc_ism_support_dmb_nocopy(new_smc->conn.lgr->smcd)) {
+ rc = smcd_buf_attach(new_smc);
+ if (rc)
+ goto out_decl;
+ }
+
smc_listen_out_connected(new_smc);
SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
goto out_free;
@@ -2608,7 +2616,7 @@ out:
read_unlock_bh(&listen_clcsock->sk_callback_lock);
}
-static int smc_listen(struct socket *sock, int backlog)
+int smc_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2673,8 +2681,8 @@ out:
return rc;
}
-static int smc_accept(struct socket *sock, struct socket *new_sock,
- int flags, bool kern)
+int smc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk, *nsk;
DECLARE_WAITQUEUE(wait, current);
@@ -2693,7 +2701,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
}
/* Wait for an incoming connection */
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -2720,7 +2728,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
if (rc)
goto out;
- if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
+ if (lsmc->sockopt_defer_accept && !(arg->flags & O_NONBLOCK)) {
/* wait till data arrives on the socket */
timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
MSEC_PER_SEC);
@@ -2733,7 +2741,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
release_sock(clcsk);
} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
lock_sock(nsk);
- smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
+ smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available);
release_sock(nsk);
}
}
@@ -2743,8 +2751,8 @@ out:
return rc;
}
-static int smc_getname(struct socket *sock, struct sockaddr *addr,
- int peer)
+int smc_getname(struct socket *sock, struct sockaddr *addr,
+ int peer)
{
struct smc_sock *smc;
@@ -2757,7 +2765,7 @@ static int smc_getname(struct socket *sock, struct sockaddr *addr,
return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}
-static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2795,8 +2803,8 @@ out:
return rc;
}
-static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2845,8 +2853,8 @@ static __poll_t smc_accept_poll(struct sock *parent)
return mask;
}
-static __poll_t smc_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+__poll_t smc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2883,6 +2891,13 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+ if (sk->sk_state != SMC_INIT) {
+ /* Race breaker the same way as tcp_poll(). */
+ smp_mb__after_atomic();
+ if (atomic_read(&smc->conn.sndbuf_space))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= EPOLLIN | EPOLLRDNORM;
@@ -2898,7 +2913,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
return mask;
}
-static int smc_shutdown(struct socket *sock, int how)
+int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
bool do_shutdown = true;
@@ -3038,8 +3053,8 @@ static int __smc_setsockopt(struct socket *sock, int level, int optname,
return rc;
}
-static int smc_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int optlen)
+int smc_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -3125,8 +3140,8 @@ out:
return rc;
}
-static int smc_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
+int smc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct smc_sock *smc;
int rc;
@@ -3151,8 +3166,8 @@ static int smc_getsockopt(struct socket *sock, int level, int optname,
return rc;
}
-static int smc_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
+int smc_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
union smc_host_cursor cons, urg;
struct smc_connection *conn;
@@ -3238,9 +3253,9 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
* Note that subsequent recv() calls have to wait till all splice() processing
* completed.
*/
-static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
+ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -3306,6 +3321,26 @@ static const struct proto_ops smc_sock_ops = {
.splice_read = smc_splice_read,
};
+int smc_create_clcsk(struct net *net, struct sock *sk, int family)
+{
+ struct smc_sock *smc = smc_sk(sk);
+ int rc;
+
+ rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
+ &smc->clcsock);
+ if (rc)
+ return rc;
+
+ /* smc_clcsock_release() does not wait for smc->clcsock->sk's
+ * destruction; its sk_state might not be TCP_CLOSE after
+ * smc->sk is close()d, and TCP timers can fire later,
+ * which need the net ref.
+ */
+ sk = smc->clcsock->sk;
+ sk_net_refcnt_upgrade(sk);
+ return 0;
+}
+
static int __smc_create(struct net *net, struct socket *sock, int protocol,
int kern, struct socket *clcsock)
{
@@ -3331,35 +3366,17 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
/* create internal TCP socket for CLC handshake and fallback */
smc = smc_sk(sk);
- smc->use_fallback = false; /* assume rdma capability first */
- smc->fallback_rsn = 0;
-
- /* default behavior from limit_smc_hs in every net namespace */
- smc->limit_smc_hs = net->smc.limit_smc_hs;
rc = 0;
- if (!clcsock) {
- rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
- &smc->clcsock);
- if (rc) {
- sk_common_release(sk);
- goto out;
- }
-
- /* smc_clcsock_release() does not wait smc->clcsock->sk's
- * destruction; its sk_state might not be TCP_CLOSE after
- * smc->sk is close()d, and TCP timers can be fired later,
- * which need net ref.
- */
- sk = smc->clcsock->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
- } else {
+ if (clcsock)
smc->clcsock = clcsock;
- }
+ else
+ rc = smc_create_clcsk(net, sk, family);
+ if (rc) {
+ sk_common_release(sk);
+ sock->sk = NULL;
+ }
out:
return rc;
}
@@ -3557,15 +3574,28 @@ static int __init smc_init(void)
goto out_sock;
}
- rc = tcp_register_ulp(&smc_ulp_ops);
+ rc = smc_loopback_init();
if (rc) {
- pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
+ pr_err("%s: smc_loopback_init fails with %d\n", __func__, rc);
goto out_ib;
}
+ rc = tcp_register_ulp(&smc_ulp_ops);
+ if (rc) {
+ pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
+ goto out_lo;
+ }
+ rc = smc_inet_init();
+ if (rc) {
+ pr_err("%s: smc_inet_init fails with %d\n", __func__, rc);
+ goto out_ulp;
+ }
static_branch_enable(&tcp_have_smc);
return 0;
-
+out_ulp:
+ tcp_unregister_ulp(&smc_ulp_ops);
+out_lo:
+ smc_loopback_exit();
out_ib:
smc_ib_unregister_client();
out_sock:
@@ -3600,9 +3630,11 @@ out_pernet_subsys:
static void __exit smc_exit(void)
{
static_branch_disable(&tcp_have_smc);
+ smc_inet_exit();
tcp_unregister_ulp(&smc_ulp_ops);
sock_unregister(PF_SMC);
smc_core_exit();
+ smc_loopback_exit();
smc_ib_unregister_client();
smc_ism_exit();
destroy_workqueue(smc_close_wq);
@@ -3626,4 +3658,9 @@ MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
MODULE_ALIAS_TCP_ULP("smc");
+/* 256 for IPPROTO_SMC and 1 for SOCK_STREAM */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 256, 1);
+#if IS_ENABLED(CONFIG_IPV6)
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 256, 1);
+#endif /* CONFIG_IPV6 */
MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index df64efd2dee8..78ae10d06ed2 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -34,6 +34,44 @@
extern struct proto smc_proto;
extern struct proto smc_proto6;
+extern struct smc_hashinfo smc_v4_hashinfo;
+extern struct smc_hashinfo smc_v6_hashinfo;
+
+int smc_hash_sk(struct sock *sk);
+void smc_unhash_sk(struct sock *sk);
+void smc_release_cb(struct sock *sk);
+
+int smc_release(struct socket *sock);
+int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len);
+int smc_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags);
+int smc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg);
+int smc_getname(struct socket *sock, struct sockaddr *addr,
+ int peer);
+__poll_t smc_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+int smc_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+int smc_listen(struct socket *sock, int backlog);
+int smc_shutdown(struct socket *sock, int how);
+int smc_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+int smc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen);
+int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
+/* smc sock initialization */
+void smc_sk_init(struct net *net, struct sock *sk, int protocol);
+/* clcsock initialization */
+int smc_create_clcsk(struct net *net, struct sock *sk, int family);
+
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
@@ -56,11 +94,11 @@ enum smc_state { /* possible states of an SMC socket */
};
enum smc_supplemental_features {
- SMC_SPF_VIRT_ISM_DEV = 0,
+ SMC_SPF_EMULATED_ISM_DEV = 0,
};
#define SMC_FEATURE_MASK \
- (BIT(SMC_SPF_VIRT_ISM_DEV))
+ (BIT(SMC_SPF_EMULATED_ISM_DEV))
struct smc_link_group;
@@ -240,12 +278,15 @@ struct smc_connection {
*/
u64 peer_token; /* SMC-D token of peer */
u8 killed : 1; /* abnormal termination */
- u8 freed : 1; /* normal termiation */
+ u8 freed : 1; /* normal termination */
u8 out_of_sync : 1; /* out of sync with peer */
};
struct smc_sock { /* smc sock container */
struct sock sk;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *pinet6;
+#endif
struct socket *clcsock; /* internal tcp socket */
void (*clcsk_state_change)(struct sock *sk);
/* original stat_change fct. */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 3c06625ceb20..619b3bab3824 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -18,6 +18,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
+#include "smc_ism.h"
/********************************** send *************************************/
@@ -255,6 +256,14 @@ int smcd_cdc_msg_send(struct smc_connection *conn)
return rc;
smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
+
+ if (smc_ism_support_dmb_nocopy(conn->lgr->smcd))
+ /* if local sndbuf shares the same memory region with
+ * peer DMB, then don't update the tx_curs_fin
+ * and sndbuf_space until peer has consumed the data.
+ */
+ return 0;
+
/* Calculate transmitted data and increment free send buffer space */
diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
&conn->tx_curs_sent);
@@ -266,7 +275,7 @@ int smcd_cdc_msg_send(struct smc_connection *conn)
smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
smc_tx_sndbuf_nonfull(smc);
- return rc;
+ return 0;
}
/********************************* receive ***********************************/
@@ -323,7 +332,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
{
union smc_host_cursor cons_old, prod_old;
struct smc_connection *conn = &smc->conn;
- int diff_cons, diff_prod;
+ int diff_cons, diff_prod, diff_tx;
smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
@@ -339,6 +348,29 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
atomic_add(diff_cons, &conn->peer_rmbe_space);
/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
smp_mb__after_atomic();
+
+ /* if local sndbuf shares the same memory region with
+ * peer RMB, then update tx_curs_fin and sndbuf_space
+ * here since peer has already consumed the data.
+ */
+ if (conn->lgr->is_smcd &&
+ smc_ism_support_dmb_nocopy(conn->lgr->smcd)) {
+ /* Calculate consumed data and
+ * increment free send buffer space.
+ */
+ diff_tx = smc_curs_diff(conn->sndbuf_desc->len,
+ &conn->tx_curs_fin,
+ &conn->local_rx_ctrl.cons);
+ /* increase local sndbuf space and fin_curs */
+ smp_mb__before_atomic();
+ atomic_add(diff_tx, &conn->sndbuf_space);
+ /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
+ smp_mb__after_atomic();
+ smc_curs_copy(&conn->tx_curs_fin,
+ &conn->local_rx_ctrl.cons, conn);
+
+ smc_tx_sndbuf_nonfull(smc);
+ }
}
diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 9a13709bea1c..521f5df80e10 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
struct smc_clc_msg_hdr *hdr = &pclc->hdr;
struct smc_clc_v2_extension *v2_ext;
- v2_ext = smc_get_clc_v2_ext(pclc);
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx ||
+ pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
+ return false;
+
if (hdr->version == SMC_V1) {
if (hdr->typev1 == SMC_TYPE_N)
return false;
@@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
sizeof(struct smc_clc_msg_trail))
return false;
} else {
+ v2_ext = smc_get_clc_v2_ext(pclc);
+ if ((hdr->typev2 != SMC_TYPE_N &&
+ (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
+ (smcd_indicated(hdr->typev2) &&
+ v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
+ return false;
+
if (ntohs(hdr->length) !=
sizeof(*pclc) +
sizeof(struct smc_clc_msg_smcd) +
@@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
SMC_CLC_RECV_BUF_LEN : datlen;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ if (len < recvlen) {
+ smc->sk.sk_err = EPROTO;
+ reason_code = -EPROTO;
+ goto out;
+ }
datlen -= len;
}
if (clcm->type == SMC_CLC_DECLINE) {
@@ -853,8 +868,10 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
pclc_smcd = &pclc->pclc_smcd;
pclc_prfx = &pclc->pclc_prfx;
ipv6_prfx = pclc->pclc_prfx_ipv6;
- v2_ext = &pclc->pclc_v2_ext;
- smcd_v2_ext = &pclc->pclc_smcd_v2_ext;
+ v2_ext = container_of(&pclc->pclc_v2_ext,
+ struct smc_clc_v2_extension, fixed);
+ smcd_v2_ext = container_of(&pclc->pclc_smcd_v2_ext,
+ struct smc_clc_smcd_v2_extension, fixed);
gidchids = pclc->pclc_gidchids;
trl = &pclc->pclc_trl;
@@ -952,8 +969,8 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
gidchids[entry].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
gidchids[entry].gid = htonll(smcd_gid.gid);
- if (smc_ism_is_virtual(smcd)) {
- /* a virtual ISM device takes two
+ if (smc_ism_is_emulated(smcd)) {
+ /* an Emulated-ISM device takes two
* entries. CHID of the second entry
* repeats that of the first entry.
*/
@@ -1055,7 +1072,7 @@ smcd_clc_prep_confirm_accept(struct smc_connection *conn,
clc->d1.chid = htons(chid);
if (eid && eid[0])
memcpy(clc->d1.eid, eid, SMC_MAX_EID_LEN);
- if (__smc_ism_is_virtual(chid))
+ if (__smc_ism_is_emulated(chid))
clc->d1.gid_ext = htonll(smcd_gid.gid_ext);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
if (first_contact) {
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index a9f9bdd26dcd..767289925410 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -134,14 +134,19 @@ struct smc_clc_smcd_gid_chid {
*/
struct smc_clc_v2_extension {
- struct smc_clnt_opts_area_hdr hdr;
- u8 roce[16]; /* RoCEv2 GID */
- u8 max_conns;
- u8 max_links;
- __be16 feature_mask;
- u8 reserved[12];
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(smc_clc_v2_extension_fixed, fixed,
+ struct smc_clnt_opts_area_hdr hdr;
+ u8 roce[16]; /* RoCEv2 GID */
+ u8 max_conns;
+ u8 max_links;
+ __be16 feature_mask;
+ u8 reserved[12];
+ );
u8 user_eids[][SMC_MAX_EID_LEN];
};
+static_assert(offsetof(struct smc_clc_v2_extension, user_eids) == sizeof(struct smc_clc_v2_extension_fixed),
+ "struct member likely outside of struct_group_tagged()");
struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
__be32 outgoing_subnet; /* subnet mask */
@@ -151,7 +156,7 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
} __aligned(4);
struct smc_clc_msg_smcd { /* SMC-D GID information */
- struct smc_clc_smcd_gid_chid ism; /* ISM native GID+CHID of requestor */
+ struct smc_clc_smcd_gid_chid ism; /* ISM native GID+CHID of requester */
__be16 v2_ext_offset; /* SMC Version 2 Extension Offset */
u8 vendor_oui[3]; /* vendor organizationally unique identifier */
u8 vendor_exp_options[5];
@@ -159,10 +164,15 @@ struct smc_clc_msg_smcd { /* SMC-D GID information */
};
struct smc_clc_smcd_v2_extension {
- u8 system_eid[SMC_MAX_EID_LEN];
- u8 reserved[16];
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(smc_clc_smcd_v2_extension_fixed, fixed,
+ u8 system_eid[SMC_MAX_EID_LEN];
+ u8 reserved[16];
+ );
struct smc_clc_smcd_gid_chid gidchid[];
};
+static_assert(offsetof(struct smc_clc_smcd_v2_extension, gidchid) == sizeof(struct smc_clc_smcd_v2_extension_fixed),
+ "struct member likely outside of struct_group_tagged()");
struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
struct smc_clc_msg_hdr hdr;
@@ -175,7 +185,7 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
#define SMCD_CLC_MAX_V2_GID_ENTRIES 8 /* max # of CHID-GID entries in CLC
* proposal SMC-Dv2 extension.
* each ISM device takes one entry and
- * each virtual ISM takes two entries.
+ * each Emulated-ISM takes two entries
*/
struct smc_clc_msg_proposal_area {
@@ -183,9 +193,9 @@ struct smc_clc_msg_proposal_area {
struct smc_clc_msg_smcd pclc_smcd;
struct smc_clc_msg_proposal_prefix pclc_prfx;
struct smc_clc_ipv6_prefix pclc_prfx_ipv6[SMC_CLC_MAX_V6_PREFIX];
- struct smc_clc_v2_extension pclc_v2_ext;
+ struct smc_clc_v2_extension_fixed pclc_v2_ext;
u8 user_eids[SMC_CLC_MAX_UEID][SMC_MAX_EID_LEN];
- struct smc_clc_smcd_v2_extension pclc_smcd_v2_ext;
+ struct smc_clc_smcd_v2_extension_fixed pclc_smcd_v2_ext;
struct smc_clc_smcd_gid_chid
pclc_gidchids[SMCD_CLC_MAX_V2_GID_ENTRIES];
struct smc_clc_msg_trail pclc_trl;
@@ -326,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */
static inline struct smc_clc_msg_proposal_prefix *
smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
{
+ u16 offset = ntohs(pclc->iparea_offset);
+
+ if (offset > sizeof(struct smc_clc_msg_smcd))
+ return NULL;
return (struct smc_clc_msg_proposal_prefix *)
- ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+ ((u8 *)pclc + sizeof(*pclc) + offset);
}
static inline bool smcr_indicated(int smc_type)
@@ -366,8 +380,14 @@ static inline struct smc_clc_v2_extension *
smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
{
struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
+ u16 max_offset;
- if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
+ max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
+ offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
+
+ if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
+ ntohs(prop_smcd->v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_v2_extension *)
@@ -380,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
static inline struct smc_clc_smcd_v2_extension *
smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
{
+ u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_v2_extension, hdr) -
+ offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
+
if (!prop_v2ext)
return NULL;
- if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
+ if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
+ ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_smcd_v2_extension *)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index e4c858411207..ac07b963aede 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -221,6 +221,35 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
write_unlock_bh(&lgr->conns_lock);
}
+static void smc_lgr_buf_list_add(struct smc_link_group *lgr,
+ bool is_rmb,
+ struct list_head *buf_list,
+ struct smc_buf_desc *buf_desc)
+{
+ list_add(&buf_desc->list, buf_list);
+ if (is_rmb) {
+ lgr->alloc_rmbs += buf_desc->len;
+ lgr->alloc_rmbs +=
+ lgr->is_smcd ? sizeof(struct smcd_cdc_msg) : 0;
+ } else {
+ lgr->alloc_sndbufs += buf_desc->len;
+ }
+}
+
+static void smc_lgr_buf_list_del(struct smc_link_group *lgr,
+ bool is_rmb,
+ struct smc_buf_desc *buf_desc)
+{
+ list_del(&buf_desc->list);
+ if (is_rmb) {
+ lgr->alloc_rmbs -= buf_desc->len;
+ lgr->alloc_rmbs -=
+ lgr->is_smcd ? sizeof(struct smcd_cdc_msg) : 0;
+ } else {
+ lgr->alloc_sndbufs -= buf_desc->len;
+ }
+}
+
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
@@ -363,6 +392,10 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
smc_target[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_LGR_R_SNDBUF_ALLOC, lgr->alloc_sndbufs))
+ goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_LGR_R_RMB_ALLOC, lgr->alloc_rmbs))
+ goto errattr;
if (lgr->smc_version > SMC_V1) {
v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
if (!v2_attrs)
@@ -541,6 +574,10 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
goto errattr;
if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_LGR_D_SNDBUF_ALLOC, lgr->alloc_sndbufs))
+ goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_LGR_D_DMB_ALLOC, lgr->alloc_rmbs))
+ goto errattr;
memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
smc_pnet[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
@@ -758,9 +795,14 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
if (lgr->smc_version == SMC_V2) {
lnk->smcibdev = ini->smcrv2.ib_dev_v2;
lnk->ibport = ini->smcrv2.ib_port_v2;
+ lnk->wr_rx_sge_cnt = lnk->smcibdev->ibdev->attrs.max_recv_sge < 2 ? 1 : 2;
+ lnk->wr_rx_buflen = smc_link_shared_v2_rxbuf(lnk) ?
+ SMC_WR_BUF_SIZE : SMC_WR_BUF_V2_SIZE;
} else {
lnk->smcibdev = ini->ib_dev;
lnk->ibport = ini->ib_port;
+ lnk->wr_rx_sge_cnt = 1;
+ lnk->wr_rx_buflen = SMC_WR_BUF_SIZE;
}
get_device(&lnk->smcibdev->ibdev->dev);
atomic_inc(&lnk->smcibdev->lnk_cnt);
@@ -1138,7 +1180,7 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
lock = is_rmb ? &lgr->rmbs_lock :
&lgr->sndbufs_lock;
down_write(lock);
- list_del(&buf_desc->list);
+ smc_lgr_buf_list_del(lgr, is_rmb, buf_desc);
up_write(lock);
smc_buf_free(lgr, is_rmb, buf_desc);
@@ -1149,25 +1191,47 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
}
}
+static void smcd_buf_detach(struct smc_connection *conn)
+{
+ struct smcd_dev *smcd = conn->lgr->smcd;
+ u64 peer_token = conn->peer_token;
+
+ if (!conn->sndbuf_desc)
+ return;
+
+ smc_ism_detach_dmb(smcd, peer_token);
+
+ kfree(conn->sndbuf_desc);
+ conn->sndbuf_desc = NULL;
+}
+
static void smc_buf_unuse(struct smc_connection *conn,
struct smc_link_group *lgr)
{
+ struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ bool is_smcd = lgr->is_smcd;
+ int bufsize;
+
if (conn->sndbuf_desc) {
- if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
+ bufsize = conn->sndbuf_desc->len;
+ if (!is_smcd && conn->sndbuf_desc->is_vm) {
smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
} else {
- memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
+ memzero_explicit(conn->sndbuf_desc->cpu_addr, bufsize);
WRITE_ONCE(conn->sndbuf_desc->used, 0);
}
+ SMC_STAT_RMB_SIZE(smc, is_smcd, false, false, bufsize);
}
if (conn->rmb_desc) {
- if (!lgr->is_smcd) {
+ bufsize = conn->rmb_desc->len;
+ if (!is_smcd) {
smcr_buf_unuse(conn->rmb_desc, true, lgr);
} else {
- memzero_explicit(conn->rmb_desc->cpu_addr,
- conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
+ bufsize += sizeof(struct smcd_cdc_msg);
+ memzero_explicit(conn->rmb_desc->cpu_addr, bufsize);
WRITE_ONCE(conn->rmb_desc->used, 0);
}
+ SMC_STAT_RMB_SIZE(smc, is_smcd, true, false, bufsize);
}
}
@@ -1192,6 +1256,8 @@ void smc_conn_free(struct smc_connection *conn)
if (lgr->is_smcd) {
if (!list_empty(&lgr->list))
smc_ism_unset_conn(conn);
+ if (smc_ism_support_dmb_nocopy(lgr->smcd))
+ smcd_buf_detach(conn);
tasklet_kill(&conn->rx_tsklet);
} else {
smc_cdc_wait_pend_tx_wr(conn);
@@ -1361,7 +1427,7 @@ static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
buf_list = &lgr->sndbufs[i];
list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
list) {
- list_del(&buf_desc->list);
+ smc_lgr_buf_list_del(lgr, is_rmb, buf_desc);
smc_buf_free(lgr, is_rmb, buf_desc);
}
}
@@ -1445,6 +1511,8 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
smc_sk_wake_ups(smc);
if (conn->lgr->is_smcd) {
smc_ism_unset_conn(conn);
+ if (smc_ism_support_dmb_nocopy(conn->lgr->smcd))
+ smcd_buf_detach(conn);
if (soft)
tasklet_kill(&conn->rx_tsklet);
else
@@ -1535,7 +1603,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
if ((!peer_gid->gid ||
(lgr->peer_gid.gid == peer_gid->gid &&
- !smc_ism_is_virtual(dev) ? 1 :
+ !smc_ism_is_emulated(dev) ? 1 :
lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
if (peer_gid->gid) /* peer triggered termination */
@@ -1755,7 +1823,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
{
if (smc_link_downing(&lnk->state)) {
trace_smcr_link_down(lnk, __builtin_return_address(0));
- schedule_work(&lnk->link_down_wrk);
+ smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
+ if (!schedule_work(&lnk->link_down_wrk))
+ smcr_link_put(lnk);
}
}
@@ -1787,11 +1857,14 @@ static void smc_link_down_work(struct work_struct *work)
struct smc_link_group *lgr = link->lgr;
if (list_empty(&lgr->list))
- return;
+ goto out;
wake_up_all(&lgr->llc_msg_waiter);
down_write(&lgr->llc_conf_mutex);
smcr_link_down(link);
up_write(&lgr->llc_conf_mutex);
+
+out:
+ smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
}
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
@@ -1881,7 +1954,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
lgr->smcd != smcismdev)
return false;
- if (smc_ism_is_virtual(smcismdev) &&
+ if (smc_ism_is_emulated(smcismdev) &&
lgr->peer_gid.gid_ext != peer_gid->gid_ext)
return false;
@@ -1988,7 +2061,7 @@ out:
}
#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
-#define SMCR_RMBE_SIZES 5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */
+#define SMCR_RMBE_SIZES 15 /* 0 -> 16KB, 1 -> 32KB, .. 15 -> 512MB */
/* convert the RMB size into the compressed notation (minimum 16K, see
* SMCD/R_DMBE_SIZES.
@@ -1997,7 +2070,6 @@ out:
*/
static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
{
- const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
u8 compressed;
if (size <= SMC_BUF_MIN_SIZE)
@@ -2007,9 +2079,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
compressed = min_t(u8, ilog2(size) + 1,
is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
if (!is_smcd && is_rmb)
/* RMBs are backed by & limited to max size of scatterlists */
- compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
+ compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
+#endif
return compressed;
}
@@ -2081,7 +2155,7 @@ static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
size = min_t(int, PAGE_SIZE - offset, buf_size);
sg_set_page(sg, vmalloc_to_page(buf), size, offset);
- buf += size / sizeof(*buf);
+ buf += size;
buf_size -= size;
offset = 0;
}
@@ -2231,7 +2305,7 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
}
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
- bool is_rmb, int bufsize)
+ int bufsize)
{
struct smc_buf_desc *buf_desc;
@@ -2257,7 +2331,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
}
if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
goto out;
- fallthrough; // try virtually continguous buf
+ fallthrough; // try virtually contiguous buf
case SMCR_VIRT_CONT_BUFS:
buf_desc->order = get_order(bufsize);
buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
@@ -2371,7 +2445,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
if (buf_desc) {
buf_desc->is_dma_need_sync = 0;
- SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
break; /* found reusable slot */
}
@@ -2379,7 +2453,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
if (is_smcd)
buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
else
- buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
+ buf_desc = smcr_new_buf_create(lgr, bufsize);
if (PTR_ERR(buf_desc) == -ENOMEM)
break;
@@ -2392,10 +2466,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
}
SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
- SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
buf_desc->used = 1;
down_write(lock);
- list_add(&buf_desc->list, buf_list);
+ smc_lgr_buf_list_add(lgr, is_rmb, buf_list, buf_desc);
up_write(lock);
break; /* found */
}
@@ -2464,14 +2538,21 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
int rc;
/* create send buffer */
+ if (is_smcd &&
+ smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd))
+ goto create_rmb;
+
rc = __smc_buf_create(smc, is_smcd, false);
if (rc)
return rc;
+
+create_rmb:
/* create rmb */
rc = __smc_buf_create(smc, is_smcd, true);
- if (rc) {
+ if (rc && smc->conn.sndbuf_desc) {
down_write(&smc->conn.lgr->sndbufs_lock);
- list_del(&smc->conn.sndbuf_desc->list);
+ smc_lgr_buf_list_del(smc->conn.lgr, false,
+ smc->conn.sndbuf_desc);
up_write(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
smc->conn.sndbuf_desc = NULL;
@@ -2479,6 +2560,41 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
return rc;
}
+int smcd_buf_attach(struct smc_sock *smc)
+{
+ struct smc_connection *conn = &smc->conn;
+ struct smcd_dev *smcd = conn->lgr->smcd;
+ u64 peer_token = conn->peer_token;
+ struct smc_buf_desc *buf_desc;
+ int rc;
+
+ buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
+ if (!buf_desc)
+ return -ENOMEM;
+
+ /* The ghost sndbuf_desc describes the same memory region as
+ * peer RMB. Its lifecycle is consistent with the connection's
+	 * and it will be freed with the connection instead of the
+ * link group.
+ */
+ rc = smc_ism_attach_dmb(smcd, peer_token, buf_desc);
+ if (rc)
+ goto free;
+
+ smc->sk.sk_sndbuf = buf_desc->len;
+ buf_desc->cpu_addr =
+ (u8 *)buf_desc->cpu_addr + sizeof(struct smcd_cdc_msg);
+ buf_desc->len -= sizeof(struct smcd_cdc_msg);
+ conn->sndbuf_desc = buf_desc;
+ conn->sndbuf_desc->used = 1;
+ atomic_set(&conn->sndbuf_space, conn->sndbuf_desc->len);
+ return 0;
+
+free:
+ kfree(buf_desc);
+ return rc;
+}
+
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
int i;
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 1f175376037b..48a1b1dcb576 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -30,7 +30,7 @@
*/
#define SMC_CONN_PER_LGR_PREFER 255 /* Preferred connections per link group used for
* SMC-R v2.1 and later negotiation, vendors or
- * distrubutions may modify it to a value between
+ * distributions may modify it to a value between
* 16-255 as needed.
*/
@@ -122,10 +122,14 @@ struct smc_link {
} ____cacheline_aligned_in_smp;
struct completion tx_ref_comp;
- struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */
+ u8 *wr_rx_bufs; /* WR recv payload buffers */
struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */
struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */
/* above three vectors have wr_rx_cnt elements and use the same index */
+	int			wr_rx_sge_cnt;	/* # of rx sges: 1 for V1, 1 or 2 for V2 */
+	int			wr_rx_buflen;	/* buffer len for the first sge; the second
+						 * sge (if present) uses the lgr-shared buffer.
+						 */
dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
u64 wr_rx_id; /* seq # of last recv WR */
@@ -181,7 +185,7 @@ struct smc_link {
*/
#define SMC_LINKS_PER_LGR_MAX_PREFER 2 /* Preferred max links per link group used for
* SMC-R v2.1 and later negotiation, vendors or
- * distrubutions may modify it to a value between
+ * distributions may modify it to a value between
* 1-2 as needed.
*/
@@ -281,6 +285,8 @@ struct smc_link_group {
struct rw_semaphore sndbufs_lock; /* protects tx buffers */
struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */
struct rw_semaphore rmbs_lock; /* protects rx buffers */
+ u64 alloc_sndbufs; /* stats of tx buffers */
+ u64 alloc_rmbs; /* stats of rx buffers */
u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
struct delayed_work free_work; /* delayed freeing of an lgr */
@@ -504,6 +510,11 @@ static inline bool smc_link_active(struct smc_link *lnk)
return lnk->state == SMC_LNK_ACTIVE;
}
+static inline bool smc_link_shared_v2_rxbuf(struct smc_link *lnk)
+{
+ return lnk->wr_rx_sge_cnt > 1;
+}
+
static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
@@ -557,6 +568,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
void smc_smcd_terminate_all(struct smcd_dev *dev);
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
+int smcd_buf_attach(struct smc_sock *smc);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link,
struct smc_clc_msg_accept_confirm *clc);
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 5a33908015f3..6fdb2d96777a 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -255,6 +255,7 @@ static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler smc_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_SMC,
.dump = smc_diag_handler_dump,
};
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 97704a9e84c7..53828833a3f7 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -209,13 +209,18 @@ int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
if (IS_ERR(rt))
goto out;
if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
- goto out;
- neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
- if (neigh) {
- memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
- *uses_gateway = rt->rt_uses_gateway;
- return 0;
- }
+ goto out_rt;
+ neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr);
+ if (!neigh)
+ goto out_rt;
+ memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
+ *uses_gateway = rt->rt_uses_gateway;
+ neigh_release(neigh);
+ ip_rt_put(rt);
+ return 0;
+
+out_rt:
+ ip_rt_put(rt);
out:
return -ENOENT;
}
@@ -657,7 +662,6 @@ void smc_ib_destroy_queue_pair(struct smc_link *lnk)
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
- int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
struct ib_qp_init_attr qp_attr = {
.event_handler = smc_ib_qp_event_handler,
.qp_context = lnk,
@@ -671,7 +675,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
.max_send_wr = SMC_WR_BUF_CNT * 3,
.max_recv_wr = SMC_WR_BUF_CNT * 3,
.max_send_sge = SMC_IB_MAX_SEND_SGE,
- .max_recv_sge = sges_per_buf,
+ .max_recv_sge = lnk->wr_rx_sge_cnt,
.max_inline_data = 0,
},
.sq_sig_type = IB_SIGNAL_REQ_WR,
@@ -894,9 +898,7 @@ static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
struct ib_device *ibdev = smcibdev->ibdev;
struct net_device *ndev;
- if (!ibdev->ops.get_netdev)
- return;
- ndev = ibdev->ops.get_netdev(ibdev, port + 1);
+ ndev = ib_device_get_netdev(ibdev, port + 1);
if (ndev) {
smcibdev->ndev_ifidx[port] = ndev->ifindex;
dev_put(ndev);
@@ -916,9 +918,7 @@ void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
port_cnt = smcibdev->ibdev->phys_port_cnt;
for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
libdev = smcibdev->ibdev;
- if (!libdev->ops.get_netdev)
- continue;
- lndev = libdev->ops.get_netdev(libdev, i + 1);
+ lndev = ib_device_get_netdev(libdev, i + 1);
dev_put(lndev);
if (lndev != ndev)
continue;
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
new file mode 100644
index 000000000000..a944e7dcb8b9
--- /dev/null
+++ b/net/smc/smc_inet.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Definitions for the IPPROTO_SMC (socket related)
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: D. Wythe <alibuda@linux.alibaba.com>
+ */
+
+#include <net/protocol.h>
+#include <net/sock.h>
+
+#include "smc_inet.h"
+#include "smc.h"
+
+static int smc_inet_init_sock(struct sock *sk);
+
+static struct proto smc_inet_prot = {
+ .name = "INET_SMC",
+ .owner = THIS_MODULE,
+ .init = smc_inet_init_sock,
+ .hash = smc_hash_sk,
+ .unhash = smc_unhash_sk,
+ .release_cb = smc_release_cb,
+ .obj_size = sizeof(struct smc_sock),
+ .h.smc_hash = &smc_v4_hashinfo,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
+};
+
+static const struct proto_ops smc_inet_stream_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = smc_release,
+ .bind = smc_bind,
+ .connect = smc_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = smc_accept,
+ .getname = smc_getname,
+ .poll = smc_poll,
+ .ioctl = smc_ioctl,
+ .listen = smc_listen,
+ .shutdown = smc_shutdown,
+ .setsockopt = smc_setsockopt,
+ .getsockopt = smc_getsockopt,
+ .sendmsg = smc_sendmsg,
+ .recvmsg = smc_recvmsg,
+ .mmap = sock_no_mmap,
+ .splice_read = smc_splice_read,
+};
+
+static struct inet_protosw smc_inet_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SMC,
+ .prot = &smc_inet_prot,
+ .ops = &smc_inet_stream_ops,
+ .flags = INET_PROTOSW_ICSK,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+struct smc6_sock {
+ struct smc_sock smc;
+ struct ipv6_pinfo inet6;
+};
+
+static struct proto smc_inet6_prot = {
+ .name = "INET6_SMC",
+ .owner = THIS_MODULE,
+ .init = smc_inet_init_sock,
+ .hash = smc_hash_sk,
+ .unhash = smc_unhash_sk,
+ .release_cb = smc_release_cb,
+ .obj_size = sizeof(struct smc6_sock),
+ .h.smc_hash = &smc_v6_hashinfo,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
+ .ipv6_pinfo_offset = offsetof(struct smc6_sock, inet6),
+};
+
+static const struct proto_ops smc_inet6_stream_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = smc_release,
+ .bind = smc_bind,
+ .connect = smc_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = smc_accept,
+ .getname = smc_getname,
+ .poll = smc_poll,
+ .ioctl = smc_ioctl,
+ .listen = smc_listen,
+ .shutdown = smc_shutdown,
+ .setsockopt = smc_setsockopt,
+ .getsockopt = smc_getsockopt,
+ .sendmsg = smc_sendmsg,
+ .recvmsg = smc_recvmsg,
+ .mmap = sock_no_mmap,
+ .splice_read = smc_splice_read,
+};
+
+static struct inet_protosw smc_inet6_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SMC,
+ .prot = &smc_inet6_prot,
+ .ops = &smc_inet6_stream_ops,
+ .flags = INET_PROTOSW_ICSK,
+};
+#endif /* CONFIG_IPV6 */
+
+static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu)
+{
+	/* No need to pass it through to clcsock; mss can always be set by
+ * sock_create_kern or smc_setsockopt.
+ */
+ return 0;
+}
+
+static int smc_inet_init_sock(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+
+ /* init common smc sock */
+ smc_sk_init(net, sk, IPPROTO_SMC);
+
+ inet_csk(sk)->icsk_sync_mss = smc_sync_mss;
+
+ /* create clcsock */
+ return smc_create_clcsk(net, sk, sk->sk_family);
+}
+
+int __init smc_inet_init(void)
+{
+ int rc;
+
+ rc = proto_register(&smc_inet_prot, 1);
+ if (rc) {
+ pr_err("%s: proto_register smc_inet_prot fails with %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /* no return value */
+ inet_register_protosw(&smc_inet_protosw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ rc = proto_register(&smc_inet6_prot, 1);
+ if (rc) {
+ pr_err("%s: proto_register smc_inet6_prot fails with %d\n",
+ __func__, rc);
+ goto out_inet6_prot;
+ }
+ rc = inet6_register_protosw(&smc_inet6_protosw);
+ if (rc) {
+ pr_err("%s: inet6_register_protosw smc_inet6_protosw fails with %d\n",
+ __func__, rc);
+ goto out_inet6_protosw;
+ }
+ return rc;
+out_inet6_protosw:
+ proto_unregister(&smc_inet6_prot);
+out_inet6_prot:
+ inet_unregister_protosw(&smc_inet_protosw);
+ proto_unregister(&smc_inet_prot);
+#endif /* CONFIG_IPV6 */
+ return rc;
+}
+
+void smc_inet_exit(void)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ inet6_unregister_protosw(&smc_inet6_protosw);
+ proto_unregister(&smc_inet6_prot);
+#endif /* CONFIG_IPV6 */
+ inet_unregister_protosw(&smc_inet_protosw);
+ proto_unregister(&smc_inet_prot);
+}
diff --git a/net/smc/smc_inet.h b/net/smc/smc_inet.h
new file mode 100644
index 000000000000..a489c8a2b8ef
--- /dev/null
+++ b/net/smc/smc_inet.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Definitions for the IPPROTO_SMC (socket related)
+ *
+ * Copyright IBM Corp. 2016
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: D. Wythe <alibuda@linux.alibaba.com>
+ */
+#ifndef __INET_SMC
+#define __INET_SMC
+
+/* Initialize protocol registration on IPPROTO_SMC,
+ * @return 0 on success
+ */
+int smc_inet_init(void);
+
+void smc_inet_exit(void);
+
+#endif /* __INET_SMC */
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index ac88de2a06a0..84f98e18c7db 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -91,6 +91,11 @@ bool smc_ism_is_v2_capable(void)
return smc_ism_v2_capable;
}
+void smc_ism_set_v2_capable(void)
+{
+ smc_ism_v2_capable = true;
+}
+
/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
@@ -126,6 +131,8 @@ int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
if (!vlanid) /* No valid vlan id */
return -EINVAL;
+ if (!smcd->ops->add_vlan_id)
+ return -EOPNOTSUPP;
/* create new vlan entry, in case we need it */
new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
@@ -171,6 +178,8 @@ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
if (!vlanid) /* No valid vlan id */
return -EINVAL;
+ if (!smcd->ops->del_vlan_id)
+ return -EOPNOTSUPP;
spin_lock_irqsave(&smcd->lock, flags);
list_for_each_entry(vlan, &smcd->vlan, list) {
@@ -222,7 +231,6 @@ int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
struct smc_buf_desc *dmb_desc)
{
-#if IS_ENABLED(CONFIG_ISM)
struct smcd_dmb dmb;
int rc;
@@ -231,7 +239,7 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
dmb.sba_idx = dmb_desc->sba_idx;
dmb.vlan_id = lgr->vlan_id;
dmb.rgid = lgr->peer_gid.gid;
- rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, &smc_ism_client);
+ rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, lgr->smcd->client);
if (!rc) {
dmb_desc->sba_idx = dmb.sba_idx;
dmb_desc->token = dmb.dmb_tok;
@@ -240,9 +248,46 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
dmb_desc->len = dmb.dmb_len;
}
return rc;
-#else
- return 0;
-#endif
+}
+
+bool smc_ism_support_dmb_nocopy(struct smcd_dev *smcd)
+{
+ /* for now only loopback-ism supports
+ * merging sndbuf with peer DMB to avoid
+ * data copies between them.
+ */
+ return (smcd->ops->support_dmb_nocopy &&
+ smcd->ops->support_dmb_nocopy(smcd));
+}
+
+int smc_ism_attach_dmb(struct smcd_dev *dev, u64 token,
+ struct smc_buf_desc *dmb_desc)
+{
+ struct smcd_dmb dmb;
+ int rc = 0;
+
+ if (!dev->ops->attach_dmb)
+ return -EINVAL;
+
+ memset(&dmb, 0, sizeof(dmb));
+ dmb.dmb_tok = token;
+ rc = dev->ops->attach_dmb(dev, &dmb);
+ if (!rc) {
+ dmb_desc->sba_idx = dmb.sba_idx;
+ dmb_desc->token = dmb.dmb_tok;
+ dmb_desc->cpu_addr = dmb.cpu_addr;
+ dmb_desc->dma_addr = dmb.dma_addr;
+ dmb_desc->len = dmb.dmb_len;
+ }
+ return rc;
+}
+
+int smc_ism_detach_dmb(struct smcd_dev *dev, u64 token)
+{
+ if (!dev->ops->detach_dmb)
+ return -EINVAL;
+
+ return dev->ops->detach_dmb(dev, token);
}
static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
@@ -322,6 +367,8 @@ static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
list_for_each_entry(smcd, &dev_list->list, list) {
if (num < snum)
goto next;
+ if (smc_ism_is_loopback(smcd))
+ goto next;
if (smc_nl_handle_smcd_dev(smcd, skb, cb))
goto errout;
next:
@@ -372,7 +419,8 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
smc_smcd_terminate(wrk->smcd, &peer_gid, ev_info.vlan_id);
break;
case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
- if (ev_info.code == ISM_EVENT_REQUEST) {
+ if (ev_info.code == ISM_EVENT_REQUEST &&
+ wrk->smcd->ops->signal_event) {
ev_info.code = ISM_EVENT_RESPONSE;
wrk->smcd->ops->signal_event(wrk->smcd,
&peer_gid,
@@ -436,7 +484,7 @@ static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
static void smcd_register_dev(struct ism_dev *ism)
{
const struct smcd_ops *ops = ism_get_smcd_ops();
- struct smcd_dev *smcd;
+ struct smcd_dev *smcd, *fentry;
if (!ops)
return;
@@ -446,20 +494,28 @@ static void smcd_register_dev(struct ism_dev *ism)
if (!smcd)
return;
smcd->priv = ism;
+ smcd->client = &smc_ism_client;
ism_set_priv(ism, &smc_ism_client, smcd);
if (smc_pnetid_by_dev_port(&ism->pdev->dev, 0, smcd->pnetid))
smc_pnetid_by_table_smcd(smcd);
+ if (smcd->ops->supports_v2())
+ smc_ism_set_v2_capable();
mutex_lock(&smcd_dev_list.mutex);
- if (list_empty(&smcd_dev_list.list)) {
- if (smcd->ops->supports_v2())
- smc_ism_v2_capable = true;
- }
- /* sort list: devices without pnetid before devices with pnetid */
- if (smcd->pnetid[0])
+ /* sort list:
+ * - devices without pnetid before devices with pnetid;
+ * - loopback-ism always at the very beginning;
+ */
+ if (!smcd->pnetid[0]) {
+ fentry = list_first_entry_or_null(&smcd_dev_list.list,
+ struct smcd_dev, list);
+ if (fentry && smc_ism_is_loopback(fentry))
+ list_add(&smcd->list, &fentry->list);
+ else
+ list_add(&smcd->list, &smcd_dev_list.list);
+ } else {
list_add_tail(&smcd->list, &smcd_dev_list.list);
- else
- list_add(&smcd->list, &smcd_dev_list.list);
+ }
mutex_unlock(&smcd_dev_list.mutex);
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
@@ -541,6 +597,8 @@ int smc_ism_signal_shutdown(struct smc_link_group *lgr)
if (lgr->peer_shutdown)
return 0;
+ if (!lgr->smcd->ops->signal_event)
+ return 0;
memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
ev_info.vlan_id = lgr->vlan_id;
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index ffff40c30a06..6763133dd8d0 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -15,7 +15,7 @@
#include "smc.h"
-#define SMC_VIRTUAL_ISM_CHID_MASK 0xFF00
+#define SMC_EMULATED_ISM_CHID_MASK 0xFF00
#define SMC_ISM_IDENT_MASK 0x00FFFF
struct smcd_dev_list { /* List of SMCD devices */
@@ -48,10 +48,15 @@ int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id);
int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
struct smc_buf_desc *dmb_desc);
int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
+bool smc_ism_support_dmb_nocopy(struct smcd_dev *smcd);
+int smc_ism_attach_dmb(struct smcd_dev *dev, u64 token,
+ struct smc_buf_desc *dmb_desc);
+int smc_ism_detach_dmb(struct smcd_dev *dev, u64 token);
int smc_ism_signal_shutdown(struct smc_link_group *lgr);
void smc_ism_get_system_eid(u8 **eid);
u16 smc_ism_get_chid(struct smcd_dev *dev);
bool smc_ism_is_v2_capable(void);
+void smc_ism_set_v2_capable(void);
int smc_ism_init(void);
void smc_ism_exit(void);
int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
@@ -66,10 +71,10 @@ static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
return rc < 0 ? rc : 0;
}
-static inline bool __smc_ism_is_virtual(u16 chid)
+static inline bool __smc_ism_is_emulated(u16 chid)
{
/* CHIDs in range of 0xFF00 to 0xFFFF are reserved
- * for virtual ISM device.
+ * for Emulated-ISM device.
*
* loopback-ism: 0xFFFF
* virtio-ism: 0xFF00 ~ 0xFFFE
@@ -77,11 +82,16 @@ static inline bool __smc_ism_is_virtual(u16 chid)
return ((chid & 0xFF00) == 0xFF00);
}
-static inline bool smc_ism_is_virtual(struct smcd_dev *smcd)
+static inline bool smc_ism_is_emulated(struct smcd_dev *smcd)
{
u16 chid = smcd->ops->get_chid(smcd);
- return __smc_ism_is_virtual(chid);
+ return __smc_ism_is_emulated(chid);
+}
+
+static inline bool smc_ism_is_loopback(struct smcd_dev *smcd)
+{
+ return (smcd->ops->get_chid(smcd) == 0xFFFF);
}
#endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 018ce8133b02..f865c58c3aa7 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -997,13 +997,14 @@ static int smc_llc_cli_conf_link(struct smc_link *link,
}
static void smc_llc_save_add_link_rkeys(struct smc_link *link,
- struct smc_link *link_new)
+ struct smc_link *link_new,
+ u8 *llc_msg)
{
struct smc_llc_msg_add_link_v2_ext *ext;
struct smc_link_group *lgr = link->lgr;
int max, i;
- ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
+ ext = (struct smc_llc_msg_add_link_v2_ext *)(llc_msg +
SMC_WR_TX_SIZE);
max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
down_write(&lgr->rmbs_lock);
@@ -1098,7 +1099,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
if (rc)
goto out_clear_lnk;
if (lgr->smc_version == SMC_V2) {
- smc_llc_save_add_link_rkeys(link, lnk_new);
+ u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ?
+ (u8 *)lgr->wr_rx_buf_v2 : (u8 *)llc;
+ smc_llc_save_add_link_rkeys(link, lnk_new, llc_msg);
} else {
rc = smc_llc_cli_rkey_exchange(link, lnk_new);
if (rc) {
@@ -1498,7 +1501,9 @@ int smc_llc_srv_add_link(struct smc_link *link,
if (rc)
goto out_err;
if (lgr->smc_version == SMC_V2) {
- smc_llc_save_add_link_rkeys(link, link_new);
+ u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ?
+ (u8 *)lgr->wr_rx_buf_v2 : (u8 *)add_llc;
+ smc_llc_save_add_link_rkeys(link, link_new, llc_msg);
} else {
rc = smc_llc_srv_rkey_exchange(link, link_new);
if (rc)
@@ -1807,8 +1812,12 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
if (lgr->smc_version == SMC_V2) {
struct smc_llc_msg_delete_rkey_v2 *llcv2;
- memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
- llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
+ if (smc_link_shared_v2_rxbuf(link)) {
+ memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
+ llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
+ } else {
+ llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)llc;
+ }
llcv2->num_inval_rkeys = 0;
max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
new file mode 100644
index 000000000000..3c5f64ca4115
--- /dev/null
+++ b/net/smc/smc_loopback.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shared Memory Communications Direct over loopback-ism device.
+ *
+ * Functions for loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <net/smc.h>
+
+#include "smc_cdc.h"
+#include "smc_ism.h"
+#include "smc_loopback.h"
+
+#define SMC_LO_V2_CAPABLE 0x1 /* loopback-ism acts as ISMv2 */
+#define SMC_LO_SUPPORT_NOCOPY 0x1
+#define SMC_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char smc_lo_dev_name[] = "loopback-ism";
+static struct smc_lo_dev *lo_dev;
+
+static void smc_lo_generate_ids(struct smc_lo_dev *ldev)
+{
+ struct smcd_gid *lgid = &ldev->local_gid;
+ uuid_t uuid;
+
+ uuid_gen(&uuid);
+ memcpy(&lgid->gid, &uuid, sizeof(lgid->gid));
+ memcpy(&lgid->gid_ext, (u8 *)&uuid + sizeof(lgid->gid),
+ sizeof(lgid->gid_ext));
+
+ ldev->chid = SMC_LO_RESERVED_CHID;
+}
+
+static int smc_lo_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
+ u32 vid_valid, u32 vid)
+{
+ struct smc_lo_dev *ldev = smcd->priv;
+
+ /* rgid should be the same as lgid */
+ if (!ldev || rgid->gid != ldev->local_gid.gid ||
+ rgid->gid_ext != ldev->local_gid.gid_ext)
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
+ void *client_priv)
+{
+ struct smc_lo_dmb_node *dmb_node, *tmp_node;
+ struct smc_lo_dev *ldev = smcd->priv;
+ int sba_idx, rc;
+
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, SMC_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == SMC_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+ dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
+ __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC);
+ if (!dmb_node->cpu_addr) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->sba_idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
+ struct smc_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ kvfree(dmb_node->cpu_addr);
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct smc_lo_dev *ldev = smcd->priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __smc_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd)
+{
+ return SMC_LO_SUPPORT_NOCOPY;
+}
+
+static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct smc_lo_dev *ldev = smcd->priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!refcount_inc_not_zero(&dmb_node->refcnt))
+ /* the dmb is being unregistered, but has
+ * not been removed from the hash table.
+ */
+ return -EINVAL;
+
+ /* provide dmb information */
+ dmb->sba_idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token)
+{
+ struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct smc_lo_dev *ldev = smcd->priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __smc_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct smc_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct smc_lo_dev *ldev = smcd->priv;
+ struct smc_connection *conn;
+
+ if (!sf)
+ /* since sndbuf is merged with peer DMB, there is
+ * no need to copy data from sndbuf to peer DMB.
+ */
+ return 0;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ conn = smcd->conn[rmb_node->sba_idx];
+ if (!conn || conn->killed)
+ return -EPIPE;
+ tasklet_schedule(&conn->rx_tsklet);
+ return 0;
+}
+
+static int smc_lo_supports_v2(void)
+{
+ return SMC_LO_V2_CAPABLE;
+}
+
+static void smc_lo_get_local_gid(struct smcd_dev *smcd,
+ struct smcd_gid *smcd_gid)
+{
+ struct smc_lo_dev *ldev = smcd->priv;
+
+ smcd_gid->gid = ldev->local_gid.gid;
+ smcd_gid->gid_ext = ldev->local_gid.gid_ext;
+}
+
+static u16 smc_lo_get_chid(struct smcd_dev *smcd)
+{
+ return ((struct smc_lo_dev *)smcd->priv)->chid;
+}
+
+static struct device *smc_lo_get_dev(struct smcd_dev *smcd)
+{
+ return &((struct smc_lo_dev *)smcd->priv)->dev;
+}
+
+static const struct smcd_ops lo_ops = {
+ .query_remote_gid = smc_lo_query_rgid,
+ .register_dmb = smc_lo_register_dmb,
+ .unregister_dmb = smc_lo_unregister_dmb,
+ .support_dmb_nocopy = smc_lo_support_dmb_nocopy,
+ .attach_dmb = smc_lo_attach_dmb,
+ .detach_dmb = smc_lo_detach_dmb,
+ .add_vlan_id = NULL,
+ .del_vlan_id = NULL,
+ .set_vlan_required = NULL,
+ .reset_vlan_required = NULL,
+ .signal_event = NULL,
+ .move_data = smc_lo_move_data,
+ .supports_v2 = smc_lo_supports_v2,
+ .get_local_gid = smc_lo_get_local_gid,
+ .get_chid = smc_lo_get_chid,
+ .get_dev = smc_lo_get_dev,
+};
+
+static struct smcd_dev *smcd_lo_alloc_dev(const struct smcd_ops *ops,
+ int max_dmbs)
+{
+ struct smcd_dev *smcd;
+
+ smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
+ if (!smcd)
+ return NULL;
+
+ smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
+ GFP_KERNEL);
+ if (!smcd->conn)
+ goto out_smcd;
+
+ smcd->ops = ops;
+
+ spin_lock_init(&smcd->lock);
+ spin_lock_init(&smcd->lgr_lock);
+ INIT_LIST_HEAD(&smcd->vlan);
+ INIT_LIST_HEAD(&smcd->lgr_list);
+ init_waitqueue_head(&smcd->lgrs_deleted);
+ return smcd;
+
+out_smcd:
+ kfree(smcd);
+ return NULL;
+}
+
+static int smcd_lo_register_dev(struct smc_lo_dev *ldev)
+{
+ struct smcd_dev *smcd;
+
+ smcd = smcd_lo_alloc_dev(&lo_ops, SMC_LO_MAX_DMBS);
+ if (!smcd)
+ return -ENOMEM;
+ ldev->smcd = smcd;
+ smcd->priv = ldev;
+ smc_ism_set_v2_capable();
+ mutex_lock(&smcd_dev_list.mutex);
+ list_add(&smcd->list, &smcd_dev_list.list);
+ mutex_unlock(&smcd_dev_list.mutex);
+ pr_warn_ratelimited("smc: adding smcd device %s\n",
+ dev_name(&ldev->dev));
+ return 0;
+}
+
+static void smcd_lo_unregister_dev(struct smc_lo_dev *ldev)
+{
+ struct smcd_dev *smcd = ldev->smcd;
+
+ pr_warn_ratelimited("smc: removing smcd device %s\n",
+ dev_name(&ldev->dev));
+ smcd->going_away = 1;
+ smc_smcd_terminate_all(smcd);
+ mutex_lock(&smcd_dev_list.mutex);
+ list_del_init(&smcd->list);
+ mutex_unlock(&smcd_dev_list.mutex);
+ kfree(smcd->conn);
+ kfree(smcd);
+}
+
+static int smc_lo_dev_init(struct smc_lo_dev *ldev)
+{
+ smc_lo_generate_ids(ldev);
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+
+ return smcd_lo_register_dev(ldev);
+}
+
+static void smc_lo_dev_exit(struct smc_lo_dev *ldev)
+{
+ smcd_lo_unregister_dev(ldev);
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static void smc_lo_dev_release(struct device *dev)
+{
+ struct smc_lo_dev *ldev =
+ container_of(dev, struct smc_lo_dev, dev);
+
+ kfree(ldev);
+}
+
+static int smc_lo_dev_probe(void)
+{
+ struct smc_lo_dev *ldev;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ ldev->dev.parent = NULL;
+ ldev->dev.release = smc_lo_dev_release;
+ device_initialize(&ldev->dev);
+ dev_set_name(&ldev->dev, smc_lo_dev_name);
+
+ ret = smc_lo_dev_init(ldev);
+ if (ret)
+ goto free_dev;
+
+ lo_dev = ldev; /* global loopback device */
+ return 0;
+
+free_dev:
+ put_device(&ldev->dev);
+ return ret;
+}
+
+static void smc_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ smc_lo_dev_exit(lo_dev);
+ put_device(&lo_dev->dev); /* device_initialize in smc_lo_dev_probe */
+}
+
+int smc_loopback_init(void)
+{
+ return smc_lo_dev_probe();
+}
+
+void smc_loopback_exit(void)
+{
+ smc_lo_dev_remove();
+}
diff --git a/net/smc/smc_loopback.h b/net/smc/smc_loopback.h
new file mode 100644
index 000000000000..04dc6808d2e1
--- /dev/null
+++ b/net/smc/smc_loopback.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications Direct over loopback-ism device.
+ *
+ * SMC-D loopback-ism device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _SMC_LOOPBACK_H
+#define _SMC_LOOPBACK_H
+
+#include <linux/device.h>
+#include <net/smc.h>
+
+#if IS_ENABLED(CONFIG_SMC_LO)
+#define SMC_LO_MAX_DMBS 5000
+#define SMC_LO_DMBS_HASH_BITS 12
+#define SMC_LO_RESERVED_CHID 0xFFFF
+
+struct smc_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct smc_lo_dev {
+ struct smcd_dev *smcd;
+ struct device dev;
+ u16 chid;
+ struct smcd_gid local_gid;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, SMC_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, SMC_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int smc_loopback_init(void);
+void smc_loopback_exit(void);
+#else
+static inline int smc_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void smc_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _SMC_LOOPBACK_H */
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 9f2c58c5a86b..716808f374a8 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -753,7 +753,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
write_lock(&sn->pnetids_ndev.lock);
list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
- if (smc_pnet_match(pnetid, pe->pnetid)) {
+ if (smc_pnet_match(pnetid, pi->pnetid)) {
refcount_inc(&pi->refcnt);
kfree(pe);
goto unlock;
@@ -806,6 +806,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
struct net_device *dev;
+ /* Newly created netns do not have devices.
+ * Do not even acquire rtnl.
+ */
+ if (list_empty(&net->dev_base_head))
+ return;
+
+ /* Note: This might not be needed, because smc_pnet_netdev_event()
+ * is also calling smc_pnet_add_base_pnetid() when handling
+ * NETDEV_UP event.
+ */
rtnl_lock();
for_each_netdev(net, dev)
smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
@@ -877,9 +887,6 @@ int smc_pnet_net_init(struct net *net)
smc_pnet_create_pnetids_list(net);
- /* disable handshake limitation by default */
- net->smc.limit_smc_hs = 0;
-
return 0;
}
@@ -1047,9 +1054,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
for (i = 1; i <= SMC_MAX_PORTS; i++) {
if (!rdma_is_port_valid(ibdev->ibdev, i))
continue;
- if (!ibdev->ibdev->ops.get_netdev)
- continue;
- ndev = ibdev->ibdev->ops.get_netdev(ibdev->ibdev, i);
+ ndev = ib_device_get_netdev(ibdev->ibdev, i);
if (!ndev)
continue;
dev_put(ndev);
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 9a2f3638d161..e7f1134453ef 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -42,10 +42,10 @@ static void smc_rx_wake_up(struct sock *sk)
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
EPOLLRDNORM | EPOLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+ sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_HUP);
rcu_read_unlock();
}
@@ -197,7 +197,7 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
partial[i].offset = offset;
partial[i].len = size;
partial[i].private = (unsigned long)priv[i];
- buf += size / sizeof(*buf);
+ buf += size;
left -= size;
offset = 0;
}
@@ -238,22 +238,23 @@ out:
return -ENOMEM;
}
-static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
+static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn, size_t peeked)
{
- return atomic_read(&conn->bytes_to_rcv) &&
+ return smc_rx_data_available(conn, peeked) &&
!atomic_read(&conn->splice_pending);
}
/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
* @smc smc socket
* @timeo pointer to max seconds to wait, pointer to value 0 for no timeout
+ * @peeked number of bytes already peeked
* @fcrit add'l criterion to evaluate as function pointer
* Returns:
* 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
* 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
*/
-int smc_rx_wait(struct smc_sock *smc, long *timeo,
- int (*fcrit)(struct smc_connection *conn))
+int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
+ int (*fcrit)(struct smc_connection *conn, size_t baseline))
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct smc_connection *conn = &smc->conn;
@@ -262,7 +263,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
struct sock *sk = &smc->sk;
int rc;
- if (fcrit(conn))
+ if (fcrit(conn, peeked))
return 1;
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
add_wait_queue(sk_sleep(sk), &wait);
@@ -271,7 +272,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
cflags->peer_conn_abort ||
READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
conn->killed ||
- fcrit(conn),
+ fcrit(conn, peeked),
&wait);
remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -322,11 +323,11 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
return -EAGAIN;
}
-static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+static bool smc_rx_recvmsg_data_available(struct smc_sock *smc, size_t peeked)
{
struct smc_connection *conn = &smc->conn;
- if (smc_rx_data_available(conn))
+ if (smc_rx_data_available(conn, peeked))
return true;
else if (conn->urg_state == SMC_URG_VALID)
/* we received a single urgent Byte - skip */
@@ -344,10 +345,10 @@ static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
struct pipe_inode_info *pipe, size_t len, int flags)
{
- size_t copylen, read_done = 0, read_remaining = len;
+ size_t copylen, read_done = 0, read_remaining = len, peeked_bytes = 0;
size_t chunk_len, chunk_off, chunk_len_sum;
struct smc_connection *conn = &smc->conn;
- int (*func)(struct smc_connection *conn);
+ int (*func)(struct smc_connection *conn, size_t baseline);
union smc_host_cursor cons;
int readable, chunk;
char *rcvbuf_base;
@@ -384,14 +385,14 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
if (conn->killed)
break;
- if (smc_rx_recvmsg_data_available(smc))
+ if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
goto copy;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
/* smc_cdc_msg_recv_action() could have run after
* above smc_rx_recvmsg_data_available()
*/
- if (smc_rx_recvmsg_data_available(smc))
+ if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
goto copy;
break;
}
@@ -425,26 +426,28 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
}
}
- if (!smc_rx_data_available(conn)) {
- smc_rx_wait(smc, &timeo, smc_rx_data_available);
+ if (!smc_rx_data_available(conn, peeked_bytes)) {
+ smc_rx_wait(smc, &timeo, peeked_bytes, smc_rx_data_available);
continue;
}
copy:
/* initialize variables for 1st iteration of subsequent loop */
/* could be just 1 byte, even after waiting on data above */
- readable = atomic_read(&conn->bytes_to_rcv);
+ readable = smc_rx_data_available(conn, peeked_bytes);
splbytes = atomic_read(&conn->splice_pending);
if (!readable || (msg && splbytes)) {
if (splbytes)
func = smc_rx_data_available_and_no_splice_pend;
else
func = smc_rx_data_available;
- smc_rx_wait(smc, &timeo, func);
+ smc_rx_wait(smc, &timeo, peeked_bytes, func);
continue;
}
smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
+ if ((flags & MSG_PEEK) && peeked_bytes)
+ smc_curs_add(conn->rmb_desc->len, &cons, peeked_bytes);
/* subsequent splice() calls pick up where previous left */
if (splbytes)
smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
@@ -480,6 +483,8 @@ copy:
}
read_remaining -= chunk_len;
read_done += chunk_len;
+ if (flags & MSG_PEEK)
+ peeked_bytes += chunk_len;
if (chunk_len_sum == copylen)
break; /* either on 1st or 2nd iteration */
diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
index db823c97d824..994f5e42d1ba 100644
--- a/net/smc/smc_rx.h
+++ b/net/smc/smc_rx.h
@@ -21,11 +21,11 @@ void smc_rx_init(struct smc_sock *smc);
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
struct pipe_inode_info *pipe, size_t len, int flags);
-int smc_rx_wait(struct smc_sock *smc, long *timeo,
- int (*fcrit)(struct smc_connection *conn));
-static inline int smc_rx_data_available(struct smc_connection *conn)
+int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
+ int (*fcrit)(struct smc_connection *conn, size_t baseline));
+static inline int smc_rx_data_available(struct smc_connection *conn, size_t peeked)
{
- return atomic_read(&conn->bytes_to_rcv);
+ return atomic_read(&conn->bytes_to_rcv) - peeked;
}
#endif /* SMC_RX_H */
diff --git a/net/smc/smc_stats.c b/net/smc/smc_stats.c
index ca14c0f3a07d..e71b17d1e21c 100644
--- a/net/smc/smc_stats.c
+++ b/net/smc/smc_stats.c
@@ -218,6 +218,12 @@ static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
smc_tech->tx_bytes,
SMC_NLA_STATS_PAD))
goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_STATS_T_RX_RMB_USAGE,
+ smc_tech->rx_rmbuse))
+ goto errattr;
+ if (nla_put_uint(skb, SMC_NLA_STATS_T_TX_RMB_USAGE,
+ smc_tech->tx_rmbuse))
+ goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
smc_tech->rx_cnt,
SMC_NLA_STATS_PAD))
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
index 9d32058db2b5..571f9d9e7814 100644
--- a/net/smc/smc_stats.h
+++ b/net/smc/smc_stats.h
@@ -19,7 +19,7 @@
#include "smc_clc.h"
-#define SMC_MAX_FBACK_RSN_CNT 30
+#define SMC_MAX_FBACK_RSN_CNT 36
enum {
SMC_BUF_8K,
@@ -79,6 +79,8 @@ struct smc_stats_tech {
u64 tx_bytes;
u64 rx_cnt;
u64 tx_cnt;
+ u64 rx_rmbuse;
+ u64 tx_rmbuse;
};
struct smc_stats {
@@ -135,38 +137,46 @@ do { \
} \
while (0)
-#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
+#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _is_add, _len) \
do { \
+ typeof(_smc_stats) stats = (_smc_stats); \
+ typeof(_is_add) is_a = (_is_add); \
typeof(_len) _l = (_len); \
typeof(_tech) t = (_tech); \
int _pos; \
int m = SMC_BUF_MAX - 1; \
if (_l <= 0) \
break; \
- _pos = fls((_l - 1) >> 13); \
- _pos = (_pos <= m) ? _pos : m; \
- this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
+ if (is_a) { \
+ _pos = fls((_l - 1) >> 13); \
+ _pos = (_pos <= m) ? _pos : m; \
+ this_cpu_inc((*stats).smc[t].k ## _rmbsize.buf[_pos]); \
+ this_cpu_add((*stats).smc[t].k ## _rmbuse, _l); \
+ } else { \
+ this_cpu_sub((*stats).smc[t].k ## _rmbuse, _l); \
+ } \
} \
while (0)
#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)
-#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
+#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _is_add, _len) \
do { \
struct net *_net = sock_net(&(_smc)->sk); \
struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(_is_add) is_add = (_is_add); \
typeof(_is_smcd) is_d = (_is_smcd); \
typeof(_is_rx) is_r = (_is_rx); \
typeof(_len) l = (_len); \
if ((is_d) && (is_r)) \
- SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, is_add, l); \
if ((is_d) && !(is_r)) \
- SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, is_add, l); \
if (!(is_d) && (is_r)) \
- SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, is_add, l); \
if (!(is_d) && !(is_r)) \
- SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, is_add, l); \
} \
while (0)
diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c
index a5946d1b9d60..2fab6456f765 100644
--- a/net/smc/smc_sysctl.c
+++ b/net/smc/smc_sysctl.c
@@ -90,11 +90,20 @@ static struct ctl_table smc_table[] = {
.extra1 = &conns_per_lgr_min,
.extra2 = &conns_per_lgr_max,
},
- { }
+ {
+ .procname = "limit_smc_hs",
+ .data = &init_net.smc.limit_smc_hs,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
};
int __net_init smc_sysctl_net_init(struct net *net)
{
+ size_t table_size = ARRAY_SIZE(smc_table);
struct ctl_table *table;
table = smc_table;
@@ -105,12 +114,12 @@ int __net_init smc_sysctl_net_init(struct net *net)
if (!table)
goto err_alloc;
- for (i = 0; i < ARRAY_SIZE(smc_table) - 1; i++)
+ for (i = 0; i < table_size; i++)
table[i].data += (void *)net - (void *)&init_net;
}
net->smc.smc_hdr = register_net_sysctl_sz(net, "net/smc", table,
- ARRAY_SIZE(smc_table));
+ table_size);
if (!net->smc.smc_hdr)
goto err_reg;
@@ -121,6 +130,8 @@ int __net_init smc_sysctl_net_init(struct net *net)
WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init);
net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER;
net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER;
+ /* disable handshake limitation by default */
+ net->smc.limit_smc_hs = 0;
return 0;
@@ -133,7 +144,7 @@ err_alloc:
void __net_exit smc_sysctl_net_exit(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->smc.smc_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->smc.smc_hdr);
diff --git a/net/smc/smc_tracepoint.h b/net/smc/smc_tracepoint.h
index 9fc5e586d24a..a9a6e3c1113a 100644
--- a/net/smc/smc_tracepoint.h
+++ b/net/smc/smc_tracepoint.h
@@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(smc_msg_event,
__entry->smc = smc;
__entry->net_cookie = sock_net(sk)->net_cookie;
__entry->len = len;
- __assign_str(name, smc->conn.lnk->ibname);
+ __assign_str(name);
),
TP_printk("smc=%p net=%llu len=%zu dev=%s",
@@ -104,7 +104,7 @@ TRACE_EVENT(smcr_link_down,
__entry->lgr = lgr;
__entry->net_cookie = lgr->net->net_cookie;
__entry->state = lnk->state;
- __assign_str(name, lnk->ibname);
+ __assign_str(name);
__entry->location = location;
),
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 0021065a600a..b04a21b8c511 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -439,7 +439,7 @@ static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
return; /* short message */
temp_wr_id = wc->wr_id;
index = do_div(temp_wr_id, link->wr_rx_cnt);
- wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
+ wr_rx = (struct smc_wr_rx_hdr *)(link->wr_rx_bufs + index * link->wr_rx_buflen);
hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
if (handler->type == wr_rx->type)
handler->handler(wc, wr_rx);
@@ -555,7 +555,6 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
static void smc_wr_init_sge(struct smc_link *lnk)
{
- int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
bool send_inline = (lnk->qp_attr.cap.max_inline_data > SMC_WR_TX_SIZE);
u32 i;
@@ -608,13 +607,14 @@ static void smc_wr_init_sge(struct smc_link *lnk)
* the larger spillover buffer, allowing easy data mapping.
*/
for (i = 0; i < lnk->wr_rx_cnt; i++) {
- int x = i * sges_per_buf;
+ int x = i * lnk->wr_rx_sge_cnt;
lnk->wr_rx_sges[x].addr =
- lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
- lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE;
+ lnk->wr_rx_dma_addr + i * lnk->wr_rx_buflen;
+ lnk->wr_rx_sges[x].length = smc_link_shared_v2_rxbuf(lnk) ?
+ SMC_WR_TX_SIZE : lnk->wr_rx_buflen;
lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey;
- if (lnk->lgr->smc_version == SMC_V2) {
+ if (lnk->lgr->smc_version == SMC_V2 && smc_link_shared_v2_rxbuf(lnk)) {
lnk->wr_rx_sges[x + 1].addr =
lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE;
lnk->wr_rx_sges[x + 1].length =
@@ -624,7 +624,7 @@ static void smc_wr_init_sge(struct smc_link *lnk)
}
lnk->wr_rx_ibs[i].next = NULL;
lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x];
- lnk->wr_rx_ibs[i].num_sge = sges_per_buf;
+ lnk->wr_rx_ibs[i].num_sge = lnk->wr_rx_sge_cnt;
}
lnk->wr_reg.wr.next = NULL;
lnk->wr_reg.wr.num_sge = 0;
@@ -648,12 +648,14 @@ void smc_wr_free_link(struct smc_link *lnk)
smc_wr_tx_wait_no_pending_sends(lnk);
percpu_ref_kill(&lnk->wr_reg_refs);
wait_for_completion(&lnk->reg_ref_comp);
+ percpu_ref_exit(&lnk->wr_reg_refs);
percpu_ref_kill(&lnk->wr_tx_refs);
wait_for_completion(&lnk->tx_ref_comp);
+ percpu_ref_exit(&lnk->wr_tx_refs);
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
- SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+ lnk->wr_rx_buflen * lnk->wr_rx_cnt,
DMA_FROM_DEVICE);
lnk->wr_rx_dma_addr = 0;
}
@@ -738,13 +740,11 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
int smc_wr_alloc_link_mem(struct smc_link *link)
{
- int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 2 : 1;
-
/* allocate link related memory */
link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
if (!link->wr_tx_bufs)
goto no_mem;
- link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
+ link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, link->wr_rx_buflen,
GFP_KERNEL);
if (!link->wr_rx_bufs)
goto no_mem_wr_tx_bufs;
@@ -772,7 +772,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
if (!link->wr_tx_sges)
goto no_mem_wr_tx_rdma_sges;
link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
- sizeof(link->wr_rx_sges[0]) * sges_per_buf,
+ sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt,
GFP_KERNEL);
if (!link->wr_rx_sges)
goto no_mem_wr_tx_sges;
@@ -870,7 +870,7 @@ int smc_wr_create_link(struct smc_link *lnk)
smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
lnk->wr_rx_id = 0;
lnk->wr_rx_dma_addr = ib_dma_map_single(
- ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+ ibdev, lnk->wr_rx_bufs, lnk->wr_rx_buflen * lnk->wr_rx_cnt,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
lnk->wr_rx_dma_addr = 0;
@@ -878,13 +878,15 @@ int smc_wr_create_link(struct smc_link *lnk)
goto out;
}
if (lnk->lgr->smc_version == SMC_V2) {
- lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev,
- lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) {
- lnk->wr_rx_v2_dma_addr = 0;
- rc = -EIO;
- goto dma_unmap;
+ if (smc_link_shared_v2_rxbuf(lnk)) {
+ lnk->wr_rx_v2_dma_addr =
+ ib_dma_map_single(ibdev, lnk->lgr->wr_rx_buf_v2,
+ SMC_WR_BUF_V2_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) {
+ lnk->wr_rx_v2_dma_addr = 0;
+ rc = -EIO;
+ goto dma_unmap;
+ }
}
lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev,
lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE,
@@ -912,11 +914,13 @@ int smc_wr_create_link(struct smc_link *lnk)
init_waitqueue_head(&lnk->wr_reg_wait);
rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
if (rc)
- goto dma_unmap;
+ goto cancel_ref;
init_completion(&lnk->reg_ref_comp);
init_waitqueue_head(&lnk->wr_rx_empty_wait);
return rc;
+cancel_ref:
+ percpu_ref_exit(&lnk->wr_tx_refs);
dma_unmap:
if (lnk->wr_rx_v2_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
@@ -931,7 +935,7 @@ dma_unmap:
lnk->wr_tx_v2_dma_addr = 0;
}
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
- SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+ lnk->wr_rx_buflen * lnk->wr_rx_cnt,
DMA_FROM_DEVICE);
lnk->wr_rx_dma_addr = 0;
out:
diff --git a/net/socket.c b/net/socket.c
index ed3df2f749bf..28bae5a94234 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -88,7 +88,7 @@
#include <linux/xattr.h>
#include <linux/nospec.h>
#include <linux/indirect_call_wrapper.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/net.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -110,6 +110,8 @@
#include <linux/ptp_clock_kernel.h>
#include <trace/events/sock.h>
+#include "core/dev.h"
+
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
@@ -153,7 +155,6 @@ static void sock_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
.poll = sock_poll,
@@ -343,7 +344,7 @@ static void init_inodecache(void)
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
BUG_ON(sock_inode_cachep == NULL);
}
@@ -478,6 +479,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
sock->file = file;
file->private_data = sock;
stream_open(SOCK_INODE(sock), file);
+ /*
+ * Disable permission and pre-content events, but enable legacy
+ * inotify events for legacy users.
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
return file;
}
EXPORT_SYMBOL(sock_alloc_file);
@@ -510,7 +516,7 @@ static int sock_map_fd(struct socket *sock, int flags)
struct socket *sock_from_file(struct file *file)
{
- if (file->f_op == &socket_file_ops)
+ if (likely(file->f_op == &socket_file_ops))
return file->private_data; /* set in sock_alloc_file */
return NULL;
@@ -550,24 +556,6 @@ struct socket *sockfd_lookup(int fd, int *err)
}
EXPORT_SYMBOL(sockfd_lookup);
-static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
-{
- struct fd f = fdget(fd);
- struct socket *sock;
-
- *err = -EBADF;
- if (f.file) {
- sock = sock_from_file(f.file);
- if (likely(sock)) {
- *fput_needed = f.flags & FDPUT_FPUT;
- return sock;
- }
- *err = -ENOTSOCK;
- fdput(f);
- }
- return NULL;
-}
-
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
size_t size)
{
@@ -688,7 +676,7 @@ void sock_release(struct socket *sock)
}
EXPORT_SYMBOL(sock_release);
-void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
+void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags)
{
u8 flags = *tx_flags;
@@ -793,34 +781,6 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(kernel_sendmsg);
-/**
- * kernel_sendmsg_locked - send a message through @sock (kernel-space)
- * @sk: sock
- * @msg: message header
- * @vec: output s/g array
- * @num: output s/g array length
- * @size: total message data size
- *
- * Builds the message data with @vec and sends it through @sock.
- * Returns the number of bytes sent, or an error code.
- * Caller must hold @sk.
- */
-
-int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t size)
-{
- struct socket *sock = sk->sk_socket;
- const struct proto_ops *ops = READ_ONCE(sock->ops);
-
- if (!ops->sendmsg_locked)
- return sock_no_sendmsg_locked(sk, msg, size);
-
- iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
-
- return ops->sendmsg_locked(sk, msg, msg_data_left(msg));
-}
-EXPORT_SYMBOL(kernel_sendmsg_locked);
-
static bool skb_is_err_queue(const struct sk_buff *skb)
{
/* pkt_type of skbs enqueued on the error queue are set to
@@ -946,11 +906,17 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
memset(&tss, 0, sizeof(tss));
tsflags = READ_ONCE(sk->sk_tsflags);
- if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
- (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if_index = 0;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
@@ -1021,12 +987,23 @@ static void sock_recv_mark(struct msghdr *msg, struct sock *sk,
}
}
+static void sock_recv_priority(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (sock_flag(sk, SOCK_RCVPRIORITY) && skb) {
+ __u32 priority = skb->priority;
+
+ put_cmsg(msg, SOL_SOCKET, SO_PRIORITY, sizeof(__u32), &priority);
+ }
+}
+
void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
sock_recv_timestamp(msg, sk, skb);
sock_recv_drops(msg, sk, skb);
sock_recv_mark(msg, sk, skb);
+ sock_recv_priority(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_cmsgs);
@@ -1569,8 +1546,15 @@ int __sock_create(struct net *net, int family, int type, int protocol,
rcu_read_unlock();
err = pf->create(net, sock, protocol, kern);
- if (err < 0)
+ if (err < 0) {
+ /* ->create should release the allocated sock->sk object on error
+ * and make sure sock->sk is set to NULL to avoid use-after-free
+ */
+ DEBUG_NET_WARN_ONCE(sock->sk,
+ "%ps must clear sock->sk on failure, family: %d, type: %d, protocol: %d\n",
+ pf->create, family, type, protocol);
goto out_module_put;
+ }
/*
* Now to bump the refcnt of the [loadable] module that owns this
@@ -1822,6 +1806,20 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
return __sys_socketpair(family, type, protocol, usockvec);
}
+int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
+ int addrlen)
+{
+ int err;
+
+ err = security_socket_bind(sock, (struct sockaddr *)address,
+ addrlen);
+ if (!err)
+ err = READ_ONCE(sock->ops)->bind(sock,
+ (struct sockaddr *)address,
+ addrlen);
+ return err;
+}
+
/*
* Bind a name to a socket. Nothing much to do here since it's
* the protocol's responsibility to handle the local address.
@@ -1834,23 +1832,20 @@ int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
- int err, fput_needed;
-
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (sock) {
- err = move_addr_to_kernel(umyaddr, addrlen, &address);
- if (!err) {
- err = security_socket_bind(sock,
- (struct sockaddr *)&address,
- addrlen);
- if (!err)
- err = READ_ONCE(sock->ops)->bind(sock,
- (struct sockaddr *)
- &address, addrlen);
- }
- fput_light(sock->file, fput_needed);
- }
- return err;
+ CLASS(fd, f)(fd);
+ int err;
+
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ err = move_addr_to_kernel(umyaddr, addrlen, &address);
+ if (unlikely(err))
+ return err;
+
+ return __sys_bind_socket(sock, &address, addrlen);
}
SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
@@ -1863,26 +1858,32 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
* necessary for a listen, and if that works, we mark the socket as
* ready for listening.
*/
+int __sys_listen_socket(struct socket *sock, int backlog)
+{
+ int somaxconn, err;
+
+ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
+ if ((unsigned int)backlog > somaxconn)
+ backlog = somaxconn;
+
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = READ_ONCE(sock->ops)->listen(sock, backlog);
+ return err;
+}
int __sys_listen(int fd, int backlog)
{
+ CLASS(fd, f)(fd);
struct socket *sock;
- int err, fput_needed;
- int somaxconn;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (sock) {
- somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
- if ((unsigned int)backlog > somaxconn)
- backlog = somaxconn;
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- err = security_socket_listen(sock, backlog);
- if (!err)
- err = READ_ONCE(sock->ops)->listen(sock, backlog);
-
- fput_light(sock->file, fput_needed);
- }
- return err;
+ return __sys_listen_socket(sock, backlog);
}
SYSCALL_DEFINE2(listen, int, fd, int, backlog)
@@ -1890,7 +1891,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
return __sys_listen(fd, backlog);
}
-struct file *do_accept(struct file *file, unsigned file_flags,
+struct file *do_accept(struct file *file, struct proto_accept_arg *arg,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
@@ -1926,8 +1927,8 @@ struct file *do_accept(struct file *file, unsigned file_flags,
if (err)
goto out_fd;
- err = ops->accept(sock, newsock, sock->file->f_flags | file_flags,
- false);
+ arg->flags |= sock->file->f_flags;
+ err = ops->accept(sock, newsock, arg);
if (err < 0)
goto out_fd;
@@ -1953,6 +1954,7 @@ out_fd:
static int __sys_accept4_file(struct file *file, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
+ struct proto_accept_arg arg = { };
struct file *newfile;
int newfd;
@@ -1966,7 +1968,7 @@ static int __sys_accept4_file(struct file *file, struct sockaddr __user *upeer_s
if (unlikely(newfd < 0))
return newfd;
- newfile = do_accept(file, 0, upeer_sockaddr, upeer_addrlen,
+ newfile = do_accept(file, &arg, upeer_sockaddr, upeer_addrlen,
flags);
if (IS_ERR(newfile)) {
put_unused_fd(newfd);
@@ -1991,17 +1993,12 @@ static int __sys_accept4_file(struct file *file, struct sockaddr __user *upeer_s
int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
- int ret = -EBADF;
- struct fd f;
+ CLASS(fd, f)(fd);
- f = fdget(fd);
- if (f.file) {
- ret = __sys_accept4_file(f.file, upeer_sockaddr,
+ if (fd_empty(f))
+ return -EBADF;
+ return __sys_accept4_file(fd_file(f), upeer_sockaddr,
upeer_addrlen, flags);
- fdput(f);
- }
-
- return ret;
}
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
@@ -2053,20 +2050,18 @@ out:
int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
{
- int ret = -EBADF;
- struct fd f;
+ struct sockaddr_storage address;
+ CLASS(fd, f)(fd);
+ int ret;
- f = fdget(fd);
- if (f.file) {
- struct sockaddr_storage address;
+ if (fd_empty(f))
+ return -EBADF;
- ret = move_addr_to_kernel(uservaddr, addrlen, &address);
- if (!ret)
- ret = __sys_connect_file(f.file, &address, addrlen, 0);
- fdput(f);
- }
+ ret = move_addr_to_kernel(uservaddr, addrlen, &address);
+ if (ret)
+ return ret;
- return ret;
+ return __sys_connect_file(fd_file(f), &address, addrlen, 0);
}
SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
@@ -2085,26 +2080,25 @@ int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
{
struct socket *sock;
struct sockaddr_storage address;
- int err, fput_needed;
+ CLASS(fd, f)(fd);
+ int err;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- goto out;
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
err = security_socket_getsockname(sock);
if (err)
- goto out_put;
+ return err;
err = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 0);
if (err < 0)
- goto out_put;
- /* "err" is actually length in this case */
- err = move_addr_to_user(&address, err, usockaddr, usockaddr_len);
+ return err;
-out_put:
- fput_light(sock->file, fput_needed);
-out:
- return err;
+ /* "err" is actually length in this case */
+ return move_addr_to_user(&address, err, usockaddr, usockaddr_len);
}
SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
@@ -2123,26 +2117,25 @@ int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
{
struct socket *sock;
struct sockaddr_storage address;
- int err, fput_needed;
+ CLASS(fd, f)(fd);
+ int err;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (sock != NULL) {
- const struct proto_ops *ops = READ_ONCE(sock->ops);
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- err = security_socket_getpeername(sock);
- if (err) {
- fput_light(sock->file, fput_needed);
- return err;
- }
+ err = security_socket_getpeername(sock);
+ if (err)
+ return err;
- err = ops->getname(sock, (struct sockaddr *)&address, 1);
- if (err >= 0)
- /* "err" is actually length in this case */
- err = move_addr_to_user(&address, err, usockaddr,
- usockaddr_len);
- fput_light(sock->file, fput_needed);
- }
- return err;
+ err = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 1);
+ if (err < 0)
+ return err;
+
+ /* "err" is actually length in this case */
+ return move_addr_to_user(&address, err, usockaddr, usockaddr_len);
}
SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
@@ -2163,14 +2156,17 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
struct sockaddr_storage address;
int err;
struct msghdr msg;
- int fput_needed;
err = import_ubuf(ITER_SOURCE, buff, len, &msg.msg_iter);
if (unlikely(err))
return err;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- goto out;
+
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
msg.msg_name = NULL;
msg.msg_control = NULL;
@@ -2180,7 +2176,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
if (addr) {
err = move_addr_to_kernel(addr, addr_len, &address);
if (err < 0)
- goto out_put;
+ return err;
msg.msg_name = (struct sockaddr *)&address;
msg.msg_namelen = addr_len;
}
@@ -2188,12 +2184,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
- err = __sock_sendmsg(sock, &msg);
-
-out_put:
- fput_light(sock->file, fput_needed);
-out:
- return err;
+ return __sock_sendmsg(sock, &msg);
}
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
@@ -2228,14 +2219,18 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
};
struct socket *sock;
int err, err2;
- int fput_needed;
err = import_ubuf(ITER_DEST, ubuf, size, &msg.msg_iter);
if (unlikely(err))
return err;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- goto out;
+
+ CLASS(fd, f)(fd);
+
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
@@ -2247,9 +2242,6 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
if (err2 < 0)
err = err2;
}
-
- fput_light(sock->file, fput_needed);
-out:
return err;
}
@@ -2324,17 +2316,16 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
{
sockptr_t optval = USER_SOCKPTR(user_optval);
bool compat = in_compat_syscall();
- int err, fput_needed;
struct socket *sock;
+ CLASS(fd, f)(fd);
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- return err;
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- err = do_sock_setsockopt(sock, compat, level, optname, optval, optlen);
-
- fput_light(sock->file, fput_needed);
- return err;
+ return do_sock_setsockopt(sock, compat, level, optname, optval, optlen);
}
SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
@@ -2349,7 +2340,7 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int do_sock_getsockopt(struct socket *sock, bool compat, int level,
int optname, sockptr_t optval, sockptr_t optlen)
{
- int max_optlen __maybe_unused;
+ int max_optlen __maybe_unused = 0;
const struct proto_ops *ops;
int err;
@@ -2358,7 +2349,7 @@ int do_sock_getsockopt(struct socket *sock, bool compat, int level,
return err;
if (!compat)
- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
+ copy_from_sockptr(&max_optlen, optlen, sizeof(int));
ops = READ_ONCE(sock->ops);
if (level == SOL_SOCKET) {
@@ -2390,20 +2381,17 @@ EXPORT_SYMBOL(do_sock_getsockopt);
int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
int __user *optlen)
{
- int err, fput_needed;
struct socket *sock;
- bool compat;
+ CLASS(fd, f)(fd);
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- return err;
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- compat = in_compat_syscall();
- err = do_sock_getsockopt(sock, compat, level, optname,
+ return do_sock_getsockopt(sock, in_compat_syscall(), level, optname,
USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
-
- fput_light(sock->file, fput_needed);
- return err;
}
SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
@@ -2429,15 +2417,16 @@ int __sys_shutdown_sock(struct socket *sock, int how)
int __sys_shutdown(int fd, int how)
{
- int err, fput_needed;
struct socket *sock;
+ CLASS(fd, f)(fd);
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (sock != NULL) {
- err = __sys_shutdown_sock(sock, how);
- fput_light(sock->file, fput_needed);
- }
- return err;
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ return __sys_shutdown_sock(sock, how);
}
SYSCALL_DEFINE2(shutdown, int, fd, int, how)
@@ -2600,9 +2589,9 @@ out:
return err;
}
-int sendmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct iovec **iov)
+static int sendmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct iovec **iov)
{
int err;
@@ -2653,22 +2642,21 @@ long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
bool forbid_cmsg_compat)
{
- int fput_needed, err;
struct msghdr msg_sys;
struct socket *sock;
if (forbid_cmsg_compat && (flags & MSG_CMSG_COMPAT))
return -EINVAL;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- goto out;
+ CLASS(fd, f)(fd);
- err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- fput_light(sock->file, fput_needed);
-out:
- return err;
+ return ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);
}
SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags)
@@ -2683,7 +2671,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int
int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, bool forbid_cmsg_compat)
{
- int fput_needed, err, datagrams;
+ int err, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
@@ -2699,9 +2687,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
datagrams = 0;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- return err;
+ CLASS(fd, f)(fd);
+
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
used_address.name_len = UINT_MAX;
entry = mmsg;
@@ -2738,8 +2730,6 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
cond_resched();
}
- fput_light(sock->file, fput_needed);
-
/* We only return an error if no datagrams were able to be sent */
if (datagrams != 0)
return datagrams;
@@ -2753,10 +2743,10 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
return __sys_sendmmsg(fd, mmsg, vlen, flags, true);
}
-int recvmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct sockaddr __user **uaddr,
- struct iovec **iov)
+static int recvmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct sockaddr __user **uaddr,
+ struct iovec **iov)
{
ssize_t err;
@@ -2861,22 +2851,21 @@ long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
bool forbid_cmsg_compat)
{
- int fput_needed, err;
struct msghdr msg_sys;
struct socket *sock;
if (forbid_cmsg_compat && (flags & MSG_CMSG_COMPAT))
return -EINVAL;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- goto out;
+ CLASS(fd, f)(fd);
- err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
- fput_light(sock->file, fput_needed);
-out:
- return err;
+ return ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
}
SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
@@ -2893,7 +2882,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
struct timespec64 *timeout)
{
- int fput_needed, err, datagrams;
+ int err = 0, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
@@ -2908,16 +2897,18 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
datagrams = 0;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- return err;
+ CLASS(fd, f)(fd);
+
+ if (fd_empty(f))
+ return -EBADF;
+ sock = sock_from_file(fd_file(f));
+ if (unlikely(!sock))
+ return -ENOTSOCK;
if (likely(!(flags & MSG_ERRQUEUE))) {
err = sock_error(sock->sk);
- if (err) {
- datagrams = err;
- goto out_put;
- }
+ if (err)
+ return err;
}
entry = mmsg;
@@ -2974,12 +2965,10 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
}
if (err == 0)
- goto out_put;
+ return datagrams;
- if (datagrams == 0) {
- datagrams = err;
- goto out_put;
- }
+ if (datagrams == 0)
+ return err;
/*
* We may return less entries than requested (vlen) if the
@@ -2994,9 +2983,6 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
*/
WRITE_ONCE(sock->sk->sk_err, -err);
}
-out_put:
- fput_light(sock->file, fput_needed);
-
return datagrams;
}
@@ -3580,6 +3566,10 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
struct sock *sk = sock->sk;
const struct proto_ops *ops = READ_ONCE(sock->ops);
+ struct proto_accept_arg arg = {
+ .flags = flags,
+ .kern = true,
+ };
int err;
err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
@@ -3587,7 +3577,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
if (err < 0)
goto done;
- err = ops->accept(sock, *newsock, flags, true);
+ err = ops->accept(sock, *newsock, &arg);
if (err < 0) {
sock_release(*newsock);
*newsock = NULL;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 8299ceb3e373..95696f42647e 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -347,7 +347,10 @@ static int strp_read_sock(struct strparser *strp)
struct socket *sock = strp->sk->sk_socket;
read_descriptor_t desc;
- if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
+ if (unlikely(!sock || !sock->ops))
+ return -EBUSY;
+
+ if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
return -EBUSY;
desc.arg.data = strp;
@@ -355,7 +358,10 @@ static int strp_read_sock(struct strparser *strp)
desc.count = 1; /* give more than one skb per call */
/* sk should be locked here, so okay to do read_sock */
- sock->ops->read_sock(strp->sk, &desc, strp_recv);
+ if (strp->cb.read_sock)
+ strp->cb.read_sock(strp, &desc, strp_recv);
+ else
+ sock->ops->read_sock(strp->sk, &desc, strp_recv);
desc.error = strp->cb.read_sock_done(strp, desc.error);
@@ -468,6 +474,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
strp->cb.unlock = cb->unlock ? : strp_sock_unlock;
strp->cb.rcv_msg = cb->rcv_msg;
strp->cb.parse_msg = cb->parse_msg;
+ strp->cb.read_sock = cb->read_sock;
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d435bffc6199..97ff11973c49 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
}
if (snprintf(portbuf, sizeof(portbuf),
- ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
+ ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
return NULL;
- if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
+ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
return NULL;
return kstrdup(addrbuf, gfp_flags);
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index ad1736d93b76..452f67deebc6 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
-auth_rpcgss-y := auth_gss.o gss_generic_token.o \
+auth_rpcgss-y := auth_gss.o \
gss_mech_switch.o svcauth_gss.o \
gss_rpc_upcall.o gss_rpc_xdr.o trace.o
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c7af0220f82f..369310909fc9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
- if (unlikely(snd_buf->len > snd_buf->buflen))
+ if (unlikely(snd_buf->len > snd_buf->buflen)) {
+ status = -EIO;
goto wrap_failed;
+ }
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
index c53b329092d4..4ebc1b7043d9 100644
--- a/net/sunrpc/auth_gss/auth_gss_internal.h
+++ b/net/sunrpc/auth_gss/auth_gss_internal.h
@@ -23,7 +23,7 @@ simple_get_bytes(const void *p, const void *end, void *res, size_t len)
}
static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+simple_get_netobj_noprof(const void *p, const void *end, struct xdr_netobj *dest)
{
const void *q;
unsigned int len;
@@ -35,7 +35,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
if (unlikely(q > end || q < p))
return ERR_PTR(-EFAULT);
if (len) {
- dest->data = kmemdup(p, len, GFP_KERNEL);
+ dest->data = kmemdup_noprof(p, len, GFP_KERNEL);
if (unlikely(dest->data == NULL))
return ERR_PTR(-ENOMEM);
} else
@@ -43,3 +43,5 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
dest->len = len;
return q;
}
+
+#define simple_get_netobj(...) alloc_hooks(simple_get_netobj_noprof(__VA_ARGS__))
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
deleted file mode 100644
index 4a4082bb22ad..000000000000
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * linux/net/sunrpc/gss_generic_token.c
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/generic/util_token.c
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1993 by OpenVision Technologies, Inc.
- *
- * Permission to use, copy, modify, distribute, and sell this software
- * and its documentation for any purpose is hereby granted without fee,
- * provided that the above copyright notice appears in all copies and
- * that both that copyright notice and this permission notice appear in
- * supporting documentation, and that the name of OpenVision not be used
- * in advertising or publicity pertaining to distribution of the software
- * without specific, written prior permission. OpenVision makes no
- * representations about the suitability of this software for any
- * purpose. It is provided "as is" without express or implied warranty.
- *
- * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
- * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/sunrpc/sched.h>
-#include <linux/sunrpc/gss_asn1.h>
-
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-
-/* TWRITE_STR from gssapiP_generic.h */
-#define TWRITE_STR(ptr, str, len) \
- memcpy((ptr), (char *) (str), (len)); \
- (ptr) += (len);
-
-/* XXXX this code currently makes the assumption that a mech oid will
- never be longer than 127 bytes. This assumption is not inherent in
- the interfaces, so the code can be fixed if the OSI namespace
- balloons unexpectedly. */
-
-/* Each token looks like this:
-
-0x60 tag for APPLICATION 0, SEQUENCE
- (constructed, definite-length)
- <length> possible multiple bytes, need to parse/generate
- 0x06 tag for OBJECT IDENTIFIER
- <moid_length> compile-time constant string (assume 1 byte)
- <moid_bytes> compile-time constant string
- <inner_bytes> the ANY containing the application token
- bytes 0,1 are the token type
- bytes 2,n are the token data
-
-For the purposes of this abstraction, the token "header" consists of
-the sequence tag and length octets, the mech OID DER encoding, and the
-first two inner bytes, which indicate the token type. The token
-"body" consists of everything else.
-
-*/
-
-static int
-der_length_size( int length)
-{
- if (length < (1<<7))
- return 1;
- else if (length < (1<<8))
- return 2;
-#if (SIZEOF_INT == 2)
- else
- return 3;
-#else
- else if (length < (1<<16))
- return 3;
- else if (length < (1<<24))
- return 4;
- else
- return 5;
-#endif
-}
-
-static void
-der_write_length(unsigned char **buf, int length)
-{
- if (length < (1<<7)) {
- *(*buf)++ = (unsigned char) length;
- } else {
- *(*buf)++ = (unsigned char) (der_length_size(length)+127);
-#if (SIZEOF_INT > 2)
- if (length >= (1<<24))
- *(*buf)++ = (unsigned char) (length>>24);
- if (length >= (1<<16))
- *(*buf)++ = (unsigned char) ((length>>16)&0xff);
-#endif
- if (length >= (1<<8))
- *(*buf)++ = (unsigned char) ((length>>8)&0xff);
- *(*buf)++ = (unsigned char) (length&0xff);
- }
-}
-
-/* returns decoded length, or < 0 on failure. Advances buf and
- decrements bufsize */
-
-static int
-der_read_length(unsigned char **buf, int *bufsize)
-{
- unsigned char sf;
- int ret;
-
- if (*bufsize < 1)
- return -1;
- sf = *(*buf)++;
- (*bufsize)--;
- if (sf & 0x80) {
- if ((sf &= 0x7f) > ((*bufsize)-1))
- return -1;
- if (sf > SIZEOF_INT)
- return -1;
- ret = 0;
- for (; sf; sf--) {
- ret = (ret<<8) + (*(*buf)++);
- (*bufsize)--;
- }
- } else {
- ret = sf;
- }
-
- return ret;
-}
-
-/* returns the length of a token, given the mech oid and the body size */
-
-int
-g_token_size(struct xdr_netobj *mech, unsigned int body_size)
-{
- /* set body_size to sequence contents size */
- body_size += 2 + (int) mech->len; /* NEED overflow check */
- return 1 + der_length_size(body_size) + body_size;
-}
-
-EXPORT_SYMBOL_GPL(g_token_size);
-
-/* fills in a buffer with the token header. The buffer is assumed to
- be the right size. buf is advanced past the token header */
-
-void
-g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
-{
- *(*buf)++ = 0x60;
- der_write_length(buf, 2 + mech->len + body_size);
- *(*buf)++ = 0x06;
- *(*buf)++ = (unsigned char) mech->len;
- TWRITE_STR(*buf, mech->data, ((int) mech->len));
-}
-
-EXPORT_SYMBOL_GPL(g_make_token_header);
-
-/*
- * Given a buffer containing a token, reads and verifies the token,
- * leaving buf advanced past the token header, and setting body_size
- * to the number of remaining bytes. Returns 0 on success,
- * G_BAD_TOK_HEADER for a variety of errors, and G_WRONG_MECH if the
- * mechanism in the token does not match the mech argument. buf and
- * *body_size are left unmodified on error.
- */
-u32
-g_verify_token_header(struct xdr_netobj *mech, int *body_size,
- unsigned char **buf_in, int toksize)
-{
- unsigned char *buf = *buf_in;
- int seqsize;
- struct xdr_netobj toid;
- int ret = 0;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- if (*buf++ != 0x60)
- return G_BAD_TOK_HEADER;
-
- if ((seqsize = der_read_length(&buf, &toksize)) < 0)
- return G_BAD_TOK_HEADER;
-
- if (seqsize != toksize)
- return G_BAD_TOK_HEADER;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- if (*buf++ != 0x06)
- return G_BAD_TOK_HEADER;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- toid.len = *buf++;
-
- if ((toksize-=toid.len) < 0)
- return G_BAD_TOK_HEADER;
- toid.data = buf;
- buf+=toid.len;
-
- if (! g_OID_equal(&toid, mech))
- ret = G_WRONG_MECH;
-
- /* G_WRONG_MECH is not returned immediately because it's more important
- to return G_BAD_TOK_HEADER if the token header is in fact bad */
-
- if ((toksize-=2) < 0)
- return G_BAD_TOK_HEADER;
-
- if (ret)
- return ret;
-
- *buf_in = buf;
- *body_size = toksize;
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(g_verify_token_header);
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d2b02710ab07..9a27201638e2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -442,35 +442,6 @@ encryptor(struct scatterlist *sg, void *data)
return 0;
}
-int
-gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
- int offset, struct page **pages)
-{
- int ret;
- struct encryptor_desc desc;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
- memset(desc.iv, 0, sizeof(desc.iv));
- desc.req = req;
- desc.pos = offset;
- desc.outbuf = buf;
- desc.pages = pages;
- desc.fragno = 0;
- desc.fraglen = 0;
-
- sg_init_table(desc.infrags, 4);
- sg_init_table(desc.outfrags, 4);
-
- ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
- skcipher_request_zero(req);
- return ret;
-}
-
struct decryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct skcipher_request *req;
@@ -525,32 +496,6 @@ decryptor(struct scatterlist *sg, void *data)
return 0;
}
-int
-gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
- int offset)
-{
- int ret;
- struct decryptor_desc desc;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- /* XXXJBF: */
- BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
- memset(desc.iv, 0, sizeof(desc.iv));
- desc.req = req;
- desc.fragno = 0;
- desc.fraglen = 0;
-
- sg_init_table(desc.frags, 4);
-
- ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
- skcipher_request_zero(req);
- return ret;
-}
-
/*
* This function makes the assumption that it was ultimately called
* from gss_wrap().
diff --git a/net/sunrpc/auth_gss/gss_krb5_internal.h b/net/sunrpc/auth_gss/gss_krb5_internal.h
index 3afd4065bf3d..a47e9ec228a5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_internal.h
+++ b/net/sunrpc/auth_gss/gss_krb5_internal.h
@@ -172,13 +172,6 @@ u32 krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
int xdr_extend_head(struct xdr_buf *buf, unsigned int base,
unsigned int shiftlen);
-int gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
- struct xdr_buf *outbuf, int offset,
- struct page **pages);
-
-int gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
- struct xdr_buf *inbuf, int offset);
-
u32 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages);
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index 06d8ee0db000..4eb19c3a54c7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -168,7 +168,7 @@ static int krb5_DK(const struct gss_krb5_enctype *gk5e,
goto err_return;
blocksize = crypto_sync_skcipher_blocksize(cipher);
if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
- goto err_return;
+ goto err_free_cipher;
ret = -ENOMEM;
inblockdata = kmalloc(blocksize, gfp_mask);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 64cff717c3d9..3366505bc669 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -398,6 +398,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
u64 seq_send64;
int keylen;
u32 time32;
+ int ret;
p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
if (IS_ERR(p))
@@ -450,8 +451,16 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
}
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
- return gss_krb5_import_ctx_v2(ctx, gfp_mask);
+ ret = gss_krb5_import_ctx_v2(ctx, gfp_mask);
+ if (ret) {
+ p = ERR_PTR(ret);
+ goto out_free;
+ }
+ return 0;
+
+out_free:
+ kfree(ctx->mech_used.data);
out_err:
return PTR_ERR(p);
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_test.c b/net/sunrpc/auth_gss/gss_krb5_test.c
index 85625e3f3814..a5bff02cd7ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_test.c
+++ b/net/sunrpc/auth_gss/gss_krb5_test.c
@@ -17,7 +17,7 @@
#include "gss_krb5_internal.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct gss_krb5_test_param {
const char *desc;
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index fae632da1058..c84d0cf61980 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/oid_registry.h>
#include <linux/sunrpc/msg_prot.h>
-#include <linux/sunrpc/gss_asn1.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index d79f12c2550a..cb32ab9a8395 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
if (!creds) {
- kfree(oa->data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_oa;
}
oa->data[0].option.data = CREDS_VALUE;
@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
/* option buffer */
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
length = be32_to_cpup(p);
p = xdr_inline_decode(xdr, length);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
if (length == sizeof(CREDS_VALUE) &&
memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
/* We have creds here. parse them */
err = gssx_dec_linux_creds(xdr, creds);
if (err)
- return err;
+ goto free_creds;
oa->data[0].value.len = 1; /* presence */
} else {
/* consume uninteresting buffer */
err = gssx_dec_buffer(xdr, &dummy);
if (err)
- return err;
+ goto free_creds;
}
}
return 0;
+
+free_creds:
+ kfree(creds);
+free_oa:
+ kfree(oa->data);
+ oa->data = NULL;
+ return err;
}
static int gssx_dec_status(struct xdr_stream *xdr,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 24de94184700..73a90ad873fb 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1033,17 +1033,11 @@ null_verifier:
static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
- u32 inlen;
int i;
i = 0;
- inlen = in_token->page_len;
- while (inlen) {
- if (in_token->pages[i])
- put_page(in_token->pages[i]);
- inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
- }
-
+ while (in_token->pages[i])
+ put_page(in_token->pages[i++]);
kfree(in_token->pages);
in_token->pages = NULL;
}
@@ -1075,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
goto out_denied_free;
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
- in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+ in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!in_token->pages)
goto out_denied_free;
in_token->page_base = 0;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 95ff74706104..7ce5e28a6c03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -281,21 +281,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
return rv;
}
-/*
- * This is the generic cache management routine for all
- * the authentication caches.
- * It checks the currency of a cache item and will (later)
- * initiate an upcall to fill it if needed.
- *
- *
- * Returns 0 if the cache_head can be used, or cache_puts it and returns
- * -EAGAIN if upcall is pending and request has been queued
- * -ETIMEDOUT if upcall failed or request could not be queue or
- * upcall completed but item is still invalid (implying that
- * the cache item has been replaced with a newer one).
- * -ENOENT if cache entry was negative
- */
-int cache_check(struct cache_detail *detail,
+int cache_check_rcu(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp)
{
int rv;
@@ -336,6 +322,31 @@ int cache_check(struct cache_detail *detail,
rv = -ETIMEDOUT;
}
}
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(cache_check_rcu);
+
+/*
+ * This is the generic cache management routine for all
+ * the authentication caches.
+ * It checks the currency of a cache item and will (later)
+ * initiate an upcall to fill it if needed.
+ *
+ *
+ * Returns 0 if the cache_head can be used, or cache_puts it and returns
+ * -EAGAIN if upcall is pending and request has been queued
+ * -ETIMEDOUT if upcall failed or request could not be queued or
+ * upcall completed but item is still invalid (implying that
+ * the cache item has been replaced with a newer one).
+ * -ENOENT if cache entry was negative
+ */
+int cache_check(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp)
+{
+ int rv;
+
+ rv = cache_check_rcu(detail, h, rqstp);
if (rv)
cache_put(h, detail);
return rv;
@@ -731,11 +742,10 @@ static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
- struct list_head pending;
struct hlist_node *tmp;
int hash = DFR_HASH(item);
+ LIST_HEAD(pending);
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
@@ -756,10 +766,8 @@ static void cache_revisit_request(struct cache_head *item)
void cache_clean_deferred(void *owner)
{
struct cache_deferred_req *dreq, *tmp;
- struct list_head pending;
+ LIST_HEAD(pending);
-
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
@@ -1085,9 +1093,8 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
struct cache_queue *cq, *tmp;
struct cache_request *cr;
- struct list_head dequeued;
+ LIST_HEAD(dequeued);
- INIT_LIST_HEAD(&dequeued);
spin_lock(&queue_lock);
list_for_each_entry_safe(cq, tmp, &detail->queue, list)
if (!cq->reader) {
@@ -1431,15 +1438,11 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
convert_to_wallclock(cp->expiry_time),
kref_read(&cp->ref), cp->flags);
- cache_get(cp);
- if (cache_check(cd, cp, NULL))
- /* cache_check does a cache_put on failure */
+
+ if (cache_check_rcu(cd, cp, NULL))
+ seq_puts(m, "# ");
+ else if (cache_is_expired(cd, cp))
seq_puts(m, "# ");
- else {
- if (cache_is_expired(cd, cp))
- seq_puts(m, "# ");
- cache_put(cp, cd);
- }
return cd->cache_show(m, cd, cp);
}
@@ -1596,7 +1599,6 @@ static int cache_release_procfs(struct inode *inode, struct file *filp)
}
static const struct proc_ops cache_channel_proc_ops = {
- .proc_lseek = no_llseek,
.proc_read = cache_read_procfs,
.proc_write = cache_write_procfs,
.proc_poll = cache_poll_procfs,
@@ -1662,7 +1664,6 @@ static const struct proc_ops cache_flush_proc_ops = {
.proc_read = read_flush_procfs,
.proc_write = write_flush_procfs,
.proc_release = release_flush_procfs,
- .proc_lseek = no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
@@ -1673,12 +1674,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
}
}
-#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
struct sunrpc_net *sn;
+ if (!IS_ENABLED(CONFIG_PROC_FS))
+ return 0;
+
sn = net_generic(net, sunrpc_net_id);
cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->procfs == NULL)
@@ -1706,12 +1709,6 @@ out_nomem:
remove_cache_proc_entries(cd);
return -ENOMEM;
}
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
-{
- return 0;
-}
-#endif
void __init cache_initialize(void)
{
@@ -1815,7 +1812,6 @@ static int cache_release_pipefs(struct inode *inode, struct file *filp)
const struct file_operations cache_file_operations_pipefs = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = cache_read_pipefs,
.write = cache_write_pipefs,
.poll = cache_poll_pipefs,
@@ -1881,7 +1877,6 @@ const struct file_operations cache_flush_operations_pipefs = {
.read = read_flush_pipefs,
.write = write_flush_pipefs,
.release = release_flush_pipefs,
- .llseek = no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cda0935a68c9..2fe88ea79a70 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -48,13 +48,8 @@
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
-/*
- * All RPC clients are linked into this list
- */
-
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
-
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
@@ -405,7 +400,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
clnt->cl_maxproc = version->nrprocs;
clnt->cl_prog = args->prognumber ? : program->number;
clnt->cl_vers = version->number;
- clnt->cl_stats = program->stats;
+ clnt->cl_stats = args->stats ? : program->stats;
clnt->cl_metrics = rpc_alloc_iostats(clnt);
rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
err = -ENOMEM;
@@ -546,7 +541,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.connect_timeout = args->connect_timeout,
.reconnect_timeout = args->reconnect_timeout,
};
- char servername[48];
+ char servername[RPC_MAXNETNAMELEN];
struct rpc_clnt *clnt;
int i;
@@ -691,6 +686,7 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
.version = clnt->cl_vers,
.authflavor = clnt->cl_auth->au_flavor,
.cred = clnt->cl_cred,
+ .stats = clnt->cl_stats,
};
return __rpc_clone_client(&args, clnt);
}
@@ -713,6 +709,7 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
.version = clnt->cl_vers,
.authflavor = flavor,
.cred = clnt->cl_cred,
+ .stats = clnt->cl_stats,
};
return __rpc_clone_client(&args, clnt);
}
@@ -961,12 +958,17 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
trace_rpc_clnt_shutdown(clnt);
+ clnt->cl_shutdown = 1;
while (!list_empty(&clnt->cl_tasks)) {
rpc_killall_tasks(clnt);
wait_event_timeout(destroy_wait,
list_empty(&clnt->cl_tasks), 1*HZ);
}
+ /* wait for tasks still in workqueue or waitqueue */
+ wait_event_timeout(destroy_wait,
+ atomic_read(&clnt->cl_task_count) == 0, 1 * HZ);
+
rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
@@ -1068,6 +1070,8 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
.version = vers,
.authflavor = old->cl_auth->au_flavor,
.cred = old->cl_cred,
+ .stats = old->cl_stats,
+ .timeout = old->cl_timeout,
};
struct rpc_clnt *clnt;
int err;
@@ -1140,6 +1144,7 @@ void rpc_task_release_client(struct rpc_task *task)
list_del(&task->tk_task);
spin_unlock(&clnt->cl_lock);
task->tk_client = NULL;
+ atomic_dec(&clnt->cl_task_count);
rpc_release_client(clnt);
}
@@ -1190,10 +1195,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_flags |= RPC_TASK_TIMEOUT;
if (clnt->cl_noretranstimeo)
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
- /* Add to the client's list of all tasks */
- spin_lock(&clnt->cl_lock);
- list_add_tail(&task->tk_task, &clnt->cl_tasks);
- spin_unlock(&clnt->cl_lock);
+ atomic_inc(&clnt->cl_task_count);
}
static void
@@ -1788,9 +1790,14 @@ call_reserveresult(struct rpc_task *task)
if (status >= 0) {
if (task->tk_rqstp) {
task->tk_action = call_refresh;
+
+ /* Add to the client's list of all tasks */
+ spin_lock(&task->tk_client->cl_lock);
+ if (list_empty(&task->tk_task))
+ list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
+ spin_unlock(&task->tk_client->cl_lock);
return;
}
-
rpc_call_rpcerror(task, -EIO);
return;
}
@@ -1855,13 +1862,13 @@ call_refreshresult(struct rpc_task *task)
fallthrough;
case -EAGAIN:
status = -EACCES;
- fallthrough;
- case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
trace_rpc_retry_refresh_status(task);
return;
+ case -EKEYEXPIRED:
+ break;
case -ENOMEM:
rpc_delay(task, HZ >> 4);
return;
@@ -1889,12 +1896,6 @@ call_allocate(struct rpc_task *task)
if (req->rq_buffer)
return;
- if (proc->p_proc != 0) {
- BUG_ON(proc->p_arglen == 0);
- if (proc->p_decode != NULL)
- BUG_ON(proc->p_replen == 0);
- }
-
/*
* Calculate the size (in quads) of the RPC call
* and reply headers, and convert both values
@@ -2322,12 +2323,13 @@ call_transmit_status(struct rpc_task *task)
task->tk_action = call_transmit;
task->tk_status = 0;
break;
- case -ECONNREFUSED:
case -EHOSTDOWN:
case -ENETDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -EPERM:
+ break;
+ case -ECONNREFUSED:
if (RPC_IS_SOFTCONN(task)) {
if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt,
@@ -2695,8 +2697,19 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
goto out_msg_denied;
error = rpcauth_checkverf(task, xdr);
- if (error)
+ if (error) {
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+
+ if (!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
+ rpcauth_invalcred(task);
+ if (!task->tk_cred_retry)
+ goto out_err;
+ task->tk_cred_retry--;
+ trace_rpc__stale_creds(task);
+ return -EKEYREJECTED;
+ }
goto out_verifier;
+ }
p = xdr_inline_decode(xdr, sizeof(*p));
if (!p)
@@ -3314,8 +3327,11 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-static void rpc_show_header(void)
+static void rpc_show_header(struct rpc_clnt *clnt)
{
+ printk(KERN_INFO "clnt[%pISpc] RPC tasks[%d]\n",
+ (struct sockaddr *)&clnt->cl_xprt->addr,
+ atomic_read(&clnt->cl_task_count));
printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
"-timeout ---ops--\n");
}
@@ -3347,7 +3363,7 @@ void rpc_show_tasks(struct net *net)
spin_lock(&clnt->cl_lock);
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
if (!header) {
- rpc_show_header();
+ rpc_show_header(clnt);
header++;
}
rpc_show_task(clnt, task);
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index a176d5a0b0ee..32417db340de 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -74,6 +74,9 @@ tasks_stop(struct seq_file *f, void *v)
{
struct rpc_clnt *clnt = f->private;
spin_unlock(&clnt->cl_lock);
+ seq_printf(f, "clnt[%pISpc] RPC tasks[%d]\n",
+ (struct sockaddr *)&clnt->cl_xprt->addr,
+ atomic_read(&clnt->cl_task_count));
}
static const struct seq_operations tasks_seq_operations = {
@@ -179,6 +182,18 @@ xprt_info_show(struct seq_file *f, void *v)
seq_printf(f, "addr: %s\n", xprt->address_strings[RPC_DISPLAY_ADDR]);
seq_printf(f, "port: %s\n", xprt->address_strings[RPC_DISPLAY_PORT]);
seq_printf(f, "state: 0x%lx\n", xprt->state);
+ seq_printf(f, "netns: %u\n", xprt->xprt_net->ns.inum);
+
+ if (xprt->ops->get_srcaddr) {
+ int ret, buflen;
+ char buf[INET6_ADDRSTRLEN];
+
+ buflen = ARRAY_SIZE(buf);
+ ret = xprt->ops->get_srcaddr(xprt, buf, buflen);
+ if (ret < 0)
+ ret = sprintf(buf, "<closed>");
+ seq_printf(f, "saddr: %.*s\n", ret, buf);
+ }
return 0;
}
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index dcc2b4f49e77..eadc00410ebc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -385,7 +385,6 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations rpc_pipe_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = rpc_pipe_read,
.write = rpc_pipe_write,
.poll = rpc_pipe_poll,
@@ -631,7 +630,7 @@ static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
const char *name)
{
- struct qstr q = QSTR_INIT(name, strlen(name));
+ struct qstr q = QSTR(name);
struct dentry *dentry = d_hash_and_lookup(parent, &q);
if (!dentry) {
dentry = d_alloc(parent, &q);
@@ -1191,8 +1190,7 @@ static const struct rpc_filelist files[] = {
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
- struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
- return d_hash_and_lookup(sb->s_root, &dir);
+ return d_hash_and_lookup(sb->s_root, &QSTR(dir_name));
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
@@ -1301,11 +1299,9 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
struct dentry *gssd_dentry;
struct dentry *clnt_dentry = NULL;
struct dentry *pipe_dentry = NULL;
- struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
- strlen(files[RPCAUTH_gssd].name));
/* We should never get this far if "gssd" doesn't exist */
- gssd_dentry = d_hash_and_lookup(root, &q);
+ gssd_dentry = d_hash_and_lookup(root, &QSTR(files[RPCAUTH_gssd].name));
if (!gssd_dentry)
return ERR_PTR(-ENOENT);
@@ -1315,9 +1311,8 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
goto out;
}
- q.name = gssd_dummy_clnt_dir[0].name;
- q.len = strlen(gssd_dummy_clnt_dir[0].name);
- clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
+ clnt_dentry = d_hash_and_lookup(gssd_dentry,
+ &QSTR(gssd_dummy_clnt_dir[0].name));
if (!clnt_dentry) {
__rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
@@ -1490,7 +1485,7 @@ int register_rpc_pipefs(void)
rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
sizeof(struct rpc_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (!rpc_inode_cachep)
return -ENOMEM;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6debf4fd42d4..9b45fbdc90ca 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -369,8 +369,10 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(wq, &task->u.tk_work);
- } else
+ } else {
+ smp_mb__after_atomic();
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
+ }
}
/*
@@ -862,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
return;
trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
rpc_wake_up_queued_task(queue, task);
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 65fc1297c6df..383860cb1d5b 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister);
struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
{
- return do_register(net, statp->program->pg_name, statp, proc_ops);
+ return do_register(net, statp->program->pg_name, net, proc_ops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index d4a362c9e4b3..e3c6e3b63f0b 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -36,7 +36,11 @@ static inline int sock_is_loopback(struct sock *sk)
return loopback;
}
+struct svc_serv;
+struct svc_rqst;
int rpc_clients_notifier_register(void);
void rpc_clients_notifier_unregister(void);
void auth_domain_cleanup(void);
+void svc_sock_update_bufs(struct svc_serv *serv);
+enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
#endif /* _NET_SUNRPC_SUNRPC_H */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b969e505c7b7..e7f9c295d13c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,6 +32,7 @@
#include <trace/events/sunrpc.h>
#include "fail.h"
+#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_SVCDSP
@@ -72,57 +73,100 @@ static struct svc_pool_map svc_pool_map = {
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
-param_set_pool_mode(const char *val, const struct kernel_param *kp)
+__param_set_pool_mode(const char *val, struct svc_pool_map *m)
{
- int *ip = (int *)kp->arg;
- struct svc_pool_map *m = &svc_pool_map;
- int err;
+ int err, mode;
mutex_lock(&svc_pool_map_mutex);
- err = -EBUSY;
- if (m->count)
- goto out;
-
err = 0;
if (!strncmp(val, "auto", 4))
- *ip = SVC_POOL_AUTO;
+ mode = SVC_POOL_AUTO;
else if (!strncmp(val, "global", 6))
- *ip = SVC_POOL_GLOBAL;
+ mode = SVC_POOL_GLOBAL;
else if (!strncmp(val, "percpu", 6))
- *ip = SVC_POOL_PERCPU;
+ mode = SVC_POOL_PERCPU;
else if (!strncmp(val, "pernode", 7))
- *ip = SVC_POOL_PERNODE;
+ mode = SVC_POOL_PERNODE;
else
err = -EINVAL;
+ if (err)
+ goto out;
+
+ if (m->count == 0)
+ m->mode = mode;
+ else if (mode != m->mode)
+ err = -EBUSY;
out:
mutex_unlock(&svc_pool_map_mutex);
return err;
}
static int
-param_get_pool_mode(char *buf, const struct kernel_param *kp)
+param_set_pool_mode(const char *val, const struct kernel_param *kp)
+{
+ struct svc_pool_map *m = kp->arg;
+
+ return __param_set_pool_mode(val, m);
+}
+
+int sunrpc_set_pool_mode(const char *val)
+{
+ return __param_set_pool_mode(val, &svc_pool_map);
+}
+EXPORT_SYMBOL(sunrpc_set_pool_mode);
+
+/**
+ * sunrpc_get_pool_mode - get the current pool_mode for the host
+ * @buf: where to write the current pool_mode
+ * @size: size of @buf
+ *
+ * Grab the current pool_mode from the svc_pool_map and write
+ * the resulting string to @buf. Returns the number of characters
+ * written to @buf (in the manner of snprintf()).
+ */
+int
+sunrpc_get_pool_mode(char *buf, size_t size)
{
- int *ip = (int *)kp->arg;
+ struct svc_pool_map *m = &svc_pool_map;
- switch (*ip)
+ switch (m->mode)
{
case SVC_POOL_AUTO:
- return sysfs_emit(buf, "auto\n");
+ return snprintf(buf, size, "auto");
case SVC_POOL_GLOBAL:
- return sysfs_emit(buf, "global\n");
+ return snprintf(buf, size, "global");
case SVC_POOL_PERCPU:
- return sysfs_emit(buf, "percpu\n");
+ return snprintf(buf, size, "percpu");
case SVC_POOL_PERNODE:
- return sysfs_emit(buf, "pernode\n");
+ return snprintf(buf, size, "pernode");
default:
- return sysfs_emit(buf, "%d\n", *ip);
+ return snprintf(buf, size, "%d", m->mode);
}
}
+EXPORT_SYMBOL(sunrpc_get_pool_mode);
+
+static int
+param_get_pool_mode(char *buf, const struct kernel_param *kp)
+{
+ char str[16];
+ int len;
+
+ len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str));
+
+ /* Ensure we have room for newline and NUL */
+ len = min_t(int, len, ARRAY_SIZE(str) - 2);
+
+ /* tack on the newline */
+ str[len] = '\n';
+ str[len + 1] = '\0';
+
+ return sysfs_emit(buf, "%s", str);
+}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
- &svc_pool_map.mode, 0644);
+ &svc_pool_map, 0644);
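As a rough usage sketch (not part of the patch), a module such as nfsd could read and update the pool mode through the newly exported helpers; example_show_pool_mode() and example_store_pool_mode() are hypothetical names:

static int example_show_pool_mode(char *page)
{
	char mode[16];
	int len;

	/* sunrpc_get_pool_mode() writes the mode string without a newline */
	len = sunrpc_get_pool_mode(mode, ARRAY_SIZE(mode));
	return scnprintf(page, PAGE_SIZE, "%.*s\n", len, mode);
}

static int example_store_pool_mode(const char *val)
{
	/* Fails with -EBUSY once pools exist and the requested mode differs */
	return sunrpc_set_pool_mode(val);
}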
/*
* Detect best pool mapping mode heuristically,
@@ -250,10 +294,8 @@ svc_pool_map_get(void)
int npools = -1;
mutex_lock(&svc_pool_map_mutex);
-
if (m->count++) {
mutex_unlock(&svc_pool_map_mutex);
- WARN_ON_ONCE(m->npools <= 1);
return m->npools;
}
@@ -275,32 +317,21 @@ svc_pool_map_get(void)
m->mode = SVC_POOL_GLOBAL;
}
m->npools = npools;
-
- if (npools == 1)
- /* service is unpooled, so doesn't hold a reference */
- m->count--;
-
mutex_unlock(&svc_pool_map_mutex);
return npools;
}
/*
- * Drop a reference to the global map of cpus to pools, if
- * pools were in use, i.e. if npools > 1.
+ * Drop a reference to the global map of cpus to pools.
* When the last reference is dropped, the map data is
- * freed; this allows the sysadmin to change the pool
- * mode using the pool_mode module option without
- * rebooting or re-loading sunrpc.ko.
+ * freed; this allows the sysadmin to change the pool mode.
*/
static void
-svc_pool_map_put(int npools)
+svc_pool_map_put(void)
{
struct svc_pool_map *m = &svc_pool_map;
- if (npools <= 1)
- return;
mutex_lock(&svc_pool_map_mutex);
-
if (!--m->count) {
kfree(m->to_pool);
m->to_pool = NULL;
@@ -308,7 +339,6 @@ svc_pool_map_put(int npools)
m->pool_to = NULL;
m->npools = 0;
}
-
mutex_unlock(&svc_pool_map_mutex);
}
@@ -388,7 +418,7 @@ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
return &serv->sv_pools[pidx % serv->sv_nrpools];
}
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
+static int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
int err;
@@ -400,7 +430,6 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
svc_unregister(serv, net);
return 0;
}
-EXPORT_SYMBOL_GPL(svc_rpcb_setup);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
@@ -411,10 +440,11 @@ EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
+
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -451,8 +481,8 @@ __svc_init_bc(struct svc_serv *serv)
* Create an RPC service
*/
static struct svc_serv *
-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
- int (*threadfn)(void *data))
+__svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats,
+ unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
struct svc_serv *serv;
unsigned int vers;
@@ -462,25 +492,27 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
serv->sv_name = prog->pg_name;
- serv->sv_program = prog;
- serv->sv_stats = prog->pg_stats;
+ serv->sv_programs = prog;
+ serv->sv_nprogs = nprogs;
+ serv->sv_stats = stats;
if (bufsize > RPCSVC_MAXPAYLOAD)
bufsize = RPCSVC_MAXPAYLOAD;
serv->sv_max_payload = bufsize? bufsize : 4096;
serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
serv->sv_threadfn = threadfn;
xdrsize = 0;
- while (prog) {
- prog->pg_lovers = prog->pg_nvers-1;
- for (vers=0; vers<prog->pg_nvers ; vers++)
- if (prog->pg_vers[vers]) {
- prog->pg_hivers = vers;
- if (prog->pg_lovers > vers)
- prog->pg_lovers = vers;
- if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
- xdrsize = prog->pg_vers[vers]->vs_xdrsize;
+ for (i = 0; i < nprogs; i++) {
+ struct svc_program *progp = &prog[i];
+
+ progp->pg_lovers = progp->pg_nvers-1;
+ for (vers = 0; vers < progp->pg_nvers ; vers++)
+ if (progp->pg_vers[vers]) {
+ progp->pg_hivers = vers;
+ if (progp->pg_lovers > vers)
+ progp->pg_lovers = vers;
+ if (progp->pg_vers[vers]->vs_xdrsize > xdrsize)
+ xdrsize = progp->pg_vers[vers]->vs_xdrsize;
}
- prog = prog->pg_next;
}
serv->sv_xdrsize = xdrsize;
INIT_LIST_HEAD(&serv->sv_tempsocks);
@@ -529,31 +561,36 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
int (*threadfn)(void *data))
{
- return __svc_create(prog, bufsize, 1, threadfn);
+ return __svc_create(prog, 1, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);
/**
* svc_create_pooled - Create an RPC service with pooled threads
- * @prog: the RPC program the new service will handle
+ * @prog: Array of RPC programs the new service will handle
+ * @nprogs: Number of programs in the array
+ * @stats: the stats struct if desired
* @bufsize: maximum message size for @prog
* @threadfn: a function to service RPC requests for @prog
*
* Returns an instantiated struct svc_serv object or NULL.
*/
struct svc_serv *svc_create_pooled(struct svc_program *prog,
+ unsigned int nprogs,
+ struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data))
{
struct svc_serv *serv;
unsigned int npools = svc_pool_map_get();
- serv = __svc_create(prog, bufsize, npools, threadfn);
+ serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn);
if (!serv)
goto out_err;
+ serv->sv_is_pooled = true;
return serv;
out_err:
- svc_pool_map_put(npools);
+ svc_pool_map_put();
return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
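A minimal caller sketch, assuming the caller already defines its programs, stats and thread function (all names below are placeholders, not from the patch): with pg_next gone, programs are passed as an array plus a count.

static int example_thread_fn(void *data);

static int example_create_server(struct svc_program *progs, unsigned int nprogs,
				 struct svc_stat *stats)
{
	struct svc_serv *serv;

	/* bufsize 0 selects the 4096-byte default payload limit */
	serv = svc_create_pooled(progs, nprogs, stats, 0, example_thread_fn);
	return serv ? 0 : -ENOMEM;
}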
@@ -570,20 +607,21 @@ svc_destroy(struct svc_serv **servp)
*servp = NULL;
- dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
+ dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name);
timer_shutdown_sync(&serv->sv_temptimer);
/*
* Remaining transports at this point are not expected.
*/
WARN_ONCE(!list_empty(&serv->sv_permsocks),
- "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name);
WARN_ONCE(!list_empty(&serv->sv_tempsocks),
- "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name);
cache_clean_deferred(serv);
- svc_pool_map_put(serv->sv_nrpools);
+ if (serv->sv_is_pooled)
+ svc_pool_map_put();
for (i = 0; i < serv->sv_nrpools; i++) {
struct svc_pool *pool = &serv->sv_pools[i];
@@ -613,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
if (pages > RPCSVC_MAXPAGES)
pages = RPCSVC_MAXPAGES;
- ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
- rqstp->rq_pages);
+ ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+ rqstp->rq_pages);
return ret == pages;
}
@@ -631,8 +669,21 @@ svc_release_buffer(struct svc_rqst *rqstp)
put_page(rqstp->rq_pages[i]);
}
-struct svc_rqst *
-svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
+static void
+svc_rqst_free(struct svc_rqst *rqstp)
+{
+ folio_batch_release(&rqstp->rq_fbatch);
+ svc_release_buffer(rqstp);
+ if (rqstp->rq_scratch_page)
+ put_page(rqstp->rq_scratch_page);
+ kfree(rqstp->rq_resp);
+ kfree(rqstp->rq_argp);
+ kfree(rqstp->rq_auth_data);
+ kfree_rcu(rqstp, rq_rcu_head);
+}
+
+static struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
@@ -660,27 +711,10 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
goto out_enomem;
- return rqstp;
-out_enomem:
- svc_rqst_free(rqstp);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_rqst_alloc);
-
-static struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
-{
- struct svc_rqst *rqstp;
+ rqstp->rq_err = -EAGAIN; /* No error yet */
- rqstp = svc_rqst_alloc(serv, pool, node);
- if (!rqstp)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads += 1;
- spin_unlock_bh(&serv->sv_lock);
-
- atomic_inc(&pool->sp_nrthreads);
+ pool->sp_nrthreads += 1;
/* Protected by whatever lock the service uses when calling
* svc_set_num_threads()
@@ -688,6 +722,10 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
return rqstp;
+
+out_enomem:
+ svc_rqst_free(rqstp);
+ return NULL;
}
/**
@@ -735,31 +773,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
struct svc_pool *pool;
unsigned int i;
-retry:
pool = target_pool;
- if (pool != NULL) {
- if (atomic_inc_not_zero(&pool->sp_nrthreads))
- goto found_pool;
- return NULL;
- } else {
+ if (!pool) {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- if (atomic_inc_not_zero(&pool->sp_nrthreads))
- goto found_pool;
+ if (pool->sp_nrthreads)
+ break;
}
- return NULL;
}
-found_pool:
- set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
- set_bit(SP_NEED_VICTIM, &pool->sp_flags);
- if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ if (pool && pool->sp_nrthreads) {
+ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
return pool;
- /* Nothing left in this pool any more */
- clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
- clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
- goto retry;
+ }
+ return NULL;
}
static int
@@ -770,6 +799,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct svc_pool *chosen_pool;
unsigned int state = serv->sv_nrthreads-1;
int node;
+ int err;
do {
nrservs--;
@@ -777,8 +807,8 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
node = svc_pool_map_get_node(chosen_pool->sp_id);
rqstp = svc_prepare_thread(serv, chosen_pool, node);
- if (IS_ERR(rqstp))
- return PTR_ERR(rqstp);
+ if (!rqstp)
+ return -ENOMEM;
task = kthread_create_on_node(serv->sv_threadfn, rqstp,
node, "%s", serv->sv_name);
if (IS_ERR(task)) {
@@ -792,6 +822,13 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
svc_sock_update_bufs(serv);
wake_up_process(task);
+
+ wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
+ err = rqstp->rq_err;
+ if (err) {
+ svc_exit_thread(rqstp);
+ return err;
+ }
} while (nrservs > 0);
return 0;
@@ -838,7 +875,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
if (!pool)
nrservs -= serv->sv_nrthreads;
else
- nrservs -= atomic_read(&pool->sp_nrthreads);
+ nrservs -= pool->sp_nrthreads;
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -900,25 +937,21 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp)
}
}
-/*
- * Called from a server thread as it's exiting. Caller must hold the "service
- * mutex" for the service.
+/**
+ * svc_exit_thread - finalise the termination of a sunrpc server thread
+ * @rqstp: the svc_rqst which represents the thread.
+ *
+ * When a thread started with svc_new_thread() exits it must call
+ * svc_exit_thread() as its last act. This must be done with the
+ * service mutex held. Normally this is held by a DIFFERENT thread, the
+ * one that is calling svc_set_num_threads() and which will wait for
+ * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the
+ * thread exits for any reason other than svc_thread_should_stop()
+ * returning %true (which indicates that svc_set_num_threads() is
+ * waiting for it to exit), then it must take the service mutex itself,
+ * which can only safely be done using mutex_trylock().
*/
void
-svc_rqst_free(struct svc_rqst *rqstp)
-{
- folio_batch_release(&rqstp->rq_fbatch);
- svc_release_buffer(rqstp);
- if (rqstp->rq_scratch_page)
- put_page(rqstp->rq_scratch_page);
- kfree(rqstp->rq_resp);
- kfree(rqstp->rq_argp);
- kfree(rqstp->rq_auth_data);
- kfree_rcu(rqstp, rq_rcu_head);
-}
-EXPORT_SYMBOL_GPL(svc_rqst_free);
-
-void
svc_exit_thread(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
@@ -926,11 +959,8 @@ svc_exit_thread(struct svc_rqst *rqstp)
list_del_rcu(&rqstp->rq_all);
- atomic_dec(&pool->sp_nrthreads);
-
- spin_lock_bh(&serv->sv_lock);
+ pool->sp_nrthreads -= 1;
serv->sv_nrthreads -= 1;
- spin_unlock_bh(&serv->sv_lock);
svc_sock_update_bufs(serv);
svc_rqst_free(rqstp);
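Illustrative only: the exit protocol described above, for the common case where svc_set_num_threads() asked the thread to stop and is therefore already holding the service mutex (example_svc_thread() is a hypothetical thread function):

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;

	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);

	/* svc_set_num_threads() holds the service mutex and waits for
	 * SP_VICTIM_REMAINS, so svc_exit_thread() can be called directly. */
	svc_exit_thread(rqstp);
	return 0;
}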
@@ -1065,6 +1095,7 @@ static int __svc_register(struct net *net, const char *progname,
return error;
}
+static
int svc_rpcbind_set_version(struct net *net,
const struct svc_program *progp,
u32 version, int family,
@@ -1075,7 +1106,6 @@ int svc_rpcbind_set_version(struct net *net,
version, family, proto, port);
}
-EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);
int svc_generic_rpcbind_set(struct net *net,
const struct svc_program *progp,
@@ -1123,15 +1153,16 @@ int svc_register(const struct svc_serv *serv, struct net *net,
const int family, const unsigned short proto,
const unsigned short port)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
int error = 0;
WARN_ON_ONCE(proto == 0 && port == 0);
if (proto == 0 && port == 0)
return -EINVAL;
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
error = progp->pg_rpcbind_set(net, progp, i,
@@ -1183,13 +1214,14 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
struct sighand_struct *sighand;
- struct svc_program *progp;
unsigned long flags;
- unsigned int i;
+ unsigned int p, i;
clear_thread_flag(TIF_SIGPENDING);
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -1263,8 +1295,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
if (rqstp->rq_proc >= versp->vs_nproc)
goto err_bad_proc;
rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
- if (!procp)
- goto err_bad_proc;
/* Initialize storage for argp and resp */
memset(rqstp->rq_argp, 0, procp->pc_argzero);
@@ -1291,13 +1321,13 @@ static int
svc_process_common(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_res_stream;
- struct svc_program *progp;
+ struct svc_program *progp = NULL;
const struct svc_procedure *procp = NULL;
struct svc_serv *serv = rqstp->rq_server;
struct svc_process_info process;
enum svc_auth_status auth_res;
unsigned int aoffset;
- int rc;
+ int pr, rc;
__be32 *p;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1321,9 +1351,9 @@ svc_process_common(struct svc_rqst *rqstp)
rqstp->rq_vers = be32_to_cpup(p++);
rqstp->rq_proc = be32_to_cpup(p);
- for (progp = serv->sv_program; progp; progp = progp->pg_next)
- if (rqstp->rq_prog == progp->pg_prog)
- break;
+ for (pr = 0; pr < serv->sv_nprogs; pr++)
+ if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog)
+ progp = &serv->sv_programs[pr];
/*
* Decode auth data, and add verifier to reply buffer.
@@ -1375,7 +1405,8 @@ svc_process_common(struct svc_rqst *rqstp)
goto err_bad_proc;
/* Syntactic check complete */
- serv->sv_stats->rpccnt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpccnt++;
trace_svc_process(rqstp, progp->pg_name);
aoffset = xdr_stream_pos(xdr);
@@ -1427,7 +1458,8 @@ err_short_len:
goto close_xprt;
err_bad_rpc:
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
xdr_stream_encode_u32(xdr, RPC_MISMATCH);
/* Only RPCv2 supported */
@@ -1438,7 +1470,8 @@ err_bad_rpc:
err_bad_auth:
dprintk("svc: authentication failed (%d)\n",
be32_to_cpu(rqstp->rq_auth_stat));
- serv->sv_stats->rpcbadauth++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadauth++;
/* Restore write pointer to location of reply status: */
xdr_truncate_encode(xdr, XDR_UNIT * 2);
xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
@@ -1448,7 +1481,8 @@ err_bad_auth:
err_bad_prog:
dprintk("svc: unknown program %d\n", rqstp->rq_prog);
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_prog_unavail;
goto sendit;
@@ -1456,7 +1490,8 @@ err_bad_vers:
svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_prog_mismatch;
/*
@@ -1470,23 +1505,34 @@ err_bad_vers:
err_bad_proc:
svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_proc_unavail;
goto sendit;
err_garbage_args:
svc_printk(rqstp, "failed to decode RPC header\n");
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_garbage_args;
goto sendit;
err_system_err:
- serv->sv_stats->rpcbadfmt++;
+ if (serv->sv_stats)
+ serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_system_err;
goto sendit;
}
+/*
+ * Drop request
+ */
+static void svc_drop(struct svc_rqst *rqstp)
+{
+ trace_svc_drop(rqstp);
+}
+
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
@@ -1534,7 +1580,8 @@ void svc_process(struct svc_rqst *rqstp)
out_baddir:
svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
be32_to_cpu(*p));
- rqstp->rq_server->sv_stats->rpcbadfmt++;
+ if (rqstp->rq_server->sv_stats)
+ rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
svc_drop(rqstp);
}
@@ -1548,9 +1595,11 @@ out_drop:
*/
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
+ struct rpc_timeout timeout = {
+ .to_increment = 0,
+ };
struct rpc_task *task;
int proc_error;
- struct rpc_timeout timeout;
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
@@ -1603,6 +1652,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_initval = req->rq_xprt->timeout->to_initval;
timeout.to_retries = req->rq_xprt->timeout->to_retries;
}
+ timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
@@ -1612,7 +1662,6 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
rpc_put_task(task);
}
-EXPORT_SYMBOL_GPL(svc_process_bc);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b4a85a227bd7..ae25405d8bd2 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -46,7 +46,6 @@ static LIST_HEAD(svc_xprt_class_list);
/* SMP locking strategy:
*
- * svc_pool->sp_lock protects most of the fields of that pool.
* svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
* when both need to be taken (rare), svc_serv->sv_lock is first.
* The "service mutex" protects svc_serv->sv_nrthread.
@@ -158,6 +157,7 @@ int svc_print_xprts(char *buf, int maxlen)
*/
void svc_xprt_deferred_close(struct svc_xprt *xprt)
{
+ trace_svc_xprt_close(xprt);
if (!test_and_set_bit(XPT_CLOSE, &xprt->xpt_flags))
svc_xprt_enqueue(xprt);
}
@@ -211,51 +211,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
-static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
- struct svc_serv *serv,
- struct net *net,
- const int family,
- const unsigned short port,
- int flags)
-{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_ANY),
- .sin_port = htons(port),
- };
-#if IS_ENABLED(CONFIG_IPV6)
- struct sockaddr_in6 sin6 = {
- .sin6_family = AF_INET6,
- .sin6_addr = IN6ADDR_ANY_INIT,
- .sin6_port = htons(port),
- };
-#endif
- struct svc_xprt *xprt;
- struct sockaddr *sap;
- size_t len;
-
- switch (family) {
- case PF_INET:
- sap = (struct sockaddr *)&sin;
- len = sizeof(sin);
- break;
-#if IS_ENABLED(CONFIG_IPV6)
- case PF_INET6:
- sap = (struct sockaddr *)&sin6;
- len = sizeof(sin6);
- break;
-#endif
- default:
- return ERR_PTR(-EAFNOSUPPORT);
- }
-
- xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
- if (IS_ERR(xprt))
- trace_svc_xprt_create_err(serv->sv_program->pg_name,
- xcl->xcl_name, sap, len, xprt);
- return xprt;
-}
-
/**
* svc_xprt_received - start next receiver thread
* @xprt: controlling transport
@@ -294,9 +249,8 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
}
static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
- struct net *net, const int family,
- const unsigned short port, int flags,
- const struct cred *cred)
+ struct net *net, struct sockaddr *sap,
+ size_t len, int flags, const struct cred *cred)
{
struct svc_xprt_class *xcl;
@@ -312,8 +266,11 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
goto err;
spin_unlock(&svc_xprt_class_lock);
- newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
+ newxprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
if (IS_ERR(newxprt)) {
+ trace_svc_xprt_create_err(serv->sv_programs->pg_name,
+ xcl->xcl_name, sap, len,
+ newxprt);
module_put(xcl->xcl_owner);
return PTR_ERR(newxprt);
}
@@ -330,6 +287,48 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
}
/**
+ * svc_xprt_create_from_sa - Add a new listener to @serv from socket address
+ * @serv: target RPC service
+ * @xprt_name: transport class name
+ * @net: network namespace
+ * @sap: socket address pointer
+ * @flags: SVC_SOCK flags
+ * @cred: credential to bind to this transport
+ *
+ * Return local xprt port on success or %-EPROTONOSUPPORT on failure
+ */
+int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, struct sockaddr *sap,
+ int flags, const struct cred *cred)
+{
+ size_t len;
+ int err;
+
+ switch (sap->sa_family) {
+ case AF_INET:
+ len = sizeof(struct sockaddr_in);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ default:
+ return -EAFNOSUPPORT;
+ }
+
+ err = _svc_xprt_create(serv, xprt_name, net, sap, len, flags, cred);
+ if (err == -EPROTONOSUPPORT) {
+ request_module("svc%s", xprt_name);
+ err = _svc_xprt_create(serv, xprt_name, net, sap, len, flags,
+ cred);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(svc_xprt_create_from_sa);
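A hedged usage sketch (names are illustrative): an nfsd-style caller that already has a sockaddr, for example from a netlink request, can add a listener without going through the wildcard-address path:

static int example_add_listener(struct svc_serv *serv, struct net *net,
				struct sockaddr_storage *addr,
				const struct cred *cred)
{
	/* SVC_SOCK_ANONYMOUS: skip rpcbind registration for this listener */
	return svc_xprt_create_from_sa(serv, "tcp", net,
				       (struct sockaddr *)addr,
				       SVC_SOCK_ANONYMOUS, cred);
}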
+
+/**
* svc_xprt_create - Add a new listener to @serv
* @serv: target RPC service
* @xprt_name: transport class name
@@ -339,23 +338,41 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
* @flags: SVC_SOCK flags
* @cred: credential to bind to this transport
*
- * Return values:
- * %0: New listener added successfully
- * %-EPROTONOSUPPORT: Requested transport type not supported
+ * Return local xprt port on success or %-EPROTONOSUPPORT on failure
*/
int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
const struct cred *cred)
{
- int err;
+ struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ .sin_port = htons(port),
+ };
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 sin6 = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+#endif
+ struct sockaddr *sap;
- err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
- if (err == -EPROTONOSUPPORT) {
- request_module("svc%s", xprt_name);
- err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
+ switch (family) {
+ case PF_INET:
+ sap = (struct sockaddr *)&sin;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+ sap = (struct sockaddr *)&sin6;
+ break;
+#endif
+ default:
+ return -EAFNOSUPPORT;
}
- return err;
+
+ return svc_xprt_create_from_sa(serv, xprt_name, net, sap, flags, cred);
}
EXPORT_SYMBOL_GPL(svc_xprt_create);
@@ -589,7 +606,8 @@ int svc_port_is_privileged(struct sockaddr *sin)
}
/*
- * Make sure that we don't have too many active connections. If we have,
+ * Make sure that we don't have too many connections that have not yet
+ * demonstrated that they have access to the NFS server. If we have,
* something must be dropped. It's not clear what will happen if we allow
* "too many" connections, but when dealing with network-facing software,
* we have to code defensively. Here we do that by imposing hard limits.
@@ -601,34 +619,26 @@ int svc_port_is_privileged(struct sockaddr *sin)
* The only somewhat efficient mechanism would be if drop old
* connections from the same IP first. But right now we don't even
* record the client IP in svc_sock.
- *
- * single-threaded services that expect a lot of clients will probably
- * need to set sv_maxconn to override the default value which is based
- * on the number of threads
*/
static void svc_check_conn_limits(struct svc_serv *serv)
{
- unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
- (serv->sv_nrthreads+3) * 20;
-
- if (serv->sv_tmpcnt > limit) {
- struct svc_xprt *xprt = NULL;
+ if (serv->sv_tmpcnt > XPT_MAX_TMP_CONN) {
+ struct svc_xprt *xprt = NULL, *xprti;
spin_lock_bh(&serv->sv_lock);
if (!list_empty(&serv->sv_tempsocks)) {
- /* Try to help the admin */
- net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
- serv->sv_name, serv->sv_maxconn ?
- "max number of connections" :
- "number of threads");
/*
* Always select the oldest connection. It's not fair,
- * but so is life
+ * but nor is life.
*/
- xprt = list_entry(serv->sv_tempsocks.prev,
- struct svc_xprt,
- xpt_list);
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_xprt_get(xprt);
+ list_for_each_entry_reverse(xprti, &serv->sv_tempsocks,
+ xpt_list) {
+ if (!test_bit(XPT_PEER_VALID, &xprti->xpt_flags)) {
+ xprt = xprti;
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_xprt_get(xprt);
+ break;
+ }
+ }
}
spin_unlock_bh(&serv->sv_lock);
@@ -654,8 +664,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
for (filled = 0; filled < pages; filled = ret) {
- ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
- rqstp->rq_pages);
+ ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages);
if (ret > filled)
/* Made progress, don't sleep yet */
continue;
@@ -888,15 +897,6 @@ void svc_recv(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_recv);
-/*
- * Drop request
- */
-void svc_drop(struct svc_rqst *rqstp)
-{
- trace_svc_drop(rqstp);
-}
-EXPORT_SYMBOL_GPL(svc_drop);
-
/**
* svc_send - Return reply to client
* @rqstp: RPC transaction context
@@ -1031,7 +1031,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ if (test_bit(XPT_TEMP, &xprt->xpt_flags) &&
+ !test_bit(XPT_PEER_VALID, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
@@ -1260,6 +1261,40 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
}
/**
+ * svc_find_listener - find an RPC transport instance
+ * @serv: pointer to svc_serv to search
+ * @xcl_name: C string containing transport's class name
+ * @net: owner net pointer
+ * @sa: sockaddr containing address
+ *
+ * Return the transport instance pointer for the endpoint accepting
+ * connections/peer traffic from the specified transport class,
+ * and matching sockaddr.
+ */
+struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name,
+ struct net *net, const struct sockaddr *sa)
+{
+ struct svc_xprt *xprt;
+ struct svc_xprt *found = NULL;
+
+ spin_lock_bh(&serv->sv_lock);
+ list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
+ if (xprt->xpt_net != net)
+ continue;
+ if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
+ continue;
+ if (!rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local))
+ continue;
+ found = xprt;
+ svc_xprt_get(xprt);
+ break;
+ }
+ spin_unlock_bh(&serv->sv_lock);
+ return found;
+}
+EXPORT_SYMBOL_GPL(svc_find_listener);
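Sketch of a possible caller (not from the patch): checking for an existing listener before creating a duplicate; the lookup returns a referenced xprt, so the caller must drop it.

static bool example_listener_exists(struct svc_serv *serv, struct net *net,
				    const struct sockaddr *sa)
{
	struct svc_xprt *xprt = svc_find_listener(serv, "tcp", net, sa);

	if (!xprt)
		return false;
	svc_xprt_put(xprt);	/* drop the reference taken by the lookup */
	return true;
}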
+
+/**
* svc_find_xprt - find an RPC transport instance
* @serv: pointer to svc_serv to search
* @xcl_name: C string containing transport's class name
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 1619211f0960..55b4d2874188 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/err.h>
#include <linux/hash.h>
+#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>
@@ -98,7 +99,6 @@ enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp)
rqstp->rq_authop = aops;
return aops->accept(rqstp);
}
-EXPORT_SYMBOL_GPL(svc_authenticate);
/**
* svc_set_client - Assign an appropriate 'auth_domain' as the client
@@ -176,6 +176,33 @@ rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_auth_flavor);
+/**
+ * svcauth_map_clnt_to_svc_cred_local - maps a generic cred
+ * to a svc_cred suitable for use in nfsd.
+ * @clnt: rpc_clnt associated with nfs client
+ * @cred: generic cred associated with nfs client
+ * @svc: returned svc_cred that is suitable for use in nfsd
+ */
+void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *cred,
+ struct svc_cred *svc)
+{
+ struct user_namespace *userns = clnt->cl_cred ?
+ clnt->cl_cred->user_ns : &init_user_ns;
+
+ memset(svc, 0, sizeof(struct svc_cred));
+
+ svc->cr_uid = KUIDT_INIT(from_kuid_munged(userns, cred->fsuid));
+ svc->cr_gid = KGIDT_INIT(from_kgid_munged(userns, cred->fsgid));
+ svc->cr_flavor = clnt->cl_auth->au_flavor;
+ if (cred->group_info)
+ svc->cr_group_info = get_group_info(cred->group_info);
+ /* These aren't relevant for local (network is bypassed) */
+ svc->cr_principal = NULL;
+ svc->cr_gss_mech = NULL;
+}
+EXPORT_SYMBOL_GPL(svcauth_map_clnt_to_svc_cred_local);
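Illustrative caller, assuming a LOCALIO-style path where the network is bypassed (example_map_cred() is hypothetical): the helper fills a svc_cred that can then be handed to the local server-side entry point.

static void example_map_cred(struct rpc_clnt *clnt, const struct cred *cred)
{
	struct svc_cred svc;

	svcauth_map_clnt_to_svc_cred_local(clnt, cred, &svc);
	/* ... pass &svc to the local server-side entry point ... */
	if (svc.cr_group_info)
		put_group_info(svc.cr_group_info);
}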
+
/**************************************************
* 'auth_domains' are stored in a hash table indexed by name.
* When the last reference to an 'auth_domain' is dropped,
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 04b45588ae6f..8ca98b146ec8 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -697,7 +697,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
rqstp->rq_auth_stat = rpc_autherr_badcred;
ipm = ip_map_cached_get(xprt);
if (ipm == NULL)
- ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
+ ipm = __ip_map_lookup(sn->ip_map_cache,
+ rqstp->rq_server->sv_programs->pg_class,
&sin6->sin6_addr);
if (ipm == NULL)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 545017a3daa4..72e5a01df3d3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1083,9 +1083,6 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
/* If we have more data, signal svc_xprt_enqueue() to try again */
svsk->sk_tcplen = 0;
svsk->sk_marker = xdr_zero;
-
- smp_wmb();
- tcp_set_rcvlowat(svsk->sk_sk, 1);
}
/**
@@ -1175,17 +1172,10 @@ err_incomplete:
goto err_delete;
if (len == want)
svc_tcp_fragment_received(svsk);
- else {
- /* Avoid more ->sk_data_ready() calls until the rest
- * of the message has arrived. This reduces service
- * thread wake-ups on large incoming messages. */
- tcp_set_rcvlowat(svsk->sk_sk,
- svc_sock_reclen(svsk) - svsk->sk_tcplen);
-
+ else
trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
svc_sock_reclen(svsk),
svsk->sk_tcplen - sizeof(rpc_fraghdr));
- }
goto err_noclose;
error:
if (len != -EAGAIN)
@@ -1206,15 +1196,6 @@ err_noclose:
* MSG_SPLICE_PAGES is used exclusively to reduce the number of
* copy operations in this path. Therefore the caller must ensure
* that the pages backing @xdr are unchanging.
- *
- * Note that the send is non-blocking. The caller has incremented
- * the reference count on each page backing the RPC message, and
- * the network layer will "put" these pages when transmission is
- * complete.
- *
- * This is safe for our RPC services because the memory backing
- * the head and tail components is never kmalloc'd. These always
- * come from pages in the svc_rqst::rq_pages array.
*/
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
rpc_fraghdr marker, unsigned int *sentp)
@@ -1244,6 +1225,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
+ page_frag_free(buf);
if (ret < 0)
return ret;
*sentp += ret;
@@ -1386,7 +1368,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
spin_unlock_bh(&serv->sv_lock);
}
-EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
/*
* Initialize socket for RPC use and create svc_sock struct
@@ -1560,6 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
newlen = error;
if (protocol == IPPROTO_TCP) {
+ sk_net_refcnt_upgrade(sock->sk);
if ((error = kernel_listen(sock, 64)) < 0)
goto bummer;
}
@@ -1617,7 +1599,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
static void svc_sock_free(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct page_frag_cache *pfc = &svsk->sk_frag_cache;
struct socket *sock = svsk->sk_sock;
trace_svcsock_free(svsk, sock);
@@ -1627,8 +1608,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
sockfd_put(sock);
else
sock_release(sock);
- if (pfc->va)
- __page_frag_cache_drain(virt_to_head_page(pfc->va),
- pfc->pagecnt_bias);
+
+ page_frag_cache_drain(&svsk->sk_frag_cache);
kfree(svsk);
}
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 93941ab12549..bdb587a72422 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nlm_debug);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-static int proc_do_xprt(struct ctl_table *table, int write,
+static int proc_do_xprt(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char tmpbuf[256];
@@ -62,7 +62,7 @@ static int proc_do_xprt(struct ctl_table *table, int write,
}
static int
-proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp,
+proc_dodebug(const struct ctl_table *table, int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
char tmpbuf[20], *s = NULL;
@@ -160,7 +160,6 @@ static struct ctl_table debug_table[] = {
.mode = 0444,
.proc_handler = proc_do_xprt,
},
- { }
};
void
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 62e07c330a66..4e003cb516fe 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1097,6 +1097,12 @@ out_overflow:
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
* adjust the length of the current kvec.
+ *
+ * The returned pointer is valid only until the next call to
+ * xdr_reserve_space() or xdr_commit_encode() on @xdr. The current
+ * implementation of this API guarantees that space reserved for a
+ * four-byte data item remains valid until @xdr is destroyed, but
+ * that might not always be true in the future.
*/
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
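A small sketch of the pattern that the new lifetime note is about (example_encode() is hypothetical): only the special four-byte reservation may be written through later, typically to backfill a length.

static void example_encode(struct xdr_stream *xdr, const void *data,
			   unsigned int len)
{
	__be32 *lenp;
	void *p;

	lenp = xdr_reserve_space(xdr, XDR_UNIT);	/* backfilled below */
	p = xdr_reserve_space(xdr, xdr_align_size(len));
	if (p)
		memcpy(p, data, len);
	if (lenp)
		*lenp = cpu_to_be32(len);	/* relies on the 4-byte guarantee */
}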
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index af13fdfa6672..09f245cda526 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1398,6 +1398,12 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task)
if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
return;
if (!list_empty(&req->rq_xmit)) {
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (list_is_first(&req->rq_xmit, &xprt->xmit_queue) &&
+ xprt->ops->abort_send_request)
+ xprt->ops->abort_send_request(req);
+
list_del(&req->rq_xmit);
if (!list_empty(&req->rq_xmit2)) {
struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
@@ -1541,6 +1547,9 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
int is_retrans = RPC_WAS_SENT(task);
int status;
+ if (test_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ return -ENOTCONN;
+
if (!req->rq_bytes_sent) {
if (xprt_request_data_received(task)) {
status = 0;
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 720d3ba742ec..7e98d4dd9f10 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -603,23 +603,6 @@ struct rpc_xprt *xprt_iter_get_helper(struct rpc_xprt_iter *xpi,
}
/**
- * xprt_iter_get_xprt - Returns the rpc_xprt pointed to by the cursor
- * @xpi: pointer to rpc_xprt_iter
- *
- * Returns a reference to the struct rpc_xprt that is currently
- * pointed to by the cursor.
- */
-struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi)
-{
- struct rpc_xprt *xprt;
-
- rcu_read_lock();
- xprt = xprt_iter_get_helper(xpi, xprt_iter_ops(xpi)->xpi_xprt);
- rcu_read_unlock();
- return xprt;
-}
-
-/**
* xprt_iter_get_next - Returns the next rpc_xprt following the cursor
* @xpi: pointer to rpc_xprt_iter
*
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index 55b21bae866d..3232aa23cdb4 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += rpcrdma.o
-rpcrdma-y := transport.o rpc_rdma.o verbs.o frwr_ops.o \
+rpcrdma-y := transport.o rpc_rdma.o verbs.o frwr_ops.o ib_client.o \
svc_rdma.o svc_rdma_backchannel.o svc_rdma_transport.o \
svc_rdma_sendto.o svc_rdma_recvfrom.o svc_rdma_rw.o \
svc_rdma_pcl.o module.o
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index ffbf99894970..31434aeb8e29 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -54,7 +54,7 @@ static void frwr_cid_init(struct rpcrdma_ep *ep,
cid->ci_completion_id = mr->mr_ibmr->res.id;
}
-static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
+static void frwr_mr_unmap(struct rpcrdma_mr *mr)
{
if (mr->mr_device) {
trace_xprtrdma_mr_unmap(mr);
@@ -73,7 +73,7 @@ void frwr_mr_release(struct rpcrdma_mr *mr)
{
int rc;
- frwr_mr_unmap(mr->mr_xprt, mr);
+ frwr_mr_unmap(mr);
rc = ib_dereg_mr(mr->mr_ibmr);
if (rc)
@@ -84,7 +84,7 @@ void frwr_mr_release(struct rpcrdma_mr *mr)
static void frwr_mr_put(struct rpcrdma_mr *mr)
{
- frwr_mr_unmap(mr->mr_xprt, mr);
+ frwr_mr_unmap(mr);
/* The MR is returned to the req's MR free list instead
* of to the xprt's MR free list. No spinlock is needed.
@@ -92,7 +92,8 @@ static void frwr_mr_put(struct rpcrdma_mr *mr)
rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}
-/* frwr_reset - Place MRs back on the free list
+/**
+ * frwr_reset - Place MRs back on @req's free list
* @req: request to reset
*
* Used after a failed marshal. For FRWR, this means the MRs
diff --git a/net/sunrpc/xprtrdma/ib_client.c b/net/sunrpc/xprtrdma/ib_client.c
new file mode 100644
index 000000000000..28c68b5f6823
--- /dev/null
+++ b/net/sunrpc/xprtrdma/ib_client.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2024 Oracle. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+#include <linux/sunrpc/svc_rdma.h>
+#include <linux/sunrpc/rdma_rn.h>
+
+#include "xprt_rdma.h"
+#include <trace/events/rpcrdma.h>
+
+/* Per-ib_device private data for rpcrdma */
+struct rpcrdma_device {
+ struct kref rd_kref;
+ unsigned long rd_flags;
+ struct ib_device *rd_device;
+ struct xarray rd_xa;
+ struct completion rd_done;
+};
+
+#define RPCRDMA_RD_F_REMOVING (0)
+
+static struct ib_client rpcrdma_ib_client;
+
+/*
+ * Listeners have no associated device, so we never register them.
+ * Note that ib_get_client_data() does not check if @device is
+ * NULL for us.
+ */
+static struct rpcrdma_device *rpcrdma_get_client_data(struct ib_device *device)
+{
+ if (!device)
+ return NULL;
+ return ib_get_client_data(device, &rpcrdma_ib_client);
+}
+
+/**
+ * rpcrdma_rn_register - register to get device removal notifications
+ * @device: device to monitor
+ * @rn: notification object that wishes to be notified
+ * @done: callback to notify caller of device removal
+ *
+ * Returns zero on success. The callback in rn_done is guaranteed
+ * to be invoked when the device is removed, unless this notification
+ * is unregistered first.
+ *
+ * On failure, a negative errno is returned.
+ */
+int rpcrdma_rn_register(struct ib_device *device,
+ struct rpcrdma_notification *rn,
+ void (*done)(struct rpcrdma_notification *rn))
+{
+ struct rpcrdma_device *rd = rpcrdma_get_client_data(device);
+
+ if (!rd || test_bit(RPCRDMA_RD_F_REMOVING, &rd->rd_flags))
+ return -ENETUNREACH;
+
+ if (xa_alloc(&rd->rd_xa, &rn->rn_index, rn, xa_limit_32b, GFP_KERNEL) < 0)
+ return -ENOMEM;
+ kref_get(&rd->rd_kref);
+ rn->rn_done = done;
+ trace_rpcrdma_client_register(device, rn);
+ return 0;
+}
+
+static void rpcrdma_rn_release(struct kref *kref)
+{
+ struct rpcrdma_device *rd = container_of(kref, struct rpcrdma_device,
+ rd_kref);
+
+ trace_rpcrdma_client_completion(rd->rd_device);
+ complete(&rd->rd_done);
+}
+
+/**
+ * rpcrdma_rn_unregister - stop device removal notifications
+ * @device: monitored device
+ * @rn: notification object that no longer wishes to be notified
+ */
+void rpcrdma_rn_unregister(struct ib_device *device,
+ struct rpcrdma_notification *rn)
+{
+ struct rpcrdma_device *rd = rpcrdma_get_client_data(device);
+
+ if (!rd)
+ return;
+
+ trace_rpcrdma_client_unregister(device, rn);
+ xa_erase(&rd->rd_xa, rn->rn_index);
+ kref_put(&rd->rd_kref, rpcrdma_rn_release);
+}
+
+/**
+ * rpcrdma_add_one - ib_client device insertion callback
+ * @device: device about to be inserted
+ *
+ * Returns zero on success. xprtrdma private data has been allocated
+ * for this device. On failure, a negative errno is returned.
+ */
+static int rpcrdma_add_one(struct ib_device *device)
+{
+ struct rpcrdma_device *rd;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ kref_init(&rd->rd_kref);
+ xa_init_flags(&rd->rd_xa, XA_FLAGS_ALLOC);
+ rd->rd_device = device;
+ init_completion(&rd->rd_done);
+ ib_set_client_data(device, &rpcrdma_ib_client, rd);
+
+ trace_rpcrdma_client_add_one(device);
+ return 0;
+}
+
+/**
+ * rpcrdma_remove_one - ib_client device removal callback
+ * @device: device about to be removed
+ * @client_data: this module's private per-device data
+ *
+ * Upon return, all transports associated with @device have divested
+ * themselves from IB hardware resources.
+ */
+static void rpcrdma_remove_one(struct ib_device *device,
+ void *client_data)
+{
+ struct rpcrdma_device *rd = client_data;
+ struct rpcrdma_notification *rn;
+ unsigned long index;
+
+ trace_rpcrdma_client_remove_one(device);
+
+ set_bit(RPCRDMA_RD_F_REMOVING, &rd->rd_flags);
+ xa_for_each(&rd->rd_xa, index, rn)
+ rn->rn_done(rn);
+
+ /*
+ * Wait only if there are still outstanding notification
+ * registrants for this device.
+ */
+ if (!refcount_dec_and_test(&rd->rd_kref.refcount)) {
+ trace_rpcrdma_client_wait_on(device);
+ wait_for_completion(&rd->rd_done);
+ }
+
+ trace_rpcrdma_client_remove_one_done(device);
+ xa_destroy(&rd->rd_xa);
+ kfree(rd);
+}
+
+static struct ib_client rpcrdma_ib_client = {
+ .name = "rpcrdma",
+ .add = rpcrdma_add_one,
+ .remove = rpcrdma_remove_one,
+};
+
+/**
+ * rpcrdma_ib_client_unregister - unregister ib_client for xprtrdma
+ *
+ * cel: watch for orphaned rpcrdma_device objects on module unload
+ */
+void rpcrdma_ib_client_unregister(void)
+{
+ ib_unregister_client(&rpcrdma_ib_client);
+}
+
+/**
+ * rpcrdma_ib_client_register - register ib_client for rpcrdma
+ *
+ * Returns zero on success, or a negative errno.
+ */
+int rpcrdma_ib_client_register(void)
+{
+ return ib_register_client(&rpcrdma_ib_client);
+}
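To show how the rpcrdma_rn_register()/rpcrdma_rn_unregister() pair above is meant to be consumed, here is a hedged sketch (struct example_ep and the function names are placeholders); the real consumer is the rpcrdma transport's endpoint code:

struct example_ep {
	struct rpcrdma_notification	ex_rn;
	bool				ex_device_removed;
};

static void example_removal_done(struct rpcrdma_notification *rn)
{
	struct example_ep *ep = container_of(rn, struct example_ep, ex_rn);

	/* Divest RDMA resources here; rpcrdma_remove_one() waits until all
	 * registrants have dropped their notification references. */
	ep->ex_device_removed = true;
}

static int example_ep_open(struct example_ep *ep, struct ib_device *device)
{
	ep->ex_device_removed = false;
	return rpcrdma_rn_register(device, &ep->ex_rn, example_removal_done);
}

static void example_ep_close(struct example_ep *ep, struct ib_device *device)
{
	/* Always unregister: this drops the reference that the removal path
	 * may be waiting on, even after the callback has fired. */
	rpcrdma_rn_unregister(device, &ep->ex_rn);
}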
diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
index 45c5b41ac8dc..697f571d4c01 100644
--- a/net/sunrpc/xprtrdma/module.c
+++ b/net/sunrpc/xprtrdma/module.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sunrpc/svc_rdma.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <asm/swab.h>
@@ -30,21 +31,32 @@ static void __exit rpc_rdma_cleanup(void)
{
xprt_rdma_cleanup();
svc_rdma_cleanup();
+ rpcrdma_ib_client_unregister();
}
static int __init rpc_rdma_init(void)
{
int rc;
+ rc = rpcrdma_ib_client_register();
+ if (rc)
+ goto out_rc;
+
rc = svc_rdma_init();
if (rc)
- goto out;
+ goto out_ib_client;
rc = xprt_rdma_init();
if (rc)
- svc_rdma_cleanup();
+ goto out_svc_rdma;
-out:
+ return 0;
+
+out_svc_rdma:
+ svc_rdma_cleanup();
+out_ib_client:
+ rpcrdma_ib_client_unregister();
+out_rc:
return rc;
}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 190a4de239c8..1478c41c7e9d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1471,8 +1471,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
credits = 1; /* don't deadlock */
else if (credits > r_xprt->rx_ep->re_max_requests)
credits = r_xprt->rx_ep->re_max_requests;
- rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
- false);
+ rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1));
if (buf->rb_credits != credits)
rpcrdma_update_cwnd(r_xprt, credits);
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index f86970733eb0..415c0310101f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -74,7 +74,7 @@ enum {
SVCRDMA_COUNTER_BUFSIZ = sizeof(unsigned long long),
};
-static int svcrdma_counter_handler(struct ctl_table *table, int write,
+static int svcrdma_counter_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct percpu_counter *stat = (struct percpu_counter *)table->data;
@@ -209,7 +209,6 @@ static struct ctl_table svcrdma_parm_table[] = {
.extra1 = &zero,
.extra2 = &zero,
},
- { },
};
static void svc_rdma_proc_cleanup(void)
@@ -234,25 +233,34 @@ static int svc_rdma_proc_init(void)
rc = percpu_counter_init(&svcrdma_stat_read, 0, GFP_KERNEL);
if (rc)
- goto out_err;
+ goto err;
rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL);
if (rc)
- goto out_err;
+ goto err_read;
rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL);
if (rc)
- goto out_err;
+ goto err_recv;
rc = percpu_counter_init(&svcrdma_stat_write, 0, GFP_KERNEL);
if (rc)
- goto out_err;
+ goto err_sq;
svcrdma_table_header = register_sysctl("sunrpc/svc_rdma",
svcrdma_parm_table);
+ if (!svcrdma_table_header)
+ goto err_write;
+
return 0;
-out_err:
+err_write:
+ rc = -ENOMEM;
+ percpu_counter_destroy(&svcrdma_stat_write);
+err_sq:
percpu_counter_destroy(&svcrdma_stat_sq_starve);
+err_recv:
percpu_counter_destroy(&svcrdma_stat_recv);
+err_read:
percpu_counter_destroy(&svcrdma_stat_read);
+err:
return rc;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index c9be6778643b..e5a78b761012 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -90,7 +90,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
*/
get_page(virt_to_page(rqst->rq_buffer));
sctxt->sc_send_wr.opcode = IB_WR_SEND;
- return svc_rdma_send(rdma, sctxt);
+ return svc_rdma_post_send(rdma, sctxt);
}
/* Server-side transport endpoint wants a whole page for its send
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index d72953f29258..292022f0976e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -94,7 +94,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -493,7 +493,13 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
return false;
- /* A bogus segcount causes this buffer overflow check to fail. */
+ /* Before trusting the segcount value enough to use it in
+ * a computation, perform a simple range check. This is an
+ * arbitrary but sensible limit (ie, not architectural).
+ */
+ if (unlikely(segcount > RPCSVC_MAXPAGES))
+ return false;
+
p = xdr_inline_decode(&rctxt->rc_stream,
segcount * rpcrdma_segment_maxsz * sizeof(*p));
return p != NULL;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index c00fcce61d1e..40797114d50a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -197,28 +197,6 @@ void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
llist_add_batch(first, last, &rdma->sc_rw_ctxts);
}
-/* State for sending a Write or Reply chunk.
- * - Tracks progress of writing one chunk over all its segments
- * - Stores arguments for the SGL constructor functions
- */
-struct svc_rdma_write_info {
- struct svcxprt_rdma *wi_rdma;
-
- const struct svc_rdma_chunk *wi_chunk;
-
- /* write state of this chunk */
- unsigned int wi_seg_off;
- unsigned int wi_seg_no;
-
- /* SGL constructor arguments */
- const struct xdr_buf *wi_xdr;
- unsigned char *wi_base;
- unsigned int wi_next_off;
-
- struct svc_rdma_chunk_ctxt wi_cc;
- struct work_struct wi_work;
-};
-
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
const struct svc_rdma_chunk *chunk)
@@ -253,6 +231,49 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
}
/**
+ * svc_rdma_reply_chunk_release - Release Reply chunk I/O resources
+ * @rdma: controlling transport
+ * @ctxt: Send context that is being released
+ */
+void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt)
+{
+ struct svc_rdma_chunk_ctxt *cc = &ctxt->sc_reply_info.wi_cc;
+
+ if (!cc->cc_sqecount)
+ return;
+ svc_rdma_cc_release(rdma, cc, DMA_TO_DEVICE);
+}
+
+/**
+ * svc_rdma_reply_done - Reply chunk Write completion handler
+ * @cq: controlling Completion Queue
+ * @wc: Work Completion report
+ *
+ * Pages under I/O are released by a subsequent Send completion.
+ */
+static void svc_rdma_reply_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct svc_rdma_chunk_ctxt *cc =
+ container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
+ struct svcxprt_rdma *rdma = cq->cq_context;
+
+ switch (wc->status) {
+ case IB_WC_SUCCESS:
+ trace_svcrdma_wc_reply(&cc->cc_cid);
+ return;
+ case IB_WC_WR_FLUSH_ERR:
+ trace_svcrdma_wc_reply_flush(wc, &cc->cc_cid);
+ break;
+ default:
+ trace_svcrdma_wc_reply_err(wc, &cc->cc_cid);
+ }
+
+ svc_xprt_deferred_close(&rdma->sc_xprt);
+}
+
+/**
* svc_rdma_write_done - Write chunk completion
* @cq: controlling Completion Queue
* @wc: Work Completion
@@ -580,41 +601,33 @@ static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
return xdr->len;
}
-/**
- * svc_rdma_send_write_chunk - Write all segments in a Write chunk
- * @rdma: controlling RDMA transport
- * @chunk: Write chunk provided by the client
- * @xdr: xdr_buf containing the data payload
- *
- * Returns a non-negative number of bytes the chunk consumed, or
- * %-E2BIG if the payload was larger than the Write chunk,
- * %-EINVAL if client provided too many segments,
- * %-ENOMEM if rdma_rw context pool was exhausted,
- * %-ENOTCONN if posting failed (connection is lost),
- * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
- */
-int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
- const struct svc_rdma_chunk *chunk,
- const struct xdr_buf *xdr)
+static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_chunk *chunk,
+ const struct xdr_buf *xdr)
{
struct svc_rdma_write_info *info;
struct svc_rdma_chunk_ctxt *cc;
+ struct xdr_buf payload;
int ret;
+ if (xdr_buf_subsegment(xdr, &payload, chunk->ch_position,
+ chunk->ch_payload_length))
+ return -EMSGSIZE;
+
info = svc_rdma_write_info_alloc(rdma, chunk);
if (!info)
return -ENOMEM;
cc = &info->wi_cc;
- ret = svc_rdma_xb_write(xdr, info);
- if (ret != xdr->len)
+ ret = svc_rdma_xb_write(&payload, info);
+ if (ret != payload.len)
goto out_err;
trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
ret = svc_rdma_post_chunk_ctxt(rdma, cc);
if (ret < 0)
goto out_err;
- return xdr->len;
+ return 0;
out_err:
svc_rdma_write_info_free(info);
@@ -622,9 +635,37 @@ out_err:
}
/**
- * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
+ * svc_rdma_send_write_list - Send all chunks on the Write list
* @rdma: controlling RDMA transport
- * @rctxt: Write and Reply chunks from client
+ * @rctxt: Write list provisioned by the client
+ * @xdr: xdr_buf containing an RPC Reply message
+ *
+ * Returns zero on success, or a negative errno if one or more
+ * Write chunks could not be sent.
+ */
+int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr)
+{
+ struct svc_rdma_chunk *chunk;
+ int ret;
+
+ pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
+ if (!chunk->ch_payload_length)
+ break;
+ ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * svc_rdma_prepare_reply_chunk - Construct WR chain for writing the Reply chunk
+ * @rdma: controlling RDMA transport
+ * @write_pcl: Write chunk list provided by client
+ * @reply_pcl: Reply chunk provided by client
+ * @sctxt: Send WR resources
* @xdr: xdr_buf containing an RPC Reply
*
* Returns a non-negative number of bytes the chunk consumed, or
@@ -634,39 +675,45 @@ out_err:
* %-ENOTCONN if posting failed (connection is lost),
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
*/
-int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
- const struct svc_rdma_recv_ctxt *rctxt,
- const struct xdr_buf *xdr)
+int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct xdr_buf *xdr)
{
- struct svc_rdma_write_info *info;
- struct svc_rdma_chunk_ctxt *cc;
- struct svc_rdma_chunk *chunk;
+ struct svc_rdma_write_info *info = &sctxt->sc_reply_info;
+ struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
+ struct ib_send_wr *first_wr;
+ struct list_head *pos;
+ struct ib_cqe *cqe;
int ret;
- if (pcl_is_empty(&rctxt->rc_reply_pcl))
- return 0;
-
- chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
- info = svc_rdma_write_info_alloc(rdma, chunk);
- if (!info)
- return -ENOMEM;
- cc = &info->wi_cc;
+ info->wi_rdma = rdma;
+ info->wi_chunk = pcl_first_chunk(reply_pcl);
+ info->wi_seg_off = 0;
+ info->wi_seg_no = 0;
+ info->wi_cc.cc_cqe.done = svc_rdma_reply_done;
- ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
+ ret = pcl_process_nonpayloads(write_pcl, xdr,
svc_rdma_xb_write, info);
if (ret < 0)
- goto out_err;
+ return ret;
- trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
- ret = svc_rdma_post_chunk_ctxt(rdma, cc);
- if (ret < 0)
- goto out_err;
+ first_wr = sctxt->sc_wr_chain;
+ cqe = &cc->cc_cqe;
+ list_for_each(pos, &cc->cc_rwctxts) {
+ struct svc_rdma_rw_ctxt *rwc;
- return xdr->len;
+ rwc = list_entry(pos, struct svc_rdma_rw_ctxt, rw_list);
+ first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp,
+ rdma->sc_port_num, cqe, first_wr);
+ cqe = NULL;
+ }
+ sctxt->sc_wr_chain = first_wr;
+ sctxt->sc_sqecount += cc->cc_sqecount;
-out_err:
- svc_rdma_write_info_free(info);
- return ret;
+ trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
+ return xdr->len;
}
/**
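svc_rdma_prepare_reply_chunk() above no longer posts the Reply chunk's RDMA Writes on its own; it threads them onto the front of the Send WR chain via rdma_rw_ctx_wrs() so that a single ib_post_send() later covers the whole chain. A hedged, self-contained sketch of that chain-threading idea, using an invented struct wr rather than the kernel's ib_send_wr:

#include <stddef.h>

/* Toy stand-in for a work request with an intrusive next pointer. */
struct wr {
        struct wr *next;
        int opcode;
};

/* Link the NULL-terminated list starting at @writes ahead of @send and
 * return the new head, mirroring how the Reply chunk's Write WRs are
 * threaded in front of the final Send WR so one post covers them all.
 */
static struct wr *wr_chain_prepend(struct wr *writes, struct wr *send)
{
        struct wr *tail = writes;

        if (!writes)
                return send;
        while (tail->next)
                tail = tail->next;
        tail->next = send;
        return writes;
}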
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 1a49b7f02041..96154a2367a1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -100,7 +100,7 @@
*/
#include <linux/spinlock.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -205,9 +205,13 @@ out:
xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
ctxt->sc_xprt_buf, NULL);
+ svc_rdma_cc_init(rdma, &ctxt->sc_reply_info.wi_cc);
ctxt->sc_send_wr.num_sge = 0;
ctxt->sc_cur_sge_no = 0;
ctxt->sc_page_count = 0;
+ ctxt->sc_wr_chain = &ctxt->sc_send_wr;
+ ctxt->sc_sqecount = 1;
+
return ctxt;
out_empty:
@@ -223,6 +227,8 @@ static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
struct ib_device *device = rdma->sc_cm_id->device;
unsigned int i;
+ svc_rdma_reply_chunk_release(rdma, ctxt);
+
if (ctxt->sc_page_count)
release_pages(ctxt->sc_pages, ctxt->sc_page_count);
@@ -293,7 +299,7 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
struct svc_rdma_send_ctxt *ctxt =
container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
- svc_rdma_wake_send_waiters(rdma, 1);
+ svc_rdma_wake_send_waiters(rdma, ctxt->sc_sqecount);
if (unlikely(wc->status != IB_WC_SUCCESS))
goto flushed;
@@ -312,51 +318,76 @@ flushed:
}
/**
- * svc_rdma_send - Post a single Send WR
- * @rdma: transport on which to post the WR
- * @ctxt: send ctxt with a Send WR ready to post
+ * svc_rdma_post_send - Post a WR chain to the Send Queue
+ * @rdma: transport context
+ * @ctxt: WR chain to post
+ *
+ * Copy fields in @ctxt to stack variables in order to guarantee
+ * that these values remain available after the ib_post_send() call.
+ * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
+ *
+ * Note there is potential for starvation when the Send Queue is
+ * full because there is no order to when waiting threads are
+ * awoken. The transport is typically provisioned with a deep
+ * enough Send Queue that SQ exhaustion should be a rare event.
*
- * Returns zero if the Send WR was posted successfully. Otherwise, a
- * negative errno is returned.
+ * Return values:
+ * %0: @ctxt's WR chain was posted successfully
+ * %-ENOTCONN: The connection was lost
*/
-int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
+int svc_rdma_post_send(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt)
{
- struct ib_send_wr *wr = &ctxt->sc_send_wr;
- int ret;
+ struct ib_send_wr *first_wr = ctxt->sc_wr_chain;
+ struct ib_send_wr *send_wr = &ctxt->sc_send_wr;
+ const struct ib_send_wr *bad_wr = first_wr;
+ struct rpc_rdma_cid cid = ctxt->sc_cid;
+ int ret, sqecount = ctxt->sc_sqecount;
might_sleep();
/* Sync the transport header buffer */
ib_dma_sync_single_for_device(rdma->sc_pd->device,
- wr->sg_list[0].addr,
- wr->sg_list[0].length,
+ send_wr->sg_list[0].addr,
+ send_wr->sg_list[0].length,
DMA_TO_DEVICE);
/* If the SQ is full, wait until an SQ entry is available */
- while (1) {
- if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
+ while (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) {
+ if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) < 0) {
+ svc_rdma_wake_send_waiters(rdma, sqecount);
+
+ /* When the transport is torn down, assume
+ * ib_drain_sq() will trigger enough Send
+ * completions to wake us. The XPT_CLOSE test
+ * above should then cause the while loop to
+ * exit.
+ */
percpu_counter_inc(&svcrdma_stat_sq_starve);
- trace_svcrdma_sq_full(rdma, &ctxt->sc_cid);
- atomic_inc(&rdma->sc_sq_avail);
+ trace_svcrdma_sq_full(rdma, &cid);
wait_event(rdma->sc_send_wait,
- atomic_read(&rdma->sc_sq_avail) > 1);
- if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
- return -ENOTCONN;
- trace_svcrdma_sq_retry(rdma, &ctxt->sc_cid);
+ atomic_read(&rdma->sc_sq_avail) > 0);
+ trace_svcrdma_sq_retry(rdma, &cid);
continue;
}
trace_svcrdma_post_send(ctxt);
- ret = ib_post_send(rdma->sc_qp, wr, NULL);
- if (ret)
- break;
+ ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
+ if (ret) {
+ trace_svcrdma_sq_post_err(rdma, &cid, ret);
+ svc_xprt_deferred_close(&rdma->sc_xprt);
+
+ /* If even one WR was posted, there will be a
+ * Send completion that bumps sc_sq_avail.
+ */
+ if (bad_wr == first_wr) {
+ svc_rdma_wake_send_waiters(rdma, sqecount);
+ break;
+ }
+ }
return 0;
}
-
- trace_svcrdma_sq_post_err(rdma, &ctxt->sc_cid, ret);
- svc_xprt_deferred_close(&rdma->sc_xprt);
- wake_up(&rdma->sc_send_wait);
- return ret;
+ return -ENOTCONN;
}
/**
@@ -839,16 +870,10 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
* in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
*
* Depending on whether a Write list or Reply chunk is present,
- * the server may send all, a portion of, or none of the xdr_buf.
+ * the server may Send all, a portion of, or none of the xdr_buf.
* In the latter case, only the transport header (sc_sges[0]) is
* transmitted.
*
- * RDMA Send is the last step of transmitting an RPC reply. Pages
- * involved in the earlier RDMA Writes are here transferred out
- * of the rqstp and into the sctxt's page array. These pages are
- * DMA unmapped by each Write completion, but the subsequent Send
- * completion finally releases these pages.
- *
* Assumptions:
* - The Reply's transport header will never be larger than a page.
*/
@@ -857,6 +882,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
const struct svc_rdma_recv_ctxt *rctxt,
struct svc_rqst *rqstp)
{
+ struct ib_send_wr *send_wr = &sctxt->sc_send_wr;
int ret;
ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
@@ -864,16 +890,19 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
if (ret < 0)
return ret;
+ /* Transfer pages involved in RDMA Writes to the sctxt's
+ * page array. Completion handling releases these pages.
+ */
svc_rdma_save_io_pages(rqstp, sctxt);
if (rctxt->rc_inv_rkey) {
- sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
- sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
+ send_wr->opcode = IB_WR_SEND_WITH_INV;
+ send_wr->ex.invalidate_rkey = rctxt->rc_inv_rkey;
} else {
- sctxt->sc_send_wr.opcode = IB_WR_SEND;
+ send_wr->opcode = IB_WR_SEND;
}
- return svc_rdma_send(rdma, sctxt);
+ return svc_rdma_post_send(rdma, sctxt);
}
/**
@@ -937,7 +966,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
sctxt->sc_send_wr.num_sge = 1;
sctxt->sc_send_wr.opcode = IB_WR_SEND;
sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
- if (svc_rdma_send(rdma, sctxt))
+ if (svc_rdma_post_send(rdma, sctxt))
goto put_ctxt;
return;
@@ -984,10 +1013,19 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (!p)
goto put_ctxt;
- ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
+ ret = svc_rdma_send_write_list(rdma, rctxt, &rqstp->rq_res);
if (ret < 0)
- goto reply_chunk;
- rc_size = ret;
+ goto put_ctxt;
+
+ rc_size = 0;
+ if (!pcl_is_empty(&rctxt->rc_reply_pcl)) {
+ ret = svc_rdma_prepare_reply_chunk(rdma, &rctxt->rc_write_pcl,
+ &rctxt->rc_reply_pcl, sctxt,
+ &rqstp->rq_res);
+ if (ret < 0)
+ goto reply_chunk;
+ rc_size = ret;
+ }
*p++ = *rdma_argp;
*p++ = *(rdma_argp + 1);
@@ -1030,45 +1068,33 @@ drop_connection:
/**
* svc_rdma_result_payload - special processing for a result payload
- * @rqstp: svc_rqst to operate on
- * @offset: payload's byte offset in @xdr
+ * @rqstp: RPC transaction context
+ * @offset: payload's byte offset in @rqstp->rq_res
* @length: size of payload, in bytes
*
+ * Assign the passed-in result payload to the current Write chunk,
+ * and advance cur_result_payload to the next Write chunk, if
+ * there is one.
+ *
* Return values:
* %0 if successful or nothing needed to be done
- * %-EMSGSIZE on XDR buffer overflow
* %-E2BIG if the payload was larger than the Write chunk
- * %-EINVAL if client provided too many segments
- * %-ENOMEM if rdma_rw context pool was exhausted
- * %-ENOTCONN if posting failed (connection is lost)
- * %-EIO if rdma_rw initialization failed (DMA mapping, etc)
*/
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length)
{
struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
struct svc_rdma_chunk *chunk;
- struct svcxprt_rdma *rdma;
- struct xdr_buf subbuf;
- int ret;
chunk = rctxt->rc_cur_result_payload;
if (!length || !chunk)
return 0;
rctxt->rc_cur_result_payload =
pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
+
if (length > chunk->ch_length)
return -E2BIG;
-
chunk->ch_position = offset;
chunk->ch_payload_length = length;
-
- if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
- return -EMSGSIZE;
-
- rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
- ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
- if (ret < 0)
- return ret;
return 0;
}
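svc_rdma_post_send() above now reserves ctxt->sc_sqecount Send Queue entries in one step and hands them back if the post fails, rather than accounting one entry per call. A simplified user-space sketch of that reserve/undo pattern with C11 atomics (names are invented; the real code also sleeps on sc_send_wait and re-checks XPT_CLOSE before retrying):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sq_avail = 64;        /* provisioned Send Queue depth */

/* Try to reserve @n SQ entries in one step. On failure, undo the
 * reservation so the caller can wait for completions and retry.
 */
static bool sq_reserve(int n)
{
        if (atomic_fetch_sub(&sq_avail, n) - n < 0) {
                atomic_fetch_add(&sq_avail, n);   /* give the credits back */
                return false;
        }
        return true;
}

/* Completion side: return @n entries (and, in the real code, wake waiters). */
static void sq_release(int n)
{
        atomic_fetch_add(&sq_avail, n);
}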
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4f27325ace4a..c3fbf0779d4a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -65,6 +65,8 @@
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
struct net *net, int node);
+static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct net *net,
struct sockaddr *sa, int salen,
@@ -122,6 +124,41 @@ static void qp_event_handler(struct ib_event *event, void *context)
}
}
+static struct rdma_cm_id *
+svc_rdma_create_listen_id(struct net *net, struct sockaddr *sap,
+ void *context)
+{
+ struct rdma_cm_id *listen_id;
+ int ret;
+
+ listen_id = rdma_create_id(net, svc_rdma_listen_handler, context,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(listen_id))
+ return listen_id;
+
+ /* Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ ret = rdma_set_afonly(listen_id, 1);
+ if (ret)
+ goto out_destroy;
+#endif
+ ret = rdma_bind_addr(listen_id, sap);
+ if (ret)
+ goto out_destroy;
+
+ ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
+ if (ret)
+ goto out_destroy;
+
+ return listen_id;
+
+out_destroy:
+ rdma_destroy_id(listen_id);
+ return ERR_PTR(ret);
+}
+
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
struct net *net, int node)
{
@@ -247,17 +284,31 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
*
* Return values:
* %0: Do not destroy @cma_id
- * %1: Destroy @cma_id (never returned here)
+ * %1: Destroy @cma_id
*
* NB: There is never a DEVICE_REMOVAL event for INADDR_ANY listeners.
*/
static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
{
+ struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
+ struct svcxprt_rdma *cma_xprt = cma_id->context;
+ struct svc_xprt *cma_rdma = &cma_xprt->sc_xprt;
+ struct rdma_cm_id *listen_id;
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
handle_connect_req(cma_id, &event->param.conn);
break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ listen_id = svc_rdma_create_listen_id(cma_rdma->xpt_net,
+ sap, cma_xprt);
+ if (IS_ERR(listen_id)) {
+ pr_err("Listener dead, address change failed for device %s\n",
+ cma_id->device->name);
+ } else
+ cma_xprt->sc_cm_id = listen_id;
+ return 1;
default:
break;
}
@@ -288,7 +339,6 @@ static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
svc_xprt_enqueue(xprt);
break;
case RDMA_CM_EVENT_DISCONNECTED:
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
svc_xprt_deferred_close(xprt);
break;
default:
@@ -307,7 +357,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
{
struct rdma_cm_id *listen_id;
struct svcxprt_rdma *cma_xprt;
- int ret;
if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
return ERR_PTR(-EAFNOSUPPORT);
@@ -317,30 +366,13 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");
- listen_id = rdma_create_id(net, svc_rdma_listen_handler, cma_xprt,
- RDMA_PS_TCP, IB_QPT_RC);
+ listen_id = svc_rdma_create_listen_id(net, sa, cma_xprt);
if (IS_ERR(listen_id)) {
- ret = PTR_ERR(listen_id);
- goto err0;
+ kfree(cma_xprt);
+ return ERR_CAST(listen_id);
}
-
- /* Allow both IPv4 and IPv6 sockets to bind a single port
- * at the same time.
- */
-#if IS_ENABLED(CONFIG_IPV6)
- ret = rdma_set_afonly(listen_id, 1);
- if (ret)
- goto err1;
-#endif
- ret = rdma_bind_addr(listen_id, sa);
- if (ret)
- goto err1;
cma_xprt->sc_cm_id = listen_id;
- ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
- if (ret)
- goto err1;
-
/*
* We need to use the address from the cm_id in case the
* caller specified 0 for the port number.
@@ -349,12 +381,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
return &cma_xprt->sc_xprt;
+}
- err1:
- rdma_destroy_id(listen_id);
- err0:
- kfree(cma_xprt);
- return ERR_PTR(ret);
+static void svc_rdma_xprt_done(struct rpcrdma_notification *rn)
+{
+ struct svcxprt_rdma *rdma = container_of(rn, struct svcxprt_rdma,
+ sc_rn);
+ struct rdma_cm_id *id = rdma->sc_cm_id;
+
+ trace_svcrdma_device_removal(id);
+ svc_xprt_close(&rdma->sc_xprt);
}
/*
@@ -398,6 +434,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
+ if (rpcrdma_rn_register(dev, &newxprt->sc_rn, svc_rdma_xprt_done))
+ goto errout;
+
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
@@ -415,15 +454,20 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
- newxprt->sc_recv_batch;
+ newxprt->sc_recv_batch + 1 /* drain */;
if (rq_depth > dev->attrs.max_qp_wr) {
rq_depth = dev->attrs.max_qp_wr;
newxprt->sc_recv_batch = 1;
newxprt->sc_max_requests = rq_depth - 2;
newxprt->sc_max_bc_requests = 2;
}
- ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
- ctxts *= newxprt->sc_max_requests;
+
+ /* Arbitrarily estimate the number of rw_ctxs needed for
+ * this transport. This is enough rw_ctxs to make forward
+ * progress even if the client is using one rkey per page
+ * in each Read chunk.
+ */
+ ctxts = 3 * RPCSVC_MAXPAGES;
newxprt->sc_sq_depth = rq_depth + ctxts;
if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
@@ -460,12 +504,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
dprintk(" cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
-
+ dprintk(" send CQ depth = %u, recv CQ depth = %u\n",
+ newxprt->sc_sq_depth, rq_depth);
ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
if (ret) {
trace_svcrdma_qp_err(newxprt, ret);
goto errout;
}
+ newxprt->sc_max_send_sges = qp_attr.cap.max_send_sge;
newxprt->sc_qp = newxprt->sc_cm_id->qp;
if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
@@ -546,6 +592,7 @@ static void __svc_rdma_free(struct work_struct *work)
{
struct svcxprt_rdma *rdma =
container_of(work, struct svcxprt_rdma, sc_work);
+ struct ib_device *device = rdma->sc_cm_id->device;
/* This blocks until the Completion Queues are empty */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -574,6 +621,7 @@ static void __svc_rdma_free(struct work_struct *work)
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
+ rpcrdma_rn_unregister(device, &rdma->sc_rn);
kfree(rdma);
}
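The new svc_rdma_create_listen_id() helper above returns either a usable rdma_cm_id or an errno encoded with ERR_PTR(), unwinding with rdma_destroy_id() on any intermediate failure. For illustration only, here is a tiny user-space re-creation of the error-pointer convention the helper relies on (err_ptr/is_err and make_listener are invented names, not a real API):

#include <errno.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in a pointer, in the spirit of the kernel's
 * ERR_PTR()/IS_ERR()/PTR_ERR() helpers.
 */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A constructor that returns either a valid object or an encoded errno,
 * so callers have exactly one return value to test.
 */
static void *make_listener(int should_fail)
{
        static int object;              /* placeholder for a real allocation */

        if (should_fail)
                return err_ptr(-EADDRINUSE);
        return &object;
}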
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 29b0562d62e7..9a8ce5df83ca 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -137,7 +137,6 @@ static struct ctl_table xr_tunables_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { },
};
#endif
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4f8d7efa469f..63262ef0c2e3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -49,14 +49,14 @@
* o buffer memory
*/
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>
-#include <asm-generic/barrier.h>
-#include <asm/bitops.h>
+#include <asm/barrier.h>
#include <rdma/ib_cm.h>
@@ -69,13 +69,15 @@ static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
+rpcrdma_regbuf_alloc_node(size_t size, enum dma_data_direction direction,
+ int node);
+static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
@@ -222,7 +224,6 @@ static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
- struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
struct rpcrdma_ep *ep = id->context;
might_sleep();
@@ -241,10 +242,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
ep->re_async_rc = -ENETUNREACH;
complete(&ep->re_done);
return 0;
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- pr_info("rpcrdma: removing device %s for %pISpc\n",
- ep->re_id->device->name, sap);
- fallthrough;
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
goto disconnected;
@@ -280,6 +277,14 @@ disconnected:
return 0;
}
+static void rpcrdma_ep_removal_done(struct rpcrdma_notification *rn)
+{
+ struct rpcrdma_ep *ep = container_of(rn, struct rpcrdma_ep, re_rn);
+
+ trace_xprtrdma_device_removal(ep->re_id);
+ xprt_force_disconnect(ep->re_xprt);
+}
+
static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_ep *ep)
{
@@ -319,6 +324,10 @@ static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
if (rc)
goto out;
+ rc = rpcrdma_rn_register(id->device, &ep->re_rn, rpcrdma_ep_removal_done);
+ if (rc)
+ goto out;
+
return id;
out:
@@ -346,6 +355,8 @@ static void rpcrdma_ep_destroy(struct kref *kref)
ib_dealloc_pd(ep->re_pd);
ep->re_pd = NULL;
+ rpcrdma_rn_unregister(ep->re_id->device, &ep->re_rn);
+
kfree(ep);
module_put(THIS_MODULE);
}
@@ -501,7 +512,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
* outstanding Receives.
*/
rpcrdma_ep_get(ep);
- rpcrdma_post_recvs(r_xprt, 1, true);
+ rpcrdma_post_recvs(r_xprt, 1);
rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
if (rc)
@@ -893,6 +904,8 @@ static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
+ struct rpcrdma_mr *mr;
+
/* Credits are valid for only one connection */
req->rl_slot.rq_cong = 0;
@@ -902,7 +915,19 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
- frwr_reset(req);
+ /* The verbs consumer can't know the state of an MR on the
+ * req->rl_registered list unless a successful completion
+ * has occurred, so they cannot be re-used.
+ */
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
+ struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
+
+ spin_lock(&buf->rb_lock);
+ list_del(&mr->mr_all);
+ spin_unlock(&buf->rb_lock);
+
+ frwr_mr_release(mr);
+ }
}
/* ASSUMPTION: the rb_allreqs list is stable for the duration,
@@ -920,18 +945,20 @@ static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
}
static noinline
-struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
- bool temp)
+struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct ib_device *device = ep->re_id->device;
struct rpcrdma_rep *rep;
rep = kzalloc(sizeof(*rep), XPRTRDMA_GFP_FLAGS);
if (rep == NULL)
goto out;
- rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
- DMA_FROM_DEVICE);
+ rep->rr_rdmabuf = rpcrdma_regbuf_alloc_node(ep->re_inline_recv,
+ DMA_FROM_DEVICE,
+ ibdev_to_node(device));
if (!rep->rr_rdmabuf)
goto out_free;
@@ -946,7 +973,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
rep->rr_recv_wr.num_sge = 1;
- rep->rr_temp = temp;
spin_lock(&buf->rb_lock);
list_add(&rep->rr_all, &buf->rb_all_reps);
@@ -965,17 +991,6 @@ static void rpcrdma_rep_free(struct rpcrdma_rep *rep)
kfree(rep);
}
-static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
-{
- struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf;
-
- spin_lock(&buf->rb_lock);
- list_del(&rep->rr_all);
- spin_unlock(&buf->rb_lock);
-
- rpcrdma_rep_free(rep);
-}
-
static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
struct llist_node *node;
@@ -1007,10 +1022,8 @@ static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_rep *rep;
- list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
+ list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
- rep->rr_temp = true; /* Mark this rep for destruction */
- }
}
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
@@ -1227,14 +1240,15 @@ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
* or Replies they may be registered externally via frwr_map.
*/
static struct rpcrdma_regbuf *
-rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction)
+rpcrdma_regbuf_alloc_node(size_t size, enum dma_data_direction direction,
+ int node)
{
struct rpcrdma_regbuf *rb;
- rb = kmalloc(sizeof(*rb), XPRTRDMA_GFP_FLAGS);
+ rb = kmalloc_node(sizeof(*rb), XPRTRDMA_GFP_FLAGS, node);
if (!rb)
return NULL;
- rb->rg_data = kmalloc(size, XPRTRDMA_GFP_FLAGS);
+ rb->rg_data = kmalloc_node(size, XPRTRDMA_GFP_FLAGS, node);
if (!rb->rg_data) {
kfree(rb);
return NULL;
@@ -1246,6 +1260,12 @@ rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction)
return rb;
}
+static struct rpcrdma_regbuf *
+rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction)
+{
+ return rpcrdma_regbuf_alloc_node(size, direction, NUMA_NO_NODE);
+}
+
/**
* rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
* @rb: regbuf to reallocate
@@ -1323,10 +1343,9 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
* rpcrdma_post_recvs - Refill the Receive Queue
* @r_xprt: controlling transport instance
* @needed: current credit grant
- * @temp: mark Receive buffers to be deleted after one use
*
*/
-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ep *ep = r_xprt->rx_ep;
@@ -1340,8 +1359,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
if (likely(ep->re_receive_count > needed))
goto out;
needed -= ep->re_receive_count;
- if (!temp)
- needed += RPCRDMA_MAX_RECV_BATCH;
+ needed += RPCRDMA_MAX_RECV_BATCH;
if (atomic_inc_return(&ep->re_receiving) > 1)
goto out;
@@ -1350,12 +1368,8 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
wr = NULL;
while (needed) {
rep = rpcrdma_rep_get_locked(buf);
- if (rep && rep->rr_temp) {
- rpcrdma_rep_destroy(rep);
- continue;
- }
if (!rep)
- rep = rpcrdma_rep_create(r_xprt, temp);
+ rep = rpcrdma_rep_create(r_xprt);
if (!rep)
break;
if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
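rpcrdma_regbuf_alloc_node() above places both the regbuf descriptor and its backing data on the NUMA node reported by ibdev_to_node(), freeing the first allocation if the second fails. A rough user-space analogue of that node-local, two-step allocation, assuming libnuma is available (struct regbuf here is a stand-in, not the kernel structure; link with -lnuma):

#include <numa.h>       /* numa_alloc_onnode(), numa_free(); link with -lnuma */
#include <stddef.h>

struct regbuf {
        void   *data;
        size_t  len;
};

/* Allocate the descriptor and its payload on the same NUMA node,
 * unwinding the first allocation if the second one fails.
 */
static struct regbuf *regbuf_alloc_node(size_t size, int node)
{
        struct regbuf *rb = numa_alloc_onnode(sizeof(*rb), node);

        if (!rb)
                return NULL;
        rb->data = numa_alloc_onnode(size, node);
        if (!rb->data) {
                numa_free(rb, sizeof(*rb));
                return NULL;
        }
        rb->len = size;
        return rb;
}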
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index da409450dfc0..8147d2b41494 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -56,6 +56,7 @@
#include <linux/sunrpc/rpc_rdma_cid.h> /* completion IDs */
#include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> /* xprt parameters */
+#include <linux/sunrpc/rdma_rn.h> /* removal notifications */
#define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
@@ -92,6 +93,7 @@ struct rpcrdma_ep {
struct rpcrdma_connect_private
re_cm_private;
struct rdma_conn_param re_remote_cma;
+ struct rpcrdma_notification re_rn;
int re_receive_count;
unsigned int re_max_requests; /* depends on device */
unsigned int re_inline_send; /* negotiated */
@@ -198,7 +200,6 @@ struct rpcrdma_rep {
__be32 rr_proc;
int rr_wc_flags;
u32 rr_inv_rkey;
- bool rr_temp;
struct rpcrdma_regbuf *rr_rdmabuf;
struct rpcrdma_xprt *rr_rxprt;
struct rpc_rqst *rr_rqst;
@@ -466,7 +467,7 @@ void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed);
/*
* Buffer calls - xprtrdma/verbs.c
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 58f3dc8d0d71..83cc095846d3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -62,6 +62,7 @@
#include "sunrpc.h"
static void xs_close(struct rpc_xprt *xprt);
+static void xs_reset_srcport(struct sock_xprt *transport);
static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
struct socket *sock);
@@ -159,7 +160,6 @@ static struct ctl_table xs_tunables_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- { },
};
/*
@@ -883,6 +883,17 @@ static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
}
+static void xs_stream_abort_send_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ if (transport->xmit.offset != 0 &&
+ !test_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ xprt_force_disconnect(xprt);
+}
+
/*
* Determine if the previous message in the stream was aborted before it
* could complete transmission.
@@ -1187,6 +1198,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
+ clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
}
static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
@@ -1267,6 +1279,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
transport->file = NULL;
sk->sk_user_data = NULL;
+ sk->sk_sndtimeo = 0;
xs_restore_old_callbacks(transport, sk);
xprt_clear_connected(xprt);
@@ -1565,8 +1578,10 @@ static void xs_tcp_state_change(struct sock *sk)
break;
case TCP_CLOSE:
if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
- &transport->sock_state))
+ &transport->sock_state)) {
+ xs_reset_srcport(transport);
xprt_clear_connecting(xprt);
+ }
clear_bit(XPRT_CLOSING, &xprt->state);
/* Trigger the socket release */
xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
@@ -1722,6 +1737,11 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
xs_update_peer_port(xprt);
}
+static void xs_reset_srcport(struct sock_xprt *transport)
+{
+ transport->srcport = 0;
+}
+
static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
if (transport->srcport == 0 && transport->xprt.reuseport)
@@ -1921,6 +1941,9 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
goto out;
}
+ if (protocol == IPPROTO_TCP)
+ sk_net_refcnt_upgrade(sock->sk);
+
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
if (IS_ERR(filp))
return ERR_CAST(filp);
@@ -2423,6 +2446,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
transport->srcport = 0;
status = -EAGAIN;
break;
+ case -EPERM:
+ /* Happens, for instance, if a BPF program is preventing
+ * the connect. Remap the error so upper layers can better
+ * deal with it.
+ */
+ status = -ECONNREFUSED;
+ fallthrough;
case -EINVAL:
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
@@ -2434,6 +2464,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EHOSTUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
+ case -ENOTCONN:
break;
default:
printk("%s: connect returned unhandled error %d\n",
@@ -2546,7 +2577,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
struct sock_xprt *lower_transport =
container_of(lower_xprt, struct sock_xprt, xprt);
- lower_transport->xprt_err = status ? -EACCES : 0;
+ switch (status) {
+ case 0:
+ case -EACCES:
+ case -ETIMEDOUT:
+ lower_transport->xprt_err = status;
+ break;
+ default:
+ lower_transport->xprt_err = -EACCES;
+ }
complete(&lower_transport->handshake_done);
xprt_put(lower_xprt);
}
@@ -2588,11 +2627,10 @@ static int xs_tls_handshake_sync(struct rpc_xprt *lower_xprt, struct xprtsec_par
rc = wait_for_completion_interruptible_timeout(&lower_transport->handshake_done,
XS_TLS_HANDSHAKE_TO);
if (rc <= 0) {
- if (!tls_handshake_cancel(sk)) {
- if (rc == 0)
- rc = -ETIMEDOUT;
- goto out_put_xprt;
- }
+ tls_handshake_cancel(sk);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ goto out_put_xprt;
}
rc = lower_transport->xprt_err;
@@ -2645,6 +2683,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
.xprtsec = {
.policy = RPC_XPRTSEC_NONE,
},
+ .stats = upper_clnt->cl_stats,
};
unsigned int pflags = current->flags;
struct rpc_clnt *lower_clnt;
@@ -2987,20 +3026,11 @@ static int bc_send_request(struct rpc_rqst *req)
return len;
}
-/*
- * The close routine. Since this is client initiated, we do nothing
- */
-
static void bc_close(struct rpc_xprt *xprt)
{
xprt_disconnect_done(xprt);
}
-/*
- * The xprt destroy routine. Again, because this connection is client
- * initiated, we do nothing
- */
-
static void bc_destroy(struct rpc_xprt *xprt)
{
dprintk("RPC: bc_destroy xprt %p\n", xprt);
@@ -3021,6 +3051,7 @@ static const struct rpc_xprt_ops xs_local_ops = {
.buf_free = rpc_free,
.prepare_request = xs_stream_prepare_request,
.send_request = xs_local_send_request,
+ .abort_send_request = xs_stream_abort_send_request,
.wait_for_reply_request = xprt_wait_for_reply_request_def,
.close = xs_close,
.destroy = xs_destroy,
@@ -3068,6 +3099,7 @@ static const struct rpc_xprt_ops xs_tcp_ops = {
.buf_free = rpc_free,
.prepare_request = xs_stream_prepare_request,
.send_request = xs_tcp_send_request,
+ .abort_send_request = xs_stream_abort_send_request,
.wait_for_reply_request = xprt_wait_for_reply_request_def,
.close = xs_tcp_shutdown,
.destroy = xs_destroy,
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index c9189a970eec..4d5fbacef496 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -244,6 +244,99 @@ static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
return 0;
}
+static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
+ enum switchdev_obj_id obj_id,
+ int err, bool add)
+{
+ const char *action = add ? "add" : "del";
+ const char *reason = "";
+ const char *problem;
+ const char *obj_str;
+
+ switch (obj_id) {
+ case SWITCHDEV_OBJ_ID_UNDEFINED:
+ obj_str = "Undefined object";
+ problem = "Attempted operation is undefined, indicating a possible programming\n"
+ "error.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ obj_str = "VLAN entry";
+ problem = "Failure in VLAN settings on this port might disrupt network\n"
+ "segmentation or traffic isolation, affecting network partitioning.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ obj_str = "Port Multicast Database entry";
+ problem = "Failure in updating the port's Multicast Database could lead to\n"
+ "multicast forwarding issues.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ obj_str = "Host Multicast Database entry";
+ problem = "Failure in updating the host's Multicast Database may impact multicast\n"
+ "group memberships or traffic delivery, affecting multicast\n"
+ "communication.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_MRP:
+ obj_str = "Media Redundancy Protocol configuration for port";
+ problem = "Failure to set MRP ring ID on this port prevents communication with\n"
+ "the specified redundancy ring, resulting in an inability to engage\n"
+ "in MRP-based network operations.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
+ obj_str = "MRP Test Frame Operations for port";
+ problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
+ "assess the ring's operational integrity and fault response, hindering\n"
+ "proactive network management.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+ obj_str = "MRP Ring Role Configuration";
+ problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
+ "disrupting communication for all participants, or isolate the local\n"
+ "system from the ring, hindering its ability to communicate with other\n"
+ "participants.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
+ obj_str = "MRP Ring State Configuration";
+ problem = "Failure to correctly set the MRP ring state can result in network\n"
+ "loops or leave segments without communication. In a Closed state,\n"
+ "it maintains loop prevention by blocking one MRM port, while an Open\n"
+ "state activates in response to failures, changing port states to\n"
+ "preserve network connectivity.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
+ obj_str = "MRP_InTest Frame Generation Configuration";
+ problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
+ "interconnection ring's state, leading to incorrect blocking or\n"
+ "unblocking of the I/C port. This misconfiguration might result\n"
+ "in unintended network loops or isolate critical network segments,\n"
+ "compromising network integrity and reliability.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
+ obj_str = "Interconnection Ring Role Configuration";
+ problem = "Failure in incorrect assignment of interconnection ring roles\n"
+ "(MIM/MIC) can impair the formation of the interconnection rings.\n";
+ break;
+ case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
+ obj_str = "Interconnection Ring State Configuration";
+ problem = "Failure in updating the interconnection ring state can lead in\n"
+ "case of Open state to incorrect blocking or unblocking of the\n"
+ "I/C port, resulting in unintended network loops or isolation\n"
+ "of critical network\n";
+ break;
+ default:
+ obj_str = "Unknown object";
+ problem = "Indicating a possible programming error.\n";
+ }
+
+ switch (err) {
+ case -ENOSPC:
+ reason = "Current HW/SW setup lacks sufficient resources.\n";
+ break;
+ }
+
+ netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
+ action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
+}
+
static void switchdev_port_obj_add_deferred(struct net_device *dev,
const void *data)
{
@@ -254,8 +347,7 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
dev, obj, NULL);
if (err && err != -EOPNOTSUPP)
- netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
- err, obj->id);
+ switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
if (obj->complete)
obj->complete(dev, err, obj->complete_priv);
}
@@ -304,8 +396,7 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
err = switchdev_port_obj_del_now(dev, obj);
if (err && err != -EOPNOTSUPP)
- netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
- err, obj->id);
+ switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
if (obj->complete)
obj->complete(dev, err, obj->complete_priv);
}
@@ -381,7 +472,7 @@ bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
-static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
+static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
/**
* register_switchdev_notifier - Register notifier
@@ -427,17 +518,27 @@ EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
- struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+ struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
+ int err;
- return blocking_notifier_chain_register(chain, nb);
+ rtnl_lock();
+ err = raw_notifier_chain_register(chain, nb);
+ rtnl_unlock();
+
+ return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
- struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+ struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(chain, nb);
+ rtnl_unlock();
- return blocking_notifier_chain_unregister(chain, nb);
+ return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
@@ -445,10 +546,11 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info,
struct netlink_ext_ack *extack)
{
+ ASSERT_RTNL();
info->dev = dev;
info->extack = extack;
- return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
- val, info);
+ return raw_notifier_call_chain(&switchdev_blocking_notif_chain,
+ val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
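The switchdev blocking chain above becomes a raw notifier chain: registration and unregistration take the RTNL lock themselves, and call_switchdev_blocking_notifiers() asserts that its caller already holds it. A minimal sketch of a raw chain serialized by an external lock, with invented names and a pthread mutex standing in for RTNL:

#include <pthread.h>
#include <stddef.h>

struct notifier {
        int (*call)(unsigned long event, void *data);
        struct notifier *next;
};

static struct notifier *chain;          /* head of the raw chain */
static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Registration takes the external lock itself... */
static void chain_register(struct notifier *nb)
{
        pthread_mutex_lock(&chain_lock);
        nb->next = chain;
        chain = nb;
        pthread_mutex_unlock(&chain_lock);
}

/* ...while callers of the chain must already hold it, as the
 * ASSERT_RTNL() in call_switchdev_blocking_notifiers() documents.
 */
static int chain_call(unsigned long event, void *data)
{
        struct notifier *nb;
        int ret = 0;

        /* caller must hold chain_lock */
        for (nb = chain; nb; nb = nb->next)
                ret = nb->call(event, data);
        return ret;
}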
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 051ed5f6fc93..19e8048241ba 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -40,7 +40,7 @@ static int is_seen(struct ctl_table_set *set)
/* Return standard mode bits for table entry. */
static int net_ctl_permissions(struct ctl_table_header *head,
- struct ctl_table *table)
+ const struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
@@ -54,7 +54,6 @@ static int net_ctl_permissions(struct ctl_table_header *head,
}
static void net_ctl_set_ownership(struct ctl_table_header *head,
- struct ctl_table *table,
kuid_t *uid, kgid_t *gid)
{
struct net *net = container_of(head->set, struct net, sysctls);
@@ -128,7 +127,7 @@ static void ensure_safe_net_sysctl(struct net *net, const char *path,
pr_debug("Registering net sysctl (net %p): %s\n", net, path);
ent = table;
- for (size_t i = 0; i < table_size && ent->procname; ent++, i++) {
+ for (size_t i = 0; i < table_size; ent++, i++) {
unsigned long addr;
const char *where;
@@ -166,17 +165,10 @@ struct ctl_table_header *register_net_sysctl_sz(struct net *net,
struct ctl_table *table,
size_t table_size)
{
- int count;
- struct ctl_table *entry;
-
if (!net_eq(net, &init_net))
ensure_safe_net_sysctl(net, path, table, table_size);
- entry = table;
- for (count = 0 ; count < table_size && entry->procname; entry++, count++)
- ;
-
- return __register_sysctl_table(&net->sysctls, path, table, count);
+ return __register_sysctl_table(&net->sysctls, path, table, table_size);
}
EXPORT_SYMBOL_GPL(register_net_sysctl_sz);
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index be1c4003d67d..bb0d71eb02a6 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -32,16 +32,17 @@ config TIPC_MEDIA_UDP
bool "IP/UDP media type support"
depends on TIPC
select NET_UDP_TUNNEL
+ default y
help
Saying Y here will enable support for running TIPC over IP/UDP
- bool
- default y
+
config TIPC_CRYPTO
bool "TIPC encryption support"
depends on TIPC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
+ default y
help
Saying Y here will enable support for TIPC encryption.
All TIPC messages will be encrypted/decrypted by using the currently most
@@ -49,8 +50,6 @@ config TIPC_CRYPTO
entering the TIPC stack.
Key setting from user-space is performed via netlink by a user program
(e.g. the iproute2 'tipc' tool).
- bool
- default y
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index ee49a9f1dd4f..18e1636aa036 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -18,5 +18,5 @@ tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
-
-obj-$(CONFIG_TIPC_DIAG) += diag.o
+obj-$(CONFIG_TIPC_DIAG) += tipc_diag.o
+tipc_diag-y += diag.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 593846d25214..114fef65f92e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
{
struct tipc_msg *hdr, *_hdr;
struct sk_buff_head tmpq;
+ u16 cong_link_cnt = 0;
struct sk_buff *_skb;
- u16 cong_link_cnt;
int rc = 0;
/* Is a cluster supporting with new capabilities ? */
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 878415c43527..ae1ddbf71853 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -163,8 +163,12 @@ static int bearer_name_validate(const char *name,
/* return bearer name components, if necessary */
if (name_parts) {
- strcpy(name_parts->media_name, media_name);
- strcpy(name_parts->if_name, if_name);
+ if (strscpy(name_parts->media_name, media_name,
+ TIPC_MAX_MEDIA_NAME) < 0)
+ return 0;
+ if (strscpy(name_parts->if_name, if_name,
+ TIPC_MAX_IF_NAME) < 0)
+ return 0;
}
return 1;
}
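bearer_name_validate() above switches from strcpy() to strscpy() and fails validation when a media or interface name component would be truncated. strscpy() is kernel-only; a user-space sketch of the same truncation-aware copy can be built on snprintf() (copy_name is an invented helper):

#include <stdbool.h>
#include <stdio.h>

/* Copy @src into @dst (capacity @dstsz) and report whether it fit
 * without truncation, analogous to checking strscpy()'s negative
 * return value.
 */
static bool copy_name(char *dst, size_t dstsz, const char *src)
{
        int n = snprintf(dst, dstsz, "%s", src);

        return n >= 0 && (size_t)n < dstsz;
}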
@@ -322,7 +326,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
if (!b)
return -ENOMEM;
- strcpy(b->name, name);
+ strscpy(b->name, name);
b->media = m;
res = m->enable_media(net, b, attr);
if (res) {
@@ -1079,30 +1083,27 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "Bearer not found");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
- if (err) {
- rtnl_unlock();
- return err;
- }
}
#endif
+out:
rtnl_unlock();
- return 0;
+ return err;
}
int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 7eccd97e0609..7f3fe3401c45 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -72,7 +72,6 @@ struct tipc_node;
struct tipc_bearer;
struct tipc_bc_base;
struct tipc_link;
-struct tipc_name_table;
struct tipc_topsrv;
struct tipc_monitor;
#ifdef CONFIG_TIPC_CRYPTO
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 43c3f1c971b8..c524421ec652 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -2293,8 +2293,8 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
/* Verify the supplied size values */
- if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
- keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
+ if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX ||
+ size != keylen + sizeof(struct tipc_aead_key))) {
pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
goto exit;
}
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index 18733451c9e0..54dde8c4e4d4 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -95,6 +95,7 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
}
static const struct sock_diag_handler tipc_sock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.dump = tipc_sock_diag_handler_dump,
};
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0716eb5c8a31..5c2088a469ce 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -241,13 +241,6 @@ enum {
LINK_SYNCHING = 0xc << 24
};
-/* Link FSM state checking routines
- */
-static int link_is_up(struct tipc_link *l)
-{
- return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
-}
-
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
@@ -274,7 +267,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released,
*/
bool tipc_link_is_up(struct tipc_link *l)
{
- return link_is_up(l);
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
bool tipc_link_peer_is_down(struct tipc_link *l)
@@ -1790,7 +1783,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
rcv_nxt = l->rcv_nxt;
win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
- if (unlikely(!link_is_up(l))) {
+ if (unlikely(!tipc_link_is_up(l))) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
kfree_skb(skb);
@@ -1848,7 +1841,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
struct tipc_link *bcl = l->bc_rcvlink;
struct tipc_msg *hdr;
struct sk_buff *skb;
- bool node_up = link_is_up(bcl);
+ bool node_up = tipc_link_is_up(bcl);
u16 glen = 0, bc_rcvgap = 0;
int dlen = 0;
void *data;
@@ -2163,7 +2156,7 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
if (session != curr_session)
return false;
/* Extra sanity check */
- if (!link_is_up(l) && msg_ack(hdr))
+ if (!tipc_link_is_up(l) && msg_ack(hdr))
return false;
if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
return true;
@@ -2261,7 +2254,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
}
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
- if (mtyp == RESET_MSG || !link_is_up(l))
+ if (mtyp == RESET_MSG || !tipc_link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
@@ -2300,7 +2293,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (msg_probe(hdr))
l->stats.recv_probes++;
- if (!link_is_up(l)) {
+ if (!tipc_link_is_up(l)) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
break;
@@ -2387,7 +2380,7 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
int mtyp = msg_type(hdr);
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
- if (link_is_up(l))
+ if (tipc_link_is_up(l))
return;
if (msg_user(hdr) == BCAST_PROTOCOL) {
@@ -2415,7 +2408,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
int rc = 0;
- if (!link_is_up(l))
+ if (!tipc_link_is_up(l))
return rc;
if (!msg_peer_node_is_up(hdr))
@@ -2475,7 +2468,7 @@ int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
bool unused = false;
int rc = 0;
- if (!link_is_up(r) || !r->bc_peer_is_up)
+ if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
return 0;
if (gap) {
@@ -2873,7 +2866,7 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
l->tolerance = tol;
if (l->bc_rcvlink)
l->bc_rcvlink->tolerance = tol;
- if (link_is_up(l))
+ if (tipc_link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 77a3d016cade..e2f19627e43d 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -149,7 +149,7 @@ static int dom_size(int peers)
while ((i * i) < peers)
i++;
- return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
+ return min(i, MAX_MON_DOMAIN);
}
static void map_set(u64 *up_map, int i, unsigned int v)
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 5c9fd4791c4b..76284fc538eb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- *buf = NULL;
if (skb_has_frag_list(frag) && __skb_linearize(frag))
goto err;
+ *buf = NULL;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (!head)
goto err;
+ /* Ownership of the input skb is either transferred to headskb
+ * or the skb is freed; clear the reference to avoid a bad
+ * access on the error path.
+ */
+ *buf = NULL;
if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
kfree_skb_partial(frag, headstolen);
} else {
@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
*headbuf = NULL;
return 1;
}
- *buf = NULL;
return 0;
err:
kfree_skb(*buf);
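The tipc_buf_append() change above clears *buf at the point where ownership of the fragment passes to the head buffer, so the shared error path cannot free it a second time. A small sketch of that consume-or-free idiom (chain_append and struct buf are invented):

#include <stddef.h>

struct buf {
        struct buf *next;
};

/* Append *fragp to the chain at @head. Once the fragment is linked,
 * clear the caller's pointer so a shared error path cannot free the
 * same buffer a second time.
 */
static int chain_append(struct buf *head, struct buf **fragp)
{
        struct buf *frag = *fragp;

        if (!head || !frag)
                return -1;

        *fragp = NULL;                  /* ownership now belongs to the chain */
        while (head->next)
                head = head->next;
        head->next = frag;
        frag->next = NULL;
        return 0;
}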
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d1180370fdf4..e74940eab3a4 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -949,8 +949,8 @@ void tipc_nametbl_stop(struct net *net)
}
spin_unlock_bh(&tn->nametbl_lock);
- synchronize_net();
- kfree(nt);
+ /* TODO: clear tn->nametbl, implement proper RCU rules ? */
+ kfree_rcu(nt, rcu);
}
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 3bcd9ef8cee3..7ff6eeebaae6 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -90,6 +90,7 @@ struct publication {
/**
* struct name_table - table containing all existing port name publications
+ * @rcu: RCU callback head used for deferred freeing
* @services: name sequence hash lists
* @node_scope: all local publications with node scope
* - used by name_distr during re-init of name table
@@ -102,6 +103,7 @@ struct publication {
* @snd_nxt: next sequence number to be used
*/
struct name_table {
+ struct rcu_head rcu;
struct hlist_head services[TIPC_NAMETBL_SIZE];
struct list_head node_scope;
struct list_head cluster_scope;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3105abe97bb9..500320e5ca47 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -86,8 +86,6 @@ struct tipc_bclink_entry {
* @lock: rwlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
- * @inputq: pointer to input queue containing messages for msg event
- * @namedq: pointer to name table input queue with name table messages
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
* @bc_entry: broadcast link entry
@@ -2107,6 +2105,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
} else {
n = tipc_node_find_by_id(net, ehdr->id);
}
+ skb_dst_force(skb);
tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
if (!skb)
return;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index bb1118d02f95..65dcbb54f55d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -80,7 +80,6 @@ struct sockaddr_pair {
* @phdr: preformatted message header used when sending messages
* @cong_links: list of congested links
* @publications: list of publications for port
- * @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
* @probe_unacked: probe has not received ack yet
@@ -147,8 +146,6 @@ static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
- bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
@@ -660,7 +657,7 @@ static int tipc_release(struct socket *sock)
}
/**
- * __tipc_bind - associate or disassocate TIPC name(s) with a socket
+ * __tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @skaddr: socket address describing name(s) and desired operation
* @alen: size of socket address data structure
@@ -1012,12 +1009,11 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
struct tipc_member *mbr = NULL;
struct net *net = sock_net(sk);
u32 node, port, exclude;
- struct list_head dsts;
+ LIST_HEAD(dsts);
int lookups = 0;
int dstcnt, rc;
bool cong;
- INIT_LIST_HEAD(&dsts);
ua->sa.type = msg_nametype(hdr);
ua->scope = msg_lookup_scope(hdr);
@@ -1164,10 +1160,9 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
- struct list_head dsts;
u32 dstcnt, exclude;
+ LIST_HEAD(dsts);
- INIT_LIST_HEAD(&dsts);
ua->sa.type = msg_nametype(hdr);
ua->scope = msg_lookup_scope(hdr);
exclude = tipc_group_exclude(grp);
@@ -2712,13 +2707,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* tipc_accept - wait for connection request
* @sock: listening socket
* @new_sock: new socket that is to be connected
- * @flags: file-related flags associated with socket
- * @kern: caused by kernel or by userspace?
+ * @arg: arguments for accept
*
* Return: 0 on success, errno otherwise
*/
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
- bool kern)
+static int tipc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg)
{
struct sock *new_sk, *sk = sock->sk;
struct tipc_sock *new_tsock;
@@ -2734,14 +2728,14 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
res = -EINVAL;
goto exit;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
res = tipc_wait_for_accept(sock, timeo);
if (res)
goto exit;
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, arg->kern);
if (res)
goto exit;
security_sk_clone(sock->sk, new_sock->sk);
@@ -3566,11 +3560,8 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
rhashtable_walk_start(iter);
while ((tsk = rhashtable_walk_next(iter)) != NULL) {
if (IS_ERR(tsk)) {
- err = PTR_ERR(tsk);
- if (err == -EAGAIN) {
- err = 0;
+ if (PTR_ERR(tsk) == -EAGAIN)
continue;
- }
break;
}
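
The tipc_accept() hunks above are part of the tree-wide conversion from the old (flags, kern) accept prototype to a single struct proto_accept_arg argument. A rough sketch of how a protocol ->accept() consumes it; the field list is quoted from memory of include/net/sock.h and example_wait_and_graft() is a placeholder, not a real helper:

/* struct proto_accept_arg carries (roughly): flags (O_NONBLOCK etc.),
 * err (error reported back to the core), is_empty, and kern (in-kernel
 * accept) -- verify the exact layout against include/net/sock.h.
 */
static int example_accept(struct socket *sock, struct socket *newsock,
			  struct proto_accept_arg *arg)
{
	/* non-blocking behaviour now comes from arg->flags ... */
	long timeo = sock_rcvtimeo(sock->sk, arg->flags & O_NONBLOCK);

	/* ... and the kernel/userspace origin from arg->kern */
	return example_wait_and_graft(sock, newsock, arg->kern, timeo);
}
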
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 9fb65c988f7f..30d2e06e3d8c 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -91,7 +91,6 @@ static struct ctl_table tipc_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
- {}
};
int tipc_register_sysctl(void)
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
index 04af83f0500c..865142ed0ab4 100644
--- a/net/tipc/trace.h
+++ b/net/tipc/trace.h
@@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(tipc_skb_class,
),
TP_fast_assign(
- __assign_str(header, header);
+ __assign_str(header);
tipc_skb_dump(skb, more, __get_str(buf));
),
@@ -172,7 +172,7 @@ DECLARE_EVENT_CLASS(tipc_list_class,
),
TP_fast_assign(
- __assign_str(header, header);
+ __assign_str(header);
tipc_list_dump(list, more, __get_str(buf));
),
@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(tipc_sk_class,
),
TP_fast_assign(
- __assign_str(header, header);
+ __assign_str(header);
__entry->portid = tipc_sock_get_portid(sk);
tipc_sk_dump(sk, dqueues, __get_str(buf));
if (skb)
@@ -254,7 +254,7 @@ DECLARE_EVENT_CLASS(tipc_link_class,
),
TP_fast_assign(
- __assign_str(header, header);
+ __assign_str(header);
memcpy(__entry->name, tipc_link_name(l), TIPC_MAX_LINK_NAME);
tipc_link_dump(l, dqueues, __get_str(buf));
),
@@ -337,7 +337,7 @@ DECLARE_EVENT_CLASS(tipc_node_class,
),
TP_fast_assign(
- __assign_str(header, header);
+ __assign_str(header);
__entry->addr = tipc_node_get_addr(n);
tipc_node_dump(n, more, __get_str(buf));
),
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(tipc_fsm_class,
),
TP_fast_assign(
- __assign_str(name, name);
+ __assign_str(name);
__entry->os = os;
__entry->ns = ns;
__entry->evt = evt;
@@ -409,8 +409,8 @@ TRACE_EVENT(tipc_l2_device_event,
),
TP_fast_assign(
- __assign_str(dev_name, dev->name);
- __assign_str(b_name, b->name);
+ __assign_str(dev_name);
+ __assign_str(b_name);
__entry->evt = evt;
__entry->b_up = test_bit(0, &b->up);
__entry->carrier = netif_carrier_ok(dev);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index f892b0903dba..108a4cc2e001 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -135,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
else if (ntohs(ua->proto) == ETH_P_IPV6)
snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
- else
+ else {
pr_err("Invalid UDP media address\n");
+ return 1;
+ }
+
return 0;
}
@@ -174,7 +177,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
local_bh_disable();
ndst = dst_cache_get(cache);
if (dst->proto == htons(ETH_P_IP)) {
- struct rtable *rt = (struct rtable *)ndst;
+ struct rtable *rt = dst_rtable(ndst);
if (!rt) {
struct flowi4 fl = {
@@ -804,6 +807,7 @@ static void cleanup_bearer(struct work_struct *work)
{
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
struct udp_replicast *rcast, *tmp;
+ struct tipc_net *tn;
list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
dst_cache_destroy(&rcast->dst_cache);
@@ -811,10 +815,14 @@ static void cleanup_bearer(struct work_struct *work)
kfree_rcu(rcast, rcu);
}
- atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ tn = tipc_net(sock_net(ub->ubsock->sk));
+
dst_cache_destroy(&ub->rcast.dst_cache);
udp_tunnel_sock_release(ub->ubsock);
+
+ /* Note: could use a call_rcu() to avoid another synchronize_net() */
synchronize_net();
+ atomic_dec(&tn->wq_count);
kfree(ub);
}
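
The cleanup_bearer() hunk moves the wq_count decrement to after synchronize_net(), so whoever waits on wq_count (bearer disable / netns exit) cannot proceed while RCU readers may still reference the socket. The in-line note suggests call_rcu() could replace that second synchronize_net(); one possible shape of that idea, where both the rcu_head member and the stashed tipc_net pointer are assumptions of this sketch, not part of the patch:

/* Hypothetical: free the bearer from an RCU callback instead of
 * blocking in synchronize_net(). Assumes struct udp_bearer gains a
 * struct rcu_head and a tipc_net back-pointer saved before
 * udp_tunnel_sock_release().
 */
static void udp_bearer_free_rcu(struct rcu_head *head)
{
	struct udp_bearer *ub = container_of(head, struct udp_bearer, rcu);

	/* Runs only after a grace period: now the waiter that checks
	 * wq_count may make progress.
	 */
	atomic_dec(&ub->tn->wq_count);
	kfree(ub);
}
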
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
index 0cdc1f7b6b08..ce8d56a19187 100644
--- a/net/tls/Kconfig
+++ b/net/tls/Kconfig
@@ -20,6 +20,7 @@ config TLS
config TLS_DEVICE
bool "Transport Layer Security HW offload"
depends on TLS
+ select SKB_DECRYPTED
select SOCK_VALIDATE_XMIT
select SOCK_RX_QUEUE_MAPPING
default n
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 762f424ff2d5..774859b63f0d 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -145,7 +145,8 @@ void tls_err_abort(struct sock *sk, int err);
int init_prot_info(struct tls_prot_info *prot,
const struct tls_crypto_info *crypto_info,
const struct tls_cipher_desc *cipher_desc);
-int tls_set_sw_offload(struct sock *sk, int tx);
+int tls_set_sw_offload(struct sock *sk, int tx,
+ struct tls_crypto_info *new_crypto_info);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
@@ -215,7 +216,7 @@ static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
{
- return ctx->strp.msg_ready;
+ return READ_ONCE(ctx->strp.msg_ready);
}
static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index bf8ed36b1ad6..e50b6e71df13 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -37,6 +37,7 @@
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>
+#include <linux/skbuff_ref.h>
#include "tls.h"
#include "trace.h"
@@ -230,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
struct net_device *netdev;
- struct sk_buff *skb;
int err = 0;
u8 *rcd_sn;
- skb = tcp_write_queue_tail(sk);
- if (skb)
- TCP_SKB_CB(skb)->eor = 1;
-
+ tcp_write_collapse_fence(sk);
rcd_sn = tls_ctx->tx.rec_seq;
trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
@@ -1066,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk)
struct tls_prot_info *prot;
struct net_device *netdev;
struct tls_context *ctx;
- struct sk_buff *skb;
char *iv, *rec_seq;
int rc;
@@ -1137,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk)
* SKBs where only part of the payload needs to be encrypted.
* So mark the last skb in the write queue as end of record.
*/
- skb = tcp_write_queue_tail(sk);
- if (skb)
- TCP_SKB_CB(skb)->eor = 1;
+ tcp_write_collapse_fence(sk);
/* Avoid offloading if the device is down
* We don't want to offload new flows after
@@ -1233,7 +1227,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
context->resync_nh_reset = 1;
ctx->priv_ctx_rx = context;
- rc = tls_set_sw_offload(sk, 0);
+ rc = tls_set_sw_offload(sk, 0, NULL);
if (rc)
goto release_ctx;
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 4e7228f275fa..f9e3d3d90dcf 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -33,6 +33,7 @@
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
+#include <linux/skbuff_ref.h>
#include "tls.h"
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b4674f03d71a..99ca4465f702 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -423,9 +423,10 @@ static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
ctx = tls_sw_ctx_rx(tls_ctx);
psock = sk_psock_get(sk);
- if (skb_queue_empty_lockless(&ctx->rx_list) &&
- !tls_strp_msg_ready(ctx) &&
- sk_psock_queue_empty(psock))
+ if ((skb_queue_empty_lockless(&ctx->rx_list) &&
+ !tls_strp_msg_ready(ctx) &&
+ sk_psock_queue_empty(psock)) ||
+ READ_ONCE(ctx->key_update_pending))
mask &= ~(EPOLLIN | EPOLLRDNORM);
if (psock)
@@ -612,10 +613,13 @@ static int validate_crypto_info(const struct tls_crypto_info *crypto_info,
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
unsigned int optlen, int tx)
{
- struct tls_crypto_info *crypto_info;
- struct tls_crypto_info *alt_crypto_info;
+ struct tls_crypto_info *crypto_info, *alt_crypto_info;
+ struct tls_crypto_info *old_crypto_info = NULL;
struct tls_context *ctx = tls_get_ctx(sk);
const struct tls_cipher_desc *cipher_desc;
+ union tls_crypto_context *crypto_ctx;
+ union tls_crypto_context tmp = {};
+ bool update = false;
int rc = 0;
int conf;
@@ -623,16 +627,30 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
return -EINVAL;
if (tx) {
- crypto_info = &ctx->crypto_send.info;
+ crypto_ctx = &ctx->crypto_send;
alt_crypto_info = &ctx->crypto_recv.info;
} else {
- crypto_info = &ctx->crypto_recv.info;
+ crypto_ctx = &ctx->crypto_recv;
alt_crypto_info = &ctx->crypto_send.info;
}
- /* Currently we don't support set crypto info more than one time */
- if (TLS_CRYPTO_INFO_READY(crypto_info))
- return -EBUSY;
+ crypto_info = &crypto_ctx->info;
+
+ if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+ /* Currently we only support setting crypto info more
+ * than one time for TLS 1.3
+ */
+ if (crypto_info->version != TLS_1_3_VERSION) {
+ TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
+ : LINUX_MIB_TLSRXREKEYERROR);
+ return -EBUSY;
+ }
+
+ update = true;
+ old_crypto_info = crypto_info;
+ crypto_info = &tmp.info;
+ crypto_ctx = &tmp;
+ }
rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
if (rc) {
@@ -640,7 +658,14 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
goto err_crypto_info;
}
- rc = validate_crypto_info(crypto_info, alt_crypto_info);
+ if (update) {
+ /* Ensure that TLS version and ciphers are not modified */
+ if (crypto_info->version != old_crypto_info->version ||
+ crypto_info->cipher_type != old_crypto_info->cipher_type)
+ rc = -EINVAL;
+ } else {
+ rc = validate_crypto_info(crypto_info, alt_crypto_info);
+ }
if (rc)
goto err_crypto_info;
@@ -670,11 +695,17 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
} else {
- rc = tls_set_sw_offload(sk, 1);
+ rc = tls_set_sw_offload(sk, 1,
+ update ? crypto_info : NULL);
if (rc)
goto err_crypto_info;
- TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
- TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
+
+ if (update) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXREKEYOK);
+ } else {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
+ }
conf = TLS_SW;
}
} else {
@@ -684,14 +715,21 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
} else {
- rc = tls_set_sw_offload(sk, 0);
+ rc = tls_set_sw_offload(sk, 0,
+ update ? crypto_info : NULL);
if (rc)
goto err_crypto_info;
- TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
- TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+
+ if (update) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYOK);
+ } else {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ }
conf = TLS_SW;
}
- tls_sw_strparser_arm(sk, ctx);
+ if (!update)
+ tls_sw_strparser_arm(sk, ctx);
}
if (tx)
@@ -699,6 +737,10 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
else
ctx->rx_conf = conf;
update_sk_prot(sk, ctx);
+
+ if (update)
+ return 0;
+
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
@@ -710,7 +752,11 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
return 0;
err_crypto_info:
- memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
+ if (update) {
+ TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
+ : LINUX_MIB_TLSRXREKEYERROR);
+ }
+ memzero_explicit(crypto_ctx, sizeof(*crypto_ctx));
return rc;
}
@@ -816,9 +862,17 @@ struct tls_context *tls_ctx_create(struct sock *sk)
return NULL;
mutex_init(&ctx->tx_lock);
- rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = READ_ONCE(sk->sk_prot);
ctx->sk = sk;
+ /* Release semantic of rcu_assign_pointer() ensures that
+ * ctx->sk_proto is visible before changing sk->sk_prot in
+ * update_sk_prot(), and prevents reading uninitialized value in
+ * tls_{getsockopt, setsockopt}. Note that we do not need a
+ * read barrier in tls_{getsockopt,setsockopt} as there is an
+ * address dependency between sk->sk_proto->{getsockopt,setsockopt}
+ * and ctx->sk_proto.
+ */
+ rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
return ctx;
}
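
The do_tls_setsockopt_conf() changes above allow setsockopt(SOL_TLS, TLS_TX/TLS_RX) to be called again on a TLS 1.3 socket, installing new key material after a KeyUpdate instead of returning -EBUSY (version and cipher must stay the same, and the new rekey MIB counters are bumped on success or failure). A hedged userspace sketch of a TX rekey, assuming the application's TLS library has already sent the KeyUpdate and derived the next traffic secret:

#include <string.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Install fresh TX key material on an established kTLS socket.
 * key is the 16-byte AES key, iv12 the 12-byte per-record IV
 * (kTLS splits it into salt + iv).
 */
static int ktls_tx_rekey(int fd, const unsigned char *key,
			 const unsigned char *iv12)
{
	struct tls12_crypto_info_aes_gcm_128 info = {
		.info.version = TLS_1_3_VERSION,
		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
	};

	memcpy(info.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(info.salt, iv12, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(info.iv, iv12 + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
	/* the record sequence restarts from zero under the new key */
	memset(info.rec_seq, 0, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* Before this series a second call failed with EBUSY; now it is
	 * accepted for TLS 1.3 as long as version and cipher match.
	 */
	return setsockopt(fd, SOL_TLS, TLS_TX, &info, sizeof(info));
}
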
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
index 68982728f620..367666aa07b8 100644
--- a/net/tls/tls_proc.c
+++ b/net/tls/tls_proc.c
@@ -22,6 +22,11 @@ static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
SNMP_MIB_ITEM("TlsDecryptRetry", LINUX_MIB_TLSDECRYPTRETRY),
SNMP_MIB_ITEM("TlsRxNoPadViolation", LINUX_MIB_TLSRXNOPADVIOL),
+ SNMP_MIB_ITEM("TlsRxRekeyOk", LINUX_MIB_TLSRXREKEYOK),
+ SNMP_MIB_ITEM("TlsRxRekeyError", LINUX_MIB_TLSRXREKEYERROR),
+ SNMP_MIB_ITEM("TlsTxRekeyOk", LINUX_MIB_TLSTXREKEYOK),
+ SNMP_MIB_ITEM("TlsTxRekeyError", LINUX_MIB_TLSTXREKEYERROR),
+ SNMP_MIB_ITEM("TlsRxRekeyReceived", LINUX_MIB_TLSRXREKEYRECEIVED),
SNMP_MIB_SENTINEL
};
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index ca1e0e198ceb..77e33e1e340e 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
@@ -360,7 +361,7 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
if (strp->stm.full_len && strp->stm.full_len == skb->len) {
desc->count = 0;
- strp->msg_ready = 1;
+ WRITE_ONCE(strp->msg_ready, 1);
tls_rx_msg_ready(strp);
}
@@ -528,7 +529,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
if (!tls_strp_check_queue_ok(strp))
return tls_strp_read_copy(strp, false);
- strp->msg_ready = 1;
+ WRITE_ONCE(strp->msg_ready, 1);
tls_rx_msg_ready(strp);
return 0;
@@ -580,7 +581,7 @@ void tls_strp_msg_done(struct tls_strparser *strp)
else
tls_strp_flush_anchor_copy(strp);
- strp->msg_ready = 0;
+ WRITE_ONCE(strp->msg_ready, 0);
memset(&strp->stm, 0, sizeof(strp->stm));
tls_strp_check_rcv(strp);
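
strp->msg_ready is now written with WRITE_ONCE() wherever it changes, pairing with the READ_ONCE() added to tls_strp_msg_ready() so the lockless check made from tls_sk_poll() is a proper single-copy access (no load/store tearing, and KCSAN-clean). The pattern in isolation, detached from the TLS structures:

/* Illustrative writer/lockless-reader pairing for a simple flag. */
static int example_flag;

static void example_publish(void)
{
	WRITE_ONCE(example_flag, 1);	/* marked store, never torn */
}

static bool example_seen(void)
{
	return READ_ONCE(example_flag);	/* paired marked load */
}

Note that this only makes the race benign and visible to tooling; any ordering against other data still has to come from the surrounding locking (here, the strparser work and the socket lock).
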
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 211f57164cb6..914d4e1516a3 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -458,7 +458,7 @@ int tls_tx_records(struct sock *sk, int flags)
tx_err:
if (rc < 0 && rc != -EAGAIN)
- tls_err_abort(sk, -EBADMSG);
+ tls_err_abort(sk, rc);
return rc;
}
@@ -1201,7 +1201,7 @@ trim_sgl:
if (!num_async) {
goto send_end;
- } else if (num_zc) {
+ } else if (num_zc || eor) {
int err;
/* Wait for pending encryptions to get completed */
@@ -1314,6 +1314,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
int ret = 0;
long timeo;
+ /* a rekey is pending, let userspace deal with it */
+ if (unlikely(ctx->key_update_pending))
+ return -EKEYEXPIRED;
+
timeo = sock_rcvtimeo(sk, nonblock);
while (!tls_strp_msg_ready(ctx)) {
@@ -1720,6 +1724,36 @@ tls_decrypt_device(struct sock *sk, struct msghdr *msg,
return 1;
}
+static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
+ struct sk_buff *skb)
+{
+ const struct strp_msg *rxm = strp_msg(skb);
+ const struct tls_msg *tlm = tls_msg(skb);
+ char hs_type;
+ int err;
+
+ if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
+ return 0;
+
+ if (rxm->full_len < 1)
+ return 0;
+
+ err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
+ if (err < 0) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return err;
+ }
+
+ if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
+ struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;
+
+ WRITE_ONCE(rx_ctx->key_update_pending, true);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
+ }
+
+ return 0;
+}
+
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
struct tls_decrypt_arg *darg)
{
@@ -1739,7 +1773,7 @@ static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
- return 0;
+ return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
}
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
@@ -1976,10 +2010,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (unlikely(flags & MSG_ERRQUEUE))
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
- psock = sk_psock_get(sk);
err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
if (err < 0)
return err;
+ psock = sk_psock_get(sk);
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
@@ -2147,17 +2181,19 @@ recv_end:
if (ret) {
if (err >= 0 || err == -EINPROGRESS)
err = ret;
- decrypted = 0;
goto end;
}
/* Drain records from the rx_list & copy if required */
- if (is_peek || is_kvec)
+ if (is_peek)
err = process_rx_list(ctx, msg, &control, copied + peeked,
decrypted - peeked, is_peek, NULL);
else
err = process_rx_list(ctx, msg, &control, 0,
async_copy_bytes, is_peek, NULL);
+
+ /* we could have copied less than we wanted, and possibly nothing */
+ decrypted += max(err, 0) - async_copy_bytes;
}
copied += decrypted;
@@ -2682,12 +2718,22 @@ int init_prot_info(struct tls_prot_info *prot,
return 0;
}
-int tls_set_sw_offload(struct sock *sk, int tx)
+static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
{
+ struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
+
+ WRITE_ONCE(ctx->key_update_pending, false);
+ /* wake-up pre-existing poll() */
+ ctx->saved_data_ready(sk);
+}
+
+int tls_set_sw_offload(struct sock *sk, int tx,
+ struct tls_crypto_info *new_crypto_info)
+{
+ struct tls_crypto_info *crypto_info, *src_crypto_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
const struct tls_cipher_desc *cipher_desc;
- struct tls_crypto_info *crypto_info;
char *iv, *rec_seq, *key, *salt;
struct cipher_context *cctx;
struct tls_prot_info *prot;
@@ -2699,44 +2745,47 @@ int tls_set_sw_offload(struct sock *sk, int tx)
ctx = tls_get_ctx(sk);
prot = &ctx->prot_info;
- if (tx) {
- ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
- if (!ctx->priv_ctx_tx)
- return -ENOMEM;
+ /* new_crypto_info != NULL means rekey */
+ if (!new_crypto_info) {
+ if (tx) {
+ ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
+ if (!ctx->priv_ctx_tx)
+ return -ENOMEM;
+ } else {
+ ctx->priv_ctx_rx = init_ctx_rx(ctx);
+ if (!ctx->priv_ctx_rx)
+ return -ENOMEM;
+ }
+ }
+ if (tx) {
sw_ctx_tx = ctx->priv_ctx_tx;
crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
} else {
- ctx->priv_ctx_rx = init_ctx_rx(ctx);
- if (!ctx->priv_ctx_rx)
- return -ENOMEM;
-
sw_ctx_rx = ctx->priv_ctx_rx;
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
aead = &sw_ctx_rx->aead_recv;
}
- cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ src_crypto_info = new_crypto_info ?: crypto_info;
+
+ cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
if (!cipher_desc) {
rc = -EINVAL;
goto free_priv;
}
- rc = init_prot_info(prot, crypto_info, cipher_desc);
+ rc = init_prot_info(prot, src_crypto_info, cipher_desc);
if (rc)
goto free_priv;
- iv = crypto_info_iv(crypto_info, cipher_desc);
- key = crypto_info_key(crypto_info, cipher_desc);
- salt = crypto_info_salt(crypto_info, cipher_desc);
- rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
-
- memcpy(cctx->iv, salt, cipher_desc->salt);
- memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
- memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
+ iv = crypto_info_iv(src_crypto_info, cipher_desc);
+ key = crypto_info_key(src_crypto_info, cipher_desc);
+ salt = crypto_info_salt(src_crypto_info, cipher_desc);
+ rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
if (!*aead) {
*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
@@ -2749,20 +2798,30 @@ int tls_set_sw_offload(struct sock *sk, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
+ /* setkey is the last operation that could fail during a
+ * rekey. if it succeeds, we can start modifying the
+ * context.
+ */
rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
- if (rc)
- goto free_aead;
+ if (rc) {
+ if (new_crypto_info)
+ goto out;
+ else
+ goto free_aead;
+ }
- rc = crypto_aead_setauthsize(*aead, prot->tag_size);
- if (rc)
- goto free_aead;
+ if (!new_crypto_info) {
+ rc = crypto_aead_setauthsize(*aead, prot->tag_size);
+ if (rc)
+ goto free_aead;
+ }
- if (sw_ctx_rx) {
+ if (!tx && !new_crypto_info) {
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
tls_update_rx_zc_capable(ctx);
sw_ctx_rx->async_capable =
- crypto_info->version != TLS_1_3_VERSION &&
+ src_crypto_info->version != TLS_1_3_VERSION &&
!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
rc = tls_strp_init(&sw_ctx_rx->strp, sk);
@@ -2770,18 +2829,33 @@ int tls_set_sw_offload(struct sock *sk, int tx)
goto free_aead;
}
+ memcpy(cctx->iv, salt, cipher_desc->salt);
+ memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
+ memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
+
+ if (new_crypto_info) {
+ unsafe_memcpy(crypto_info, new_crypto_info,
+ cipher_desc->crypto_info,
+ /* size was checked in do_tls_setsockopt_conf */);
+ memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
+ if (!tx)
+ tls_finish_key_update(sk, ctx);
+ }
+
goto out;
free_aead:
crypto_free_aead(*aead);
*aead = NULL;
free_priv:
- if (tx) {
- kfree(ctx->priv_ctx_tx);
- ctx->priv_ctx_tx = NULL;
- } else {
- kfree(ctx->priv_ctx_rx);
- ctx->priv_ctx_rx = NULL;
+ if (!new_crypto_info) {
+ if (tx) {
+ kfree(ctx->priv_ctx_tx);
+ ctx->priv_ctx_tx = NULL;
+ } else {
+ kfree(ctx->priv_ctx_rx);
+ ctx->priv_ctx_rx = NULL;
+ }
}
out:
return rc;
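
On the receive side, tls_check_pending_rekey() flags an incoming KeyUpdate handshake record, after which tls_rx_rec_wait() fails with -EKEYEXPIRED (and poll masks EPOLLIN) until userspace installs the peer's new key via setsockopt(SOL_TLS, TLS_RX, ...), at which point tls_finish_key_update() wakes any poller. A sketch of the read loop an application might use; ktls_rx_rekey() is a hypothetical helper mirroring the TX sketch earlier, expected to consume the KeyUpdate record, derive the next receive secret and call setsockopt(TLS_RX):

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

int ktls_rx_rekey(int fd);	/* hypothetical helper, see the TX sketch */

/* Read application data while tolerating a peer-initiated KeyUpdate. */
static ssize_t tls_read(int fd, void *buf, size_t len)
{
	for (;;) {
		ssize_t n = recv(fd, buf, len, 0);

		if (n >= 0 || errno != EKEYEXPIRED)
			return n;

		if (ktls_rx_rekey(fd) < 0)	/* install the new RX key */
			return -1;
	}
}
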
diff --git a/net/tls/trace.h b/net/tls/trace.h
index 9ba5f600ea43..2d8ce4ff3265 100644
--- a/net/tls/trace.h
+++ b/net/tls/trace.h
@@ -7,7 +7,7 @@
#if !defined(_TLS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TLS_TRACE_H_
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/tracepoint.h>
struct sock;
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 28b232f281ab..6f1783c1659b 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -16,15 +16,12 @@ config UNIX
Say Y unless you know what you are doing.
-config UNIX_SCM
- bool
- depends on UNIX
- default y
-
config AF_UNIX_OOB
- bool
+ bool "UNIX: out-of-bound messages"
depends on UNIX
default y
+ help
+ Support for MSG_OOB in UNIX domain sockets. If unsure, say Y.
config UNIX_DIAG
tristate "UNIX: socket monitoring interface"
diff --git a/net/unix/Makefile b/net/unix/Makefile
index 20491825b4d0..4ddd125c4642 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o
obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
unix_diag-y := diag.o
-
-obj-$(CONFIG_UNIX_SCM) += scm.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0748e7ea5210..f0e613d97664 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,8 +118,6 @@
#include <linux/btf_ids.h>
#include <linux/bpf-cgroup.h>
-#include "scm.h"
-
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
@@ -128,6 +126,81 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
* hash table is protected with spinlock.
* each socket state is protected by separate spinlock.
*/
+#ifdef CONFIG_PROVE_LOCKING
+#define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r)))
+
+static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
+ const struct lockdep_map *b)
+{
+ return cmp_ptr(a, b);
+}
+
+static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct unix_sock *a, *b;
+
+ a = container_of(_a, struct unix_sock, lock.dep_map);
+ b = container_of(_b, struct unix_sock, lock.dep_map);
+
+ if (a->sk.sk_state == TCP_LISTEN) {
+ /* unix_stream_connect(): Before the 2nd unix_state_lock(),
+ *
+ * 1. a is TCP_LISTEN.
+ * 2. b is not a.
+ * 3. concurrent connect(b -> a) must fail.
+ *
+ * Except for 2. & 3., the b's state can be any possible
+ * value due to concurrent connect() or listen().
+ *
+ * 2. is detected in debug_spin_lock_before(), and 3. cannot
+ * be expressed as lock_cmp_fn.
+ */
+ switch (b->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ case TCP_LISTEN:
+ return -1;
+ default:
+ /* Invalid case. */
+ return 0;
+ }
+ }
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk.sk_state == TCP_LISTEN) {
+		switch (a->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+
+ /* unix_state_double_lock(): ascending address order. */
+ return cmp_ptr(a, b);
+}
+
+static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct sock *a, *b;
+
+ a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
+ b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
+
+ /* unix_collect_skb(): listener -> embryo order. */
+ if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
+ return -1;
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
+ return 1;
+
+ return 0;
+}
+#endif
static unsigned int unix_unbound_hash(struct sock *sk)
{
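
The lock_cmp_fn helpers added above let lockdep validate nested acquisition of same-class locks without the _nested()/subclass annotations that later hunks remove: with CONFIG_PROVE_LOCKING, lock_set_cmp_fn() attaches a comparison function to the lock, and (as I read the lockdep API) a negative return means the first lock may legitimately be taken before the second. A minimal illustrative registration, with made-up names:

/* Sketch: allow lockdep to accept address-ordered nesting of two locks
 * of the same class, mirroring unix_table_double_lock()'s usage.
 */
#ifdef CONFIG_PROVE_LOCKING
static int example_lock_cmp_fn(const struct lockdep_map *a,
			       const struct lockdep_map *b)
{
	return (a > b) - (a < b);	/* ascending address order */
}
#endif

static void example_lock_init(spinlock_t *lock)
{
	spin_lock_init(lock);
	/* discards its arguments when PROVE_LOCKING is disabled */
	lock_set_cmp_fn(lock, example_lock_cmp_fn, NULL);
}
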
@@ -170,7 +243,7 @@ static void unix_table_double_lock(struct net *net,
swap(hash1, hash2);
spin_lock(&net->unx.table.locks[hash1]);
- spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
+ spin_lock(&net->unx.table.locks[hash2]);
}
static void unix_table_double_unlock(struct net *net,
@@ -213,25 +286,14 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
}
#endif /* CONFIG_SECURITY_NETWORK */
-static inline int unix_our_peer(struct sock *sk, struct sock *osk)
-{
- return unix_peer(osk) == sk;
-}
-
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
- return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
-}
-
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ return !unix_peer(osk) || unix_peer(osk) == sk;
}
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -532,10 +594,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
return 0;
}
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
{
- return sk->sk_state != TCP_LISTEN &&
- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return state != TCP_LISTEN &&
+ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}
static void unix_write_space(struct sock *sk)
@@ -543,12 +605,12 @@ static void unix_write_space(struct sock *sk)
struct socket_wq *wq;
rcu_read_lock();
- if (unix_writable(sk)) {
+ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
@@ -560,7 +622,9 @@ static void unix_write_space(struct sock *sk)
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
if (!skb_queue_empty(&sk->sk_receive_queue)) {
- skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge_reason(&sk->sk_receive_queue,
+ SKB_DROP_REASON_UNIX_DISCONNECT);
+
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of bidirectional dgram pipe is disconnected,
@@ -572,14 +636,13 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk_error_report(other);
}
}
- other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
- skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);
DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
@@ -619,7 +682,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
@@ -627,10 +690,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
- }
+ u->oob_skb = NULL;
#endif
wake_up_interruptible_all(&u->peer_wait);
@@ -640,7 +700,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -656,9 +716,9 @@ static void unix_release_sock(struct sock *sk, int embrion)
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
- /* passed fds are erased in the kfree_skb hook */
- UNIXCB(skb).consumed = skb->len;
- kfree_skb(skb);
+
+ /* passed fds are erased in the kfree_skb hook */
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
}
if (path.dentry)
@@ -685,14 +745,19 @@ static void unix_release_sock(struct sock *sk, int embrion)
static void init_peercred(struct sock *sk)
{
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+}
+
+static void update_peercred(struct sock *sk)
+{
const struct cred *old_cred;
struct pid *old_pid;
spin_lock(&sk->sk_peer_lock);
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(task_tgid(current));
- sk->sk_peer_cred = get_current_cred();
+ init_peercred(sk);
spin_unlock(&sk->sk_peer_lock);
put_pid(old_pid);
@@ -701,26 +766,12 @@ static void init_peercred(struct sock *sk)
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- const struct cred *old_cred;
- struct pid *old_pid;
+ lockdep_assert_held(&unix_sk(peersk)->lock);
- if (sk < peersk) {
- spin_lock(&sk->sk_peer_lock);
- spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock(&peersk->sk_peer_lock);
- spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- }
- old_pid = sk->sk_peer_pid;
- old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
-
spin_unlock(&sk->sk_peer_lock);
- spin_unlock(&peersk->sk_peer_lock);
-
- put_pid(old_pid);
- put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
@@ -733,7 +784,7 @@ static int unix_listen(struct socket *sock, int backlog)
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
- if (!u->addr)
+ if (!READ_ONCE(u->addr))
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
@@ -741,9 +792,10 @@ static int unix_listen(struct socket *sock, int backlog)
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
- sk->sk_state = TCP_LISTEN;
+ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
/* set credentials so connect can copy them */
- init_peercred(sk);
+ update_peercred(sk);
err = 0;
out_unlock:
@@ -757,7 +809,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int, bool);
+static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
@@ -978,14 +1030,17 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
sk->sk_destruct = unix_sock_destructor;
- u = unix_sk(sk);
+ lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
+
+ u = unix_sk(sk);
+ u->listener = NULL;
+ u->vertex = NULL;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- atomic_long_set(&u->inflight, 0);
- INIT_LIST_HEAD(&u->link);
+ lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
@@ -1133,8 +1188,8 @@ static struct sock *unix_find_other(struct net *net,
static int unix_autobind(struct sock *sk)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
@@ -1157,6 +1212,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ old_hash = sk->sk_hash;
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
@@ -1197,8 +1253,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
@@ -1236,6 +1292,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
+ old_hash = sk->sk_hash;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
@@ -1263,8 +1320,8 @@ out:
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1282,6 +1339,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
+ old_hash = sk->sk_hash;
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
@@ -1331,11 +1389,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
unix_state_lock(sk1);
return;
}
+
if (sk1 > sk2)
swap(sk1, sk2);
unix_state_lock(sk1);
- unix_state_lock_nested(sk2, U_LOCK_SECOND);
+ unix_state_lock(sk2);
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1371,7 +1430,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
- !unix_sk(sk)->addr) {
+ !READ_ONCE(unix_sk(sk)->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1401,7 +1460,8 @@ restart:
if (err)
goto out_unlock;
- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1418,13 +1478,20 @@ restart:
unix_peer(sk) = other;
if (!other)
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
- if (other != old_peer)
+ if (other != old_peer) {
unix_dgram_disconnected(sk, old_peer);
+
+ unix_state_lock(old_peer);
+ if (!unix_peer(old_peer))
+ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+ unix_state_unlock(old_peer);
+ }
+
sock_put(old_peer);
} else {
unix_peer(sk) = other;
@@ -1470,9 +1537,9 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
+ unsigned char state;
long timeo;
int err;
- int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1483,7 +1550,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1492,35 +1560,32 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
- If we will make it after state is locked,
- we will have to recheck all again in any case.
+ * If we will make it after state is locked,
+ * we will have to recheck all again in any case.
*/
/* create new sock for complete connection */
newsk = unix_create1(net, NULL, 0, sock->type);
if (IS_ERR(newsk)) {
err = PTR_ERR(newsk);
- newsk = NULL;
goto out;
}
- err = -ENOMEM;
-
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
- if (skb == NULL)
- goto out;
+ if (!skb) {
+ err = -ENOMEM;
+ goto out_free_sk;
+ }
restart:
/* Find listening sock. */
other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
- other = NULL;
- goto out;
+ goto out_free_skb;
}
- /* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
@@ -1530,59 +1595,43 @@ restart:
goto restart;
}
- err = -ECONNREFUSED;
- if (other->sk_state != TCP_LISTEN)
- goto out_unlock;
- if (other->sk_shutdown & RCV_SHUTDOWN)
+ if (other->sk_state != TCP_LISTEN ||
+ other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -ECONNREFUSED;
goto out_unlock;
+ }
- if (unix_recvq_full(other)) {
- err = -EAGAIN;
- if (!timeo)
+ if (unix_recvq_full_lockless(other)) {
+ if (!timeo) {
+ err = -EAGAIN;
goto out_unlock;
+ }
timeo = unix_wait_for_peer(other, timeo);
+ sock_put(other);
err = sock_intr_errno(timeo);
if (signal_pending(current))
- goto out;
- sock_put(other);
+ goto out_free_skb;
+
goto restart;
}
- /* Latch our state.
-
- It is tricky place. We need to grab our state lock and cannot
- drop lock on peer. It is dangerous because deadlock is
- possible. Connect to self case and simultaneous
- attempt to connect are eliminated by checking socket
- state. other is TCP_LISTEN, if sk is TCP_LISTEN we
- check this before attempt to grab lock.
-
- Well, and we have to recheck the state after socket locked.
+ /* self connect and simultaneous connect are eliminated
+ * by rejecting TCP_LISTEN socket to avoid deadlock.
*/
- st = sk->sk_state;
-
- switch (st) {
- case TCP_CLOSE:
- /* This is ok... continue with connect */
- break;
- case TCP_ESTABLISHED:
- /* Socket is already connected */
- err = -EISCONN;
- goto out_unlock;
- default:
- err = -EINVAL;
+ state = READ_ONCE(sk->sk_state);
+ if (unlikely(state != TCP_CLOSE)) {
+ err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
goto out_unlock;
}
- unix_state_lock_nested(sk, U_LOCK_SECOND);
+ unix_state_lock(sk);
- if (sk->sk_state != st) {
+ if (unlikely(sk->sk_state != TCP_CLOSE)) {
+ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
unix_state_unlock(sk);
- unix_state_unlock(other);
- sock_put(other);
- goto restart;
+ goto out_unlock;
}
err = security_unix_stream_connect(sk, other, newsk);
@@ -1599,6 +1648,7 @@ restart:
newsk->sk_type = sk->sk_type;
init_peercred(newsk);
newu = unix_sk(newsk);
+ newu->listener = other;
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
@@ -1630,7 +1680,7 @@ restart:
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1648,15 +1698,13 @@ restart:
return 0;
out_unlock:
- if (other)
- unix_state_unlock(other);
-
+ unix_state_unlock(other);
+ sock_put(other);
+out_free_skb:
+ consume_skb(skb);
+out_free_sk:
+ unix_release_sock(newsk, 0);
out:
- kfree_skb(skb);
- if (newsk)
- unix_release_sock(newsk, 0);
- if (other)
- sock_put(other);
return err;
}
@@ -1690,32 +1738,31 @@ static void unix_sock_inherit_flags(const struct socket *old,
set_bit(SOCK_PASSSEC, &new->flags);
}
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int unix_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
- struct sock *tsk;
struct sk_buff *skb;
- int err;
+ struct sock *tsk;
- err = -EOPNOTSUPP;
+ arg->err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
- err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
+ arg->err = -EINVAL;
+ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
- skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
- &err);
+ skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ &arg->err);
if (!skb) {
/* This means receive shutdown. */
- if (err == 0)
- err = -EINVAL;
+ if (arg->err == 0)
+ arg->err = -EINVAL;
goto out;
}
@@ -1725,6 +1772,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
/* attach accepted sock to socket */
unix_state_lock(tsk);
+ unix_update_edges(unix_sk(tsk));
newsock->state = SS_CONNECTED;
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
@@ -1732,7 +1780,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
return 0;
out:
- return err;
+ return arg->err;
}
@@ -1775,51 +1823,60 @@ out:
return err;
}
+/* The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
+ UNIXCB(skb).fp = scm->fp;
+ scm->fp = NULL;
+
+ if (unix_prepare_fpl(UNIXCB(skb).fp))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ scm->fp = UNIXCB(skb).fp;
+ UNIXCB(skb).fp = NULL;
+
+ unix_destroy_fpl(scm->fp);
+}
+
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+}
- /*
- * Garbage collection of unix sockets starts by selecting a set of
- * candidate sockets which have reference only from being in flight
- * (total_refs == inflight_refs). This condition is checked once during
- * the candidate collection phase, and candidates are marked as such, so
- * that non-candidates can later be ignored. While inflight_refs is
- * protected by unix_gc_lock, total_refs (file count) is not, hence this
- * is an instantaneous decision.
- *
- * Once a candidate, however, the socket must not be reinstalled into a
- * file descriptor while the garbage collection is in progress.
- *
- * If the above conditions are met, then the directed graph of
- * candidates (*) does not change while unix_gc_lock is held.
- *
- * Any operations that changes the file count through file descriptors
- * (dup, close, sendmsg) does not change the graph since candidates are
- * not installed in fds.
- *
- * Dequeing a candidate via recvmsg would install it into an fd, but
- * that takes unix_gc_lock to decrement the inflight count, so it's
- * serialized with garbage collection.
- *
- * MSG_PEEK is special in that it does not change the inflight count,
- * yet does install the socket into an fd. The following lock/unlock
- * pair is to ensure serialization with garbage collection. It must be
- * done between incrementing the file count and installing the file into
- * an fd.
- *
- * If garbage collection starts after the barrier provided by the
- * lock/unlock, then it will see the elevated refcount and not mark this
- * as a candidate. If a garbage collection is already in progress
- * before the file count was incremented, then the lock/unlock pair will
- * ensure that garbage collection is finished before progressing to
- * installing the fd.
- *
- * (*) A -> B where B is on the queue of A or B is on the queue of C
- * which is on the queue of listening socket A.
- */
- spin_lock(&unix_gc_lock);
- spin_unlock(&unix_gc_lock);
+static void unix_destruct_scm(struct sk_buff *skb)
+{
+ struct scm_cookie scm;
+
+ memset(&scm, 0, sizeof(scm));
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
+
+ /* Alas, it calls VFS */
+ /* So fscking what? fput() had been SMP-safe since the last Summer */
+ scm_destroy(&scm);
+ sock_wfree(skb);
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1878,8 +1935,10 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count))
+ if (unlikely(fp && fp->count)) {
atomic_add(fp->count, &u->scm_stat.nr_fds);
+ unix_add_edges(fp, u);
+ }
}
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1887,8 +1946,10 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count))
+ if (unlikely(fp && fp->count)) {
atomic_sub(fp->count, &u->scm_stat.nr_fds);
+ unix_del_edges(fp);
+ }
}
/*
@@ -1898,7 +1959,6 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
- DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *sk = sock->sk, *other = NULL;
struct unix_sock *u = unix_sk(sk);
struct scm_cookie scm;
@@ -1908,17 +1968,19 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
long timeo;
int err;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
- err = -EOPNOTSUPP;
- if (msg->msg_flags&MSG_OOB)
+ wait_for_unix_gc(scm.fp);
+
+ if (msg->msg_flags & MSG_OOB) {
+ err = -EOPNOTSUPP;
goto out;
+ }
if (msg->msg_namelen) {
- err = unix_validate_addr(sunaddr, msg->msg_namelen);
+ err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
if (err)
goto out;
@@ -1928,24 +1990,20 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
NULL);
if (err)
goto out;
- } else {
- sunaddr = NULL;
- err = -ENOTCONN;
- other = unix_peer_get(sk);
- if (!other)
- goto out;
}
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
- err = -EMSGSIZE;
- if (len > sk->sk_sndbuf - 32)
+ if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
+ err = -EMSGSIZE;
goto out;
+ }
if (len > SKB_MAX_ALLOC) {
data_len = min_t(size_t,
@@ -1959,7 +2017,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
PAGE_ALLOC_COSTLY_ORDER);
- if (skb == NULL)
+ if (!skb)
goto out;
err = unix_scm_to_skb(&scm, skb, true);
@@ -1975,17 +2033,18 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
-restart:
- if (!other) {
- err = -ECONNRESET;
- if (sunaddr == NULL)
- goto out_free;
-
- other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
- sk->sk_type);
+ if (msg->msg_namelen) {
+lookup:
+ other = unix_find_other(sock_net(sk), msg->msg_name,
+ msg->msg_namelen, sk->sk_type);
if (IS_ERR(other)) {
err = PTR_ERR(other);
- other = NULL;
+ goto out_free;
+ }
+ } else {
+ other = unix_peer_get(sk);
+ if (!other) {
+ err = -ENOTCONN;
goto out_free;
}
}
@@ -1993,58 +2052,64 @@ restart:
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
- goto out_free;
+ goto out_sock_put;
}
+restart:
sk_locked = 0;
unix_state_lock(other);
restart_locked:
- err = -EPERM;
- if (!unix_may_send(sk, other))
+
+ if (!unix_may_send(sk, other)) {
+ err = -EPERM;
goto out_unlock;
+ }
if (unlikely(sock_flag(other, SOCK_DEAD))) {
- /*
- * Check with 1003.1g - what should
- * datagram error
- */
- unix_state_unlock(other);
- sock_put(other);
+ /* Check with 1003.1g - what should datagram error */
- if (!sk_locked)
- unix_state_lock(sk);
+ unix_state_unlock(other);
- err = 0;
if (sk->sk_type == SOCK_SEQPACKET) {
/* We are here only when racing with unix_release_sock()
* is clearing @other. Never change state to TCP_CLOSE
* unlike SOCK_DGRAM wants.
*/
- unix_state_unlock(sk);
err = -EPIPE;
- } else if (unix_peer(sk) == other) {
+ goto out_sock_put;
+ }
+
+ if (!sk_locked)
+ unix_state_lock(sk);
+
+ if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
- } else {
- unix_state_unlock(sk);
+ goto out_sock_put;
}
- other = NULL;
- if (err)
- goto out_free;
- goto restart;
+ unix_state_unlock(sk);
+
+ if (!msg->msg_namelen) {
+ err = -ECONNRESET;
+ goto out_sock_put;
+ }
+
+ sock_put(other);
+ goto lookup;
}
- err = -EPIPE;
- if (other->sk_shutdown & RCV_SHUTDOWN)
+ if (other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -EPIPE;
goto out_unlock;
+ }
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
@@ -2064,7 +2129,7 @@ restart_locked:
err = sock_intr_errno(timeo);
if (signal_pending(current))
- goto out_free;
+ goto out_sock_put;
goto restart;
}
@@ -2105,11 +2170,11 @@ out_unlock:
if (sk_locked)
unix_state_unlock(sk);
unix_state_unlock(other);
+out_sock_put:
+ sock_put(other);
out_free:
- kfree_skb(skb);
+ consume_skb(skb);
out:
- if (other)
- sock_put(other);
scm_destroy(&scm);
return err;
}
@@ -2125,7 +2190,7 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
{
struct unix_sock *ousk = unix_sk(other);
struct sk_buff *skb;
- int err = 0;
+ int err;
skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
@@ -2133,41 +2198,39 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
return err;
err = unix_scm_to_skb(scm, skb, !fds_sent);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
+ if (err < 0)
+ goto out;
+
skb_put(skb, 1);
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
- if (err) {
- kfree_skb(skb);
- return err;
- }
+ if (err)
+ goto out;
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN)) {
unix_state_unlock(other);
- kfree_skb(skb);
- return -EPIPE;
+ err = -EPIPE;
+ goto out;
}
maybe_add_creds(skb, sock, other);
- skb_get(skb);
-
- if (ousk->oob_skb)
- consume_skb(ousk->oob_skb);
+ scm_stat_add(other, skb);
+ spin_lock(&other->sk_receive_queue.lock);
WRITE_ONCE(ousk->oob_skb, skb);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
- scm_stat_add(other, skb);
- skb_queue_tail(&other->sk_receive_queue, skb);
sk_send_sigurg(other);
unix_state_unlock(other);
other->sk_data_ready(other);
+ return 0;
+out:
+ consume_skb(skb);
return err;
}
#endif
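
queue_oob() now takes the receive-queue spinlock to set the oob_skb pointer and queue the skb atomically, accounts it with scm_stat_add(), and no longer keeps the extra skb_get() reference for the side pointer; userspace-visible MSG_OOB semantics stay as before. As a refresher (behaviour as I understand the AF_UNIX MSG_OOB implementation, available with CONFIG_AF_UNIX_OOB), a tiny demo of sending and consuming the urgent byte on a SOCK_STREAM pair:

#include <sys/socket.h>
#include <unistd.h>

/* Send one OOB byte and read it back on the peer; error handling elided. */
static void unix_oob_demo(void)
{
	int sv[2];
	char c;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	send(sv[0], "a", 1, 0);		/* ordinary byte */
	send(sv[0], "b", 1, MSG_OOB);	/* 'b' becomes the OOB byte */

	recv(sv[1], &c, 1, 0);		/* returns 'a', stops at the OOB mark */
	recv(sv[1], &c, 1, MSG_OOB);	/* returns 'b' */

	close(sv[0]);
	close(sv[1]);
}
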
@@ -2176,21 +2239,20 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
+ struct sk_buff *skb = NULL;
struct sock *other = NULL;
- int err, size;
- struct sk_buff *skb;
- int sent = 0;
struct scm_cookie scm;
bool fds_sent = false;
- int data_len;
+ int err, sent = 0;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
- err = -EOPNOTSUPP;
+ wait_for_unix_gc(scm.fp);
+
if (msg->msg_flags & MSG_OOB) {
+ err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (len)
len--;
@@ -2200,20 +2262,22 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (msg->msg_namelen) {
- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
- err = -ENOTCONN;
other = unix_peer(sk);
- if (!other)
+ if (!other) {
+ err = -ENOTCONN;
goto out_err;
+ }
}
- if (sk->sk_shutdown & SEND_SHUTDOWN)
- goto pipe_err;
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ goto out_pipe;
while (sent < len) {
- size = len - sent;
+ int size = len - sent;
+ int data_len;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
skb = sock_alloc_send_pskb(sk, 0, 0,
@@ -2221,7 +2285,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2239,19 +2303,18 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(&scm, skb, !fds_sent);
- if (err < 0) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err < 0)
+ goto out_free;
+
fds_sent = true;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
err = skb_splice_from_iter(skb, &msg->msg_iter, size,
sk->sk_allocation);
- if (err < 0) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err < 0)
+ goto out_free;
+
size = err;
refcount_add(size, &sk->sk_wmem_alloc);
} else {
@@ -2259,17 +2322,15 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
skb->data_len = data_len;
skb->len = size;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
- if (err) {
- kfree_skb(skb);
- goto out_err;
- }
+ if (err)
+ goto out_free;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
- goto pipe_err_free;
+ goto out_pipe_unlock;
maybe_add_creds(skb, sock, other);
scm_stat_add(other, skb);
@@ -2292,13 +2353,14 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
return sent;
-pipe_err_free:
+out_pipe_unlock:
unix_state_unlock(other);
- kfree_skb(skb);
-pipe_err:
- if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+out_pipe:
+ if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
+out_free:
+ consume_skb(skb);
out_err:
scm_destroy(&scm);
return sent ? : err;
@@ -2314,7 +2376,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
if (err)
return err;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
@@ -2328,7 +2390,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2553,8 +2615,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
mutex_lock(&u->iolock);
unix_state_lock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
mutex_unlock(&u->iolock);
return -EINVAL;
@@ -2564,8 +2628,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
if (!(state->flags & MSG_PEEK))
WRITE_ONCE(u->oob_skb, NULL);
- else
- skb_get(oob_skb);
+
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
chunk = state->recv_actor(oob_skb, 0, chunk, state);
@@ -2573,8 +2637,6 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
if (!(state->flags & MSG_PEEK))
UNIXCB(oob_skb).consumed += 1;
- consume_skb(oob_skb);
-
mutex_unlock(&u->iolock);
if (chunk < 0)
@@ -2587,38 +2649,100 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
int flags, int copied)
{
+ struct sk_buff *read_skb = NULL, *unread_skb = NULL;
struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
- skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
+ if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
+ return skb;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ if (!unix_skb_len(skb)) {
+ if (copied && (!u->oob_skb || skb == u->oob_skb)) {
+ skb = NULL;
+ } else if (flags & MSG_PEEK) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ } else {
+ read_skb = skb;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ __skb_unlink(read_skb, &sk->sk_receive_queue);
+ }
+
+ if (!skb)
+ goto unlock;
+ }
+
+ if (skb != u->oob_skb)
+ goto unlock;
+
+ if (copied) {
skb = NULL;
- } else {
- if (skb == u->oob_skb) {
- if (copied) {
- skb = NULL;
- } else if (sock_flag(sk, SOCK_URGINLINE)) {
- if (!(flags & MSG_PEEK)) {
- WRITE_ONCE(u->oob_skb, NULL);
- consume_skb(skb);
- }
- } else if (!(flags & MSG_PEEK)) {
- skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
- skb = skb_peek(&sk->sk_receive_queue);
- }
+ } else if (!(flags & MSG_PEEK)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+
+ if (!sock_flag(sk, SOCK_URGINLINE)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ unread_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
+
+unlock:
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ consume_skb(read_skb);
+ kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+
return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+ struct unix_sock *u = unix_sk(sk);
+ struct sk_buff *skb;
+ int err;
+
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
- return unix_read_skb(sk, recv_actor);
+ mutex_lock(&u->iolock);
+ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
+ mutex_unlock(&u->iolock);
+ if (!skb)
+ return err;
+
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (unlikely(skb == READ_ONCE(u->oob_skb))) {
+ bool drop = false;
+
+ unix_state_lock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD)) {
+ unix_state_unlock(sk);
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+ return -ECONNRESET;
+ }
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (likely(skb == u->oob_skb)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+ drop = true;
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ unix_state_unlock(sk);
+
+ if (drop) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+ return -EAGAIN;
+ }
+ }
+#endif
+
+ return recv_actor(sk, skb);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
@@ -2639,7 +2763,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
size_t size = state->size;
unsigned int last_len;
- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
@@ -2665,9 +2789,8 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
skip = max(sk_peek_offset(sk, flags), 0);
do {
- int chunk;
- bool drop_skb;
struct sk_buff *skb, *last;
+ int chunk;
redo:
unix_state_lock(sk);
@@ -2678,18 +2801,16 @@ redo:
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
+again:
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (skb) {
skb = manage_oob(skb, sk, flags, copied);
- if (!skb) {
+ if (!skb && copied) {
unix_state_unlock(sk);
- if (copied)
- break;
- goto redo;
+ break;
}
}
#endif
-again:
if (skb == NULL) {
if (copied >= target)
goto unlock;
@@ -2765,11 +2886,7 @@ unlock:
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
- skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
- drop_skb = !unix_skb_len(skb);
- /* skb is only safe to use if !drop_skb */
- consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
@@ -2778,18 +2895,6 @@ unlock:
copied += chunk;
size -= chunk;
- if (drop_skb) {
- /* the skb was touched by a concurrent reader;
- * we should not expect anything from this skb
- * anymore and assume it invalid - we can be
- * sure it was dropped from the socket queue
- *
- * let's report a short read
- */
- err = 0;
- break;
- }
-
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
@@ -2972,7 +3077,7 @@ long unix_inq_len(struct sock *sk)
struct sk_buff *skb;
long amount = 0;
- if (sk->sk_state == TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
@@ -3057,12 +3162,27 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
case SIOCATMARK:
{
+ struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int answ = 0;
+ mutex_lock(&u->iolock);
+
skb = skb_peek(&sk->sk_receive_queue);
- if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
- answ = 1;
+ if (skb) {
+ struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
+ struct sk_buff *next_skb;
+
+ next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
+
+ if (skb == oob_skb ||
+ (!unix_skb_len(skb) &&
+ (!oob_skb || next_skb == oob_skb)))
+ answ = 1;
+ }
+
+ mutex_unlock(&u->iolock);
+
err = put_user(answ, (int __user *)arg);
}
break;
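
With the change above, SIOCATMARK also reports the mark when the skb at the head of the queue is already fully consumed and the next skb is the pending OOB one. A small illustrative helper for the userspace side (assumes an AF_UNIX stream socket; only the ioctl call is shown):

#include <sys/ioctl.h>

/* Returns 1 if the next read on fd starts at the OOB mark, 0 if not,
 * -1 on error.
 */
static int at_oob_mark(int fd)
{
	int atmark;

	if (ioctl(fd, SIOCATMARK, &atmark) < 0)
		return -1;
	return atmark != 0;
}
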
@@ -3084,12 +3204,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
@@ -3111,14 +3233,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- sk->sk_state == TCP_CLOSE)
+ state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
- if (unix_writable(sk))
+ if (unix_writable(sk, state))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
@@ -3129,12 +3251,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
@@ -3154,19 +3278,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->sk_type == SOCK_SEQPACKET) {
- if (sk->sk_state == TCP_CLOSE)
- mask |= EPOLLHUP;
- /* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
- return mask;
- }
+ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+ mask |= EPOLLHUP;
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
- writable = unix_writable(sk);
+ writable = unix_writable(sk, state);
if (writable) {
unix_state_lock(sk);
@@ -3560,6 +3679,7 @@ static int __net_init unix_net_init(struct net *net)
for (i = 0; i < UNIX_HASH_SIZE; i++) {
spin_lock_init(&net->unx.table.locks[i]);
+ lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
}
diff --git a/net/unix/diag.c b/net/unix/diag.c
index be19827eca36..9138af8b465e 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -47,9 +47,7 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
peer = unix_peer_get(sk);
if (peer) {
- unix_state_lock(peer);
ino = sock_i_ino(peer);
- unix_state_unlock(peer);
sock_put(peer);
return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
@@ -65,7 +63,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
u32 *buf;
int i;
- if (sk->sk_state == TCP_LISTEN) {
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -75,20 +73,9 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
buf = nla_data(attr);
i = 0;
- skb_queue_walk(&sk->sk_receive_queue, skb) {
- struct sock *req, *peer;
-
- req = skb->sk;
- /*
- * The state lock is outer for the same sk's
- * queue lock. With the other's queue locked it's
- * OK to lock the state.
- */
- unix_state_lock_nested(req, U_LOCK_DIAG);
- peer = unix_sk(req)->peer;
- buf[i++] = (peer ? sock_i_ino(peer) : 0);
- unix_state_unlock(req);
- }
+ skb_queue_walk(&sk->sk_receive_queue, skb)
+ buf[i++] = sock_i_ino(unix_peer(skb->sk));
+
spin_unlock(&sk->sk_receive_queue.lock);
}
@@ -103,8 +90,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
- if (sk->sk_state == TCP_LISTEN) {
- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +123,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
- rep->udiag_state = sk->sk_state;
+ rep->udiag_state = READ_ONCE(sk->sk_state);
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +152,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -180,22 +167,6 @@ out_nlmsg_trim:
return -EMSGSIZE;
}
-static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u32 flags)
-{
- int sk_ino;
-
- unix_state_lock(sk);
- sk_ino = sock_i_ino(sk);
- unix_state_unlock(sk);
-
- if (!sk_ino)
- return 0;
-
- return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
-}
-
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -213,14 +184,22 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
num = 0;
spin_lock(&net->unx.table.locks[slot]);
sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ int sk_ino;
+
if (num < s_num)
goto next;
- if (!(req->udiag_states & (1 << sk->sk_state)))
+
+ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
goto next;
- if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+
+ sk_ino = sock_i_ino(sk);
+ if (!sk_ino)
+ goto next;
+
+ if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI) < 0) {
+ NLM_F_MULTI, sk_ino) < 0) {
spin_unlock(&net->unx.table.locks[slot]);
goto done;
}
@@ -322,6 +301,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler unix_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_UNIX,
.dump = unix_diag_handler_dump,
};
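
The dump path above now snapshots sk_state, the inode number, and the receive-queue length locklessly before calling sk_diag_fill(). For reference, this is the request that reaches it from userspace; a minimal sketch of a UNIX_DIAG dump over NETLINK_SOCK_DIAG (struct and attribute names follow <linux/unix_diag.h>; reply parsing and error handling elided):

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_UNIX,
			.udiag_states = ~0U,			/* all states */
			.udiag_show   = UDIAG_SHOW_RQLEN,	/* exercises sk_diag_show_rqlen() */
		},
	};
	char buf[8192];

	send(fd, &msg, sizeof(msg), 0);
	recv(fd, buf, sizeof(buf), 0);	/* parse nlmsghdr/unix_diag_msg records here */
	close(fd);
	return 0;
}
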
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a81880dac7b..9848b7b78701 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -81,263 +81,533 @@
#include <net/scm.h>
#include <net/tcp_states.h>
-#include "scm.h"
+struct unix_sock *unix_get_socket(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+
+ /* Socket ? */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ const struct proto_ops *ops;
+ struct sock *sk = sock->sk;
+
+ ops = READ_ONCE(sock->ops);
-/* Internal data structures and random procedures: */
+ /* PF_UNIX ? */
+ if (sk && ops && ops->family == PF_UNIX)
+ return unix_sk(sk);
+ }
-static LIST_HEAD(gc_candidates);
-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+ return NULL;
+}
-static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
- struct sk_buff_head *hitlist)
+static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
- struct sk_buff *skb;
- struct sk_buff *next;
-
- spin_lock(&x->sk_receive_queue.lock);
- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- /* Do we have file descriptors ? */
- if (UNIXCB(skb).fp) {
- bool hit = false;
- /* Process the descriptors of this socket */
- int nfd = UNIXCB(skb).fp->count;
- struct file **fp = UNIXCB(skb).fp->fp;
-
- while (nfd--) {
- /* Get the socket the fd matches if it indeed does so */
- struct sock *sk = unix_get_socket(*fp++);
-
- if (sk) {
- struct unix_sock *u = unix_sk(sk);
-
- /* Ignore non-candidates, they could
- * have been added to the queues after
- * starting the garbage collection
- */
- if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- hit = true;
-
- func(u);
- }
- }
- }
- if (hit && hitlist != NULL) {
- __skb_unlink(skb, &x->sk_receive_queue);
- __skb_queue_tail(hitlist, skb);
- }
- }
+ /* If an embryo socket has a fd,
+ * the listener indirectly holds the fd's refcnt.
+ */
+ if (edge->successor->listener)
+ return unix_sk(edge->successor->listener)->vertex;
+
+ return edge->successor->vertex;
+}
+
+static bool unix_graph_maybe_cyclic;
+static bool unix_graph_grouped;
+
+static void unix_update_graph(struct unix_vertex *vertex)
+{
+ /* If the receiver socket is not inflight, no cyclic
+ * reference could be formed.
+ */
+ if (!vertex)
+ return;
+
+ unix_graph_maybe_cyclic = true;
+ unix_graph_grouped = false;
+}
+
+static LIST_HEAD(unix_unvisited_vertices);
+
+enum unix_vertex_index {
+ UNIX_VERTEX_INDEX_MARK1,
+ UNIX_VERTEX_INDEX_MARK2,
+ UNIX_VERTEX_INDEX_START,
+};
+
+static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;
+
+static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
+{
+ struct unix_vertex *vertex = edge->predecessor->vertex;
+
+ if (!vertex) {
+ vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
+ vertex->index = unix_vertex_unvisited_index;
+ vertex->out_degree = 0;
+ INIT_LIST_HEAD(&vertex->edges);
+ INIT_LIST_HEAD(&vertex->scc_entry);
+
+ list_move_tail(&vertex->entry, &unix_unvisited_vertices);
+ edge->predecessor->vertex = vertex;
}
- spin_unlock(&x->sk_receive_queue.lock);
+
+ vertex->out_degree++;
+ list_add_tail(&edge->vertex_entry, &vertex->edges);
+
+ unix_update_graph(unix_edge_successor(edge));
}
-static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
- struct sk_buff_head *hitlist)
+static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
- if (x->sk_state != TCP_LISTEN) {
- scan_inflight(x, func, hitlist);
- } else {
- struct sk_buff *skb;
- struct sk_buff *next;
- struct unix_sock *u;
- LIST_HEAD(embryos);
+ struct unix_vertex *vertex = edge->predecessor->vertex;
- /* For a listening socket collect the queued embryos
- * and perform a scan on them as well.
- */
- spin_lock(&x->sk_receive_queue.lock);
- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- u = unix_sk(skb->sk);
+ if (!fpl->dead)
+ unix_update_graph(unix_edge_successor(edge));
- /* An embryo cannot be in-flight, so it's safe
- * to use the list link.
- */
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &embryos);
- }
- spin_unlock(&x->sk_receive_queue.lock);
+ list_del(&edge->vertex_entry);
+ vertex->out_degree--;
- while (!list_empty(&embryos)) {
- u = list_entry(embryos.next, struct unix_sock, link);
- scan_inflight(&u->sk, func, hitlist);
- list_del_init(&u->link);
- }
+ if (!vertex->out_degree) {
+ edge->predecessor->vertex = NULL;
+ list_move_tail(&vertex->entry, &fpl->vertices);
+ }
+}
+
+static void unix_free_vertices(struct scm_fp_list *fpl)
+{
+ struct unix_vertex *vertex, *next_vertex;
+
+ list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
+ list_del(&vertex->entry);
+ kfree(vertex);
}
}
-static void dec_inflight(struct unix_sock *usk)
+static DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
+
+void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
- atomic_long_dec(&usk->inflight);
+ int i = 0, j = 0;
+
+ spin_lock(&unix_gc_lock);
+
+ if (!fpl->count_unix)
+ goto out;
+
+ do {
+ struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
+ struct unix_edge *edge;
+
+ if (!inflight)
+ continue;
+
+ edge = fpl->edges + i++;
+ edge->predecessor = inflight;
+ edge->successor = receiver;
+
+ unix_add_edge(fpl, edge);
+ } while (i < fpl->count_unix);
+
+ receiver->scm_stat.nr_unix_fds += fpl->count_unix;
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
+out:
+ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
+
+ spin_unlock(&unix_gc_lock);
+
+ fpl->inflight = true;
+
+ unix_free_vertices(fpl);
}
-static void inc_inflight(struct unix_sock *usk)
+void unix_del_edges(struct scm_fp_list *fpl)
{
- atomic_long_inc(&usk->inflight);
+ struct unix_sock *receiver;
+ int i = 0;
+
+ spin_lock(&unix_gc_lock);
+
+ if (!fpl->count_unix)
+ goto out;
+
+ do {
+ struct unix_edge *edge = fpl->edges + i++;
+
+ unix_del_edge(fpl, edge);
+ } while (i < fpl->count_unix);
+
+ if (!fpl->dead) {
+ receiver = fpl->edges[0].successor;
+ receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
+ }
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
+out:
+ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
+
+ spin_unlock(&unix_gc_lock);
+
+ fpl->inflight = false;
}
-static void inc_inflight_move_tail(struct unix_sock *u)
+void unix_update_edges(struct unix_sock *receiver)
{
- atomic_long_inc(&u->inflight);
- /* If this still might be part of a cycle, move it to the end
- * of the list, so that it's checked even if it was already
- * passed over
+ /* nr_unix_fds is only updated under unix_state_lock().
+ * If it's 0 here, the embryo socket is not part of the
+ * inflight graph, and GC will not see it, so no lock needed.
*/
- if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
- list_move_tail(&u->link, &gc_candidates);
+ if (!receiver->scm_stat.nr_unix_fds) {
+ receiver->listener = NULL;
+ } else {
+ spin_lock(&unix_gc_lock);
+ unix_update_graph(unix_sk(receiver->listener)->vertex);
+ receiver->listener = NULL;
+ spin_unlock(&unix_gc_lock);
+ }
}
-static bool gc_in_progress;
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
+int unix_prepare_fpl(struct scm_fp_list *fpl)
+{
+ struct unix_vertex *vertex;
+ int i;
+
+ if (!fpl->count_unix)
+ return 0;
+
+ for (i = 0; i < fpl->count_unix; i++) {
+ vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
+ if (!vertex)
+ goto err;
+
+ list_add(&vertex->entry, &fpl->vertices);
+ }
-void wait_for_unix_gc(void)
+ fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
+ GFP_KERNEL_ACCOUNT);
+ if (!fpl->edges)
+ goto err;
+
+ return 0;
+
+err:
+ unix_free_vertices(fpl);
+ return -ENOMEM;
+}
+
+void unix_destroy_fpl(struct scm_fp_list *fpl)
{
- /* If number of inflight sockets is insane,
- * force a garbage collect right now.
- * Paired with the WRITE_ONCE() in unix_inflight(),
- * unix_notinflight() and gc_in_progress().
- */
- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
- !READ_ONCE(gc_in_progress))
- unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
+ if (fpl->inflight)
+ unix_del_edges(fpl);
+
+ kvfree(fpl->edges);
+ unix_free_vertices(fpl);
}
-/* The external entry point: unix_gc() */
-void unix_gc(void)
+static bool unix_vertex_dead(struct unix_vertex *vertex)
{
- struct sk_buff *next_skb, *skb;
+ struct unix_edge *edge;
struct unix_sock *u;
- struct unix_sock *next;
- struct sk_buff_head hitlist;
- struct list_head cursor;
- LIST_HEAD(not_cycle_list);
+ long total_ref;
- spin_lock(&unix_gc_lock);
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ struct unix_vertex *next_vertex = unix_edge_successor(edge);
- /* Avoid a recursive GC. */
- if (gc_in_progress)
- goto out;
+ /* The vertex's fd can be received by a non-inflight socket. */
+ if (!next_vertex)
+ return false;
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
- WRITE_ONCE(gc_in_progress, true);
+ /* The vertex's fd can be received by an inflight socket in
+ * another SCC.
+ */
+ if (next_vertex->scc_index != vertex->scc_index)
+ return false;
+ }
- /* First, select candidates for garbage collection. Only
- * in-flight sockets are considered, and from those only ones
- * which don't have any external reference.
- *
- * Holding unix_gc_lock will protect these candidates from
- * being detached, and hence from gaining an external
- * reference. Since there are no possible receivers, all
- * buffers currently on the candidates' queues stay there
- * during the garbage collection.
- *
- * We also know that no new candidate can be added onto the
- * receive queues. Other, non candidate sockets _can_ be
- * added to queue, so we must make sure only to touch
- * candidates.
- */
- list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
- long total_refs;
- long inflight_refs;
-
- total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_long_read(&u->inflight);
-
- BUG_ON(inflight_refs < 1);
- BUG_ON(total_refs < inflight_refs);
- if (total_refs == inflight_refs) {
- list_move_tail(&u->link, &gc_candidates);
- __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
- __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ /* No receiver exists out of the same SCC. */
+
+ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
+ u = edge->predecessor;
+ total_ref = file_count(u->sk.sk_socket->file);
+
+ /* If not close()d, total_ref > out_degree. */
+ if (total_ref != vertex->out_degree)
+ return false;
+
+ return true;
+}
+
+static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
+{
+ struct unix_vertex *vertex;
+
+ list_for_each_entry_reverse(vertex, scc, scc_entry) {
+ struct sk_buff_head *queue;
+ struct unix_edge *edge;
+ struct unix_sock *u;
+
+ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
+ u = edge->predecessor;
+ queue = &u->sk.sk_receive_queue;
+
+ spin_lock(&queue->lock);
+
+ if (u->sk.sk_state == TCP_LISTEN) {
+ struct sk_buff *skb;
+
+ skb_queue_walk(queue, skb) {
+ struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
+
+ spin_lock(&embryo_queue->lock);
+ skb_queue_splice_init(embryo_queue, hitlist);
+ spin_unlock(&embryo_queue->lock);
+ }
+ } else {
+ skb_queue_splice_init(queue, hitlist);
}
+
+ spin_unlock(&queue->lock);
}
+}
- /* Now remove all internal in-flight reference to children of
- * the candidates.
- */
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, dec_inflight, NULL);
+static bool unix_scc_cyclic(struct list_head *scc)
+{
+ struct unix_vertex *vertex;
+ struct unix_edge *edge;
- /* Restore the references for children of all candidates,
- * which have remaining references. Do this recursively, so
- * only those remain, which form cyclic references.
- *
- * Use a "cursor" link, to make the list traversal safe, even
- * though elements might be moved about.
+ /* SCC containing multiple vertices ? */
+ if (!list_is_singular(scc))
+ return true;
+
+ vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
+
+ /* Self-reference or an embryo-listener circle? */
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ if (unix_edge_successor(edge) == vertex)
+ return true;
+ }
+
+ return false;
+}
+
+static LIST_HEAD(unix_visited_vertices);
+static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+
+static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
+ struct sk_buff_head *hitlist)
+{
+ LIST_HEAD(vertex_stack);
+ struct unix_edge *edge;
+ LIST_HEAD(edge_stack);
+
+next_vertex:
+ /* Push vertex to vertex_stack and mark it as on-stack
+ * (index >= UNIX_VERTEX_INDEX_START).
+ * The vertex will be popped when finalising SCC later.
*/
- list_add(&cursor, &gc_candidates);
- while (cursor.next != &gc_candidates) {
- u = list_entry(cursor.next, struct unix_sock, link);
+ list_add(&vertex->scc_entry, &vertex_stack);
+
+ vertex->index = *last_index;
+ vertex->scc_index = *last_index;
+ (*last_index)++;
+
+ /* Explore neighbour vertices (receivers of the current vertex's fd). */
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ struct unix_vertex *next_vertex = unix_edge_successor(edge);
+
+ if (!next_vertex)
+ continue;
+
+ if (next_vertex->index == unix_vertex_unvisited_index) {
+ /* Iterative deepening depth first search
+ *
+ * 1. Push a forward edge to edge_stack and set
+ * the successor to vertex for the next iteration.
+ */
+ list_add(&edge->stack_entry, &edge_stack);
- /* Move cursor to after the current position. */
- list_move(&cursor, &u->link);
+ vertex = next_vertex;
+ goto next_vertex;
- if (atomic_long_read(&u->inflight) > 0) {
- list_move_tail(&u->link, &not_cycle_list);
- __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
- scan_children(&u->sk, inc_inflight_move_tail, NULL);
+ /* 2. Pop the edge directed to the current vertex
+ * and restore the ancestor for backtracking.
+ */
+prev_vertex:
+ edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
+ list_del_init(&edge->stack_entry);
+
+ next_vertex = vertex;
+ vertex = edge->predecessor->vertex;
+
+ /* If the successor has a smaller scc_index, two vertices
+ * are in the same SCC, so propagate the smaller scc_index
+ * to skip SCC finalisation.
+ */
+ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
+ } else if (next_vertex->index != unix_vertex_grouped_index) {
+ /* Loop detected by a back/cross edge.
+ *
+ * The successor is on vertex_stack, so two vertices are in
+ * the same SCC. If the successor has a smaller *scc_index*,
+ * propagate it to skip SCC finalisation.
+ */
+ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
+ } else {
+ /* The successor was already grouped as another SCC */
}
}
- list_del(&cursor);
- /* Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link) {
- scan_children(&u->sk, inc_inflight, &hitlist);
-
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
+ if (vertex->index == vertex->scc_index) {
+ struct unix_vertex *v;
+ struct list_head scc;
+ bool scc_dead = true;
+
+ /* SCC finalised.
+ *
+ * If the scc_index was not updated, all the vertices above on
+ * vertex_stack are in the same SCC. Group them using scc_entry.
+ */
+ __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
+
+ list_for_each_entry_reverse(v, &scc, scc_entry) {
+ /* Don't restart DFS from this vertex in unix_walk_scc(). */
+ list_move_tail(&v->entry, &unix_visited_vertices);
+
+ /* Mark vertex as off-stack. */
+ v->index = unix_vertex_grouped_index;
+
+ if (scc_dead)
+ scc_dead = unix_vertex_dead(v);
}
-#endif
+
+ if (scc_dead)
+ unix_collect_skb(&scc, hitlist);
+ else if (!unix_graph_maybe_cyclic)
+ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
+ list_del(&scc);
}
- /* not_cycle_list contains those sockets which do not make up a
- * cycle. Restore these to the inflight list.
+ /* Need backtracking ? */
+ if (!list_empty(&edge_stack))
+ goto prev_vertex;
+}
+
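
__unix_walk_scc() above is an iterative Tarjan strongly-connected-components walk, with scc_index roughly playing the role of the classic lowlink value. For orientation only, a toy recursive form of the same algorithm over a tiny adjacency-matrix graph (plain C sketch, not kernel code; the array sizes and names are made up for illustration):

#include <stdbool.h>

#define NV 8				/* assumption: a tiny fixed-size graph */

static int adj[NV][NV];			/* adj[u][v] = 1 for an edge u -> v */
static int idx[NV], low[NV], scc[NV];	/* discovery index, lowlink, SCC id */
static int stack[NV], sp, next_index = 1, next_scc = 1;
static bool on_stack[NV];

static void strongconnect(int v)
{
	int w;

	idx[v] = low[v] = next_index++;
	stack[sp++] = v;
	on_stack[v] = true;

	for (w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;
		if (!idx[w]) {			/* tree edge: recurse, then pull up lowlink */
			strongconnect(w);
			if (low[w] < low[v])
				low[v] = low[w];
		} else if (on_stack[w] && idx[w] < low[v]) {
			low[v] = idx[w];	/* back/cross edge to an on-stack vertex */
		}
	}

	if (low[v] == idx[v]) {			/* v is the root of an SCC: pop it */
		do {
			w = stack[--sp];
			on_stack[w] = false;
			scc[w] = next_scc;
		} while (w != v);
		next_scc++;
	}
}
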
+static void unix_walk_scc(struct sk_buff_head *hitlist)
+{
+ unsigned long last_index = UNIX_VERTEX_INDEX_START;
+
+ unix_graph_maybe_cyclic = false;
+
+ /* Visit every vertex exactly once.
+ * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
*/
- while (!list_empty(&not_cycle_list)) {
- u = list_entry(not_cycle_list.next, struct unix_sock, link);
- __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
- list_move_tail(&u->link, &gc_inflight_list);
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ __unix_walk_scc(vertex, &last_index, hitlist);
}
- spin_unlock(&unix_gc_lock);
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+ swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
- /* We need io_uring to clean its registered files, ignore all io_uring
- * originated skbs. It's fine as io_uring doesn't keep references to
- * other io_uring instances and so killing all other files in the cycle
- * will put all io_uring references forcing it to go through normal
- * release.path eventually putting registered files.
- */
- skb_queue_walk_safe(&hitlist, skb, next_skb) {
- if (skb->destructor == io_uring_destruct_scm) {
- __skb_unlink(skb, &hitlist);
- skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+ unix_graph_grouped = true;
+}
+
+static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+{
+ unix_graph_maybe_cyclic = false;
+
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+ struct list_head scc;
+ bool scc_dead = true;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ list_add(&scc, &vertex->scc_entry);
+
+ list_for_each_entry_reverse(vertex, &scc, scc_entry) {
+ list_move_tail(&vertex->entry, &unix_visited_vertices);
+
+ if (scc_dead)
+ scc_dead = unix_vertex_dead(vertex);
}
+
+ if (scc_dead)
+ unix_collect_skb(&scc, hitlist);
+ else if (!unix_graph_maybe_cyclic)
+ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
+ list_del(&scc);
}
- /* Here we are. Hitlist is filled. Die. */
- __skb_queue_purge(&hitlist);
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+}
+
+static bool gc_in_progress;
+
+static void __unix_gc(struct work_struct *work)
+{
+ struct sk_buff_head hitlist;
+ struct sk_buff *skb;
spin_lock(&unix_gc_lock);
- /* There could be io_uring registered files, just push them back to
- * the inflight list
- */
- list_for_each_entry_safe(u, next, &gc_candidates, link)
- list_move_tail(&u->link, &gc_inflight_list);
+ if (!unix_graph_maybe_cyclic) {
+ spin_unlock(&unix_gc_lock);
+ goto skip_gc;
+ }
- /* All candidates should have been detached by now. */
- BUG_ON(!list_empty(&gc_candidates));
+ __skb_queue_head_init(&hitlist);
+
+ if (unix_graph_grouped)
+ unix_walk_scc_fast(&hitlist);
+ else
+ unix_walk_scc(&hitlist);
+
+ spin_unlock(&unix_gc_lock);
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
+ skb_queue_walk(&hitlist, skb) {
+ if (UNIXCB(skb).fp)
+ UNIXCB(skb).fp->dead = true;
+ }
+
+ __skb_queue_purge_reason(&hitlist, SKB_DROP_REASON_SOCKET_CLOSE);
+skip_gc:
WRITE_ONCE(gc_in_progress, false);
+}
- wake_up(&unix_gc_wait);
+static DECLARE_WORK(unix_gc_work, __unix_gc);
- out:
- spin_unlock(&unix_gc_lock);
+void unix_gc(void)
+{
+ WRITE_ONCE(gc_in_progress, true);
+ queue_work(system_unbound_wq, &unix_gc_work);
+}
+
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
+#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
+
+void wait_for_unix_gc(struct scm_fp_list *fpl)
+{
+ /* If number of inflight sockets is insane,
+ * force a garbage collect right now.
+ *
+ * Paired with the WRITE_ONCE() in unix_inflight(),
+ * unix_notinflight(), and __unix_gc().
+ */
+ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ !READ_ONCE(gc_in_progress))
+ unix_gc();
+
+ /* Penalise users who want to send AF_UNIX sockets
+ * but whose sockets have not been received yet.
+ */
+ if (!fpl || !fpl->count_unix ||
+ READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ return;
+
+ if (READ_ONCE(gc_in_progress))
+ flush_work(&unix_gc_work);
}
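
The whole point of the graph machinery above is to reclaim sockets whose only remaining references ride in SCM_RIGHTS messages that were never received. A minimal userspace sketch of how such garbage is typically created; after the two sends and closes, each socket's own fd sits in flight in its own receive queue, forming the kind of dead SCC the walk collects (error handling elided):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void send_fd(int via, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;

	memset(u.buf, 0, sizeof(u.buf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	sendmsg(via, &msg, 0);
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send_fd(sv[0], sv[1]);	/* sv[1]'s fd now sits, in flight, in sv[1]'s own queue */
	send_fd(sv[1], sv[0]);	/* likewise for sv[0]: two self-referencing SCCs */
	close(sv[0]);
	close(sv[1]);		/* the queued skbs hold the last refs; only the GC frees them */
	return 0;
}
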
diff --git a/net/unix/scm.c b/net/unix/scm.c
deleted file mode 100644
index 822ce0d0d791..000000000000
--- a/net/unix/scm.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/net.h>
-#include <linux/fs.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
-#include <linux/init.h>
-#include <linux/io_uring.h>
-
-#include "scm.h"
-
-unsigned int unix_tot_inflight;
-EXPORT_SYMBOL(unix_tot_inflight);
-
-LIST_HEAD(gc_inflight_list);
-EXPORT_SYMBOL(gc_inflight_list);
-
-DEFINE_SPINLOCK(unix_gc_lock);
-EXPORT_SYMBOL(unix_gc_lock);
-
-struct sock *unix_get_socket(struct file *filp)
-{
- struct sock *u_sock = NULL;
- struct inode *inode = file_inode(filp);
-
- /* Socket ? */
- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
- struct socket *sock = SOCKET_I(inode);
- const struct proto_ops *ops = READ_ONCE(sock->ops);
- struct sock *s = sock->sk;
-
- /* PF_UNIX ? */
- if (s && ops && ops->family == PF_UNIX)
- u_sock = s;
- }
-
- return u_sock;
-}
-EXPORT_SYMBOL(unix_get_socket);
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- if (atomic_long_inc_return(&u->inflight) == 1) {
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- BUG_ON(list_empty(&u->link));
- }
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
- spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- BUG_ON(!atomic_long_read(&u->inflight));
- BUG_ON(list_empty(&u->link));
-
- if (atomic_long_dec_and_test(&u->inflight))
- list_del_init(&u->link);
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
- spin_unlock(&unix_gc_lock);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
- struct user_struct *user = current_user();
-
- if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
- return false;
-}
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- if (too_many_unix_fds(current))
- return -ETOOMANYREFS;
-
- /*
- * Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
- */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return 0;
-}
-EXPORT_SYMBOL(unix_attach_fds);
-
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- scm->fp = UNIXCB(skb).fp;
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-EXPORT_SYMBOL(unix_detach_fds);
-
-void unix_destruct_scm(struct sk_buff *skb)
-{
- struct scm_cookie scm;
-
- memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
- if (UNIXCB(skb).fp)
- unix_detach_fds(&scm, skb);
-
- /* Alas, it calls VFS */
- /* So fscking what? fput() had been SMP-safe since the last Summer */
- scm_destroy(&scm);
- sock_wfree(skb);
-}
-EXPORT_SYMBOL(unix_destruct_scm);
-
-void io_uring_destruct_scm(struct sk_buff *skb)
-{
- unix_destruct_scm(skb);
-}
-EXPORT_SYMBOL(io_uring_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
deleted file mode 100644
index 5a255a477f16..000000000000
--- a/net/unix/scm.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef NET_UNIX_SCM_H
-#define NET_UNIX_SCM_H
-
-extern struct list_head gc_inflight_list;
-extern spinlock_t unix_gc_lock;
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-
-#endif
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 3e84b31c355a..357b3e5f3847 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -19,7 +19,6 @@ static struct ctl_table unix_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
- { }
};
int __net_init unix_sysctl_register(struct net *net)
@@ -52,7 +51,7 @@ err_alloc:
void unix_sysctl_unregister(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->unx.ctl->ctl_table_arg;
unregister_net_sysctl_table(net->unx.ctl);
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index bd84785bf8d6..bca2d86ba97d 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
struct sk_psock *psock;
int copied;
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
if (!len)
return 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 54ba7316f808..7e3db87ae433 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -112,16 +112,19 @@
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vm_sockets.h>
+#include <uapi/asm-generic/ioctls.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+static void vsock_close(struct sock *sk, long timeout);
/* Protocol family. */
struct proto vsock_proto = {
.name = "AF_VSOCK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct vsock_sock),
+ .close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
@@ -334,7 +337,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
void vsock_remove_sock(struct vsock_sock *vsk)
{
- vsock_remove_bound(vsk);
+ /* Transport reassignment must not remove the binding. */
+ if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
+ vsock_remove_bound(vsk);
+
vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);
@@ -488,6 +494,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
*/
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
+
+ /* The transport's release() and destruct() can touch some socket
+ * state. Since we are reassigning the socket to a new transport
+ * during vsock_connect(), let's reset these fields to have a
+ * clean state.
+ */
+ sock_reset_flag(sk, SOCK_DONE);
+ sk->sk_state = TCP_CLOSE;
+ vsk->peer_shutdown = 0;
}
/* We increase the module refcnt to prevent the transport unloading
@@ -796,45 +811,53 @@ static bool sock_type_connectible(u16 type)
static void __vsock_release(struct sock *sk, int level)
{
- if (sk) {
- struct sock *pending;
- struct vsock_sock *vsk;
+ struct vsock_sock *vsk;
+ struct sock *pending;
- vsk = vsock_sk(sk);
- pending = NULL; /* Compiler warning. */
+ vsk = vsock_sk(sk);
+ pending = NULL; /* Compiler warning. */
- /* When "level" is SINGLE_DEPTH_NESTING, use the nested
- * version to avoid the warning "possible recursive locking
- * detected". When "level" is 0, lock_sock_nested(sk, level)
- * is the same as lock_sock(sk).
- */
- lock_sock_nested(sk, level);
+ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+ * version to avoid the warning "possible recursive locking
+ * detected". When "level" is 0, lock_sock_nested(sk, level)
+ * is the same as lock_sock(sk).
+ */
+ lock_sock_nested(sk, level);
- if (vsk->transport)
- vsk->transport->release(vsk);
- else if (sock_type_connectible(sk->sk_type))
- vsock_remove_sock(vsk);
+ /* Indicate to vsock_remove_sock() that the socket is being released and
+ * can be removed from the bound_table, unlike the transport reassignment
+ * case, where the socket must remain bound despite vsock_remove_sock()
+ * being called from the transport release() callback.
+ */
+ sock_set_flag(sk, SOCK_DEAD);
- sock_orphan(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sock_type_connectible(sk->sk_type))
+ vsock_remove_sock(vsk);
- skb_queue_purge(&sk->sk_receive_queue);
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
- /* Clean up any sockets that never were accepted. */
- while ((pending = vsock_dequeue_accept(sk)) != NULL) {
- __vsock_release(pending, SINGLE_DEPTH_NESTING);
- sock_put(pending);
- }
+ skb_queue_purge(&sk->sk_receive_queue);
- release_sock(sk);
- sock_put(sk);
+ /* Clean up any sockets that never were accepted. */
+ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+ __vsock_release(pending, SINGLE_DEPTH_NESTING);
+ sock_put(pending);
}
+
+ release_sock(sk);
+ sock_put(sk);
}
static void vsock_sk_destruct(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
+ /* Flush MSG_ZEROCOPY leftovers. */
+ __skb_queue_purge(&sk->sk_error_queue);
+
vsock_deassign_transport(vsk);
/* When clearing these addresses, there's no need to set the family and
@@ -866,6 +889,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected);
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
@@ -874,6 +900,9 @@ s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
struct sock *sk = sk_vsock(vsk);
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
if (sk->sk_type == SOCK_SEQPACKET)
return vsk->transport->seqpacket_has_data(vsk);
else
@@ -883,6 +912,9 @@ EXPORT_SYMBOL_GPL(vsock_connectible_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
@@ -897,9 +929,22 @@ void vsock_data_ready(struct sock *sk)
}
EXPORT_SYMBOL_GPL(vsock_data_ready);
+/* Dummy callback required by sockmap.
+ * See unconditional call of saved_close() in sock_map_close().
+ */
+static void vsock_close(struct sock *sk, long timeout)
+{
+}
+
static int vsock_release(struct socket *sock)
{
- __vsock_release(sock->sk, 0);
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ sk->sk_prot->close(sk, 0);
+ __vsock_release(sk, 0);
sock->sk = NULL;
sock->state = SS_FREE;
@@ -1050,6 +1095,9 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLRDHUP;
}
+ if (sk_is_readable(sk))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
if (sock->type == SOCK_DGRAM) {
/* For datagram sockets we can read if there is something in
* the queue and write as long as the socket isn't shutdown for
@@ -1141,6 +1189,9 @@ static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
{
struct vsock_sock *vsk = vsock_sk(sk);
+ if (WARN_ON_ONCE(!vsk->transport))
+ return -ENODEV;
+
return vsk->transport->read_skb(vsk, read_actor);
}
@@ -1270,28 +1321,82 @@ out:
return err;
}
+int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
+}
+
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
#ifdef CONFIG_BPF_SYSCALL
+ struct sock *sk = sock->sk;
const struct proto *prot;
-#endif
- struct vsock_sock *vsk;
- struct sock *sk;
-
- sk = sock->sk;
- vsk = vsock_sk(sk);
-#ifdef CONFIG_BPF_SYSCALL
prot = READ_ONCE(sk->sk_prot);
if (prot != &vsock_proto)
return prot->recvmsg(sk, msg, len, flags, NULL);
#endif
- return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
+ return __vsock_dgram_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);
+static int vsock_do_ioctl(struct socket *sock, unsigned int cmd,
+ int __user *arg)
+{
+ struct sock *sk = sock->sk;
+ struct vsock_sock *vsk;
+ int ret;
+
+ vsk = vsock_sk(sk);
+
+ switch (cmd) {
+ case SIOCOUTQ: {
+ ssize_t n_bytes;
+
+ if (!vsk->transport || !vsk->transport->unsent_bytes) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) {
+ ret = -EINVAL;
+ break;
+ }
+
+ n_bytes = vsk->transport->unsent_bytes(vsk);
+ if (n_bytes < 0) {
+ ret = n_bytes;
+ break;
+ }
+
+ ret = put_user(n_bytes, arg);
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static int vsock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_sock(sock->sk);
+ ret = vsock_do_ioctl(sock, cmd, (int __user *)arg);
+ release_sock(sock->sk);
+
+ return ret;
+}
+
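
vsock_do_ioctl() above adds SIOCOUTQ, reporting the transport's count of bytes queued but not yet sent. A minimal illustrative helper for the userspace side (assumes a connected AF_VSOCK socket whose transport implements unsent_bytes(), e.g. virtio; otherwise the ioctl fails with EOPNOTSUPP):

#include <linux/sockios.h>	/* SIOCOUTQ */
#include <sys/ioctl.h>

/* Returns the number of unsent bytes on fd, or -1 on error. */
static int vsock_unsent_bytes(int fd)
{
	int unsent;

	if (ioctl(fd, SIOCOUTQ, &unsent) < 0)
		return -1;
	return unsent;
}
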
static const struct proto_ops vsock_dgram_ops = {
.family = PF_VSOCK,
.owner = THIS_MODULE,
@@ -1302,7 +1407,7 @@ static const struct proto_ops vsock_dgram_ops = {
.accept = sock_no_accept,
.getname = vsock_getname,
.poll = vsock_poll,
- .ioctl = sock_no_ioctl,
+ .ioctl = vsock_ioctl,
.listen = sock_no_listen,
.shutdown = vsock_shutdown,
.sendmsg = vsock_dgram_sendmsg,
@@ -1427,6 +1532,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
if (err < 0)
goto out;
+ /* sk_err might have been set as a result of an earlier
+ * (failed) connect attempt.
+ */
+ sk->sk_err = 0;
+
/* Mark sock as connecting and set the error code to in
* progress in case this is a non-blocking connect.
*/
@@ -1500,8 +1610,8 @@ out:
return err;
}
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int vsock_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *listener;
int err;
@@ -1528,7 +1638,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
/* Wait for children sockets to appear; these are the new sockets
* created upon connection establishment.
*/
- timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
+ timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK);
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
while ((connected = vsock_dequeue_accept(listener)) == NULL &&
@@ -2174,15 +2284,12 @@ out:
}
int
-vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
const struct vsock_transport *transport;
-#ifdef CONFIG_BPF_SYSCALL
- const struct proto *prot;
-#endif
int err;
sk = sock->sk;
@@ -2233,14 +2340,6 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto out;
}
-#ifdef CONFIG_BPF_SYSCALL
- prot = READ_ONCE(sk->sk_prot);
- if (prot != &vsock_proto) {
- release_sock(sk);
- return prot->recvmsg(sk, msg, len, flags, NULL);
- }
-#endif
-
if (sk->sk_type == SOCK_STREAM)
err = __vsock_stream_recvmsg(sk, msg, len, flags);
else
@@ -2250,6 +2349,22 @@ out:
release_sock(sk);
return err;
}
+
+int
+vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ struct sock *sk = sock->sk;
+ const struct proto *prot;
+
+ prot = READ_ONCE(sk->sk_prot);
+ if (prot != &vsock_proto)
+ return prot->recvmsg(sk, msg, len, flags, NULL);
+#endif
+
+ return __vsock_connectible_recvmsg(sock, msg, len, flags);
+}
EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
static int vsock_set_rcvlowat(struct sock *sk, int val)
@@ -2286,7 +2401,7 @@ static const struct proto_ops vsock_stream_ops = {
.accept = vsock_accept,
.getname = vsock_getname,
.poll = vsock_poll,
- .ioctl = sock_no_ioctl,
+ .ioctl = vsock_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
.setsockopt = vsock_connectible_setsockopt,
@@ -2308,7 +2423,7 @@ static const struct proto_ops vsock_seqpacket_ops = {
.accept = vsock_accept,
.getname = vsock_getname,
.poll = vsock_poll,
- .ioctl = sock_no_ioctl,
+ .ioctl = vsock_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
.setsockopt = vsock_connectible_setsockopt,
@@ -2357,6 +2472,7 @@ static int vsock_create(struct net *net, struct socket *sock,
if (sock->type == SOCK_DGRAM) {
ret = vsock_assign_transport(vsk, NULL);
if (ret < 0) {
+ sock->sk = NULL;
sock_put(sk);
return ret;
}
diff --git a/net/vmw_vsock/diag.c b/net/vmw_vsock/diag.c
index 2e29994f92ff..ab87ef66c1e8 100644
--- a/net/vmw_vsock/diag.c
+++ b/net/vmw_vsock/diag.c
@@ -157,6 +157,7 @@ static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler vsock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_VSOCK,
.dump = vsock_diag_handler_dump,
};
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index e2157e387217..31342ab502b4 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -13,12 +13,12 @@
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
/* Older (VMBUS version 'VERSION_WIN10' or before) Windows hosts have some
* stricter requirements on the hv_sock ring buffer size of six 4K pages.
- * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
- * limitation; but, keep the defaults the same for compat.
+ * HV_HYP_PAGE_SIZE is defined as 4K. Newer hosts don't have this limitation;
+ * but, keep the defaults the same for compat.
*/
#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
@@ -549,6 +549,7 @@ static void hvs_destruct(struct vsock_sock *vsk)
vmbus_hvsock_device_unregister(chan);
kfree(hvs);
+ vsk->trans = NULL;
}
static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 1748268e0694..f0e48e6911fc 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -94,6 +94,63 @@ out_rcu:
return ret;
}
+/* Caller needs to hold vsock->tx_lock on vq */
+static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
+ struct virtio_vsock *vsock, gfp_t gfp)
+{
+ int ret, in_sg = 0, out_sg = 0;
+ struct scatterlist **sgs;
+
+ sgs = vsock->out_sgs;
+ sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+ sizeof(*virtio_vsock_hdr(skb)));
+ out_sg++;
+
+ if (!skb_is_nonlinear(skb)) {
+ if (skb->len > 0) {
+ sg_init_one(sgs[out_sg], skb->data, skb->len);
+ out_sg++;
+ }
+ } else {
+ struct skb_shared_info *si;
+ int i;
+
+ /* If skb is nonlinear, then its buffer must contain
+ * only header and nothing more. Data is stored in
+ * the fragged part.
+ */
+ WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+ si = skb_shinfo(skb);
+
+ for (i = 0; i < si->nr_frags; i++) {
+ skb_frag_t *skb_frag = &si->frags[i];
+ void *va;
+
+ /* We will use 'page_to_virt()' for the userspace page
+ * here, because virtio or dma-mapping layers will call
+ * 'virt_to_phys()' later to fill the buffer descriptor.
+ * We don't touch memory at "virtual" address of this page.
+ */
+ va = page_to_virt(skb_frag_page(skb_frag));
+ sg_init_one(sgs[out_sg],
+ va + skb_frag_off(skb_frag),
+ skb_frag_size(skb_frag));
+ out_sg++;
+ }
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
+ if (ret < 0)
+ return ret;
+
+ virtio_transport_deliver_tap_pkt(skb);
+ return 0;
+}
+
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
@@ -111,60 +168,17 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
- int ret, in_sg = 0, out_sg = 0;
- struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;
+ int ret;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
if (!skb)
break;
- virtio_transport_deliver_tap_pkt(skb);
reply = virtio_vsock_skb_reply(skb);
- sgs = vsock->out_sgs;
- sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
- sizeof(*virtio_vsock_hdr(skb)));
- out_sg++;
-
- if (!skb_is_nonlinear(skb)) {
- if (skb->len > 0) {
- sg_init_one(sgs[out_sg], skb->data, skb->len);
- out_sg++;
- }
- } else {
- struct skb_shared_info *si;
- int i;
-
- /* If skb is nonlinear, then its buffer must contain
- * only header and nothing more. Data is stored in
- * the fragged part.
- */
- WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
- si = skb_shinfo(skb);
-
- for (i = 0; i < si->nr_frags; i++) {
- skb_frag_t *skb_frag = &si->frags[i];
- void *va;
-
- /* We will use 'page_to_virt()' for the userspace page
- * here, because virtio or dma-mapping layers will call
- * 'virt_to_phys()' later to fill the buffer descriptor.
- * We don't touch memory at "virtual" address of this page.
- */
- va = page_to_virt(skb_frag_page(skb_frag));
- sg_init_one(sgs[out_sg],
- va + skb_frag_off(skb_frag),
- skb_frag_size(skb_frag));
- out_sg++;
- }
- }
-
- ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
- /* Usually this means that there is no more space available in
- * the vq
- */
+ ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
if (ret < 0) {
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
@@ -194,6 +208,28 @@ out:
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
+/* Caller needs to hold RCU for vsock.
+ * Returns 0 if the packet is successfully put on the vq.
+ */
+static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
+{
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
+ int ret;
+
+ /* Inside RCU, can't sleep! */
+ ret = mutex_trylock(&vsock->tx_lock);
+ if (unlikely(ret == 0))
+ return -EBUSY;
+
+ ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
+ if (ret == 0)
+ virtqueue_kick(vq);
+
+ mutex_unlock(&vsock->tx_lock);
+
+ return ret;
+}
+
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
@@ -217,11 +253,20 @@ virtio_transport_send_pkt(struct sk_buff *skb)
goto out_rcu;
}
- if (virtio_vsock_skb_reply(skb))
- atomic_inc(&vsock->queued_replies);
+ /* If send_pkt_queue is empty, packet order is still maintained, so
+ * we can safely bypass that queue and try to put the packet directly
+ * on the virtqueue using virtio_transport_send_skb_fast_path().
+ * If this fails, we simply put the packet on the intermediate
+ * queue and schedule the worker.
+ */
+ if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
+ virtio_transport_send_skb_fast_path(vsock, skb)) {
+ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
- virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
- queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
out_rcu:
rcu_read_unlock();
@@ -310,7 +355,7 @@ static void virtio_transport_tx_work(struct work_struct *work)
virtqueue_disable_cb(vq);
while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
- consume_skb(skb);
+ virtio_transport_consume_skb_sent(skb, true);
added = true;
}
} while (!virtqueue_enable_cb(vq));
@@ -539,6 +584,8 @@ static struct virtio_transport virtio_transport = {
.notify_buffer_size = virtio_transport_notify_buffer_size,
.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+ .unsent_bytes = virtio_transport_unsent_bytes,
+
.read_skb = virtio_transport_read_skb,
},
@@ -616,20 +663,21 @@ out:
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
struct virtio_device *vdev = vsock->vdev;
- static const char * const names[] = {
- "rx",
- "tx",
- "event",
- };
- vq_callback_t *callbacks[] = {
- virtio_vsock_rx_done,
- virtio_vsock_tx_done,
- virtio_vsock_event_done,
+ struct virtqueue_info vqs_info[] = {
+ { "rx", virtio_vsock_rx_done },
+ { "tx", virtio_vsock_tx_done },
+ { "event", virtio_vsock_event_done },
};
int ret;
- ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
- NULL);
+ mutex_lock(&vsock->rx_lock);
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ mutex_unlock(&vsock->rx_lock);
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
if (ret < 0)
return ret;
@@ -738,9 +786,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->vdev = vdev;
- vsock->rx_buf_nr = 0;
- vsock->rx_buf_max_nr = 0;
- atomic_set(&vsock->queued_replies, 0);
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
@@ -858,7 +903,6 @@ static struct virtio_driver virtio_vsock_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_vsock_probe,
.remove = virtio_vsock_remove,
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 16ff976a86e3..7f7de6d88096 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -26,6 +26,9 @@
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout);
+
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
@@ -400,6 +403,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
if (virtio_transport_init_zcopy_skb(vsk, skb,
info->msg,
can_zcopy)) {
+ kfree_skb(skb);
ret = -ENOMEM;
break;
}
@@ -463,6 +467,26 @@ void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
+void virtio_transport_consume_skb_sent(struct sk_buff *skb, bool consume)
+{
+ struct sock *s = skb->sk;
+
+ if (s && skb->len) {
+ struct vsock_sock *vs = vsock_sk(s);
+ struct virtio_vsock_sock *vvs;
+
+ vvs = vs->trans;
+
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->bytes_unsent -= skb->len;
+ spin_unlock_bh(&vvs->tx_lock);
+ }
+
+ if (consume)
+ consume_skb(skb);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_consume_skb_sent);
+
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
u32 ret;
@@ -475,6 +499,7 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
if (ret > credit)
ret = credit;
vvs->tx_cnt += ret;
+ vvs->bytes_unsent += ret;
spin_unlock_bh(&vvs->tx_lock);
return ret;
@@ -488,6 +513,7 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
spin_lock_bh(&vvs->tx_lock);
vvs->tx_cnt -= credit;
+ vvs->bytes_unsent -= credit;
spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
@@ -1086,10 +1112,26 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
+ virtio_transport_cancel_close_work(vsk, true);
+
kfree(vvs);
+ vsk->trans = NULL;
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);
+ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ size_t ret;
+
+ spin_lock_bh(&vvs->tx_lock);
+ ret = vvs->bytes_unsent;
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_unsent_bytes);
+
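The new unsent_bytes callback reports how many bytes the socket has charged to the transport (in virtio_transport_get_credit()) that have not yet been consumed by the device (virtio_transport_consume_skb_sent()). Assuming the series wires this into the SIOCOUTQ ioctl for AF_VSOCK sockets, which is what the callback suggests, a userspace probe could look like the following sketch; the CID and port are hypothetical:

    /* Hypothetical userspace probe: query unsent bytes on a vsock stream
     * socket, assuming SIOCOUTQ is backed by the unsent_bytes callback. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>
    #include <linux/vm_sockets.h>

    int main(void)
    {
        struct sockaddr_vm addr = {
            .svm_family = AF_VSOCK,
            .svm_cid = VMADDR_CID_HOST,     /* hypothetical peer */
            .svm_port = 1234,               /* hypothetical port */
        };
        char buf[4096];
        int pending = 0;
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
            return 1;

        memset(buf, 'x', sizeof(buf));
        write(fd, buf, sizeof(buf));

        /* Bytes queued on this socket but not yet seen by the device. */
        if (ioctl(fd, SIOCOUTQ, &pending) == 0)
            printf("unsent bytes: %d\n", pending);

        close(fd);
        return 0;
    }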
static int virtio_transport_reset(struct vsock_sock *vsk,
struct sk_buff *skb)
{
@@ -1167,17 +1209,11 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout)
}
}
-static void virtio_transport_do_close(struct vsock_sock *vsk,
- bool cancel_timeout)
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout)
{
struct sock *sk = sk_vsock(vsk);
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- if (vsock_stream_has_data(vsk) <= 0)
- sk->sk_state = TCP_CLOSING;
- sk->sk_state_change(sk);
-
if (vsk->close_work_scheduled &&
(!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
vsk->close_work_scheduled = false;
@@ -1189,6 +1225,20 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
}
}
+static void virtio_transport_do_close(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = TCP_CLOSING;
+ sk->sk_state_change(sk);
+
+ virtio_transport_cancel_close_work(vsk, cancel_timeout);
+}
+
static void virtio_transport_close_timeout(struct work_struct *work)
{
struct vsock_sock *vsk =
@@ -1477,6 +1527,14 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
return -ENOMEM;
}
+ /* __vsock_release() might have already flushed accept_queue.
+ * Subsequent enqueues would lead to a memory leak.
+ */
+ if (sk->sk_shutdown == SHUTDOWN_MASK) {
+ virtio_transport_reset_no_sock(t, skb);
+ return -ESHUTDOWN;
+ }
+
child = vsock_create_connected(sk);
if (!child) {
virtio_transport_reset_no_sock(t, skb);
@@ -1583,8 +1641,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
lock_sock(sk);
- /* Check if sk has been closed before lock_sock */
- if (sock_flag(sk, SOCK_DONE)) {
+ /* Check if sk has been closed or assigned to another transport before
+ * lock_sock (note: listener sockets are not assigned to any transport)
+ */
+ if (sock_flag(sk, SOCK_DONE) ||
+ (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) {
(void)virtio_transport_reset_no_sock(t, skb);
release_sock(sk);
sock_put(sk);
@@ -1672,6 +1733,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
{
struct virtio_vsock_sock *vvs = vsk->trans;
struct sock *sk = sk_vsock(vsk);
+ struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
int off = 0;
int err;
@@ -1681,10 +1743,19 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
* works for types other than dgrams.
*/
skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
+ if (!skb) {
+ spin_unlock_bh(&vvs->rx_lock);
+ return err;
+ }
+
+ hdr = virtio_vsock_hdr(skb);
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+ vvs->msg_count--;
+
+ virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
spin_unlock_bh(&vvs->rx_lock);
- if (!skb)
- return err;
+ virtio_transport_send_credit_update(vsk);
return recv_actor(sk, skb);
}
diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
index a3c97546ab84..07b96d56f3a5 100644
--- a/net/vmw_vsock/vsock_bpf.c
+++ b/net/vmw_vsock/vsock_bpf.c
@@ -64,9 +64,9 @@ static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int
int err;
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
- err = vsock_connectible_recvmsg(sock, msg, len, flags);
+ err = __vsock_connectible_recvmsg(sock, msg, len, flags);
else if (sk->sk_type == SOCK_DGRAM)
- err = vsock_dgram_recvmsg(sock, msg, len, flags);
+ err = __vsock_dgram_recvmsg(sock, msg, len, flags);
else
err = -EPROTOTYPE;
@@ -77,6 +77,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len)
{
struct sk_psock *psock;
+ struct vsock_sock *vsk;
int copied;
psock = sk_psock_get(sk);
@@ -84,6 +85,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
return __vsock_recvmsg(sk, msg, len, flags);
lock_sock(sk);
+ vsk = vsock_sk(sk);
+
+ if (WARN_ON_ONCE(!vsk->transport)) {
+ copied = -ENODEV;
+ goto out;
+ }
+
if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
release_sock(sk);
sk_psock_put(sk, psock);
@@ -108,20 +116,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
}
+out:
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
}
-/* Copy of original proto with updated sock_map methods */
-static struct proto vsock_bpf_prot = {
- .close = sock_map_close,
- .recvmsg = vsock_bpf_recvmsg,
- .sock_is_readable = sk_msg_is_readable,
- .unhash = sock_map_unhash,
-};
-
static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
*prot = *base;
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 6dea6119f5b2..6e78927a598e 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -98,6 +98,8 @@ static struct virtio_transport loopback_transport = {
.notify_buffer_size = virtio_transport_notify_buffer_size,
.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+ .unsent_bytes = virtio_transport_unsent_bytes,
+
.read_skb = virtio_transport_read_skb,
},
@@ -123,6 +125,10 @@ static void vsock_loopback_work(struct work_struct *work)
spin_unlock_bh(&vsock->pkt_queue.lock);
while ((skb = __skb_dequeue(&pkts))) {
+ /* Decrement the bytes_unsent counter without deallocating the skb.
+ * It is freed by the receiver.
+ */
+ virtio_transport_consume_skb_sent(skb, false);
virtio_transport_deliver_tap_pkt(skb);
virtio_transport_recv_pkt(&loopback_transport, skb);
}
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 10345388ad13..2d67b5f2010e 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -11,9 +11,6 @@ config WEXT_PROC
depends on PROC_FS
depends on WEXT_CORE
-config WEXT_SPY
- bool
-
config WEXT_PRIV
bool
@@ -188,19 +185,12 @@ config CFG80211_CRDA_SUPPORT
If unsure, say Y.
config CFG80211_WEXT
- bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
+ bool "cfg80211 wireless extensions compatibility"
select WEXT_CORE
- default y if CFG80211_WEXT_EXPORT
help
Enable this option if you need old userspace for wireless
extensions with cfg80211-based drivers.
-config CFG80211_WEXT_EXPORT
- bool
- help
- Drivers should select this option if they require cfg80211's
- wext compatibility symbols to be exported.
-
config CFG80211_KUNIT_TEST
tristate "KUnit tests for cfg80211" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -212,36 +202,3 @@ config CFG80211_KUNIT_TEST
If unsure, say N.
endif # CFG80211
-
-config LIB80211
- tristate
- default n
- help
- This options enables a library of common routines used
- by IEEE802.11 wireless LAN drivers.
-
- Drivers should select this themselves if needed.
-
-config LIB80211_CRYPT_WEP
- tristate
- select CRYPTO_LIB_ARC4
-
-config LIB80211_CRYPT_CCMP
- tristate
- select CRYPTO
- select CRYPTO_AES
- select CRYPTO_CCM
-
-config LIB80211_CRYPT_TKIP
- tristate
- select CRYPTO_LIB_ARC4
-
-config LIB80211_DEBUG
- bool "lib80211 debugging messages"
- depends on LIB80211
- default n
- help
- You can enable this if you want verbose debugging messages
- from lib80211.
-
- If unsure, say N.
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 72074fd36df4..62a83faf0e07 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -1,14 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CFG80211) += cfg80211.o
-obj-$(CONFIG_LIB80211) += lib80211.o
-obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
-obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
-obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
obj-y += tests/
obj-$(CONFIG_WEXT_CORE) += wext-core.o
obj-$(CONFIG_WEXT_PROC) += wext-proc.o
-obj-$(CONFIG_WEXT_SPY) += wext-spy.o
obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
@@ -25,7 +20,7 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
cfg80211-y += extra-certs.o
endif
-$(obj)/shipped-certs.c: $(sort $(wildcard $(srctree)/$(src)/certs/*.hex))
+$(obj)/shipped-certs.c: $(sort $(wildcard $(src)/certs/*.hex))
@$(kecho) " GEN $@"
$(Q)(echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index ceb9174c5c3d..9f918b77b40e 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2023 Intel Corporation
+ * Copyright 2018-2024 Intel Corporation
*/
#include <linux/export.h>
@@ -27,11 +27,10 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
if (WARN_ON(!chan))
return;
- chandef->chan = chan;
- chandef->freq1_offset = chan->freq_offset;
- chandef->center_freq2 = 0;
- chandef->edmg.bw_config = 0;
- chandef->edmg.channels = 0;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = chan,
+ .freq1_offset = chan->freq_offset,
+ };
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -56,6 +55,123 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(cfg80211_chandef_create);
+static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+ return nl80211_chan_width_to_mhz(c->width);
+}
+
+static u32 cfg80211_get_start_freq(const struct cfg80211_chan_def *chandef,
+ u32 cf)
+{
+ u32 start_freq, center_freq, bandwidth;
+
+ center_freq = MHZ_TO_KHZ((cf == 1) ?
+ chandef->center_freq1 : chandef->center_freq2);
+ bandwidth = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
+
+ if (bandwidth <= MHZ_TO_KHZ(20))
+ start_freq = center_freq;
+ else
+ start_freq = center_freq - bandwidth / 2 + MHZ_TO_KHZ(10);
+
+ return start_freq;
+}
+
+static u32 cfg80211_get_end_freq(const struct cfg80211_chan_def *chandef,
+ u32 cf)
+{
+ u32 end_freq, center_freq, bandwidth;
+
+ center_freq = MHZ_TO_KHZ((cf == 1) ?
+ chandef->center_freq1 : chandef->center_freq2);
+ bandwidth = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
+
+ if (bandwidth <= MHZ_TO_KHZ(20))
+ end_freq = center_freq;
+ else
+ end_freq = center_freq + bandwidth / 2 - MHZ_TO_KHZ(10);
+
+ return end_freq;
+}
+
+#define for_each_subchan(chandef, freq, cf) \
+ for (u32 punctured = chandef->punctured, \
+ cf = 1, freq = cfg80211_get_start_freq(chandef, cf); \
+ freq <= cfg80211_get_end_freq(chandef, cf); \
+ freq += MHZ_TO_KHZ(20), \
+ ((cf == 1 && chandef->center_freq2 != 0 && \
+ freq > cfg80211_get_end_freq(chandef, cf)) ? \
+ (cf++, freq = cfg80211_get_start_freq(chandef, cf), \
+ punctured = 0) : (punctured >>= 1))) \
+ if (!(punctured & 1))
+
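The for_each_subchan() macro walks the chandef 20 MHz at a time (both segments for 80+80) and skips subchannels whose bit is set in the puncturing bitmap, bit 0 being the lowest 20 MHz. A standalone sketch of the stepping for a single 80 MHz segment, assuming a center of 5210 MHz and punctured = 0x2, so the 5200 MHz subchannel is skipped:

    /* Standalone sketch (not kernel code): visit the 20 MHz subchannels of an
     * 80 MHz block centered at 5210 MHz, mirroring for_each_subchan()'s
     * stepping.  With punctured = 0x2 the 5200 MHz subchannel is skipped. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int center = 5210, bw = 80;          /* MHz */
        unsigned int punctured = 0x2;                 /* bit i = i-th 20 MHz punctured */
        unsigned int start = center - bw / 2 + 10;    /* 5180 */
        unsigned int end = center + bw / 2 - 10;      /* 5240 */

        for (unsigned int freq = start; freq <= end; freq += 20, punctured >>= 1) {
            if (punctured & 1)
                continue;                             /* punctured: skip this 20 MHz */
            printf("check subchannel at %u MHz\n", freq); /* 5180, 5220, 5240 */
        }
        return 0;
    }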
+struct cfg80211_per_bw_puncturing_values {
+ u8 len;
+ const u16 *valid_values;
+};
+
+static const u16 puncturing_values_80mhz[] = {
+ 0x8, 0x4, 0x2, 0x1
+};
+
+static const u16 puncturing_values_160mhz[] = {
+ 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
+};
+
+static const u16 puncturing_values_320mhz[] = {
+ 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
+ 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
+ 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
+};
+
+#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
+ { \
+ .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
+ .valid_values = puncturing_values_ ## _bw ## mhz \
+ }
+
+static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
+};
+
+static bool valid_puncturing_bitmap(const struct cfg80211_chan_def *chandef)
+{
+ u32 idx, i, start_freq, primary_center = chandef->chan->center_freq;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ idx = 0;
+ start_freq = chandef->center_freq1 - 40;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ idx = 1;
+ start_freq = chandef->center_freq1 - 80;
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ idx = 2;
+ start_freq = chandef->center_freq1 - 160;
+ break;
+ default:
+ return chandef->punctured == 0;
+ }
+
+ if (!chandef->punctured)
+ return true;
+
+ /* check if primary channel is punctured */
+ if (chandef->punctured & (u16)BIT((primary_center - start_freq) / 20))
+ return false;
+
+ for (i = 0; i < per_bw_puncturing[idx].len; i++) {
+ if (per_bw_puncturing[idx].valid_values[i] == chandef->punctured)
+ return true;
+ }
+
+ return false;
+}
+
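This is essentially the check the old cfg80211_valid_disable_subchannel_bitmap() did, now folded into cfg80211_chandef_valid(): reject a bitmap that punctures the primary channel, then require it to be one of the per-bandwidth allowed patterns. A standalone sketch of just the primary-channel test, assuming an 80 MHz chandef with center_freq1 = 5210 MHz and the primary 20 MHz channel at 5180 MHz:

    /* Standalone sketch: the primary-channel part of valid_puncturing_bitmap()
     * for an 80 MHz chandef (center_freq1 = 5210 MHz, primary = 5180 MHz). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int center_freq1 = 5210, primary = 5180;               /* MHz */
        unsigned int start_freq = center_freq1 - 40;                    /* 5170 */
        unsigned int primary_bit = 1u << ((primary - start_freq) / 20); /* 0x1 */
        unsigned short bitmaps[] = { 0x1, 0x2 };   /* both are in the 80 MHz list */

        for (int i = 0; i < 2; i++)
            printf("bitmap 0x%x: %s\n", bitmaps[i],
                   (bitmaps[i] & primary_bit) ?
                   "rejected (punctures the primary channel)" : "allowed");
        return 0;
    }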
static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
{
int max_contiguous = 0;
@@ -192,9 +308,35 @@ int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width)
}
EXPORT_SYMBOL(nl80211_chan_width_to_mhz);
-static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+static bool cfg80211_valid_center_freq(u32 center,
+ enum nl80211_chan_width width)
{
- return nl80211_chan_width_to_mhz(c->width);
+ int bw;
+ int step;
+
+ /* We only do strict verification on 6 GHz */
+ if (center < 5955 || center > 7115)
+ return true;
+
+ bw = nl80211_chan_width_to_mhz(width);
+ if (bw < 0)
+ return false;
+
+ /* Validate that the channel's bandwidth is entirely within the 6 GHz band */
+ if (center - bw / 2 < 5945 || center + bw / 2 > 7125)
+ return false;
+
+ /* With 320 MHz the permitted channels overlap */
+ if (bw == 320)
+ step = 160;
+ else
+ step = bw;
+
+ /*
+ * Valid channels are packed from lowest frequency towards higher ones.
+ * So test that the lower frequency aligns with one of these steps.
+ */
+ return (center - bw / 2 - 5945) % step == 0;
}
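cfg80211_valid_center_freq() only enforces this on 6 GHz, where channels are packed upward from 5945 MHz: the lower edge of the channel must sit a multiple of the step above 5945 MHz, the step being the channel width except for the overlapping 320 MHz channels, which use 160 MHz. A standalone sketch of that arithmetic with the band limits hard-coded:

    /* Standalone sketch of the 6 GHz branch only: the in-kernel helper
     * returns true without checking anything below 5955 or above 7115 MHz. */
    #include <stdio.h>

    static int valid_6ghz_center(unsigned int center, unsigned int bw)
    {
        unsigned int step = (bw == 320) ? 160 : bw; /* 320 MHz channels overlap */

        /* the channel must fit entirely inside 5945..7125 MHz */
        if (center - bw / 2 < 5945 || center + bw / 2 > 7125)
            return 0;
        /* the lower edge must align with the packing step above 5945 MHz */
        return (center - bw / 2 - 5945) % step == 0;
    }

    int main(void)
    {
        printf("80 MHz @ 5985 MHz: %d\n", valid_6ghz_center(5985, 80));   /* 1 */
        printf("80 MHz @ 6000 MHz: %d\n", valid_6ghz_center(6000, 80));   /* 0 */
        printf("320 MHz @ 6105 MHz: %d\n", valid_6ghz_center(6105, 320)); /* 1 */
        return 0;
    }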
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
@@ -308,6 +450,13 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
return false;
}
+ if (!cfg80211_valid_center_freq(chandef->center_freq1, chandef->width))
+ return false;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80 &&
+ !cfg80211_valid_center_freq(chandef->center_freq2, chandef->width))
+ return false;
+
/* channel 14 is only for IEEE 802.11b */
if (chandef->center_freq1 == 2484 &&
chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
@@ -317,72 +466,81 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
!cfg80211_edmg_chandef_valid(chandef))
return false;
- return true;
+ return valid_puncturing_bitmap(chandef);
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
-static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
- u32 *pri40, u32 *pri80, u32 *pri160)
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *c,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured)
{
- int tmp;
+ int pri_width = nl80211_chan_width_to_mhz(primary_chan_width);
+ int width = cfg80211_chandef_get_width(c);
+ u32 control = c->chan->center_freq;
+ u32 center = c->center_freq1;
+ u16 _punct = 0;
- switch (c->width) {
- case NL80211_CHAN_WIDTH_40:
- *pri40 = c->center_freq1;
- *pri80 = 0;
- *pri160 = 0;
- break;
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- *pri160 = 0;
- *pri80 = c->center_freq1;
- /* n_P20 */
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 20 + 40 * tmp;
- break;
- case NL80211_CHAN_WIDTH_160:
- *pri160 = c->center_freq1;
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 60 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 40 + 80 * tmp;
- break;
- case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 140 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 120 + 80 * tmp;
- /* n_P160 */
- tmp /= 2;
- *pri160 = c->center_freq1 - 80 + 160 * tmp;
- break;
- default:
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(pri_width < 0 || width < 0))
+ return -1;
+
+ /* not intended to be called this way, can't determine */
+ if (WARN_ON_ONCE(pri_width > width))
+ return -1;
+
+ if (!punctured)
+ punctured = &_punct;
+
+ *punctured = c->punctured;
+
+ while (width > pri_width) {
+ unsigned int bits_to_drop = width / 20 / 2;
+
+ if (control > center) {
+ center += width / 4;
+ *punctured >>= bits_to_drop;
+ } else {
+ center -= width / 4;
+ *punctured &= (1 << bits_to_drop) - 1;
+ }
+ width /= 2;
}
+
+ return center;
}
+EXPORT_SYMBOL(cfg80211_chandef_primary);
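cfg80211_chandef_primary() replaces the per-width arithmetic of chandef_primary_freqs() with a single loop: halve the bandwidth, keep whichever half contains the control channel, narrow the puncturing bitmap along with it, and repeat until the requested primary width is reached. A standalone sketch of the loop, assuming a 160 MHz chandef with control channel 5180 MHz (channel 36) and center_freq1 = 5250 MHz; it ends at 5190 MHz, the primary 40 MHz center:

    /* Standalone sketch of cfg80211_chandef_primary()'s halving loop: find the
     * primary 40 MHz center of a 160 MHz chandef whose control channel is
     * 5180 MHz and whose center_freq1 is 5250 MHz. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int width = 160, pri_width = 40;     /* MHz */
        unsigned int control = 5180, center = 5250;
        unsigned int punctured = 0;                   /* no puncturing in this example */

        while (width > pri_width) {
            unsigned int bits_to_drop = width / 20 / 2;

            if (control > center) {
                center += width / 4;
                punctured >>= bits_to_drop;            /* keep the upper half's bits */
            } else {
                center -= width / 4;
                punctured &= (1u << bits_to_drop) - 1; /* keep the lower half's bits */
            }
            width /= 2;
        }
        printf("primary %u MHz center: %u MHz\n", pri_width, center); /* 5190 */
        return 0;
    }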
-const struct cfg80211_chan_def *
-cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
- const struct cfg80211_chan_def *c2)
+static const struct cfg80211_chan_def *
+check_chandef_primary_compat(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2,
+ enum nl80211_chan_width primary_chan_width)
{
- u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160;
+ u16 punct_c1 = 0, punct_c2 = 0;
+
+ /* check primary is compatible -> error if not */
+ if (cfg80211_chandef_primary(c1, primary_chan_width, &punct_c1) !=
+ cfg80211_chandef_primary(c2, primary_chan_width, &punct_c2))
+ return ERR_PTR(-EINVAL);
+
+ if (punct_c1 != punct_c2)
+ return ERR_PTR(-EINVAL);
+
+ /* assumes c1 is smaller width, if that was just checked -> done */
+ if (c1->width == primary_chan_width)
+ return c2;
+
+ /* otherwise continue checking the next width */
+ return NULL;
+}
+
+static const struct cfg80211_chan_def *
+_cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
/* If they are identical, return */
if (cfg80211_chandef_identical(c1, c2))
- return c1;
+ return c2;
/* otherwise, must have same control channel */
if (c1->chan != c2->chan)
@@ -396,79 +554,84 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
return NULL;
/*
- * can't be compatible if one of them is 5 or 10 MHz,
+ * can't be compatible if one of them is 5/10 MHz or S1G
* but they don't have the same width.
*/
- if (c1->width == NL80211_CHAN_WIDTH_5 ||
- c1->width == NL80211_CHAN_WIDTH_10 ||
- c2->width == NL80211_CHAN_WIDTH_5 ||
- c2->width == NL80211_CHAN_WIDTH_10)
+#define NARROW_OR_S1G(width) ((width) == NL80211_CHAN_WIDTH_5 || \
+ (width) == NL80211_CHAN_WIDTH_10 || \
+ (width) == NL80211_CHAN_WIDTH_1 || \
+ (width) == NL80211_CHAN_WIDTH_2 || \
+ (width) == NL80211_CHAN_WIDTH_4 || \
+ (width) == NL80211_CHAN_WIDTH_8 || \
+ (width) == NL80211_CHAN_WIDTH_16)
+
+ if (NARROW_OR_S1G(c1->width) || NARROW_OR_S1G(c2->width))
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c1->width == NL80211_CHAN_WIDTH_20)
- return c2;
-
- if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c2->width == NL80211_CHAN_WIDTH_20)
- return c1;
-
- chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160);
- chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160);
-
- if (c1_pri40 != c2_pri40)
- return NULL;
+ /*
+ * Make sure that c1 is always the narrower one, so that later
+ * we either return NULL or c2 and don't have to check both
+ * directions.
+ */
+ if (c1->width > c2->width)
+ swap(c1, c2);
- if (c1->width == NL80211_CHAN_WIDTH_40)
+ /*
+ * No further checks needed if the "narrower" one is only 20 MHz.
+ * Here "narrower" includes being a 20 MHz non-HT channel vs. a
+ * 20 MHz HT (or later) one.
+ */
+ if (c1->width <= NL80211_CHAN_WIDTH_20)
return c2;
- if (c2->width == NL80211_CHAN_WIDTH_40)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_40);
+ if (ret)
+ return ret;
- if (c1_pri80 != c2_pri80)
- return NULL;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_80);
+ if (ret)
+ return ret;
- if (c1->width == NL80211_CHAN_WIDTH_80 &&
- c2->width > NL80211_CHAN_WIDTH_80)
- return c2;
+ /*
+ * If c1 is 80+80, then c2 is 160 or higher, but that cannot
+ * match. If c2 was also 80+80 it was already either accepted
+ * or rejected above (identical or not, respectively.)
+ */
+ if (c1->width == NL80211_CHAN_WIDTH_80P80)
+ return NULL;
- if (c2->width == NL80211_CHAN_WIDTH_80 &&
- c1->width > NL80211_CHAN_WIDTH_80)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_160);
+ if (ret)
+ return ret;
- WARN_ON(!c1_pri160 && !c2_pri160);
- if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160)
- return NULL;
+ /*
+ * Getting here would mean they're both wider than 160, have the
+ * same primary 160, but are not identical - this cannot happen
+ * since they must be 320 (no wider chandefs exist, at least yet.)
+ */
+ WARN_ON_ONCE(1);
- if (c1->width > c2->width)
- return c1;
- return c2;
+ return NULL;
}
-EXPORT_SYMBOL(cfg80211_chandef_compatible);
-static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
- u32 bandwidth,
- enum nl80211_dfs_state dfs_state)
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
{
- struct ieee80211_channel *c;
- u32 freq;
+ const struct cfg80211_chan_def *ret;
- for (freq = center_freq - bandwidth/2 + 10;
- freq <= center_freq + bandwidth/2 - 10;
- freq += 20) {
- c = ieee80211_get_channel(wiphy, freq);
- if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
- continue;
-
- c->dfs_state = dfs_state;
- c->dfs_state_entered = jiffies;
- }
+ ret = _cfg80211_chandef_compatible(c1, c2);
+ if (IS_ERR(ret))
+ return NULL;
+ return ret;
}
+EXPORT_SYMBOL(cfg80211_chandef_compatible);
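As a concrete check of the rewritten compatibility logic, consider a 40 MHz chandef (control channel 5180 MHz, center_freq1 = 5190 MHz) against an 80 MHz chandef with the same control channel and center_freq1 = 5210 MHz. The 40 MHz definition is already the narrower c1, check_chandef_primary_compat() at 40 MHz reduces the 80 MHz definition to a primary 40 MHz center of 5190 MHz (as in the halving sketch above), the two centers and puncturing bitmaps match, and the wider chandef c2 is returned, the same result the old chandef_primary_freqs() comparison produced.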
void cfg80211_set_dfs_state(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
enum nl80211_dfs_state dfs_state)
{
+ struct ieee80211_channel *c;
int width;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -478,41 +641,14 @@ void cfg80211_set_dfs_state(struct wiphy *wiphy,
if (width < 0)
return;
- cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1,
- width, dfs_state);
-
- if (!chandef->center_freq2)
- return;
- cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2,
- width, dfs_state);
-}
-
-static u32 cfg80211_get_start_freq(u32 center_freq,
- u32 bandwidth)
-{
- u32 start_freq;
-
- bandwidth = MHZ_TO_KHZ(bandwidth);
- if (bandwidth <= MHZ_TO_KHZ(20))
- start_freq = center_freq;
- else
- start_freq = center_freq - bandwidth / 2 + MHZ_TO_KHZ(10);
-
- return start_freq;
-}
-
-static u32 cfg80211_get_end_freq(u32 center_freq,
- u32 bandwidth)
-{
- u32 end_freq;
-
- bandwidth = MHZ_TO_KHZ(bandwidth);
- if (bandwidth <= MHZ_TO_KHZ(20))
- end_freq = center_freq;
- else
- end_freq = center_freq + bandwidth / 2 - MHZ_TO_KHZ(10);
+ for_each_subchan(chandef, freq, cf) {
+ c = ieee80211_get_channel_khz(wiphy, freq);
+ if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
+ continue;
- return end_freq;
+ c->dfs_state = dfs_state;
+ c->dfs_state_entered = jiffies;
+ }
}
static bool
@@ -589,17 +725,12 @@ static bool cfg80211_dfs_permissive_chan(struct wiphy *wiphy,
}
static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
- u32 center_freq,
- u32 bandwidth,
- enum nl80211_iftype iftype)
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
{
struct ieee80211_channel *c;
- u32 freq, start_freq, end_freq;
-
- start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
- end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
- for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+ for_each_subchan(chandef, freq, cf) {
c = ieee80211_get_channel_khz(wiphy, freq);
if (!c)
return -EINVAL;
@@ -632,25 +763,9 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
if (width < 0)
return -EINVAL;
- ret = cfg80211_get_chans_dfs_required(wiphy,
- ieee80211_chandef_to_khz(chandef),
- width, iftype);
- if (ret < 0)
- return ret;
- else if (ret > 0)
- return BIT(chandef->width);
-
- if (!chandef->center_freq2)
- return 0;
-
- ret = cfg80211_get_chans_dfs_required(wiphy,
- MHZ_TO_KHZ(chandef->center_freq2),
- width, iftype);
- if (ret < 0)
- return ret;
- else if (ret > 0)
- return BIT(chandef->width);
+ ret = cfg80211_get_chans_dfs_required(wiphy, chandef, iftype);
+ return (ret > 0) ? BIT(chandef->width) : ret;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_OCB:
@@ -670,16 +785,18 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
-static int cfg80211_get_chans_dfs_usable(struct wiphy *wiphy,
- u32 center_freq,
- u32 bandwidth)
+bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *c;
- u32 freq, start_freq, end_freq;
- int count = 0;
+ int width, count = 0;
- start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
- end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
+ if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+ return false;
+
+ width = cfg80211_chandef_get_width(chandef);
+ if (width < 0)
+ return false;
/*
* Check entire range of channels for the bandwidth.
@@ -687,61 +804,24 @@ static int cfg80211_get_chans_dfs_usable(struct wiphy *wiphy,
* DFS_AVAILABLE). Return number of usable channels
* (require CAC). Allow DFS and non-DFS channel mix.
*/
- for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+ for_each_subchan(chandef, freq, cf) {
c = ieee80211_get_channel_khz(wiphy, freq);
if (!c)
- return -EINVAL;
+ return false;
if (c->flags & IEEE80211_CHAN_DISABLED)
- return -EINVAL;
+ return false;
if (c->flags & IEEE80211_CHAN_RADAR) {
if (c->dfs_state == NL80211_DFS_UNAVAILABLE)
- return -EINVAL;
+ return false;
if (c->dfs_state == NL80211_DFS_USABLE)
count++;
}
}
- return count;
-}
-
-bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef)
-{
- int width;
- int r1, r2 = 0;
-
- if (WARN_ON(!cfg80211_chandef_valid(chandef)))
- return false;
-
- width = cfg80211_chandef_get_width(chandef);
- if (width < 0)
- return false;
-
- r1 = cfg80211_get_chans_dfs_usable(wiphy,
- MHZ_TO_KHZ(chandef->center_freq1),
- width);
-
- if (r1 < 0)
- return false;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- WARN_ON(!chandef->center_freq2);
- r2 = cfg80211_get_chans_dfs_usable(wiphy,
- MHZ_TO_KHZ(chandef->center_freq2),
- width);
- if (r2 < 0)
- return false;
- break;
- default:
- WARN_ON(chandef->center_freq2);
- break;
- }
-
- return (r1 + r2 > 0);
+ return count > 0;
}
EXPORT_SYMBOL(cfg80211_chandef_dfs_usable);
@@ -903,10 +983,10 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
continue;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
found = cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan) ||
cfg80211_offchan_chain_is_active(rdev, chan);
- wiphy_unlock(&rdev->wiphy);
if (found)
return true;
@@ -915,26 +995,29 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
return false;
}
-static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
- u32 center_freq,
- u32 bandwidth)
+static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *c;
- u32 freq, start_freq, end_freq;
+ int width;
bool dfs_offload;
+ if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+ return false;
+
+ width = cfg80211_chandef_get_width(chandef);
+ if (width < 0)
+ return false;
+
dfs_offload = wiphy_ext_feature_isset(wiphy,
NL80211_EXT_FEATURE_DFS_OFFLOAD);
- start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
- end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
-
/*
* Check entire range of channels for the bandwidth.
* If any channel in between is disabled or has not
* had gone through CAC return false
*/
- for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+ for_each_subchan(chandef, freq, cf) {
c = ieee80211_get_channel_khz(wiphy, freq);
if (!c)
return false;
@@ -951,119 +1034,54 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
return true;
}
-static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef)
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
{
+ struct ieee80211_channel *c;
int width;
- int r;
+ unsigned int t1 = 0, t2 = 0;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
- return false;
+ return 0;
width = cfg80211_chandef_get_width(chandef);
if (width < 0)
- return false;
-
- r = cfg80211_get_chans_dfs_available(wiphy,
- MHZ_TO_KHZ(chandef->center_freq1),
- width);
-
- /* If any of channels unavailable for cf1 just return */
- if (!r)
- return r;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- WARN_ON(!chandef->center_freq2);
- r = cfg80211_get_chans_dfs_available(wiphy,
- MHZ_TO_KHZ(chandef->center_freq2),
- width);
- break;
- default:
- WARN_ON(chandef->center_freq2);
- break;
- }
-
- return r;
-}
-
-static unsigned int cfg80211_get_chans_dfs_cac_time(struct wiphy *wiphy,
- u32 center_freq,
- u32 bandwidth)
-{
- struct ieee80211_channel *c;
- u32 start_freq, end_freq, freq;
- unsigned int dfs_cac_ms = 0;
-
- start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
- end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
+ return 0;
- for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+ for_each_subchan(chandef, freq, cf) {
c = ieee80211_get_channel_khz(wiphy, freq);
- if (!c)
- return 0;
-
- if (c->flags & IEEE80211_CHAN_DISABLED)
- return 0;
+ if (!c || (c->flags & IEEE80211_CHAN_DISABLED)) {
+ if (cf == 1)
+ t1 = INT_MAX;
+ else
+ t2 = INT_MAX;
+ continue;
+ }
if (!(c->flags & IEEE80211_CHAN_RADAR))
continue;
- if (c->dfs_cac_ms > dfs_cac_ms)
- dfs_cac_ms = c->dfs_cac_ms;
- }
-
- return dfs_cac_ms;
-}
+ if (cf == 1 && c->dfs_cac_ms > t1)
+ t1 = c->dfs_cac_ms;
-unsigned int
-cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef)
-{
- int width;
- unsigned int t1 = 0, t2 = 0;
-
- if (WARN_ON(!cfg80211_chandef_valid(chandef)))
- return 0;
+ if (cf == 2 && c->dfs_cac_ms > t2)
+ t2 = c->dfs_cac_ms;
+ }
- width = cfg80211_chandef_get_width(chandef);
- if (width < 0)
+ if (t1 == INT_MAX && t2 == INT_MAX)
return 0;
- t1 = cfg80211_get_chans_dfs_cac_time(wiphy,
- MHZ_TO_KHZ(chandef->center_freq1),
- width);
+ if (t1 == INT_MAX)
+ return t2;
- if (!chandef->center_freq2)
+ if (t2 == INT_MAX)
return t1;
- t2 = cfg80211_get_chans_dfs_cac_time(wiphy,
- MHZ_TO_KHZ(chandef->center_freq2),
- width);
-
return max(t1, t2);
}
EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
-static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
- u32 center_freq, u32 bandwidth,
- u32 prohibited_flags)
-{
- struct ieee80211_channel *c;
- u32 freq, start_freq, end_freq;
-
- start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
- end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
-
- for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
- c = ieee80211_get_channel_khz(wiphy, freq);
- if (!c || c->flags & prohibited_flags)
- return false;
- }
-
- return true;
-}
-
/* check if the operating channels are valid and supported */
static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
enum ieee80211_edmg_bw_config edmg_bw_config,
@@ -1117,9 +1135,10 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
return true;
}
-bool cfg80211_chandef_usable(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags)
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags,
+ u32 permitting_flags)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -1128,6 +1147,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
bool ext_nss_cap, support_80_80 = false, support_320 = false;
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *c;
int i;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -1278,17 +1298,24 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
if (width < 20)
prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
+ for_each_subchan(chandef, freq, cf) {
+ c = ieee80211_get_channel_khz(wiphy, freq);
+ if (!c)
+ return false;
+ if (c->flags & permitting_flags)
+ continue;
+ if (c->flags & prohibited_flags)
+ return false;
+ }
- if (!cfg80211_secondary_chans_ok(wiphy,
- ieee80211_chandef_to_khz(chandef),
- width, prohibited_flags))
- return false;
+ return true;
+}
- if (!chandef->center_freq2)
- return true;
- return cfg80211_secondary_chans_ok(wiphy,
- MHZ_TO_KHZ(chandef->center_freq2),
- width, prohibited_flags);
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags, 0);
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
@@ -1410,49 +1437,50 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype,
- bool check_no_ir)
+ u32 prohibited_flags,
+ u32 permitting_flags)
{
- bool res;
- u32 prohibited_flags = IEEE80211_CHAN_DISABLED;
+ bool res, check_radar;
int dfs_required;
- trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+ trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype,
+ prohibited_flags,
+ permitting_flags);
- if (check_no_ir)
- prohibited_flags |= IEEE80211_CHAN_NO_IR;
+ if (!_cfg80211_chandef_usable(wiphy, chandef,
+ IEEE80211_CHAN_DISABLED, 0))
+ return false;
dfs_required = cfg80211_chandef_dfs_required(wiphy, chandef, iftype);
- if (dfs_required != 0)
- prohibited_flags |= IEEE80211_CHAN_RADAR;
+ check_radar = dfs_required != 0;
if (dfs_required > 0 &&
cfg80211_chandef_dfs_available(wiphy, chandef)) {
/* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
- prohibited_flags = IEEE80211_CHAN_DISABLED;
+ prohibited_flags &= ~IEEE80211_CHAN_NO_IR;
+ check_radar = false;
}
- res = cfg80211_chandef_usable(wiphy, chandef, prohibited_flags);
+ if (check_radar &&
+ !_cfg80211_chandef_usable(wiphy, chandef,
+ IEEE80211_CHAN_RADAR, 0))
+ return false;
+
+ res = _cfg80211_chandef_usable(wiphy, chandef,
+ prohibited_flags,
+ permitting_flags);
trace_cfg80211_return_bool(res);
return res;
}
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype)
-{
- return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
-}
-EXPORT_SYMBOL(cfg80211_reg_can_beacon);
-
-bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype)
+bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ struct cfg80211_beaconing_check_config *cfg)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- bool check_no_ir;
-
- lockdep_assert_held(&rdev->wiphy.mtx);
+ u32 permitting_flags = 0;
+ bool check_no_ir = true;
/*
* Under certain conditions suggested by some regulatory bodies a
@@ -1460,14 +1488,23 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* only if such relaxations are not enabled and the conditions are not
* met.
*/
- check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
- chandef->chan);
+ if (cfg->relax) {
+ lockdep_assert_held(&rdev->wiphy.mtx);
+ check_no_ir = !cfg80211_ir_permissive_chan(wiphy, cfg->iftype,
+ chandef->chan);
+ }
+
+ if (cfg->reg_power == IEEE80211_REG_VLP_AP)
+ permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
- return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+ return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype,
+ check_no_ir ? IEEE80211_CHAN_NO_IR : 0,
+ permitting_flags);
}
-EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+EXPORT_SYMBOL(cfg80211_reg_check_beaconing);
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef)
{
if (!rdev->ops->set_monitor_channel)
@@ -1475,7 +1512,7 @@ int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
if (!cfg80211_has_monitors_only(rdev))
return -EBUSY;
- return rdev_set_monitor_channel(rdev, chandef);
+ return rdev_set_monitor_channel(rdev, dev, chandef);
}
bool cfg80211_any_usable_channels(struct wiphy *wiphy,
@@ -1532,72 +1569,3 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
-
-struct cfg80211_per_bw_puncturing_values {
- u8 len;
- const u16 *valid_values;
-};
-
-static const u16 puncturing_values_80mhz[] = {
- 0x8, 0x4, 0x2, 0x1
-};
-
-static const u16 puncturing_values_160mhz[] = {
- 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
-};
-
-static const u16 puncturing_values_320mhz[] = {
- 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
- 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
- 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
-};
-
-#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
- { \
- .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
- .valid_values = puncturing_values_ ## _bw ## mhz \
- }
-
-static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
-};
-
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef)
-{
- u32 idx, i, start_freq;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80:
- idx = 0;
- start_freq = chandef->center_freq1 - 40;
- break;
- case NL80211_CHAN_WIDTH_160:
- idx = 1;
- start_freq = chandef->center_freq1 - 80;
- break;
- case NL80211_CHAN_WIDTH_320:
- idx = 2;
- start_freq = chandef->center_freq1 - 160;
- break;
- default:
- *bitmap = 0;
- break;
- }
-
- if (!*bitmap)
- return true;
-
- /* check if primary channel is punctured */
- if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
- return false;
-
- for (i = 0; i < per_bw_puncturing[idx].len; i++)
- if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3fb1b637352a..828e29872633 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -143,10 +143,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
if (result)
return result;
- if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
- debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
- rdev->wiphy.debugfsdir,
- rdev->wiphy.debugfsdir->d_parent, newname);
+ debugfs_change_name(rdev->wiphy.debugfsdir, "%s", newname);
nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
@@ -165,11 +162,11 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
- wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wdev->netdev->netns_local = false;
err = dev_change_net_namespace(wdev->netdev, net, "wlan%d");
if (err)
break;
- wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wdev->netdev->netns_local = true;
}
if (err) {
@@ -181,17 +178,18 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
list) {
if (!wdev->netdev)
continue;
- wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wdev->netdev->netns_local = false;
err = dev_change_net_namespace(wdev->netdev, net,
"wlan%d");
WARN_ON(err);
- wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wdev->netdev->netns_local = true;
}
return err;
}
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
@@ -212,7 +210,6 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
continue;
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
}
- wiphy_unlock(&rdev->wiphy);
return 0;
}
@@ -221,9 +218,9 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
{
struct cfg80211_registered_device *rdev = data;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
rdev_rfkill_poll(rdev);
- wiphy_unlock(&rdev->wiphy);
}
void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
@@ -283,7 +280,7 @@ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
/* otherwise, check iftype */
- wiphy_lock(wiphy);
+ guard(wiphy)(wiphy);
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
@@ -295,8 +292,6 @@ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
default:
break;
}
-
- wiphy_unlock(wiphy);
}
}
EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces);
@@ -331,9 +326,9 @@ static void cfg80211_event_work(struct work_struct *work)
rdev = container_of(work, struct cfg80211_registered_device,
event_work);
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
cfg80211_process_rdev_events(rdev);
- wiphy_unlock(&rdev->wiphy);
}
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
@@ -347,10 +342,10 @@ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
if (wdev->netdev)
dev_close(wdev->netdev);
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
cfg80211_leave(rdev, wdev);
cfg80211_remove_virtual_intf(rdev, wdev);
- wiphy_unlock(&rdev->wiphy);
}
}
}
@@ -421,9 +416,11 @@ static void cfg80211_wiphy_work(struct work_struct *work)
rdev = container_of(work, struct cfg80211_registered_device, wiphy_work);
- wiphy_lock(&rdev->wiphy);
+ trace_wiphy_work_worker_start(&rdev->wiphy);
+
+ guard(wiphy)(&rdev->wiphy);
if (rdev->suspended)
- goto out;
+ return;
spin_lock_irq(&rdev->wiphy_work_lock);
wk = list_first_entry_or_null(&rdev->wiphy_work_list,
@@ -431,15 +428,14 @@ static void cfg80211_wiphy_work(struct work_struct *work)
if (wk) {
list_del_init(&wk->entry);
if (!list_empty(&rdev->wiphy_work_list))
- schedule_work(work);
+ queue_work(system_unbound_wq, work);
spin_unlock_irq(&rdev->wiphy_work_lock);
+ trace_wiphy_work_run(&rdev->wiphy, wk);
wk->func(&rdev->wiphy, wk);
} else {
spin_unlock_irq(&rdev->wiphy_work_lock);
}
-out:
- wiphy_unlock(&rdev->wiphy);
}
/* exported functions */
@@ -600,16 +596,20 @@ use_default_name:
}
EXPORT_SYMBOL(wiphy_new_nm);
-static int wiphy_verify_combinations(struct wiphy *wiphy)
+static
+int wiphy_verify_iface_combinations(struct wiphy *wiphy,
+ const struct ieee80211_iface_combination *iface_comb,
+ int n_iface_comb,
+ bool combined_radio)
{
const struct ieee80211_iface_combination *c;
int i, j;
- for (i = 0; i < wiphy->n_iface_combinations; i++) {
+ for (i = 0; i < n_iface_comb; i++) {
u32 cnt = 0;
u16 all_iftypes = 0;
- c = &wiphy->iface_combinations[i];
+ c = &iface_comb[i];
/*
* Combinations with just one interface aren't real,
@@ -622,9 +622,13 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(!c->num_different_channels))
return -EINVAL;
- /* DFS only works on one channel. */
- if (WARN_ON(c->radar_detect_widths &&
- (c->num_different_channels > 1)))
+ /* DFS only works on one channel. Avoid this check
+ * for the multi-radio global combination, since it holds
+ * the capabilities of all radio combinations.
+ */
+ if (!combined_radio &&
+ WARN_ON(c->radar_detect_widths &&
+ c->num_different_channels > 1))
return -EINVAL;
if (WARN_ON(!c->n_limits))
@@ -645,13 +649,21 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(wiphy->software_iftypes & types))
return -EINVAL;
- /* Only a single P2P_DEVICE can be allowed */
- if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+ /* Only a single P2P_DEVICE can be allowed; avoid this
+ * check for the multi-radio global combination, since it
+ * holds the capabilities of all radio combinations.
+ */
+ if (!combined_radio &&
+ WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
c->limits[j].max > 1))
return -EINVAL;
- /* Only a single NAN can be allowed */
- if (WARN_ON(types & BIT(NL80211_IFTYPE_NAN) &&
+ /* Only a single NAN can be allowed; avoid this
+ * check for the multi-radio global combination, since it
+ * holds the capabilities of all radio combinations.
+ */
+ if (!combined_radio &&
+ WARN_ON(types & BIT(NL80211_IFTYPE_NAN) &&
c->limits[j].max > 1))
return -EINVAL;
@@ -690,6 +702,34 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
return 0;
}
+static int wiphy_verify_combinations(struct wiphy *wiphy)
+{
+ int i, ret;
+ bool combined_radio = false;
+
+ if (wiphy->n_radio) {
+ for (i = 0; i < wiphy->n_radio; i++) {
+ const struct wiphy_radio *radio = &wiphy->radio[i];
+
+ ret = wiphy_verify_iface_combinations(wiphy,
+ radio->iface_combinations,
+ radio->n_iface_combinations,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ combined_radio = true;
+ }
+
+ ret = wiphy_verify_iface_combinations(wiphy,
+ wiphy->iface_combinations,
+ wiphy->n_iface_combinations,
+ combined_radio);
+
+ return ret;
+}
+
int wiphy_register(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -1066,6 +1106,7 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
list_del_init(&wk->entry);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+ trace_wiphy_work_run(&rdev->wiphy, wk);
wk->func(&rdev->wiphy, wk);
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
@@ -1141,7 +1182,8 @@ void wiphy_unregister(struct wiphy *wiphy)
flush_work(&rdev->background_cac_abort_wk);
cfg80211_rdev_free_wowlan(rdev);
- cfg80211_rdev_free_coalesce(rdev);
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = NULL;
}
EXPORT_SYMBOL(wiphy_unregister);
@@ -1149,6 +1191,13 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
{
struct cfg80211_internal_bss *scan, *tmp;
struct cfg80211_beacon_registration *reg, *treg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+ WARN_ON(!list_empty(&rdev->wiphy_work_list));
+ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+ cancel_work_sync(&rdev->wiphy_work);
+
rfkill_destroy(rdev->wiphy.rfkill);
list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
list_del(&reg->list);
@@ -1231,6 +1280,7 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
/* deleted from the list, so can't be found from nl80211 any more */
cqm_config = rcu_access_pointer(wdev->cqm_config);
kfree_rcu(cqm_config, rcu_head);
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
/*
* Ensure that all events have been processed and
@@ -1381,6 +1431,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
/* allow mac80211 to determine the timeout */
wdev->ps_timeout = -1;
+ wdev->radio_mask = BIT(wdev->wiphy->n_radio) - 1;
+
if ((wdev->iftype == NL80211_IFTYPE_STATION ||
wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
@@ -1468,15 +1520,15 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
SET_NETDEV_DEVTYPE(dev, &wiphy_type);
wdev->netdev = dev;
/* can only change netns with wiphy */
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
cfg80211_init_wdev(wdev);
break;
case NETDEV_REGISTER:
if (!wdev->registered) {
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
cfg80211_register_wdev(rdev, wdev);
- wiphy_unlock(&rdev->wiphy);
}
break;
case NETDEV_UNREGISTER:
@@ -1485,16 +1537,16 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* so check wdev->registered.
*/
if (wdev->registered && !wdev->registering) {
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
_cfg80211_unregister_wdev(wdev, false);
- wiphy_unlock(&rdev->wiphy);
}
break;
case NETDEV_GOING_DOWN:
- wiphy_lock(&rdev->wiphy);
- cfg80211_leave(rdev, wdev);
- cfg80211_remove_links(wdev);
- wiphy_unlock(&rdev->wiphy);
+ scoped_guard(wiphy, &rdev->wiphy) {
+ cfg80211_leave(rdev, wdev);
+ cfg80211_remove_links(wdev);
+ }
/* since we just did cfg80211_leave() nothing to do there */
cancel_work_sync(&wdev->disconnect_wk);
cancel_work_sync(&wdev->pmsr_free_wk);
@@ -1610,6 +1662,8 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long flags;
+ trace_wiphy_work_queue(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
if (list_empty(&work->entry))
list_add_tail(&work->entry, &rdev->wiphy_work_list);
@@ -1626,6 +1680,8 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
lockdep_assert_held(&wiphy->mtx);
+ trace_wiphy_work_cancel(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
if (!list_empty(&work->entry))
list_del_init(&work->entry);
@@ -1639,6 +1695,8 @@ void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
unsigned long flags;
bool run;
+ trace_wiphy_work_flush(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
run = !work || !list_empty(&work->entry);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
@@ -1660,6 +1718,8 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork,
unsigned long delay)
{
+ trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay);
+
if (!delay) {
del_timer(&dwork->timer);
wiphy_work_queue(wiphy, &dwork->work);
@@ -1691,6 +1751,13 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
}
EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
+bool wiphy_delayed_work_pending(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork)
+{
+ return timer_pending(&dwork->timer);
+}
+EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending);
+
static int __init cfg80211_init(void)
{
int err;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 13657a85cf61..826299f3d781 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -170,6 +170,12 @@ static inline int for_each_rdev_check_rtnl(void)
if (for_each_rdev_check_rtnl()) {} else \
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+enum bss_source_type {
+ BSS_SOURCE_DIRECT = 0,
+ BSS_SOURCE_MBSSID,
+ BSS_SOURCE_STA_PROFILE,
+};
+
struct cfg80211_internal_bss {
struct list_head list;
struct list_head hidden_list;
@@ -191,6 +197,8 @@ struct cfg80211_internal_bss {
*/
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ enum bss_source_type bss_source;
+
/* must be last because of priv member */
struct cfg80211_bss pub;
};
@@ -362,7 +370,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct cfg80211_auth_request *req);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct cfg80211_assoc_request *req);
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
const u8 *ie, int ie_len, u16 reason,
@@ -491,6 +500,10 @@ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
struct ieee80211_channel *chan,
bool primary_only);
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags,
+ u32 permitting_flags);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
@@ -503,6 +516,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
}
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef);
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -528,6 +542,10 @@ struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *tmp,
bool signal_valid, unsigned long ts);
+
+enum ieee80211_ap_reg_power
+cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len);
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
@@ -549,9 +567,57 @@ int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
+int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_link *links,
+ u16 rem_links);
+/**
+ * struct cfg80211_colocated_ap - colocated AP information
+ *
+ * @list: linked list to all colocated APs
+ * @bssid: BSSID of the reported AP
+ * @ssid: SSID of the reported AP
+ * @ssid_len: length of the ssid
+ * @center_freq: frequency the reported AP is on
+ * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
+ * that operate in the same channel as the reported AP and that might be
+ * detected by a STA receiving this frame, are transmitting unsolicited
+ * Probe Response frames every 20 TUs
+ * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
+ * @same_ssid: the reported AP has the same SSID as the reporting AP
+ * @multi_bss: the reported AP is part of a multiple BSSID set
+ * @transmitted_bssid: the reported AP is the transmitting BSSID
+ * @colocated_ess: all the APs that share the same ESS as the reported AP are
+ * colocated and can be discovered via legacy bands.
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @short_ssid: the short SSID for this SSID
+ * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
+ */
+struct cfg80211_colocated_ap {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u32 short_ssid;
+ u32 center_freq;
+ u8 unsolicited_probe:1,
+ oct_recommended:1,
+ same_ssid:1,
+ multi_bss:1,
+ transmitted_bssid:1,
+ colocated_ess:1,
+ short_ssid_valid:1;
+ s8 psd_20;
+};
+
#if IS_ENABLED(CONFIG_CFG80211_KUNIT_TEST)
#define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
#define VISIBLE_IF_CFG80211_KUNIT
+void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list);
+
+int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list);
+
size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
const u8 *subie, size_t subie_len,
u8 *new_ie, size_t new_ie_len);
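
For illustration only: one entry of the colocated-AP structure documented above could be built and queued as below. The caller, the -128 "PSD unknown" placeholder and the frequency value are assumptions; within cfg80211 the list is actually populated by cfg80211_parse_colocated_ap() and freed with cfg80211_free_coloc_ap_list().

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

static int example_add_coloc_ap(struct list_head *coloc_list,
				const u8 *bssid, u32 center_freq_mhz)
{
	struct cfg80211_colocated_ap *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return -ENOMEM;

	memcpy(ap->bssid, bssid, ETH_ALEN);
	ap->center_freq = center_freq_mhz;
	ap->short_ssid_valid = 0;	/* no short SSID known in this sketch */
	ap->psd_20 = -128;		/* placeholder: 20 MHz PSD not known */

	list_add_tail(&ap->list, coloc_list);
	return 0;
}
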
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9f02ee5f08be..1e3ed29f7cfc 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -3,7 +3,7 @@
* Some IBSS support code for cfg80211.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -94,6 +94,9 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->wiphy.mtx);
+ if (wdev->links[0].cac_started)
+ return -EBUSY;
+
if (wdev->u.ibss.ssid_len)
return -EALREADY;
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
deleted file mode 100644
index d66a913027e0..000000000000
--- a/net/wireless/lib80211.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * lib80211 -- common bits for IEEE802.11 drivers
- *
- * Copyright(c) 2008 John W. Linville <linville@tuxdriver.com>
- *
- * Portions copied from old ieee80211 component, w/ original copyright
- * notices below:
- *
- * Host AP crypto routines
- *
- * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
- * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/ieee80211.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <net/lib80211.h>
-
-#define DRV_DESCRIPTION "common routines for IEEE802.11 drivers"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
-MODULE_LICENSE("GPL");
-
-struct lib80211_crypto_alg {
- struct list_head list;
- struct lib80211_crypto_ops *ops;
-};
-
-static LIST_HEAD(lib80211_crypto_algs);
-static DEFINE_SPINLOCK(lib80211_crypto_lock);
-
-static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
- int force);
-static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
-static void lib80211_crypt_deinit_handler(struct timer_list *t);
-
-int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
- spinlock_t *lock)
-{
- memset(info, 0, sizeof(*info));
-
- info->name = name;
- info->lock = lock;
-
- INIT_LIST_HEAD(&info->crypt_deinit_list);
- timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
- 0);
-
- return 0;
-}
-EXPORT_SYMBOL(lib80211_crypt_info_init);
-
-void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
-{
- int i;
-
- lib80211_crypt_quiescing(info);
- del_timer_sync(&info->crypt_deinit_timer);
- lib80211_crypt_deinit_entries(info, 1);
-
- for (i = 0; i < NUM_WEP_KEYS; i++) {
- struct lib80211_crypt_data *crypt = info->crypt[i];
- if (crypt) {
- if (crypt->ops) {
- crypt->ops->deinit(crypt->priv);
- module_put(crypt->ops->owner);
- }
- kfree(crypt);
- info->crypt[i] = NULL;
- }
- }
-}
-EXPORT_SYMBOL(lib80211_crypt_info_free);
-
-static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
- int force)
-{
- struct lib80211_crypt_data *entry, *next;
- unsigned long flags;
-
- spin_lock_irqsave(info->lock, flags);
- list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) {
- if (atomic_read(&entry->refcnt) != 0 && !force)
- continue;
-
- list_del(&entry->list);
-
- if (entry->ops) {
- entry->ops->deinit(entry->priv);
- module_put(entry->ops->owner);
- }
- kfree(entry);
- }
- spin_unlock_irqrestore(info->lock, flags);
-}
-
-/* After this, crypt_deinit_list won't accept new members */
-static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(info->lock, flags);
- info->crypt_quiesced = 1;
- spin_unlock_irqrestore(info->lock, flags);
-}
-
-static void lib80211_crypt_deinit_handler(struct timer_list *t)
-{
- struct lib80211_crypt_info *info = from_timer(info, t,
- crypt_deinit_timer);
- unsigned long flags;
-
- lib80211_crypt_deinit_entries(info, 0);
-
- spin_lock_irqsave(info->lock, flags);
- if (!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) {
- printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
- "deletion list\n", info->name);
- info->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&info->crypt_deinit_timer);
- }
- spin_unlock_irqrestore(info->lock, flags);
-}
-
-void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
- struct lib80211_crypt_data **crypt)
-{
- struct lib80211_crypt_data *tmp;
- unsigned long flags;
-
- if (*crypt == NULL)
- return;
-
- tmp = *crypt;
- *crypt = NULL;
-
- /* must not run ops->deinit() while there may be pending encrypt or
- * decrypt operations. Use a list of delayed deinits to avoid needing
- * locking. */
-
- spin_lock_irqsave(info->lock, flags);
- if (!info->crypt_quiesced) {
- list_add(&tmp->list, &info->crypt_deinit_list);
- if (!timer_pending(&info->crypt_deinit_timer)) {
- info->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&info->crypt_deinit_timer);
- }
- }
- spin_unlock_irqrestore(info->lock, flags);
-}
-EXPORT_SYMBOL(lib80211_crypt_delayed_deinit);
-
-int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops)
-{
- unsigned long flags;
- struct lib80211_crypto_alg *alg;
-
- alg = kzalloc(sizeof(*alg), GFP_KERNEL);
- if (alg == NULL)
- return -ENOMEM;
-
- alg->ops = ops;
-
- spin_lock_irqsave(&lib80211_crypto_lock, flags);
- list_add(&alg->list, &lib80211_crypto_algs);
- spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
-
- printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n",
- ops->name);
-
- return 0;
-}
-EXPORT_SYMBOL(lib80211_register_crypto_ops);
-
-int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
-{
- struct lib80211_crypto_alg *alg;
- unsigned long flags;
-
- spin_lock_irqsave(&lib80211_crypto_lock, flags);
- list_for_each_entry(alg, &lib80211_crypto_algs, list) {
- if (alg->ops == ops)
- goto found;
- }
- spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
- return -EINVAL;
-
- found:
- printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n",
- ops->name);
- list_del(&alg->list);
- spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
- kfree(alg);
- return 0;
-}
-EXPORT_SYMBOL(lib80211_unregister_crypto_ops);
-
-struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name)
-{
- struct lib80211_crypto_alg *alg;
- unsigned long flags;
-
- spin_lock_irqsave(&lib80211_crypto_lock, flags);
- list_for_each_entry(alg, &lib80211_crypto_algs, list) {
- if (strcmp(alg->ops->name, name) == 0)
- goto found;
- }
- spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
- return NULL;
-
- found:
- spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
- return alg->ops;
-}
-EXPORT_SYMBOL(lib80211_get_crypto_ops);
-
-static void *lib80211_crypt_null_init(int keyidx)
-{
- return (void *)1;
-}
-
-static void lib80211_crypt_null_deinit(void *priv)
-{
-}
-
-static struct lib80211_crypto_ops lib80211_crypt_null = {
- .name = "NULL",
- .init = lib80211_crypt_null_init,
- .deinit = lib80211_crypt_null_deinit,
- .owner = THIS_MODULE,
-};
-
-static int __init lib80211_init(void)
-{
- pr_info(DRV_DESCRIPTION "\n");
- return lib80211_register_crypto_ops(&lib80211_crypt_null);
-}
-
-static void __exit lib80211_exit(void)
-{
- lib80211_unregister_crypto_ops(&lib80211_crypt_null);
- BUG_ON(!list_empty(&lib80211_crypto_algs));
-}
-
-module_init(lib80211_init);
-module_exit(lib80211_exit);
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
deleted file mode 100644
index cca5e1cf089e..000000000000
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ /dev/null
@@ -1,448 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * lib80211 crypt: host-based CCMP encryption implementation for lib80211
- *
- * Copyright (c) 2003-2004, Jouni Malinen <j@w1.fi>
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <asm/string.h>
-#include <linux/wireless.h>
-
-#include <linux/ieee80211.h>
-
-#include <linux/crypto.h>
-#include <crypto/aead.h>
-
-#include <net/lib80211.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP crypt: CCMP");
-MODULE_LICENSE("GPL");
-
-#define AES_BLOCK_LEN 16
-#define CCMP_HDR_LEN 8
-#define CCMP_MIC_LEN 8
-#define CCMP_TK_LEN 16
-#define CCMP_PN_LEN 6
-
-struct lib80211_ccmp_data {
- u8 key[CCMP_TK_LEN];
- int key_set;
-
- u8 tx_pn[CCMP_PN_LEN];
- u8 rx_pn[CCMP_PN_LEN];
-
- u32 dot11RSNAStatsCCMPFormatErrors;
- u32 dot11RSNAStatsCCMPReplays;
- u32 dot11RSNAStatsCCMPDecryptErrors;
-
- int key_idx;
-
- struct crypto_aead *tfm;
-
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_aad[2 * AES_BLOCK_LEN];
- u8 rx_aad[2 * AES_BLOCK_LEN];
-};
-
-static void *lib80211_ccmp_init(int key_idx)
-{
- struct lib80211_ccmp_data *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- goto fail;
- priv->key_idx = key_idx;
-
- priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(priv->tfm)) {
- priv->tfm = NULL;
- goto fail;
- }
-
- return priv;
-
- fail:
- if (priv) {
- if (priv->tfm)
- crypto_free_aead(priv->tfm);
- kfree(priv);
- }
-
- return NULL;
-}
-
-static void lib80211_ccmp_deinit(void *priv)
-{
- struct lib80211_ccmp_data *_priv = priv;
- if (_priv && _priv->tfm)
- crypto_free_aead(_priv->tfm);
- kfree(priv);
-}
-
-static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr,
- const u8 *pn, u8 *iv, u8 *aad)
-{
- u8 *pos, qc = 0;
- size_t aad_len;
- int a4_included, qc_included;
-
- a4_included = ieee80211_has_a4(hdr->frame_control);
- qc_included = ieee80211_is_data_qos(hdr->frame_control);
-
- aad_len = 22;
- if (a4_included)
- aad_len += 6;
- if (qc_included) {
- pos = (u8 *) & hdr->addr4;
- if (a4_included)
- pos += 6;
- qc = *pos & 0x0f;
- aad_len += 2;
- }
-
- /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
- * mode authentication are not allowed to collide, yet both are derived
- * from the same vector. We only set L := 1 here to indicate that the
- * data size can be represented in (L+1) bytes. The CCM layer will take
- * care of storing the data length in the top (L+1) bytes and setting
- * and clearing the other bits as is required to derive the two IVs.
- */
- iv[0] = 0x1;
-
- /* Nonce: QC | A2 | PN */
- iv[1] = qc;
- memcpy(iv + 2, hdr->addr2, ETH_ALEN);
- memcpy(iv + 8, pn, CCMP_PN_LEN);
-
- /* AAD:
- * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
- * A1 | A2 | A3
- * SC with bits 4..15 (seq#) masked to zero
- * A4 (if present)
- * QC (if present)
- */
- pos = (u8 *) hdr;
- aad[0] = pos[0] & 0x8f;
- aad[1] = pos[1] & 0xc7;
- memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
- pos = (u8 *) & hdr->seq_ctrl;
- aad[20] = pos[0] & 0x0f;
- aad[21] = 0; /* all bits masked */
- memset(aad + 22, 0, 8);
- if (a4_included)
- memcpy(aad + 22, hdr->addr4, ETH_ALEN);
- if (qc_included) {
- aad[a4_included ? 28 : 22] = qc;
- /* rest of QC masked */
- }
- return aad_len;
-}
-
-static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len,
- u8 *aeskey, int keylen, void *priv)
-{
- struct lib80211_ccmp_data *key = priv;
- int i;
- u8 *pos;
-
- if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len)
- return -1;
-
- if (aeskey != NULL && keylen >= CCMP_TK_LEN)
- memcpy(aeskey, key->key, CCMP_TK_LEN);
-
- pos = skb_push(skb, CCMP_HDR_LEN);
- memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
- pos += hdr_len;
-
- i = CCMP_PN_LEN - 1;
- while (i >= 0) {
- key->tx_pn[i]++;
- if (key->tx_pn[i] != 0)
- break;
- i--;
- }
-
- *pos++ = key->tx_pn[5];
- *pos++ = key->tx_pn[4];
- *pos++ = 0;
- *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
- *pos++ = key->tx_pn[3];
- *pos++ = key->tx_pn[2];
- *pos++ = key->tx_pn[1];
- *pos++ = key->tx_pn[0];
-
- return CCMP_HDR_LEN;
-}
-
-static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_ccmp_data *key = priv;
- struct ieee80211_hdr *hdr;
- struct aead_request *req;
- struct scatterlist sg[2];
- u8 *aad = key->tx_aad;
- u8 iv[AES_BLOCK_LEN];
- int len, data_len, aad_len;
- int ret;
-
- if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
- return -1;
-
- data_len = skb->len - hdr_len;
- len = lib80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv);
- if (len < 0)
- return -1;
-
- req = aead_request_alloc(key->tfm, GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
-
- skb_put(skb, CCMP_MIC_LEN);
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], aad, aad_len);
- sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
- data_len + CCMP_MIC_LEN);
-
- aead_request_set_callback(req, 0, NULL, NULL);
- aead_request_set_ad(req, aad_len);
- aead_request_set_crypt(req, sg, sg, data_len, iv);
-
- ret = crypto_aead_encrypt(req);
- aead_request_free(req);
-
- return ret;
-}
-
-/*
- * deal with seq counter wrapping correctly.
- * refer to timer_after() for jiffies wrapping handling
- */
-static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o)
-{
- u32 iv32_n, iv16_n;
- u32 iv32_o, iv16_o;
-
- iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3];
- iv16_n = (pn_n[4] << 8) | pn_n[5];
-
- iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3];
- iv16_o = (pn_o[4] << 8) | pn_o[5];
-
- if ((s32)iv32_n - (s32)iv32_o < 0 ||
- (iv32_n == iv32_o && iv16_n <= iv16_o))
- return 1;
- return 0;
-}
-
-static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_ccmp_data *key = priv;
- u8 keyidx, *pos;
- struct ieee80211_hdr *hdr;
- struct aead_request *req;
- struct scatterlist sg[2];
- u8 *aad = key->rx_aad;
- u8 iv[AES_BLOCK_LEN];
- u8 pn[6];
- int aad_len, ret;
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
-
- if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -1;
- }
-
- hdr = (struct ieee80211_hdr *)skb->data;
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
- net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n",
- hdr->addr2);
- key->dot11RSNAStatsCCMPFormatErrors++;
- return -2;
- }
- keyidx >>= 6;
- if (key->key_idx != keyidx) {
- net_dbg_ratelimited("CCMP: RX tkey->key_idx=%d frame keyidx=%d\n",
- key->key_idx, keyidx);
- return -6;
- }
- if (!key->key_set) {
- net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
- hdr->addr2, keyidx);
- return -3;
- }
-
- pn[0] = pos[7];
- pn[1] = pos[6];
- pn[2] = pos[5];
- pn[3] = pos[4];
- pn[4] = pos[1];
- pn[5] = pos[0];
- pos += 8;
-
- if (ccmp_replay_check(pn, key->rx_pn)) {
-#ifdef CONFIG_LIB80211_DEBUG
- net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n",
- hdr->addr2,
- key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
- key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
- pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
-#endif
- key->dot11RSNAStatsCCMPReplays++;
- return -4;
- }
-
- req = aead_request_alloc(key->tfm, GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
-
- aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], aad, aad_len);
- sg_set_buf(&sg[1], pos, data_len);
-
- aead_request_set_callback(req, 0, NULL, NULL);
- aead_request_set_ad(req, aad_len);
- aead_request_set_crypt(req, sg, sg, data_len, iv);
-
- ret = crypto_aead_decrypt(req);
- aead_request_free(req);
-
- if (ret) {
- net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM (%d)\n",
- hdr->addr2, ret);
- key->dot11RSNAStatsCCMPDecryptErrors++;
- return -5;
- }
-
- memcpy(key->rx_pn, pn, CCMP_PN_LEN);
-
- /* Remove hdr and MIC */
- memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
- skb_pull(skb, CCMP_HDR_LEN);
- skb_trim(skb, skb->len - CCMP_MIC_LEN);
-
- return keyidx;
-}
-
-static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_ccmp_data *data = priv;
- int keyidx;
- struct crypto_aead *tfm = data->tfm;
-
- keyidx = data->key_idx;
- memset(data, 0, sizeof(*data));
- data->key_idx = keyidx;
- data->tfm = tfm;
- if (len == CCMP_TK_LEN) {
- memcpy(data->key, key, CCMP_TK_LEN);
- data->key_set = 1;
- if (seq) {
- data->rx_pn[0] = seq[5];
- data->rx_pn[1] = seq[4];
- data->rx_pn[2] = seq[3];
- data->rx_pn[3] = seq[2];
- data->rx_pn[4] = seq[1];
- data->rx_pn[5] = seq[0];
- }
- if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) ||
- crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN))
- return -1;
- } else if (len == 0)
- data->key_set = 0;
- else
- return -1;
-
- return 0;
-}
-
-static int lib80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_ccmp_data *data = priv;
-
- if (len < CCMP_TK_LEN)
- return -1;
-
- if (!data->key_set)
- return 0;
- memcpy(key, data->key, CCMP_TK_LEN);
-
- if (seq) {
- seq[0] = data->tx_pn[5];
- seq[1] = data->tx_pn[4];
- seq[2] = data->tx_pn[3];
- seq[3] = data->tx_pn[2];
- seq[4] = data->tx_pn[1];
- seq[5] = data->tx_pn[0];
- }
-
- return CCMP_TK_LEN;
-}
-
-static void lib80211_ccmp_print_stats(struct seq_file *m, void *priv)
-{
- struct lib80211_ccmp_data *ccmp = priv;
-
- seq_printf(m,
- "key[%d] alg=CCMP key_set=%d "
- "tx_pn=%02x%02x%02x%02x%02x%02x "
- "rx_pn=%02x%02x%02x%02x%02x%02x "
- "format_errors=%d replays=%d decrypt_errors=%d\n",
- ccmp->key_idx, ccmp->key_set,
- ccmp->tx_pn[0], ccmp->tx_pn[1], ccmp->tx_pn[2],
- ccmp->tx_pn[3], ccmp->tx_pn[4], ccmp->tx_pn[5],
- ccmp->rx_pn[0], ccmp->rx_pn[1], ccmp->rx_pn[2],
- ccmp->rx_pn[3], ccmp->rx_pn[4], ccmp->rx_pn[5],
- ccmp->dot11RSNAStatsCCMPFormatErrors,
- ccmp->dot11RSNAStatsCCMPReplays,
- ccmp->dot11RSNAStatsCCMPDecryptErrors);
-}
-
-static struct lib80211_crypto_ops lib80211_crypt_ccmp = {
- .name = "CCMP",
- .init = lib80211_ccmp_init,
- .deinit = lib80211_ccmp_deinit,
- .encrypt_mpdu = lib80211_ccmp_encrypt,
- .decrypt_mpdu = lib80211_ccmp_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = lib80211_ccmp_set_key,
- .get_key = lib80211_ccmp_get_key,
- .print_stats = lib80211_ccmp_print_stats,
- .extra_mpdu_prefix_len = CCMP_HDR_LEN,
- .extra_mpdu_postfix_len = CCMP_MIC_LEN,
- .owner = THIS_MODULE,
-};
-
-static int __init lib80211_crypto_ccmp_init(void)
-{
- return lib80211_register_crypto_ops(&lib80211_crypt_ccmp);
-}
-
-static void __exit lib80211_crypto_ccmp_exit(void)
-{
- lib80211_unregister_crypto_ops(&lib80211_crypt_ccmp);
-}
-
-module_init(lib80211_crypto_ccmp_init);
-module_exit(lib80211_crypto_ccmp_exit);
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
deleted file mode 100644
index 5c8cdf7681e3..000000000000
--- a/net/wireless/lib80211_crypt_tkip.c
+++ /dev/null
@@ -1,738 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * lib80211 crypt: host-based TKIP encryption implementation for lib80211
- *
- * Copyright (c) 2003-2004, Jouni Malinen <j@w1.fi>
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/err.h>
-#include <linux/fips.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/mm.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-#include <asm/string.h>
-
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <net/iw_handler.h>
-
-#include <crypto/arc4.h>
-#include <crypto/hash.h>
-#include <linux/crypto.h>
-#include <linux/crc32.h>
-
-#include <net/lib80211.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("lib80211 crypt: TKIP");
-MODULE_LICENSE("GPL");
-
-#define TKIP_HDR_LEN 8
-
-struct lib80211_tkip_data {
-#define TKIP_KEY_LEN 32
- u8 key[TKIP_KEY_LEN];
- int key_set;
-
- u32 tx_iv32;
- u16 tx_iv16;
- u16 tx_ttak[5];
- int tx_phase1_done;
-
- u32 rx_iv32;
- u16 rx_iv16;
- u16 rx_ttak[5];
- int rx_phase1_done;
- u32 rx_iv32_new;
- u16 rx_iv16_new;
-
- u32 dot11RSNAStatsTKIPReplays;
- u32 dot11RSNAStatsTKIPICVErrors;
- u32 dot11RSNAStatsTKIPLocalMICFailures;
-
- int key_idx;
-
- struct arc4_ctx rx_ctx_arc4;
- struct arc4_ctx tx_ctx_arc4;
- struct crypto_shash *rx_tfm_michael;
- struct crypto_shash *tx_tfm_michael;
-
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 rx_hdr[16], tx_hdr[16];
-
- unsigned long flags;
-};
-
-static unsigned long lib80211_tkip_set_flags(unsigned long flags, void *priv)
-{
- struct lib80211_tkip_data *_priv = priv;
- unsigned long old_flags = _priv->flags;
- _priv->flags = flags;
- return old_flags;
-}
-
-static unsigned long lib80211_tkip_get_flags(void *priv)
-{
- struct lib80211_tkip_data *_priv = priv;
- return _priv->flags;
-}
-
-static void *lib80211_tkip_init(int key_idx)
-{
- struct lib80211_tkip_data *priv;
-
- if (fips_enabled)
- return NULL;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- goto fail;
-
- priv->key_idx = key_idx;
-
- priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->tx_tfm_michael)) {
- priv->tx_tfm_michael = NULL;
- goto fail;
- }
-
- priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->rx_tfm_michael)) {
- priv->rx_tfm_michael = NULL;
- goto fail;
- }
-
- return priv;
-
- fail:
- if (priv) {
- crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_shash(priv->rx_tfm_michael);
- kfree(priv);
- }
-
- return NULL;
-}
-
-static void lib80211_tkip_deinit(void *priv)
-{
- struct lib80211_tkip_data *_priv = priv;
- if (_priv) {
- crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_shash(_priv->rx_tfm_michael);
- }
- kfree_sensitive(priv);
-}
-
-static inline u16 RotR1(u16 val)
-{
- return (val >> 1) | (val << 15);
-}
-
-static inline u8 Lo8(u16 val)
-{
- return val & 0xff;
-}
-
-static inline u8 Hi8(u16 val)
-{
- return val >> 8;
-}
-
-static inline u16 Lo16(u32 val)
-{
- return val & 0xffff;
-}
-
-static inline u16 Hi16(u32 val)
-{
- return val >> 16;
-}
-
-static inline u16 Mk16(u8 hi, u8 lo)
-{
- return lo | (((u16) hi) << 8);
-}
-
-static inline u16 Mk16_le(__le16 * v)
-{
- return le16_to_cpu(*v);
-}
-
-static const u16 Sbox[256] = {
- 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
- 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
- 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
- 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
- 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
- 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
- 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
- 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
- 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
- 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
- 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
- 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
- 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
- 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
- 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
- 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
- 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
- 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
- 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
- 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
- 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
- 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
- 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
- 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
- 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
- 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
- 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
- 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
- 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
- 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
- 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
- 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
-};
-
-static inline u16 _S_(u16 v)
-{
- u16 t = Sbox[Hi8(v)];
- return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
-}
-
-#define PHASE1_LOOP_COUNT 8
-
-static void tkip_mixing_phase1(u16 * TTAK, const u8 * TK, const u8 * TA,
- u32 IV32)
-{
- int i, j;
-
- /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
- TTAK[0] = Lo16(IV32);
- TTAK[1] = Hi16(IV32);
- TTAK[2] = Mk16(TA[1], TA[0]);
- TTAK[3] = Mk16(TA[3], TA[2]);
- TTAK[4] = Mk16(TA[5], TA[4]);
-
- for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
- j = 2 * (i & 1);
- TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
- TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
- TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
- TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
- TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
- }
-}
-
-static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK,
- u16 IV16)
-{
- /* Make temporary area overlap WEP seed so that the final copy can be
- * avoided on little endian hosts. */
- u16 *PPK = (u16 *) & WEPSeed[4];
-
- /* Step 1 - make copy of TTAK and bring in TSC */
- PPK[0] = TTAK[0];
- PPK[1] = TTAK[1];
- PPK[2] = TTAK[2];
- PPK[3] = TTAK[3];
- PPK[4] = TTAK[4];
- PPK[5] = TTAK[4] + IV16;
-
- /* Step 2 - 96-bit bijective mixing using S-box */
- PPK[0] += _S_(PPK[5] ^ Mk16_le((__le16 *) & TK[0]));
- PPK[1] += _S_(PPK[0] ^ Mk16_le((__le16 *) & TK[2]));
- PPK[2] += _S_(PPK[1] ^ Mk16_le((__le16 *) & TK[4]));
- PPK[3] += _S_(PPK[2] ^ Mk16_le((__le16 *) & TK[6]));
- PPK[4] += _S_(PPK[3] ^ Mk16_le((__le16 *) & TK[8]));
- PPK[5] += _S_(PPK[4] ^ Mk16_le((__le16 *) & TK[10]));
-
- PPK[0] += RotR1(PPK[5] ^ Mk16_le((__le16 *) & TK[12]));
- PPK[1] += RotR1(PPK[0] ^ Mk16_le((__le16 *) & TK[14]));
- PPK[2] += RotR1(PPK[1]);
- PPK[3] += RotR1(PPK[2]);
- PPK[4] += RotR1(PPK[3]);
- PPK[5] += RotR1(PPK[4]);
-
- /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
- * WEPSeed[0..2] is transmitted as WEP IV */
- WEPSeed[0] = Hi8(IV16);
- WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
- WEPSeed[2] = Lo8(IV16);
- WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((__le16 *) & TK[0])) >> 1);
-
-#ifdef __BIG_ENDIAN
- {
- int i;
- for (i = 0; i < 6; i++)
- PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
- }
-#endif
-}
-
-static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
- u8 * rc4key, int keylen, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- u8 *pos;
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
- return -1;
-
- if (rc4key == NULL || keylen < 16)
- return -1;
-
- if (!tkey->tx_phase1_done) {
- tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
- tkey->tx_iv32);
- tkey->tx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
-
- pos = skb_push(skb, TKIP_HDR_LEN);
- memmove(pos, pos + TKIP_HDR_LEN, hdr_len);
- pos += hdr_len;
-
- *pos++ = *rc4key;
- *pos++ = *(rc4key + 1);
- *pos++ = *(rc4key + 2);
- *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
- *pos++ = tkey->tx_iv32 & 0xff;
- *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
- *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
-
- tkey->tx_iv16++;
- if (tkey->tx_iv16 == 0) {
- tkey->tx_phase1_done = 0;
- tkey->tx_iv32++;
- }
-
- return TKIP_HDR_LEN;
-}
-
-static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- int len;
- u8 rc4key[16], *pos, *icv;
- u32 crc;
-
- if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n",
- hdr->addr1);
- return -1;
- }
-
- if (skb_tailroom(skb) < 4 || skb->len < hdr_len)
- return -1;
-
- len = skb->len - hdr_len;
- pos = skb->data + hdr_len;
-
- if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
- return -1;
-
- crc = ~crc32_le(~0, pos, len);
- icv = skb_put(skb, 4);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
- arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
-
- return 0;
-}
-
-/*
- * deal with seq counter wrapping correctly.
- * refer to timer_after() for jiffies wrapping handling
- */
-static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
- u32 iv32_o, u16 iv16_o)
-{
- if ((s32)iv32_n - (s32)iv32_o < 0 ||
- (iv32_n == iv32_o && iv16_n <= iv16_o))
- return 1;
- return 0;
-}
-
-static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- u8 rc4key[16];
- u8 keyidx, *pos;
- u32 iv32;
- u16 iv16;
- struct ieee80211_hdr *hdr;
- u8 icv[4];
- u32 crc;
- int plen;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n",
- hdr->addr2);
- return -1;
- }
-
- if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
- return -1;
-
- pos = skb->data + hdr_len;
- keyidx = pos[3];
- if (!(keyidx & (1 << 5))) {
- net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n",
- hdr->addr2);
- return -2;
- }
- keyidx >>= 6;
- if (tkey->key_idx != keyidx) {
- net_dbg_ratelimited("TKIP: RX tkey->key_idx=%d frame keyidx=%d\n",
- tkey->key_idx, keyidx);
- return -6;
- }
- if (!tkey->key_set) {
- net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n",
- hdr->addr2, keyidx);
- return -3;
- }
- iv16 = (pos[0] << 8) | pos[2];
- iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
- pos += TKIP_HDR_LEN;
-
- if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
-#ifdef CONFIG_LIB80211_DEBUG
- net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n",
- hdr->addr2, tkey->rx_iv32, tkey->rx_iv16,
- iv32, iv16);
-#endif
- tkey->dot11RSNAStatsTKIPReplays++;
- return -4;
- }
-
- if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
- tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
- tkey->rx_phase1_done = 1;
- }
- tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
-
- plen = skb->len - hdr_len - 12;
-
- arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
- arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);
-
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
- if (memcmp(icv, pos + plen, 4) != 0) {
- if (iv32 != tkey->rx_iv32) {
- /* Previously cached Phase1 result was already lost, so
- * it needs to be recalculated for the next packet. */
- tkey->rx_phase1_done = 0;
- }
-#ifdef CONFIG_LIB80211_DEBUG
- net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n",
- hdr->addr2);
-#endif
- tkey->dot11RSNAStatsTKIPICVErrors++;
- return -5;
- }
-
- /* Update real counters only after Michael MIC verification has
- * completed */
- tkey->rx_iv32_new = iv32;
- tkey->rx_iv16_new = iv16;
-
- /* Remove IV and ICV */
- memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
- skb_pull(skb, TKIP_HDR_LEN);
- skb_trim(skb, skb->len - 4);
-
- return keyidx;
-}
-
-static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
- u8 *data, size_t data_len, u8 *mic)
-{
- SHASH_DESC_ON_STACK(desc, tfm_michael);
- int err;
-
- if (tfm_michael == NULL) {
- pr_warn("%s(): tfm_michael == NULL\n", __func__);
- return -1;
- }
-
- desc->tfm = tfm_michael;
-
- if (crypto_shash_setkey(tfm_michael, key, 8))
- return -1;
-
- err = crypto_shash_init(desc);
- if (err)
- goto out;
- err = crypto_shash_update(desc, hdr, 16);
- if (err)
- goto out;
- err = crypto_shash_update(desc, data, data_len);
- if (err)
- goto out;
- err = crypto_shash_final(desc, mic);
-
-out:
- shash_desc_zero(desc);
- return err;
-}
-
-static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
-{
- struct ieee80211_hdr *hdr11;
-
- hdr11 = (struct ieee80211_hdr *)skb->data;
-
- switch (le16_to_cpu(hdr11->frame_control) &
- (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
- break;
- default:
- memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
- memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
- break;
- }
-
- if (ieee80211_is_data_qos(hdr11->frame_control)) {
- hdr[12] = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(hdr11)))
- & IEEE80211_QOS_CTL_TID_MASK;
- } else
- hdr[12] = 0; /* priority */
-
- hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
-}
-
-static int lib80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
- void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- u8 *pos;
-
- if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
- printk(KERN_DEBUG "Invalid packet for Michael MIC add "
- "(tailroom=%d hdr_len=%d skb->len=%d)\n",
- skb_tailroom(skb), hdr_len, skb->len);
- return -1;
- }
-
- michael_mic_hdr(skb, tkey->tx_hdr);
- pos = skb_put(skb, 8);
- if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
- return -1;
-
- return 0;
-}
-
-static void lib80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr *hdr,
- int keyidx)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
-
- /* TODO: needed parameters: count, keyid, key type, TSC */
- memset(&ev, 0, sizeof(ev));
- ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
- if (hdr->addr1[0] & 0x01)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
-}
-
-static int lib80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- u8 mic[8];
-
- if (!tkey->key_set)
- return -1;
-
- michael_mic_hdr(skb, tkey->rx_hdr);
- if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
- return -1;
- if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
- struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)skb->data;
- printk(KERN_DEBUG "%s: Michael MIC verification failed for "
- "MSDU from %pM keyidx=%d\n",
- skb->dev ? skb->dev->name : "N/A", hdr->addr2,
- keyidx);
- if (skb->dev)
- lib80211_michael_mic_failure(skb->dev, hdr, keyidx);
- tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- return -1;
- }
-
- /* Update TSC counters for RX now that the packet verification has
- * completed. */
- tkey->rx_iv32 = tkey->rx_iv32_new;
- tkey->rx_iv16 = tkey->rx_iv16_new;
-
- skb_trim(skb, skb->len - 8);
-
- return 0;
-}
-
-static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
- int keyidx;
- struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct arc4_ctx *tfm2 = &tkey->tx_ctx_arc4;
- struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct arc4_ctx *tfm4 = &tkey->rx_ctx_arc4;
-
- keyidx = tkey->key_idx;
- memset(tkey, 0, sizeof(*tkey));
- tkey->key_idx = keyidx;
- tkey->tx_tfm_michael = tfm;
- tkey->tx_ctx_arc4 = *tfm2;
- tkey->rx_tfm_michael = tfm3;
- tkey->rx_ctx_arc4 = *tfm4;
- if (len == TKIP_KEY_LEN) {
- memcpy(tkey->key, key, TKIP_KEY_LEN);
- tkey->key_set = 1;
- tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
- if (seq) {
- tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
- (seq[3] << 8) | seq[2];
- tkey->rx_iv16 = (seq[1] << 8) | seq[0];
- }
- } else if (len == 0)
- tkey->key_set = 0;
- else
- return -1;
-
- return 0;
-}
-
-static int lib80211_tkip_get_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_tkip_data *tkey = priv;
-
- if (len < TKIP_KEY_LEN)
- return -1;
-
- if (!tkey->key_set)
- return 0;
- memcpy(key, tkey->key, TKIP_KEY_LEN);
-
- if (seq) {
- /*
- * Not clear if this should return the value as is
- * or - as the code previously seemed to partially
- * have been written as - subtract one from it. It
- * was working this way for a long time so leave it.
- */
- seq[0] = tkey->tx_iv16;
- seq[1] = tkey->tx_iv16 >> 8;
- seq[2] = tkey->tx_iv32;
- seq[3] = tkey->tx_iv32 >> 8;
- seq[4] = tkey->tx_iv32 >> 16;
- seq[5] = tkey->tx_iv32 >> 24;
- }
-
- return TKIP_KEY_LEN;
-}
-
-static void lib80211_tkip_print_stats(struct seq_file *m, void *priv)
-{
- struct lib80211_tkip_data *tkip = priv;
- seq_printf(m,
- "key[%d] alg=TKIP key_set=%d "
- "tx_pn=%02x%02x%02x%02x%02x%02x "
- "rx_pn=%02x%02x%02x%02x%02x%02x "
- "replays=%d icv_errors=%d local_mic_failures=%d\n",
- tkip->key_idx, tkip->key_set,
- (tkip->tx_iv32 >> 24) & 0xff,
- (tkip->tx_iv32 >> 16) & 0xff,
- (tkip->tx_iv32 >> 8) & 0xff,
- tkip->tx_iv32 & 0xff,
- (tkip->tx_iv16 >> 8) & 0xff,
- tkip->tx_iv16 & 0xff,
- (tkip->rx_iv32 >> 24) & 0xff,
- (tkip->rx_iv32 >> 16) & 0xff,
- (tkip->rx_iv32 >> 8) & 0xff,
- tkip->rx_iv32 & 0xff,
- (tkip->rx_iv16 >> 8) & 0xff,
- tkip->rx_iv16 & 0xff,
- tkip->dot11RSNAStatsTKIPReplays,
- tkip->dot11RSNAStatsTKIPICVErrors,
- tkip->dot11RSNAStatsTKIPLocalMICFailures);
-}
-
-static struct lib80211_crypto_ops lib80211_crypt_tkip = {
- .name = "TKIP",
- .init = lib80211_tkip_init,
- .deinit = lib80211_tkip_deinit,
- .encrypt_mpdu = lib80211_tkip_encrypt,
- .decrypt_mpdu = lib80211_tkip_decrypt,
- .encrypt_msdu = lib80211_michael_mic_add,
- .decrypt_msdu = lib80211_michael_mic_verify,
- .set_key = lib80211_tkip_set_key,
- .get_key = lib80211_tkip_get_key,
- .print_stats = lib80211_tkip_print_stats,
- .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_mpdu_postfix_len = 4, /* ICV */
- .extra_msdu_postfix_len = 8, /* MIC */
- .get_flags = lib80211_tkip_get_flags,
- .set_flags = lib80211_tkip_set_flags,
- .owner = THIS_MODULE,
-};
-
-static int __init lib80211_crypto_tkip_init(void)
-{
- return lib80211_register_crypto_ops(&lib80211_crypt_tkip);
-}
-
-static void __exit lib80211_crypto_tkip_exit(void)
-{
- lib80211_unregister_crypto_ops(&lib80211_crypt_tkip);
-}
-
-module_init(lib80211_crypto_tkip_init);
-module_exit(lib80211_crypto_tkip_exit);
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
deleted file mode 100644
index 6ab9957b8f96..000000000000
--- a/net/wireless/lib80211_crypt_wep.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * lib80211 crypt: host-based WEP encryption implementation for lib80211
- *
- * Copyright (c) 2002-2004, Jouni Malinen <j@w1.fi>
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- */
-
-#include <linux/err.h>
-#include <linux/fips.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <asm/string.h>
-
-#include <net/lib80211.h>
-
-#include <crypto/arc4.h>
-#include <linux/crc32.h>
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("lib80211 crypt: WEP");
-MODULE_LICENSE("GPL");
-
-struct lib80211_wep_data {
- u32 iv;
-#define WEP_KEY_LEN 13
- u8 key[WEP_KEY_LEN + 1];
- u8 key_len;
- u8 key_idx;
- struct arc4_ctx tx_ctx;
- struct arc4_ctx rx_ctx;
-};
-
-static void *lib80211_wep_init(int keyidx)
-{
- struct lib80211_wep_data *priv;
-
- if (fips_enabled)
- return NULL;
-
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
- return NULL;
- priv->key_idx = keyidx;
-
- /* start WEP IV from a random value */
- get_random_bytes(&priv->iv, 4);
-
- return priv;
-}
-
-static void lib80211_wep_deinit(void *priv)
-{
- kfree_sensitive(priv);
-}
-
-/* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */
-static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
- u8 *key, int keylen, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
- u32 klen;
- u8 *pos;
-
- if (skb_headroom(skb) < 4 || skb->len < hdr_len)
- return -1;
-
- pos = skb_push(skb, 4);
- memmove(pos, pos + 4, hdr_len);
- pos += hdr_len;
-
- klen = 3 + wep->key_len;
-
- wep->iv++;
-
- /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
- * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
- * can be used to speedup attacks, so avoid using them. */
- if ((wep->iv & 0xff00) == 0xff00) {
- u8 B = (wep->iv >> 16) & 0xff;
- if (B >= 3 && B < klen)
- wep->iv += 0x0100;
- }
-
- /* Prepend 24-bit IV to RC4 key and TX frame */
- *pos++ = (wep->iv >> 16) & 0xff;
- *pos++ = (wep->iv >> 8) & 0xff;
- *pos++ = wep->iv & 0xff;
- *pos++ = wep->key_idx << 6;
-
- return 0;
-}
-
-/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
- * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
- * so the payload length increases with 8 bytes.
- *
- * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
- */
-static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
- u32 crc, klen, len;
- u8 *pos, *icv;
- u8 key[WEP_KEY_LEN + 3];
-
- /* other checks are in lib80211_wep_build_iv */
- if (skb_tailroom(skb) < 4)
- return -1;
-
- /* add the IV to the frame */
- if (lib80211_wep_build_iv(skb, hdr_len, NULL, 0, priv))
- return -1;
-
- /* Copy the IV into the first 3 bytes of the key */
- skb_copy_from_linear_data_offset(skb, hdr_len, key, 3);
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- len = skb->len - hdr_len - 4;
- pos = skb->data + hdr_len + 4;
- klen = 3 + wep->key_len;
-
- /* Append little-endian CRC32 over only the data and encrypt it to produce ICV */
- crc = ~crc32_le(~0, pos, len);
- icv = skb_put(skb, 4);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
-
- arc4_setkey(&wep->tx_ctx, key, klen);
- arc4_crypt(&wep->tx_ctx, pos, pos, len + 4);
-
- return 0;
-}
-
-/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
- * the frame: IV (4 bytes), encrypted payload (including SNAP header),
- * ICV (4 bytes). len includes both IV and ICV.
- *
- * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
- * failure. If frame is OK, IV and ICV will be removed.
- */
-static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
- u32 crc, klen, plen;
- u8 key[WEP_KEY_LEN + 3];
- u8 keyidx, *pos, icv[4];
-
- if (skb->len < hdr_len + 8)
- return -1;
-
- pos = skb->data + hdr_len;
- key[0] = *pos++;
- key[1] = *pos++;
- key[2] = *pos++;
- keyidx = *pos++ >> 6;
- if (keyidx != wep->key_idx)
- return -1;
-
- klen = 3 + wep->key_len;
-
- /* Copy rest of the WEP key (the secret part) */
- memcpy(key + 3, wep->key, wep->key_len);
-
- /* Apply RC4 to data and compute CRC32 over decrypted data */
- plen = skb->len - hdr_len - 8;
-
- arc4_setkey(&wep->rx_ctx, key, klen);
- arc4_crypt(&wep->rx_ctx, pos, pos, plen + 4);
-
- crc = ~crc32_le(~0, pos, plen);
- icv[0] = crc;
- icv[1] = crc >> 8;
- icv[2] = crc >> 16;
- icv[3] = crc >> 24;
- if (memcmp(icv, pos + plen, 4) != 0) {
- /* ICV mismatch - drop frame */
- return -2;
- }
-
- /* Remove IV and ICV */
- memmove(skb->data + 4, skb->data, hdr_len);
- skb_pull(skb, 4);
- skb_trim(skb, skb->len - 4);
-
- return 0;
-}
-
-static int lib80211_wep_set_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
-
- if (len < 0 || len > WEP_KEY_LEN)
- return -1;
-
- memcpy(wep->key, key, len);
- wep->key_len = len;
-
- return 0;
-}
-
-static int lib80211_wep_get_key(void *key, int len, u8 * seq, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
-
- if (len < wep->key_len)
- return -1;
-
- memcpy(key, wep->key, wep->key_len);
-
- return wep->key_len;
-}
-
-static void lib80211_wep_print_stats(struct seq_file *m, void *priv)
-{
- struct lib80211_wep_data *wep = priv;
- seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
-}
-
-static struct lib80211_crypto_ops lib80211_crypt_wep = {
- .name = "WEP",
- .init = lib80211_wep_init,
- .deinit = lib80211_wep_deinit,
- .encrypt_mpdu = lib80211_wep_encrypt,
- .decrypt_mpdu = lib80211_wep_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = lib80211_wep_set_key,
- .get_key = lib80211_wep_get_key,
- .print_stats = lib80211_wep_print_stats,
- .extra_mpdu_prefix_len = 4, /* IV */
- .extra_mpdu_postfix_len = 4, /* ICV */
- .owner = THIS_MODULE,
-};
-
-static int __init lib80211_crypto_wep_init(void)
-{
- return lib80211_register_crypto_ops(&lib80211_crypt_wep);
-}
-
-static void __exit lib80211_crypto_wep_exit(void)
-{
- lib80211_unregister_crypto_ops(&lib80211_crypt_wep);
-}
-
-module_init(lib80211_crypto_wep_init);
-module_exit(lib80211_crypto_wep_exit);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 83306979fbe2..2c6654075ca9 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Portions
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/export.h>
@@ -127,6 +127,9 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
+ if (wdev->links[0].cac_started)
+ return -EBUSY;
+
if (!setup->chandef.chan) {
/* if no channel explicitly given, use preset channel */
setup->chandef = wdev->u.mesh.preset_chandef;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f635a8b6ca2e..e10f2b3b4b7f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2019-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2019-2020, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -241,12 +241,12 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
char *buf = kmalloc(128, gfp);
if (buf) {
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
- "keyid=%d %scast addr=%pM)", key_id,
- key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
- addr);
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
+ wrqu.data.length =
+ sprintf(buf, "MLME-MICHAELMICFAILURE."
+ "indication(keyid=%d %scast addr=%pM)",
+ key_id, key_type == NL80211_KEYTYPE_GROUP
+ ? "broad" : "uni", addr);
wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
kfree(buf);
}
@@ -325,28 +325,137 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
p1[i] &= p2[i];
}
-/* Note: caller must cfg80211_put_bss() regardless of result */
-int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_assoc_request *req)
+static int
+cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
+ const struct ieee80211_multi_link_elem *mle_b,
+ struct netlink_ext_ack *extack)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err, i, j;
+ const struct ieee80211_mle_basic_common_info *common_a, *common_b;
- lockdep_assert_wiphy(wdev->wiphy);
+ common_a = (const void *)mle_a->variable;
+ common_b = (const void *)mle_b->variable;
+
+ if (memcmp(common_a->mld_mac_addr, common_b->mld_mac_addr, ETH_ALEN)) {
+ NL_SET_ERR_MSG(extack, "AP MLD address mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_cap((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML capabilities mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_mld_capa_op((const u8 *)mle_a) !=
+ ieee80211_mle_get_mld_capa_op((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link MLD capabilities/ops mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_a) !=
+ ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack,
+ "extended link MLD capabilities/ops mismatch");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_mlme_check_mlo(struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ const struct ieee80211_multi_link_elem *mles[ARRAY_SIZE(req->links)] = {};
+ int i;
+
+ if (req->link_id < 0)
+ return 0;
+
+ if (!req->links[req->link_id].bss) {
+ NL_SET_ERR_MSG(extack, "no BSS for assoc link");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ const struct cfg80211_bss_ies *ies;
+ const struct element *ml;
- for (i = 1; i < ARRAY_SIZE(req->links); i++) {
if (!req->links[i].bss)
continue;
- for (j = 0; j < i; j++) {
- if (req->links[i].bss == req->links[j].bss)
- return -EINVAL;
+
+ if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) {
+ NL_SET_ERR_MSG(extack, "BSSID must not be our address");
+ req->links[i].error = -EINVAL;
+ goto error;
}
- if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
- return -EINVAL;
+ ies = rcu_dereference(req->links[i].bss->ies);
+ ml = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+ ies->data, ies->len);
+ if (!ml) {
+ NL_SET_ERR_MSG(extack, "MLO BSS w/o ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ if (!ieee80211_mle_type_ok(ml->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ ml->datalen - 1)) {
+ NL_SET_ERR_MSG(extack, "BSS with invalid ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ mles[i] = (const void *)(ml->data + 1);
+
+ if (ieee80211_mle_get_link_id((const u8 *)mles[i]) != i) {
+ NL_SET_ERR_MSG(extack, "link ID mismatch");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (WARN_ON(!mles[req->link_id]))
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ if (i == req->link_id || !req->links[i].bss)
+ continue;
+
+ if (WARN_ON(!mles[i]))
+ goto error;
+
+ if (cfg80211_mlme_check_mlo_compat(mles[req->link_id], mles[i],
+ extack)) {
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
}
+ rcu_read_unlock();
+ return 0;
+error:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Note: caller must cfg80211_put_bss() regardless of result */
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ lockdep_assert_wiphy(wdev->wiphy);
+
+ err = cfg80211_mlme_check_mlo(dev, req, extack);
+ if (err)
+ return err;
+
if (wdev->connected &&
(!req->prev_bssid ||
!ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
@@ -525,10 +634,10 @@ void cfg80211_mgmt_registrations_update_wk(struct work_struct *wk)
rdev = container_of(wk, struct cfg80211_registered_device,
mgmt_registrations_update_wk);
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
cfg80211_mgmt_registrations_update(wdev);
- wiphy_unlock(&rdev->wiphy);
}
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
@@ -1002,26 +1111,28 @@ EXPORT_SYMBOL(__cfg80211_radar_event);
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp)
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long timeout;
- /* not yet supported */
- if (wdev->valid_links)
+ if (WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id))))
return;
- trace_cfg80211_cac_event(netdev, event);
+ trace_cfg80211_cac_event(netdev, event, link_id);
- if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED))
+ if (WARN_ON(!wdev->links[link_id].cac_started &&
+ event != NL80211_RADAR_CAC_STARTED))
return;
switch (event) {
case NL80211_RADAR_CAC_FINISHED:
- timeout = wdev->cac_start_time +
- msecs_to_jiffies(wdev->cac_time_ms);
+ timeout = wdev->links[link_id].cac_start_time +
+ msecs_to_jiffies(wdev->links[link_id].cac_time_ms);
WARN_ON(!time_after_eq(jiffies, timeout));
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
memcpy(&rdev->cac_done_chandef, chandef,
@@ -1030,10 +1141,10 @@ void cfg80211_cac_event(struct net_device *netdev,
cfg80211_sched_dfs_chan_update(rdev);
fallthrough;
case NL80211_RADAR_CAC_ABORTED:
- wdev->cac_started = false;
+ wdev->links[link_id].cac_started = false;
break;
case NL80211_RADAR_CAC_STARTED:
- wdev->cac_started = true;
+ wdev->links[link_id].cac_started = true;
break;
default:
WARN_ON(1);
@@ -1089,10 +1200,10 @@ cfg80211_background_cac_event(struct cfg80211_registered_device *rdev,
const struct cfg80211_chan_def *chandef,
enum nl80211_radar_event event)
{
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
__cfg80211_background_cac_event(rdev, rdev->background_radar_wdev,
chandef, event);
- wiphy_unlock(&rdev->wiphy);
}
void cfg80211_background_cac_done_wk(struct work_struct *work)
@@ -1183,3 +1294,80 @@ void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev)
&rdev->background_radar_chandef,
NL80211_RADAR_CAC_ABORTED);
}
+
+int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_link *links,
+ u16 rem_links)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ lockdep_assert_wiphy(wdev->wiphy);
+
+ err = rdev_assoc_ml_reconf(rdev, dev, links, rem_links);
+ if (!err) {
+ int link_id;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ if (!links[link_id].bss)
+ continue;
+
+ cfg80211_ref_bss(&rdev->wiphy, links[link_id].bss);
+ cfg80211_hold_bss(bss_from_pub(links[link_id].bss));
+ }
+ }
+
+ return err;
+}
+
+void cfg80211_mlo_reconf_add_done(struct net_device *dev,
+ struct cfg80211_mlo_reconf_done_data *data)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ int link_id;
+
+ lockdep_assert_wiphy(wiphy);
+
+ trace_cfg80211_mlo_reconf_add_done(dev, data->added_links,
+ data->buf, data->len);
+
+ if (WARN_ON(!wdev->valid_links))
+ return;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
+ return;
+
+ /* validate that a BSS is given for each added link */
+ for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+ struct cfg80211_bss *bss = data->links[link_id].bss;
+
+ if (!(data->added_links & BIT(link_id)))
+ continue;
+
+ if (WARN_ON(!bss))
+ return;
+ }
+
+ for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+ struct cfg80211_bss *bss = data->links[link_id].bss;
+
+ if (!bss)
+ continue;
+
+ if (data->added_links & BIT(link_id)) {
+ wdev->links[link_id].client.current_bss =
+ bss_from_pub(bss);
+ } else {
+ cfg80211_unhold_bss(bss_from_pub(bss));
+ cfg80211_put_bss(wiphy, bss);
+ }
+ }
+
+ wdev->valid_links |= data->added_links;
+ nl80211_mlo_reconf_add_done(dev, data);
+}
+EXPORT_SYMBOL(cfg80211_mlo_reconf_add_done);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index bd54a928bab4..aac0e7298dc7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/if.h>
@@ -294,6 +294,21 @@ static int validate_he_capa(const struct nlattr *attr,
return 0;
}
+static int validate_supported_selectors(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_selectors = nla_data(attr);
+ u8 supported_selectors_len = nla_len(attr);
+
+ /* The top bit must not be set as it is not part of the selector */
+ for (int i = 0; i < supported_selectors_len; i++) {
+ if (supported_selectors[i] & 0x80)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* policy for the attributes */
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR];
@@ -315,8 +330,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 },
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
NLA_POLICY_MAX(NLA_U8, 15),
- [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
- NLA_POLICY_MAX(NLA_U8, 31),
+ [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
@@ -468,6 +482,10 @@ static const struct netlink_range_validation nl80211_punct_bitmap_range = {
.max = 0xffff,
};
+static const struct netlink_range_validation q_range = {
+ .max = INT_MAX,
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -581,7 +599,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
- [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] =
+ NLA_POLICY_RANGE(NLA_U32, 0,
+ NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3),
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = NLA_POLICY_EXACT_LEN_WARN(WLAN_PMKID_LEN),
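
The NL80211_ATTR_WPA_VERSIONS entry now rejects bad values directly in the policy. NLA_POLICY_RANGE(NLA_U32, 0, V1|V2|V3) is equivalent to the removed nl80211_valid_wpa_versions() mask check only because the three version flags occupy the contiguous low bits (1, 2 and 4), so every value up to 7 is some combination of valid versions. A small sketch checking that equivalence, with the constants copied from nl80211.h (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define NL80211_WPA_VERSION_1 (1u << 0)
#define NL80211_WPA_VERSION_2 (1u << 1)
#define NL80211_WPA_VERSION_3 (1u << 2)

static int old_check(uint32_t v)	/* the removed nl80211_valid_wpa_versions() */
{
	return !(v & ~(NL80211_WPA_VERSION_1 |
		       NL80211_WPA_VERSION_2 |
		       NL80211_WPA_VERSION_3));
}

static int new_check(uint32_t v)	/* NLA_POLICY_RANGE(NLA_U32, 0, V1|V2|V3) */
{
	return v <= (NL80211_WPA_VERSION_1 |
		     NL80211_WPA_VERSION_2 |
		     NL80211_WPA_VERSION_3);
}

int main(void)
{
	for (uint32_t v = 0; v <= 16; v++)
		assert(old_check(v) == new_check(v));
	return 0;
}
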
@@ -750,7 +772,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
- [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
+ [NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
[NL80211_ATTR_HE_CAPABILITY] =
NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
NL80211_HE_MAX_CAPABILITY_LEN),
@@ -807,7 +829,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLO_LINKS] =
NLA_POLICY_NESTED_ARRAY(nl80211_policy),
[NL80211_ATTR_MLO_LINK_ID] =
- NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS),
+ NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS - 1),
[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
@@ -821,6 +843,13 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA] = { .type = NLA_FLAG },
[NL80211_ATTR_MLO_TTLM_DLINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
[NL80211_ATTR_MLO_TTLM_ULINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
+ [NL80211_ATTR_ASSOC_SPP_AMSDU] = { .type = NLA_FLAG },
+ [NL80211_ATTR_VIF_RADIO_MASK] = { .type = NLA_U32 },
+ [NL80211_ATTR_SUPPORTED_SELECTORS] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_supported_selectors,
+ NL80211_MAX_SUPP_SELECTORS),
+ [NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 },
+ [NL80211_ATTR_EPCS] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -906,22 +935,11 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
};
static const struct nla_policy
-nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = {
- [NL80211_BAND_2GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_5GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_6GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_60GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_LC] = { .type = NLA_S32 },
-};
-
-static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
- [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
- NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
};
static const struct nla_policy
@@ -1204,11 +1222,17 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_DFS_CONCURRENT) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_CAN_MONITOR) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_CAN_MONITOR))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP))
goto nla_put_failure;
}
@@ -1282,10 +1306,7 @@ static unsigned int nl80211_link_id(struct nlattr **attrs)
{
struct nlattr *linkid = attrs[NL80211_ATTR_MLO_LINK_ID];
- if (!linkid)
- return 0;
-
- return nla_get_u8(linkid);
+ return nla_get_u8_default(linkid, 0);
}
static int nl80211_link_id_or_invalid(struct nlattr **attrs)
@@ -1628,71 +1649,87 @@ nla_put_failure:
return -ENOBUFS;
}
-static int nl80211_put_iface_combinations(struct wiphy *wiphy,
- struct sk_buff *msg,
- bool large)
+static int nl80211_put_ifcomb_data(struct sk_buff *msg, bool large, int idx,
+ const struct ieee80211_iface_combination *c,
+ u16 nested)
{
- struct nlattr *nl_combis;
- int i, j;
+ struct nlattr *nl_combi, *nl_limits;
+ int i;
- nl_combis = nla_nest_start_noflag(msg,
- NL80211_ATTR_INTERFACE_COMBINATIONS);
- if (!nl_combis)
+ nl_combi = nla_nest_start_noflag(msg, idx | nested);
+ if (!nl_combi)
goto nla_put_failure;
- for (i = 0; i < wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
- struct nlattr *nl_combi, *nl_limits;
+ nl_limits = nla_nest_start_noflag(msg, NL80211_IFACE_COMB_LIMITS |
+ nested);
+ if (!nl_limits)
+ goto nla_put_failure;
- c = &wiphy->iface_combinations[i];
+ for (i = 0; i < c->n_limits; i++) {
+ struct nlattr *nl_limit;
- nl_combi = nla_nest_start_noflag(msg, i + 1);
- if (!nl_combi)
+ nl_limit = nla_nest_start_noflag(msg, i + 1);
+ if (!nl_limit)
goto nla_put_failure;
-
- nl_limits = nla_nest_start_noflag(msg,
- NL80211_IFACE_COMB_LIMITS);
- if (!nl_limits)
+ if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[i].max))
+ goto nla_put_failure;
+ if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
+ c->limits[i].types))
goto nla_put_failure;
+ nla_nest_end(msg, nl_limit);
+ }
- for (j = 0; j < c->n_limits; j++) {
- struct nlattr *nl_limit;
+ nla_nest_end(msg, nl_limits);
- nl_limit = nla_nest_start_noflag(msg, j + 1);
- if (!nl_limit)
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
- c->limits[j].max))
- goto nla_put_failure;
- if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
- c->limits[j].types))
- goto nla_put_failure;
- nla_nest_end(msg, nl_limit);
- }
+ if (c->beacon_int_infra_match &&
+ nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+ c->num_different_channels) ||
+ nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+ c->max_interfaces))
+ goto nla_put_failure;
+ if (large &&
+ (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
+ c->radar_detect_widths) ||
+ nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+ c->radar_detect_regions)))
+ goto nla_put_failure;
+ if (c->beacon_int_min_gcd &&
+ nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+ c->beacon_int_min_gcd))
+ goto nla_put_failure;
- nla_nest_end(msg, nl_limits);
+ nla_nest_end(msg, nl_combi);
- if (c->beacon_int_infra_match &&
- nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
- c->num_different_channels) ||
- nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
- c->max_interfaces))
- goto nla_put_failure;
- if (large &&
- (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
- c->radar_detect_widths) ||
- nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
- c->radar_detect_regions)))
- goto nla_put_failure;
- if (c->beacon_int_min_gcd &&
- nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
- c->beacon_int_min_gcd))
- goto nla_put_failure;
+ return 0;
+nla_put_failure:
+ return -ENOBUFS;
+}
- nla_nest_end(msg, nl_combi);
+static int nl80211_put_iface_combinations(struct wiphy *wiphy,
+ struct sk_buff *msg,
+ int attr, int radio,
+ bool large, u16 nested)
+{
+ const struct ieee80211_iface_combination *c;
+ struct nlattr *nl_combis;
+ int i, n;
+
+ nl_combis = nla_nest_start_noflag(msg, attr | nested);
+ if (!nl_combis)
+ goto nla_put_failure;
+
+ if (radio >= 0) {
+ c = wiphy->radio[0].iface_combinations;
+ n = wiphy->radio[0].n_iface_combinations;
+ } else {
+ c = wiphy->iface_combinations;
+ n = wiphy->n_iface_combinations;
}
+ for (i = 0; i < n; i++)
+ if (nl80211_put_ifcomb_data(msg, large, i + 1, &c[i], nested))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combis);
@@ -2398,6 +2435,85 @@ fail:
return -ENOBUFS;
}
+static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
+{
+ const struct wiphy_radio *r = &wiphy->radio[idx];
+ struct nlattr *radio, *freq;
+ int i;
+
+ radio = nla_nest_start(msg, idx);
+ if (!radio)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx))
+ goto nla_put_failure;
+
+ if (r->antenna_mask &&
+ nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
+ r->antenna_mask))
+ goto nla_put_failure;
+
+ for (i = 0; i < r->n_freq_range; i++) {
+ const struct wiphy_radio_freq_range *range = &r->freq_range[i];
+
+ freq = nla_nest_start(msg, NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE);
+ if (!freq)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_START,
+ range->start_freq) ||
+ nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_END,
+ range->end_freq))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, freq);
+ }
+
+ for (i = 0; i < r->n_iface_combinations; i++)
+ if (nl80211_put_ifcomb_data(msg, true,
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
+ &r->iface_combinations[i],
+ NLA_F_NESTED))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, radio);
+
+ return 0;
+
+nla_put_failure:
+ return -ENOBUFS;
+}
+
+static int nl80211_put_radios(struct wiphy *wiphy, struct sk_buff *msg)
+{
+ struct nlattr *radios;
+ int i;
+
+ if (!wiphy->n_radio)
+ return 0;
+
+ radios = nla_nest_start(msg, NL80211_ATTR_WIPHY_RADIOS);
+ if (!radios)
+ return -ENOBUFS;
+
+ for (i = 0; i < wiphy->n_radio; i++)
+ if (nl80211_put_radio(wiphy, msg, i))
+ goto fail;
+
+ nla_nest_end(msg, radios);
+
+ if (nl80211_put_iface_combinations(wiphy, msg,
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS,
+ -1, true, NLA_F_NESTED))
+ return -ENOBUFS;
+
+ return 0;
+
+fail:
+ nla_nest_cancel(msg, radios);
+ return -ENOBUFS;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -2693,7 +2809,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
- state->split))
+ NL80211_ATTR_INTERFACE_COMBINATIONS,
+ rdev->wiphy.n_radio ? 0 : -1,
+ state->split, 0))
goto nla_put_failure;
state->split_start++;
@@ -3007,6 +3125,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.hw_timestamp_max_peers))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 17:
+ if (nl80211_put_radios(&rdev->wiphy, msg))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -3224,24 +3348,9 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
-static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- const struct cfg80211_chan_def *chandef,
- u16 *punct_bitmap)
-{
- if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
- return -EINVAL;
-
- *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
- if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
- return -EINVAL;
-
- return 0;
-}
-
-int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- struct cfg80211_chan_def *chandef)
+static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info, bool monitor,
+ struct cfg80211_chan_def *chandef)
{
struct netlink_ext_ack *extack = info->extack;
struct nlattr **attrs = info->attrs;
@@ -3266,10 +3375,9 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->freq1_offset = control_freq % 1000;
chandef->center_freq2 = 0;
- /* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ if (!chandef->chan) {
NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
- "Channel is disabled");
+ "Unknown channel");
return -EINVAL;
}
@@ -3323,11 +3431,9 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
chandef->center_freq1 =
nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
- if (attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET])
- chandef->freq1_offset = nla_get_u32(
- attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET]);
- else
- chandef->freq1_offset = 0;
+ chandef->freq1_offset =
+ nla_get_u32_default(attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET],
+ 0);
}
if (attrs[NL80211_ATTR_CENTER_FREQ2])
chandef->center_freq2 =
@@ -3346,13 +3452,27 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->edmg.channels = 0;
}
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ chandef->punctured =
+ nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
+
+ if (chandef->punctured &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_PUNCT)) {
+ NL_SET_ERR_MSG(extack,
+ "driver doesn't support puncturing");
+ return -EINVAL;
+ }
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
}
- if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef,
+ IEEE80211_CHAN_DISABLED,
+ monitor ? IEEE80211_CHAN_CAN_MONITOR : 0)) {
NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
}
@@ -3367,6 +3487,13 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
return 0;
}
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef)
+{
+ return _nl80211_parse_chandef(rdev, info, false, chandef);
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct genl_info *info,
@@ -3391,7 +3518,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
link_id = 0;
}
- result = nl80211_parse_chandef(rdev, info, &chandef);
+ result = _nl80211_parse_chandef(rdev, info,
+ iftype == NL80211_IFTYPE_MONITOR,
+ &chandef);
if (result)
return result;
@@ -3414,6 +3543,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (chandef.chan != cur_chan)
return -EBUSY;
+ /* only allow this for regular channel widths */
+ switch (wdev->links[link_id].ap.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_320:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_320:
+ break;
+ default:
+ return -EINVAL;
+ }
+
result = rdev_set_ap_chanwidth(rdev, dev, link_id,
&chandef);
if (result)
@@ -3426,7 +3582,7 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_MESH_POINT:
return cfg80211_set_mesh_channel(rdev, wdev, &chandef);
case NL80211_IFTYPE_MONITOR:
- return cfg80211_set_monitor_channel(rdev, &chandef);
+ return cfg80211_set_monitor_channel(rdev, dev, &chandef);
default:
break;
}
@@ -3490,7 +3646,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
} else
wdev = netdev->ieee80211_ptr;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
/*
* end workaround code, by now the rdev is available
@@ -3503,32 +3659,24 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
if (result)
- goto out;
+ return result;
if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
struct ieee80211_txq_params txq_params;
struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1];
- if (!rdev->ops->set_txq_params) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ if (!rdev->ops->set_txq_params)
+ return -EOPNOTSUPP;
- if (!netdev) {
- result = -EINVAL;
- goto out;
- }
+ if (!netdev)
+ return -EINVAL;
if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
- result = -EINVAL;
- goto out;
- }
+ netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EINVAL;
- if (!netif_running(netdev)) {
- result = -ENETDOWN;
- goto out;
- }
+ if (!netif_running(netdev))
+ return -ENETDOWN;
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
@@ -3539,10 +3687,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
txq_params_policy,
info->extack);
if (result)
- goto out;
+ return result;
+
result = parse_txq_params(tb, &txq_params);
if (result)
- goto out;
+ return result;
txq_params.link_id =
nl80211_link_id_or_invalid(info->attrs);
@@ -3558,7 +3707,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
result = rdev_set_txq_params(rdev, netdev,
&txq_params);
if (result)
- goto out;
+ return result;
}
}
@@ -3575,7 +3724,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (result)
- goto out;
+ return result;
}
if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
@@ -3586,19 +3735,15 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER))
txp_wdev = NULL;
- if (!rdev->ops->set_tx_power) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ if (!rdev->ops->set_tx_power)
+ return -EOPNOTSUPP;
idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING;
type = nla_get_u32(info->attrs[idx]);
if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] &&
- (type != NL80211_TX_POWER_AUTOMATIC)) {
- result = -EINVAL;
- goto out;
- }
+ (type != NL80211_TX_POWER_AUTOMATIC))
+ return -EINVAL;
if (type != NL80211_TX_POWER_AUTOMATIC) {
idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL;
@@ -3607,7 +3752,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
if (result)
- goto out;
+ return result;
}
if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
@@ -3616,10 +3761,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if ((!rdev->wiphy.available_antennas_tx &&
!rdev->wiphy.available_antennas_rx) ||
- !rdev->ops->set_antenna) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ !rdev->ops->set_antenna)
+ return -EOPNOTSUPP;
tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]);
rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]);
@@ -3627,17 +3770,15 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
/* reject antenna configurations which don't match the
* available antenna masks, except for the "all" mask */
if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) ||
- (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) {
- result = -EINVAL;
- goto out;
- }
+ (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx)))
+ return -EINVAL;
tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
result = rdev_set_antenna(rdev, tx_ant, rx_ant);
if (result)
- goto out;
+ return result;
}
changed = 0;
@@ -3659,10 +3800,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) {
frag_threshold = nla_get_u32(
info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]);
- if (frag_threshold < 256) {
- result = -EINVAL;
- goto out;
- }
+ if (frag_threshold < 256)
+ return -EINVAL;
if (frag_threshold != (u32) -1) {
/*
@@ -3683,10 +3822,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
- if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) {
- result = -EINVAL;
- goto out;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK])
+ return -EINVAL;
coverage_class = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
@@ -3694,20 +3831,17 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) {
- if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION))
+ return -EOPNOTSUPP;
changed |= WIPHY_PARAM_DYN_ACK;
}
if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) {
if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_TXQS)) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ NL80211_EXT_FEATURE_TXQS))
+ return -EOPNOTSUPP;
+
txq_limit = nla_get_u32(
info->attrs[NL80211_ATTR_TXQ_LIMIT]);
changed |= WIPHY_PARAM_TXQ_LIMIT;
@@ -3715,10 +3849,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) {
if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_TXQS)) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ NL80211_EXT_FEATURE_TXQS))
+ return -EOPNOTSUPP;
+
txq_memory_limit = nla_get_u32(
info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]);
changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT;
@@ -3726,10 +3859,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) {
if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_TXQS)) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ NL80211_EXT_FEATURE_TXQS))
+ return -EOPNOTSUPP;
+
txq_quantum = nla_get_u32(
info->attrs[NL80211_ATTR_TXQ_QUANTUM]);
changed |= WIPHY_PARAM_TXQ_QUANTUM;
@@ -3741,10 +3873,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
u8 old_coverage_class;
u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum;
- if (!rdev->ops->set_wiphy_params) {
- result = -EOPNOTSUPP;
- goto out;
- }
+ if (!rdev->ops->set_wiphy_params)
+ return -EOPNOTSUPP;
old_retry_short = rdev->wiphy.retry_short;
old_retry_long = rdev->wiphy.retry_long;
@@ -3782,15 +3912,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.txq_limit = old_txq_limit;
rdev->wiphy.txq_memory_limit = old_txq_memory_limit;
rdev->wiphy.txq_quantum = old_txq_quantum;
- goto out;
+ return result;
}
}
- result = 0;
-
-out:
- wiphy_unlock(&rdev->wiphy);
- return result;
+ return 0;
}
int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef)
@@ -3822,6 +3948,10 @@ int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *ch
if (chandef->center_freq2 &&
nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
return -ENOBUFS;
+ if (chandef->punctured &&
+ nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, chandef->punctured))
+ return -ENOBUFS;
+
return 0;
}
EXPORT_SYMBOL(nl80211_send_chandef);
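
nl80211_send_chandef() now also reports the puncturing bitmap carried in chandef->punctured. Per the nl80211 attribute definition, each bit of NL80211_ATTR_PUNCT_BITMAP covers one 20 MHz subchannel of the operating width, lowest bit for the lowest frequency, and a set bit means the subchannel is punctured. A small sketch that computes the bit for a given 20 MHz subchannel (hypothetical helper, not from the patch):

#include <assert.h>
#include <stdint.h>

/*
 * bit i covers the i-th 20 MHz subchannel counted from the lower edge
 * of the operating channel; a set bit marks the subchannel as punctured.
 */
static uint32_t punct_bit(int center_freq1, int width_mhz, int sub_center)
{
	int low_edge = center_freq1 - width_mhz / 2;

	return 1u << ((sub_center - 10 - low_edge) / 20);
}

int main(void)
{
	/* 80 MHz block centred on 5210 MHz: subchannels 5180/5200/5220/5240 */
	assert(punct_bit(5210, 80, 5200) == 0x2);	/* puncture the second 20 MHz */
	assert(punct_bit(5210, 80, 5180) == 0x1);	/* lowest bit = lowest frequency */
	return 0;
}
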
@@ -3857,7 +3987,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg80211_rdev_list_generation << 2)) ||
- nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr))
+ nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr) ||
+ nla_put_u32(msg, NL80211_ATTR_VIF_RADIO_MASK, wdev->radio_mask))
goto nla_put_failure;
if (rdev->ops->get_channel && !wdev->valid_links) {
@@ -3869,10 +4000,10 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
goto nla_put_failure;
}
- if (rdev->ops->get_tx_power) {
+ if (rdev->ops->get_tx_power && !wdev->valid_links) {
int dbm, ret;
- ret = rdev_get_tx_power(rdev, wdev, &dbm);
+ ret = rdev_get_tx_power(rdev, wdev, 0, &dbm);
if (ret == 0 &&
nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
DBM_TO_MBM(dbm)))
@@ -3941,6 +4072,15 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (ret == 0 && nl80211_send_chandef(msg, &chandef))
goto nla_put_failure;
+ if (rdev->ops->get_tx_power) {
+ int dbm, ret;
+
+ ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm);
+ if (ret == 0 &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+ DBM_TO_MBM(dbm)))
+ goto nla_put_failure;
+ }
nla_nest_end(msg, link);
}
@@ -4003,22 +4143,22 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
if_idx = 0;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
}
+
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wdev,
- NL80211_CMD_NEW_INTERFACE) < 0) {
- wiphy_unlock(&rdev->wiphy);
+ NL80211_CMD_NEW_INTERFACE) < 0)
goto out;
- }
+
if_idx++;
}
- wiphy_unlock(&rdev->wiphy);
if_start = 0;
wp_idx++;
@@ -4060,6 +4200,7 @@ static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
[NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
[NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
[NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG },
+ [NL80211_MNTR_FLAG_SKIP_TX] = { .type = NLA_FLAG },
};
static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
@@ -4079,6 +4220,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
if (flags[flag])
*mntrflags |= (1<<flag);
+ /* cooked monitor mode is incompatible with other modes */
+ if (*mntrflags & MONITOR_FLAG_COOK_FRAMES &&
+ *mntrflags != MONITOR_FLAG_COOK_FRAMES)
+ return -EOPNOTSUPP;
+
*mntrflags |= MONITOR_FLAG_CHANGED;
return 0;
@@ -4173,6 +4319,29 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
return -EOPNOTSUPP;
}
+static int nl80211_parse_vif_radio_mask(struct genl_info *info,
+ u32 *radio_mask)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct nlattr *attr = info->attrs[NL80211_ATTR_VIF_RADIO_MASK];
+ u32 mask, allowed;
+
+ if (!attr) {
+ *radio_mask = 0;
+ return 0;
+ }
+
+ allowed = BIT(rdev->wiphy.n_radio) - 1;
+ mask = nla_get_u32(attr);
+ if (mask & ~allowed)
+ return -EINVAL;
+ if (!mask)
+ mask = allowed;
+ *radio_mask = mask;
+
+ return 1;
+}
+
static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4180,6 +4349,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
int err;
enum nl80211_iftype otype, ntype;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u32 radio_mask = 0;
bool change = false;
memset(&params, 0, sizeof(params));
@@ -4193,8 +4364,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_MESH_ID]) {
- struct wireless_dev *wdev = dev->ieee80211_ptr;
-
if (ntype != NL80211_IFTYPE_MESH_POINT)
return -EINVAL;
if (otype != NL80211_IFTYPE_MESH_POINT)
@@ -4202,8 +4371,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (netif_running(dev))
return -EBUSY;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -4227,6 +4394,12 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (err > 0)
change = true;
+ err = nl80211_parse_vif_radio_mask(info, &radio_mask);
+ if (err < 0)
+ return err;
+ if (err && netif_running(dev))
+ return -EBUSY;
+
if (change)
err = cfg80211_change_iface(rdev, dev, ntype, &params);
else
@@ -4235,11 +4408,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (!err && params.use_4addr != -1)
dev->ieee80211_ptr->use_4addr = params.use_4addr;
- if (change && !err) {
- struct wireless_dev *wdev = dev->ieee80211_ptr;
+ if (radio_mask)
+ wdev->radio_mask = radio_mask;
+ if (change && !err)
nl80211_notify_iface(rdev, wdev, NL80211_CMD_SET_INTERFACE);
- }
return err;
}
@@ -4250,6 +4423,7 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
struct vif_params params;
struct wireless_dev *wdev;
struct sk_buff *msg;
+ u32 radio_mask;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
@@ -4287,6 +4461,10 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
if (err < 0)
return err;
+ err = nl80211_parse_vif_radio_mask(info, &radio_mask);
+ if (err < 0)
+ return err;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -4309,8 +4487,6 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
if (!info->attrs[NL80211_ATTR_MESH_ID])
break;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -4330,6 +4506,9 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
break;
}
+ if (radio_mask)
+ wdev->radio_mask = radio_mask;
+
if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) {
nlmsg_free(msg);
@@ -4342,16 +4521,13 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- int ret;
/* to avoid failing a new interface creation due to pending removal */
cfg80211_destroy_ifaces(rdev);
- wiphy_lock(&rdev->wiphy);
- ret = _nl80211_new_interface(skb, info);
- wiphy_unlock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
- return ret;
+ return _nl80211_new_interface(skb, info);
}
static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4450,10 +4626,7 @@ static void get_key_callback(void *c, struct key_params *params)
struct nlattr *key;
struct get_key_cookie *cookie = c;
- if ((params->key &&
- nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
- params->key_len, params->key)) ||
- (params->seq &&
+ if ((params->seq &&
nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
params->seq_len, params->seq)) ||
(params->cipher &&
@@ -4465,10 +4638,7 @@ static void get_key_callback(void *c, struct key_params *params)
if (!key)
goto nla_put_failure;
- if ((params->key &&
- nla_put(cookie->msg, NL80211_KEY_DATA,
- params->key_len, params->key)) ||
- (params->seq &&
+ if ((params->seq &&
nla_put(cookie->msg, NL80211_KEY_SEQ,
params->seq_len, params->seq)) ||
(params->cipher &&
@@ -5923,6 +6093,7 @@ static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_beaconing_check_config beacon_check = {};
unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -5936,6 +6107,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->start_ap)
return -EOPNOTSUPP;
+ if (wdev->links[link_id].cac_started)
+ return -EBUSY;
+
if (wdev->links[link_id].ap.beacon_interval)
return -EALREADY;
@@ -5945,6 +6119,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NL80211_ATTR_BEACON_HEAD])
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_SMPS_MODE] &&
+ nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]) != NL80211_SMPS_OFF)
+ return -EOPNOTSUPP;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -6069,16 +6247,13 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params->chandef,
- &params->punct_bitmap);
- if (err)
- goto out;
- }
-
- if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
- wdev->iftype)) {
+ beacon_check.iftype = wdev->iftype;
+ beacon_check.relax = true;
+ beacon_check.reg_power =
+ cfg80211_get_6ghz_power_type(params->beacon.tail,
+ params->beacon.tail_len);
+ if (!cfg80211_reg_check_beaconing(&rdev->wiphy, &params->chandef,
+ &beacon_check)) {
err = -EINVAL;
goto out;
}
@@ -6097,34 +6272,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
- params->smps_mode =
- nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
- switch (params->smps_mode) {
- case NL80211_SMPS_OFF:
- break;
- case NL80211_SMPS_STATIC:
- if (!(rdev->wiphy.features &
- NL80211_FEATURE_STATIC_SMPS)) {
- err = -EINVAL;
- goto out;
- }
- break;
- case NL80211_SMPS_DYNAMIC:
- if (!(rdev->wiphy.features &
- NL80211_FEATURE_DYNAMIC_SMPS)) {
- err = -EINVAL;
- goto out;
- }
- break;
- default:
- err = -EINVAL;
- goto out;
- }
- } else {
- params->smps_mode = NL80211_SMPS_OFF;
- }
-
params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) {
err = -EOPNOTSUPP;
@@ -6235,6 +6382,7 @@ out:
static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_beaconing_check_config beacon_check = {};
unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -6261,6 +6409,19 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out;
+ /* recheck beaconing is permitted with possibly changed power type */
+ beacon_check.iftype = wdev->iftype;
+ beacon_check.relax = true;
+ beacon_check.reg_power =
+ cfg80211_get_6ghz_power_type(params->beacon.tail,
+ params->beacon.tail_len);
+ if (!cfg80211_reg_check_beaconing(&rdev->wiphy,
+ &wdev->links[link_id].ap.chandef,
+ &beacon_check)) {
+ err = -EINVAL;
+ goto out;
+ }
+
attr = info->attrs[NL80211_ATTR_FILS_DISCOVERY];
if (attr) {
err = nl80211_parse_fils_discovery(rdev, attr,
@@ -6876,7 +7037,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (statype) {
case CFG80211_STA_MESH_PEER_KERNEL:
@@ -6936,6 +7097,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
params->link_sta_params.he_capa ||
params->link_sta_params.eht_capa)
return -EINVAL;
+ if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
}
if (statype != CFG80211_STA_AP_CLIENT &&
@@ -6959,7 +7122,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
BIT(NL80211_STA_FLAG_ASSOCIATED) |
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
- BIT(NL80211_STA_FLAG_MFP)))
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_SPP_AMSDU)))
return -EINVAL;
/* but authenticated/associated only if driver handles it */
@@ -7518,7 +7682,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
@@ -7542,6 +7706,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.sta_flags_mask & auth_assoc)
return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT) &&
+ params.sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
+
/* Older userspace, or userspace wanting to be compatible with
* !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth
* and assoc flags in the mask, but assumes the station will be
@@ -7630,14 +7799,16 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct station_del_parameters params;
+ int link_id = nl80211_link_id_or_invalid(info->attrs);
memset(&params, 0, sizeof(params));
if (info->attrs[NL80211_ATTR_MAC])
params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
- switch (dev->ieee80211_ptr->iftype) {
+ switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
@@ -7678,6 +7849,17 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
}
+ /* Link ID not expected in case of non-ML operation */
+ if (!wdev->valid_links && link_id != -1)
+ return -EINVAL;
+
+ /* If given, a valid link ID should be passed during MLO */
+ if (wdev->valid_links && link_id >= 0 &&
+ !(wdev->valid_links & BIT(link_id)))
+ return -EINVAL;
+
+ params.link_id = link_id;
+
return rdev_del_station(rdev, dev, &params);
}
@@ -8079,11 +8261,9 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
return -EINPROGRESS;
- if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
- user_reg_hint_type =
- nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
- else
- user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+ user_reg_hint_type =
+ nla_get_u32_default(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE],
+ NL80211_USER_REG_HINT_USER);
switch (user_reg_hint_type) {
case NL80211_USER_REG_HINT_USER:
@@ -8102,7 +8282,8 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
is_indoor = true;
}
- return regulatory_hint_indoor(is_indoor, owner_nlportid);
+ regulatory_hint_indoor(is_indoor, owner_nlportid);
+ return 0;
default:
return -EINVAL;
}
@@ -9014,6 +9195,9 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
lockdep_assert_wiphy(wdev->wiphy);
+ if (!cfg80211_wdev_channel_allowed(wdev, chan))
+ return false;
+
if (!cfg80211_beaconing_iface_active(wdev))
return true;
@@ -9148,6 +9332,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
size_t ie_len, size;
+ size_t ssids_offset, ie_offset;
wiphy = &rdev->wiphy;
@@ -9193,21 +9378,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
size = struct_size(request, channels, n_channels);
+ ssids_offset = size;
size = size_add(size, array_size(sizeof(*request->ssids), n_ssids));
+ ie_offset = size;
size = size_add(size, ie_len);
request = kzalloc(size, GFP_KERNEL);
if (!request)
return -ENOMEM;
+ request->n_channels = n_channels;
if (n_ssids)
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request + ssids_offset;
request->n_ssids = n_ssids;
- if (ie_len) {
- if (n_ssids)
- request->ie = (void *)(request->ssids + n_ssids);
- else
- request->ie = (void *)(request->channels + n_channels);
- }
+ if (ie_len)
+ request->ie = (void *)request + ie_offset;
i = 0;
if (scan_freqs) {
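
The scan-request allocation above records ssids_offset and ie_offset while the size is being accumulated, so the trailing ssids[] and ie buffers can be addressed from the single kzalloc() whether or not SSIDs or IEs are present. A simplified userspace sketch of the same single-allocation layout, using stand-in types rather than the real cfg80211 structures (illustration only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

struct ssid { unsigned char ssid[32]; size_t ssid_len; };	/* stand-in type */

struct scan_req {
	struct ssid *ssids;
	unsigned char *ie;
	int n_channels;
	void *channels[];	/* flexible array, like request->channels */
};

int main(void)
{
	int n_channels = 4, n_ssids = 2;
	size_t ie_len = 16;
	size_t size, ssids_offset, ie_offset;
	struct scan_req *req;

	/* mirror the patch: remember each offset while the size grows */
	size = sizeof(*req) + n_channels * sizeof(req->channels[0]);
	ssids_offset = size;
	size += n_ssids * sizeof(struct ssid);
	ie_offset = size;
	size += ie_len;

	req = calloc(1, size);
	if (!req)
		return 1;

	req->n_channels = n_channels;
	if (n_ssids)
		req->ssids = (struct ssid *)((char *)req + ssids_offset);
	if (ie_len)
		req->ie = (unsigned char *)req + ie_offset;

	printf("one allocation of %zu bytes: ssids at +%zu, ie at +%zu\n",
	       size, ssids_offset, ie_offset);
	free(req);
	return 0;
}
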
@@ -9226,7 +9410,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
}
/* ignore disabled channels */
- if (chan->flags & IEEE80211_CHAN_DISABLED)
+ if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ !cfg80211_wdev_channel_allowed(wdev, chan))
continue;
request->channels[i] = chan;
@@ -9246,7 +9431,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
chan = &wiphy->bands[band]->channels[j];
- if (chan->flags & IEEE80211_CHAN_DISABLED)
+ if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ !cfg80211_wdev_channel_allowed(wdev, chan))
continue;
request->channels[i] = chan;
@@ -9483,41 +9669,6 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
return 0;
}
-static int
-nl80211_parse_sched_scan_per_band_rssi(struct wiphy *wiphy,
- struct cfg80211_match_set *match_sets,
- struct nlattr *tb_band_rssi,
- s32 rssi_thold)
-{
- struct nlattr *attr;
- int i, tmp, ret = 0;
-
- if (!wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD)) {
- if (tb_band_rssi)
- ret = -EOPNOTSUPP;
- else
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] =
- NL80211_SCAN_RSSI_THOLD_OFF;
- return ret;
- }
-
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] = rssi_thold;
-
- nla_for_each_nested(attr, tb_band_rssi, tmp) {
- enum nl80211_band band = nla_type(attr);
-
- if (band < 0 || band >= NUM_NL80211_BANDS)
- return -EINVAL;
-
- match_sets->per_band_rssi_thold[band] = nla_get_s32(attr);
- }
-
- return 0;
-}
-
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs, int max_match_sets)
@@ -9645,9 +9796,11 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request = kzalloc(size, GFP_KERNEL);
if (!request)
return ERR_PTR(-ENOMEM);
+ request->n_channels = n_channels;
if (n_ssids)
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = n_ssids;
if (ie_len) {
if (n_ssids)
@@ -9792,15 +9945,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (rssi)
request->match_sets[i].rssi_thold =
nla_get_s32(rssi);
-
- /* Parse per band RSSI attribute */
- err = nl80211_parse_sched_scan_per_band_rssi(wiphy,
- &request->match_sets[i],
- tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI],
- request->match_sets[i].rssi_thold);
- if (err)
- goto out_free;
-
i++;
}
@@ -9950,80 +10094,100 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int link_id = nl80211_link_id(info->attrs);
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_chan_def chandef;
enum nl80211_dfs_regions dfs_region;
unsigned int cac_time_ms;
- int err = -EINVAL;
+ int err;
flush_delayed_work(&rdev->dfs_update_channels_wk);
- wiphy_lock(wiphy);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ default:
+ /* caution - see cfg80211_beaconing_iface_active() below */
+ return -EINVAL;
+ }
+
+ guard(wiphy)(wiphy);
dfs_region = reg_get_dfs_region(wiphy);
if (dfs_region == NL80211_DFS_UNSET)
- goto unlock;
+ return -EINVAL;
err = nl80211_parse_chandef(rdev, info, &chandef);
if (err)
- goto unlock;
+ return err;
err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
if (err < 0)
- goto unlock;
+ return err;
- if (err == 0) {
- err = -EINVAL;
- goto unlock;
- }
+ if (err == 0)
+ return -EINVAL;
- if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) {
- err = -EINVAL;
- goto unlock;
- }
+ if (!cfg80211_chandef_dfs_usable(wiphy, &chandef))
+ return -EINVAL;
- if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) {
- err = cfg80211_start_background_radar_detection(rdev, wdev,
- &chandef);
- goto unlock;
- }
+ if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND]))
+ return cfg80211_start_background_radar_detection(rdev, wdev,
+ &chandef);
- if (netif_carrier_ok(dev)) {
- err = -EBUSY;
- goto unlock;
+ if (cfg80211_beaconing_iface_active(wdev)) {
+ /* During MLO other link(s) can beacon, only the current link
+ * can not already beacon
+ */
+ if (wdev->valid_links &&
+ !wdev->links[link_id].ap.beacon_interval) {
+ /* nothing */
+ } else {
+ return -EBUSY;
+ }
}
- if (wdev->cac_started) {
- err = -EBUSY;
- goto unlock;
- }
+ if (wdev->links[link_id].cac_started)
+ return -EBUSY;
/* CAC start is offloaded to HW and can't be started manually */
- if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) {
- err = -EOPNOTSUPP;
- goto unlock;
- }
+ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
+ return -EOPNOTSUPP;
- if (!rdev->ops->start_radar_detection) {
- err = -EOPNOTSUPP;
- goto unlock;
- }
+ if (!rdev->ops->start_radar_detection)
+ return -EOPNOTSUPP;
cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
if (WARN_ON(!cac_time_ms))
cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
- err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
- if (!err) {
+ err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms,
+ link_id);
+ if (err)
+ return err;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
wdev->links[0].ap.chandef = chandef;
- wdev->cac_started = true;
- wdev->cac_start_time = jiffies;
- wdev->cac_time_ms = cac_time_ms;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wdev->u.ibss.chandef = chandef;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ wdev->u.mesh.chandef = chandef;
+ break;
+ default:
+ break;
}
-unlock:
- wiphy_unlock(wiphy);
+ wdev->links[link_id].cac_started = true;
+ wdev->links[link_id].cac_start_time = jiffies;
+ wdev->links[link_id].cac_time_ms = cac_time_ms;
- return err;
+ return 0;
}
static int nl80211_notify_radar_detection(struct sk_buff *skb,
@@ -10080,6 +10244,42 @@ static int nl80211_notify_radar_detection(struct sk_buff *skb,
return 0;
}
+static int nl80211_parse_counter_offsets(struct cfg80211_registered_device *rdev,
+ const u8 *data, size_t datalen,
+ int first_count, struct nlattr *attr,
+ const u16 **offsets, unsigned int *n_offsets)
+{
+ int i;
+
+ *n_offsets = 0;
+
+ if (!attr)
+ return 0;
+
+ if (!nla_len(attr) || (nla_len(attr) % sizeof(u16)))
+ return -EINVAL;
+
+ *n_offsets = nla_len(attr) / sizeof(u16);
+ if (rdev->wiphy.max_num_csa_counters &&
+ (*n_offsets > rdev->wiphy.max_num_csa_counters))
+ return -EINVAL;
+
+ *offsets = nla_data(attr);
+
+ /* sanity checks - counters should fit and be the same */
+ for (i = 0; i < *n_offsets; i++) {
+ u16 offset = (*offsets)[i];
+
+ if (offset >= datalen)
+ return -EINVAL;
+
+ if (first_count != -1 && data[offset] != first_count)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
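
The new nl80211_parse_counter_offsets() helper above factors out the duplicated beacon and probe-response checks: the attribute must be a non-empty array of u16 offsets within the driver's counter limit, each offset must fall inside the template, and every counter byte must start from the same countdown value. A minimal userspace sketch of the per-offset validation (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * every offset must lie inside the template and every counter byte
 * must start from the same countdown value
 */
static int check_counter_offsets(const uint8_t *data, size_t datalen,
				 int first_count,
				 const uint16_t *offsets, size_t n_offsets)
{
	for (size_t i = 0; i < n_offsets; i++) {
		if (offsets[i] >= datalen)
			return -1;
		if (first_count != -1 && data[offsets[i]] != first_count)
			return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t beacon_tail[16] = { 0 };
	uint16_t offs[2] = { 5, 11 };

	beacon_tail[5] = 10;	/* CSA countdown starts at 10 ... */
	beacon_tail[11] = 10;	/* ... at both advertised offsets */

	printf("%s\n", check_counter_offsets(beacon_tail, sizeof(beacon_tail),
					     10, offs, 2) ? "invalid" : "valid");
	return 0;
}
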
@@ -10091,7 +10291,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
int err;
bool need_new_beacon = false;
bool need_handle_dfs_flag = true;
- int len, i;
u32 cs_count;
if (!rdev->ops->channel_switch ||
@@ -10176,72 +10375,23 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
goto free;
}
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.tail,
+ params.beacon_csa.tail_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON],
+ &params.counter_offsets_beacon,
+ &params.n_counter_offsets_beacon);
+ if (err)
goto free;
- }
- params.n_counter_offsets_beacon = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_beacon >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.probe_resp,
+ params.beacon_csa.probe_resp_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP],
+ &params.counter_offsets_presp,
+ &params.n_counter_offsets_presp);
+ if (err)
goto free;
- }
-
- params.counter_offsets_beacon =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_beacon; i++) {
- u16 offset = params.counter_offsets_beacon[i];
-
- if (offset >= params.beacon_csa.tail_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.tail[offset] != params.count) {
- err = -EINVAL;
- goto free;
- }
- }
-
- if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) {
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
- goto free;
- }
-
- params.n_counter_offsets_presp = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_presp >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
- goto free;
- }
-
- params.counter_offsets_presp =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_presp; i++) {
- u16 offset = params.counter_offsets_presp[i];
-
- if (offset >= params.beacon_csa.probe_resp_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.probe_resp[offset] !=
- params.count) {
- err = -EINVAL;
- goto free;
- }
- }
- }
skip_beacons:
err = nl80211_parse_chandef(rdev, info, &params.chandef);
@@ -10272,14 +10422,7 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params.chandef,
- &params.punct_bitmap);
- if (err)
- goto free;
- }
-
+ params.link_id = link_id;
err = rdev_channel_switch(rdev, dev, &params);
free:
@@ -10387,17 +10530,21 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
NL80211_BSS_CHAIN_SIGNAL))
goto nla_put_failure;
- switch (rdev->wiphy.signal_type) {
- case CFG80211_SIGNAL_TYPE_MBM:
- if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
- goto nla_put_failure;
- break;
- case CFG80211_SIGNAL_TYPE_UNSPEC:
- if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
- goto nla_put_failure;
- break;
- default:
- break;
+ if (intbss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ case CFG80211_SIGNAL_TYPE_UNSPEC:
+ if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ default:
+ break;
+ }
}
switch (wdev->iftype) {
@@ -10652,13 +10799,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
return res;
}
-static bool nl80211_valid_wpa_versions(u32 wpa_versions)
-{
- return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
- NL80211_WPA_VERSION_2 |
- NL80211_WPA_VERSION_3));
-}
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10744,6 +10884,13 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
+ if (info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]) {
+ req.supported_selectors =
+ nla_data(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+ req.supported_selectors_len =
+ nla_len(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+ }
+
auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
return -EINVAL;
@@ -10884,12 +11031,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
+ if (info->attrs[NL80211_ATTR_WPA_VERSIONS])
settings->wpa_versions =
nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
- if (!nl80211_valid_wpa_versions(settings->wpa_versions))
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
@@ -10931,11 +11075,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
nla_len(info->attrs[NL80211_ATTR_SAE_PASSWORD]);
}
- if (info->attrs[NL80211_ATTR_SAE_PWE])
- settings->sae_pwe =
- nla_get_u8(info->attrs[NL80211_ATTR_SAE_PWE]);
- else
- settings->sae_pwe = NL80211_SAE_PWE_UNSPECIFIED;
+ settings->sae_pwe =
+ nla_get_u8_default(info->attrs[NL80211_ATTR_SAE_PWE],
+ NL80211_SAE_PWE_UNSPECIFIED);
return 0;
}
@@ -10979,12 +11121,84 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
return bss;
}
+static int nl80211_process_links(struct cfg80211_registered_device *rdev,
+ struct cfg80211_assoc_link *links,
+ int assoc_link_id,
+ const u8 *ssid, int ssid_len,
+ struct genl_info *info)
+{
+ unsigned int attrsize = NUM_NL80211_ATTR * sizeof(struct nlattr *);
+ struct nlattr **attrs __free(kfree) = kzalloc(attrsize, GFP_KERNEL);
+ struct nlattr *link;
+ unsigned int link_id;
+ int rem, err;
+
+ if (!attrs)
+ return -ENOMEM;
+
+ nla_for_each_nested(link, info->attrs[NL80211_ATTR_MLO_LINKS], rem) {
+ memset(attrs, 0, attrsize);
+
+ nla_parse_nested(attrs, NL80211_ATTR_MAX, link, NULL, NULL);
+
+ if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
+ NL_SET_BAD_ATTR(info->extack, link);
+ return -EINVAL;
+ }
+
+ link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]);
+ /* cannot use the same link ID again */
+ if (links[link_id].bss) {
+ NL_SET_BAD_ATTR(info->extack, link);
+ return -EINVAL;
+ }
+ links[link_id].bss =
+ nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+ assoc_link_id, link_id);
+ if (IS_ERR(links[link_id].bss)) {
+ err = PTR_ERR(links[link_id].bss);
+ links[link_id].bss = NULL;
+ NL_SET_ERR_MSG_ATTR(info->extack, link,
+ "Error fetching BSS for link");
+ return err;
+ }
+
+ if (attrs[NL80211_ATTR_IE]) {
+ links[link_id].elems = nla_data(attrs[NL80211_ATTR_IE]);
+ links[link_id].elems_len =
+ nla_len(attrs[NL80211_ATTR_IE]);
+
+ if (cfg80211_find_elem(WLAN_EID_FRAGMENT,
+ links[link_id].elems,
+ links[link_id].elems_len)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[NL80211_ATTR_IE],
+ "cannot deal with fragmentation");
+ return -EINVAL;
+ }
+
+ if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ links[link_id].elems,
+ links[link_id].elems_len)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[NL80211_ATTR_IE],
+ "cannot deal with non-inheritance");
+ return -EINVAL;
+ }
+ }
+
+ links[link_id].disabled =
+ nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
+ }
+
+ return 0;
+}
+
static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct cfg80211_assoc_request req = {};
- struct nlattr **attrs = NULL;
const u8 *ap_addr, *ssid;
unsigned int link_id;
int err, ssid_len;
@@ -11031,6 +11245,13 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_PREV_BSSID])
req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+ if (info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]) {
+ req.supported_selectors =
+ nla_data(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+ req.supported_selectors_len =
+ nla_len(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+ }
+
if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
req.flags |= ASSOC_REQ_DISABLE_HT;
@@ -11104,13 +11325,18 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.s1g_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_ASSOC_SPP_AMSDU])) {
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT)) {
+ GENL_SET_ERR_MSG(info, "SPP A-MSDUs not supported");
+ return -EINVAL;
+ }
+ req.flags |= ASSOC_REQ_SPP_AMSDU;
+ }
+
req.link_id = nl80211_link_id_or_invalid(info->attrs);
if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
- unsigned int attrsize = NUM_NL80211_ATTR * sizeof(*attrs);
- struct nlattr *link;
- int rem = 0;
-
if (req.link_id < 0)
return -EINVAL;
@@ -11125,72 +11351,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
ap_addr = req.ap_mld_addr;
- attrs = kzalloc(attrsize, GFP_KERNEL);
- if (!attrs)
- return -ENOMEM;
-
- nla_for_each_nested(link,
- info->attrs[NL80211_ATTR_MLO_LINKS],
- rem) {
- memset(attrs, 0, attrsize);
-
- nla_parse_nested(attrs, NL80211_ATTR_MAX,
- link, NULL, NULL);
-
- if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
- err = -EINVAL;
- NL_SET_BAD_ATTR(info->extack, link);
- goto free;
- }
-
- link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]);
- /* cannot use the same link ID again */
- if (req.links[link_id].bss) {
- err = -EINVAL;
- NL_SET_BAD_ATTR(info->extack, link);
- goto free;
- }
- req.links[link_id].bss =
- nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
- req.link_id, link_id);
- if (IS_ERR(req.links[link_id].bss)) {
- err = PTR_ERR(req.links[link_id].bss);
- req.links[link_id].bss = NULL;
- NL_SET_ERR_MSG_ATTR(info->extack,
- link, "Error fetching BSS for link");
- goto free;
- }
-
- if (attrs[NL80211_ATTR_IE]) {
- req.links[link_id].elems =
- nla_data(attrs[NL80211_ATTR_IE]);
- req.links[link_id].elems_len =
- nla_len(attrs[NL80211_ATTR_IE]);
-
- if (cfg80211_find_elem(WLAN_EID_FRAGMENT,
- req.links[link_id].elems,
- req.links[link_id].elems_len)) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[NL80211_ATTR_IE],
- "cannot deal with fragmentation");
- err = -EINVAL;
- goto free;
- }
-
- if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- req.links[link_id].elems,
- req.links[link_id].elems_len)) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[NL80211_ATTR_IE],
- "cannot deal with non-inheritance");
- err = -EINVAL;
- goto free;
- }
- }
-
- req.links[link_id].disabled =
- nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
- }
+ err = nl80211_process_links(rdev, req.links, req.link_id,
+ ssid, ssid_len, info);
+ if (err)
+ goto free;
if (!req.links[req.link_id].bss) {
err = -EINVAL;
@@ -11210,9 +11374,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto free;
}
-
- kfree(attrs);
- attrs = NULL;
} else {
if (req.link_id >= 0)
return -EINVAL;
@@ -11229,7 +11390,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
struct nlattr *link;
int rem = 0;
- err = cfg80211_mlme_assoc(rdev, dev, &req);
+ err = cfg80211_mlme_assoc(rdev, dev, &req,
+ info->extack);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
@@ -11271,7 +11433,6 @@ free:
for (link_id = 0; link_id < ARRAY_SIZE(req.links); link_id++)
cfg80211_put_bss(&rdev->wiphy, req.links[link_id].bss);
cfg80211_put_bss(&rdev->wiphy, req.bss);
- kfree(attrs);
return err;
}
@@ -12181,10 +12342,8 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!info->attrs[NL80211_ATTR_REASON_CODE])
- reason = WLAN_REASON_DEAUTH_LEAVING;
- else
- reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ reason = nla_get_u16_default(info->attrs[NL80211_ATTR_REASON_CODE],
+ WLAN_REASON_DEAUTH_LEAVING);
if (reason == 0)
return -EINVAL;
@@ -12306,7 +12465,7 @@ static int nl80211_del_pmksa(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC]) {
pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
} else if (info->attrs[NL80211_ATTR_SSID]) {
- /* SSID based pmksa flush suppported only for FILS,
+ /* SSID based pmksa flush supported only for FILS,
* OWE/SAE OFFLOAD cases
*/
if (info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
@@ -12677,23 +12836,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
- if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
- int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
- int i;
-
- if (len % sizeof(u16))
- return -EINVAL;
-
- params.n_csa_offsets = len / sizeof(u16);
- params.csa_offsets =
- nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
-
- /* check that all the offsets fit the frame */
- for (i = 0; i < params.n_csa_offsets; i++) {
- if (params.csa_offsets[i] >= params.len)
- return -EINVAL;
- }
- }
+ err = nl80211_parse_counter_offsets(rdev, NULL, params.len, -1,
+ info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX],
+ &params.csa_offsets,
+ &params.n_csa_offsets);
+ if (err)
+ return err;
if (!params.dont_wait_for_ack) {
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -13541,10 +13689,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
ETH_ALEN);
- if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
- port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]);
- else
- port = 0;
+ port = nla_get_u16_default(tb[NL80211_WOWLAN_TCP_SRC_PORT], 0);
#ifdef CONFIG_INET
/* allocate a socket and port for it and use it */
err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM,
@@ -13755,11 +13900,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
pat_len < wowlan->pattern_min_len)
goto error;
- if (!pat_tb[NL80211_PKTPAT_OFFSET])
- pkt_offset = 0;
- else
- pkt_offset = nla_get_u32(
- pat_tb[NL80211_PKTPAT_OFFSET]);
+ pkt_offset =
+ nla_get_u32_default(pat_tb[NL80211_PKTPAT_OFFSET],
+ 0);
if (pkt_offset > wowlan->max_pkt_offset)
goto error;
new_triggers.patterns[i].pkt_offset = pkt_offset;
@@ -13922,9 +14065,8 @@ nla_put_failure:
return -ENOBUFS;
}
-void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce)
{
- struct cfg80211_coalesce *coalesce = rdev->coalesce;
int i, j;
struct cfg80211_coalesce_rules *rule;
@@ -13937,9 +14079,7 @@ void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
kfree(rule->patterns[j].mask);
kfree(rule->patterns);
}
- kfree(coalesce->rules);
kfree(coalesce);
- rdev->coalesce = NULL;
}
static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
@@ -14006,10 +14146,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
pat_len < coalesce->pattern_min_len)
return -EINVAL;
- if (!pat_tb[NL80211_PKTPAT_OFFSET])
- pkt_offset = 0;
- else
- pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]);
+ pkt_offset = nla_get_u32_default(pat_tb[NL80211_PKTPAT_OFFSET],
+ 0);
if (pkt_offset > coalesce->max_pkt_offset)
return -EINVAL;
new_rule->patterns[i].pkt_offset = pkt_offset;
@@ -14037,17 +14175,16 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
- struct cfg80211_coalesce new_coalesce = {};
- struct cfg80211_coalesce *n_coalesce;
- int err, rem_rule, n_rules = 0, i, j;
+ struct cfg80211_coalesce *new_coalesce;
+ int err, rem_rule, n_rules = 0, i;
struct nlattr *rule;
- struct cfg80211_coalesce_rules *tmp_rule;
if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
- cfg80211_rdev_free_coalesce(rdev);
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = NULL;
rdev_set_coalesce(rdev, NULL);
return 0;
}
@@ -14058,45 +14195,34 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
if (n_rules > coalesce->n_rules)
return -EINVAL;
- new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
- GFP_KERNEL);
- if (!new_coalesce.rules)
+ new_coalesce = kzalloc(struct_size(new_coalesce, rules, n_rules),
+ GFP_KERNEL);
+ if (!new_coalesce)
return -ENOMEM;
- new_coalesce.n_rules = n_rules;
+ new_coalesce->n_rules = n_rules;
i = 0;
nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
rem_rule) {
err = nl80211_parse_coalesce_rule(rdev, rule,
- &new_coalesce.rules[i]);
+ &new_coalesce->rules[i]);
if (err)
goto error;
i++;
}
- err = rdev_set_coalesce(rdev, &new_coalesce);
+ err = rdev_set_coalesce(rdev, new_coalesce);
if (err)
goto error;
- n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
- if (!n_coalesce) {
- err = -ENOMEM;
- goto error;
- }
- cfg80211_rdev_free_coalesce(rdev);
- rdev->coalesce = n_coalesce;
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = new_coalesce;
return 0;
error:
- for (i = 0; i < new_coalesce.n_rules; i++) {
- tmp_rule = &new_coalesce.rules[i];
- for (j = 0; j < tmp_rule->n_patterns; j++)
- kfree(tmp_rule->patterns[j].mask);
- kfree(tmp_rule->patterns);
- }
- kfree(new_coalesce.rules);
+ cfg80211_free_coalesce(new_coalesce);
return err;
}
@@ -15382,7 +15508,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
if (tsid >= IEEE80211_FIRST_TSPEC_TSID) {
/* TODO: handle 802.11 TSPEC/admission control
* need more attributes for that (e.g. BA session requirement);
- * change the WMM adminssion test above to allow both then
+ * change the WMM admission test above to allow both then
*/
return -EINVAL;
}
@@ -16091,6 +16217,7 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info)
params.counter_offset_presp = offset;
}
+ params.link_id = nl80211_link_id(info->attrs);
err = rdev_color_change(rdev, dev, &params);
out:
@@ -16357,6 +16484,89 @@ nl80211_set_ttlm(struct sk_buff *skb, struct genl_info *info)
return rdev_set_ttlm(rdev, dev, &params);
}
+static int nl80211_assoc_ml_reconf(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ unsigned int link_id;
+ u16 add_links, rem_links;
+ int err;
+
+ if (!wdev->valid_links)
+ return -EINVAL;
+
+ if (dev->ieee80211_ptr->conn_owner_nlportid &&
+ dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
+ return -EPERM;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ add_links = 0;
+ if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
+ err = nl80211_process_links(rdev, links,
+ /* mark as MLO, but not assoc */
+ IEEE80211_MLD_MAX_NUM_LINKS,
+ NULL, 0, info);
+ if (err)
+ return err;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ if (!links[link_id].bss)
+ continue;
+ add_links |= BIT(link_id);
+ }
+ }
+
+ if (info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS])
+ rem_links =
+ nla_get_u16(info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS]);
+ else
+ rem_links = 0;
+
+ /* Validate that existing links are not added, that removed links are
+ * valid, and that no link is both added and removed
+ */
+ if ((add_links & rem_links) || !(add_links | rem_links) ||
+ (wdev->valid_links & add_links) ||
+ ((wdev->valid_links & rem_links) != rem_links)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = -EOPNOTSUPP;
+
+out:
+ for (link_id = 0; link_id < ARRAY_SIZE(links); link_id++)
+ cfg80211_put_bss(&rdev->wiphy, links[link_id].bss);
+
+ return err;
+}
+
+static int
+nl80211_epcs_cfg(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ bool val;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ if (!wdev->connected)
+ return -ENOLINK;
+
+ val = nla_get_flag(info->attrs[NL80211_ATTR_EPCS]);
+
+ return rdev_set_epcs(rdev, dev, val);
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -16414,10 +16624,10 @@ nl80211_set_ttlm(struct sk_buff *skb, struct genl_info *info)
SELECTOR(__sel, NETDEV_UP_NOTMX, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX) \
- SELECTOR(__sel, NETDEV_UP_NOTMX_NOMLO, \
+ SELECTOR(__sel, NETDEV_UP_NOTMX_MLO, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX | \
- NL80211_FLAG_MLO_UNSUPPORTED) \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
SELECTOR(__sel, NETDEV_UP_CLEAR, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_CLEAR_SKB) \
@@ -16830,6 +17040,10 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_station,
.flags = GENL_UNS_ADMIN_PERM,
+ /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID here, since the
+ * requirement depends on whether a MAC address is passed: if
+ * it is, then even during MLO the link ID is not required.
+ */
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
@@ -17308,7 +17522,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NO_WIPHY_MTX |
- NL80211_FLAG_MLO_UNSUPPORTED),
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
@@ -17489,7 +17703,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_color_change,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_SET_FILS_AAD,
@@ -17544,6 +17759,18 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
+ {
+ .cmd = NL80211_CMD_ASSOC_MLO_RECONF,
+ .doit = nl80211_assoc_ml_reconf,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ },
+ {
+ .cmd = NL80211_CMD_EPCS_CFG,
+ .doit = nl80211_epcs_cfg,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -17864,10 +18091,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
genlmsg_end(msg, hdr);
- rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
- rcu_read_unlock();
+ NL80211_MCGRP_REGULATORY);
return;
@@ -18321,10 +18546,9 @@ void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer_addr))
goto nla_put_failure;
- if ((td_bitmap_len > 0) && td_bitmap)
- if (nla_put(msg, NL80211_ATTR_TD_BITMAP,
- td_bitmap_len, td_bitmap))
- goto nla_put_failure;
+ if (td_bitmap_len > 0 && td_bitmap &&
+ nla_put(msg, NL80211_ATTR_TD_BITMAP, td_bitmap_len, td_bitmap))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -18442,6 +18666,23 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask)
}
EXPORT_SYMBOL(cfg80211_links_removed);
+void nl80211_mlo_reconf_add_done(struct net_device *dev,
+ struct cfg80211_mlo_reconf_done_data *data)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct nl80211_mlme_event event = {
+ .cmd = NL80211_CMD_ASSOC_MLO_RECONF,
+ .buf = data->buf,
+ .buf_len = data->len,
+ .uapsd_queues = -1,
+ };
+
+ nl80211_send_mlme_event(rdev, dev, &event, GFP_KERNEL);
+}
+EXPORT_SYMBOL(nl80211_mlo_reconf_add_done);
+
void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
gfp_t gfp)
@@ -18600,10 +18841,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
genlmsg_end(msg, hdr);
- rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
- rcu_read_unlock();
+ NL80211_MCGRP_REGULATORY);
return;
@@ -19400,7 +19639,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count, bool quiet, u16 punct_bitmap)
+ u8 count, bool quiet)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -19434,9 +19673,6 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
- if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
- goto nla_put_failure;
-
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -19449,7 +19685,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap)
+ unsigned int link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19458,7 +19694,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19487,15 +19723,14 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
- punct_bitmap);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap)
+ bool quiet)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19504,19 +19739,18 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
- punct_bitmap);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
- count, quiet, punct_bitmap);
+ count, quiet);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
- u64 color_bitmap)
+ u64 color_bitmap, u8 link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19539,6 +19773,10 @@ int cfg80211_bss_color_notify(struct net_device *dev,
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
+ if (wdev->valid_links &&
+ nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))
+ goto nla_put_failure;
+
if (cmd == NL80211_CMD_COLOR_CHANGE_STARTED &&
nla_put_u32(msg, NL80211_ATTR_COLOR_CHANGE_COUNT, count))
goto nla_put_failure;
@@ -19887,6 +20125,11 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
goto free_msg;
+ if (wakeup->unprot_deauth_disassoc &&
+ nla_put_flag(msg,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC))
+ goto free_msg;
+
if (wakeup->packet) {
u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
@@ -20167,9 +20410,26 @@ int cfg80211_external_auth_request(struct net_device *dev,
if (!hdr)
goto nla_put_failure;
+ /* Some historical mistakes in the drivers <-> userspace interface
+ * (notably between drivers and wpa_supplicant) led to a big-endian
+ * conversion being needed on NL80211_ATTR_AKM_SUITES _only_ when its
+ * value is WLAN_AKM_SUITE_SAE. This is now fixed on the userspace
+ * side, but for the benefit of older wpa_supplicant versions, send
+ * this particular value in big-endian. Note that newer wpa_supplicant
+ * still detects this value in big endian as well, so it all continues
+ * to work.
+ */
+ if (params->key_mgmt_suite == WLAN_AKM_SUITE_SAE) {
+ if (nla_put_be32(msg, NL80211_ATTR_AKM_SUITES,
+ cpu_to_be32(WLAN_AKM_SUITE_SAE)))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(msg, NL80211_ATTR_AKM_SUITES,
+ params->key_mgmt_suite))
+ goto nla_put_failure;
+ }
+
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
params->action) ||
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
@@ -20255,6 +20515,39 @@ void cfg80211_schedule_channels_check(struct wireless_dev *wdev)
}
EXPORT_SYMBOL(cfg80211_schedule_channels_check);
+void cfg80211_epcs_changed(struct net_device *netdev, bool enabled)
+{
+ struct wireless_dev *wdev = netdev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_epcs_changed(wdev, enabled);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EPCS_CFG);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (enabled && nla_put_flag(msg, NL80211_ATTR_EPCS))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_epcs_changed);
+
/* initialisation/exit functions */
int __init nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 6376f3a87f8a..5e25782af1e0 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions of this file
- * Copyright (C) 2018, 2020-2022 Intel Corporation
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_NL80211_H
#define __NET_WIRELESS_NL80211_H
@@ -119,9 +119,12 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id);
-void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce);
/* peer measurement */
int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
+void nl80211_mlo_reconf_add_done(struct net_device *dev,
+ struct cfg80211_mlo_reconf_done_data *data);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index e106dcea3977..a117f5093ca2 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2018 - 2021, 2023 Intel Corporation
+ * Copyright (C) 2018 - 2021, 2023 - 2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include "core.h"
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_period = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
out->ftm.burst_period =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.num_bursts_exp = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
out->ftm.num_bursts_exp =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
if (capa->ftm.max_bursts_exponent >= 0 &&
out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_duration = 15;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
out->ftm.burst_duration =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
out->ftm.ftms_per_burst = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.ftmr_retries = 3;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
out->ftm.ftmr_retries =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
if (out->ftm.request_lci && !capa->ftm.request_lci) {
@@ -148,6 +148,14 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
+ if (out->ftm.ftms_per_burst > 31 && !out->ftm.non_trigger_based &&
+ !out->ftm.trigger_based) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
+ "FTM: FTMs per burst must be set lower than 31");
+ return -ERANGE;
+ }
+
if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
out->ftm.preamble != NL80211_PREAMBLE_HE) {
NL_SET_ERR_MSG_ATTR(info->extack,
@@ -622,9 +630,9 @@ void cfg80211_pmsr_free_wk(struct work_struct *work)
struct wireless_dev *wdev = container_of(work, struct wireless_dev,
pmsr_free_wk);
- wiphy_lock(wdev->wiphy);
+ guard(wiphy)(wdev->wiphy);
+
cfg80211_pmsr_process_abort(wdev);
- wiphy_unlock(wdev->wiphy);
}
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index ae2e1a896461..326faea38ca3 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -18,7 +18,7 @@
#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* function prototypes and related defs are in include/net/cfg80211.h */
@@ -200,7 +200,7 @@ static void find_ns(struct ieee80211_radiotap_iterator *iterator,
* present fields. @this_arg can be changed by the caller (eg,
* incremented to move inside a compound argument like
* IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in
- * little-endian format whatever the endianess of your CPU.
+ * little-endian format whatever the endianness of your CPU.
*
* Alignment Gotcha:
* You must take care when dereferencing iterator.this_arg
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 43897a5269b6..759da1623342 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2023 Intel Corporation
+ * Copyright (C) 2018, 2021-2024 Intel Corporation
*/
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS
@@ -445,11 +445,12 @@ rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev,
static inline int
rdev_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef)
{
int ret;
- trace_rdev_set_monitor_channel(&rdev->wiphy, chandef);
- ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef);
+ trace_rdev_set_monitor_channel(&rdev->wiphy, dev, chandef);
+ ret = rdev->ops->set_monitor_channel(&rdev->wiphy, dev, chandef);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -458,6 +459,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
struct cfg80211_scan_request *request)
{
int ret;
+
+ if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+ return -EINVAL;
+
trace_rdev_scan(&rdev->wiphy, request);
ret = rdev->ops->scan(&rdev->wiphy, request);
trace_rdev_return_int(&rdev->wiphy, ret);
@@ -574,13 +579,11 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
static inline int
rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
{
- int ret;
-
- if (!rdev->ops->set_wiphy_params)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
- ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+ if (rdev->ops->set_wiphy_params)
+ ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -597,11 +600,12 @@ static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
}
static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, int *dbm)
+ struct wireless_dev *wdev, unsigned int link_id,
+ int *dbm)
{
int ret;
- trace_rdev_get_tx_power(&rdev->wiphy, wdev);
- ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm);
+ trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id);
+ ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm);
trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
return ret;
}
@@ -1198,26 +1202,27 @@ static inline int
rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
int ret = -EOPNOTSUPP;
trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef,
- cac_time_ms);
+ cac_time_ms, link_id);
if (rdev->ops->start_radar_detection)
ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev,
- chandef, cac_time_ms);
+ chandef, cac_time_ms,
+ link_id);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline void
rdev_end_cac(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
- trace_rdev_end_cac(&rdev->wiphy, dev);
+ trace_rdev_end_cac(&rdev->wiphy, dev, link_id);
if (rdev->ops->end_cac)
- rdev->ops->end_cac(&rdev->wiphy, dev);
+ rdev->ops->end_cac(&rdev->wiphy, dev, link_id);
trace_rdev_return_void(&rdev->wiphy);
}
@@ -1421,13 +1426,11 @@ rdev_set_radar_background(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_radar_background)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_radar_background(wiphy, chandef);
- ret = rdev->ops->set_radar_background(wiphy, chandef);
+ if (rdev->ops->set_radar_background)
+ ret = rdev->ops->set_radar_background(wiphy, chandef);
trace_rdev_return_int(wiphy, ret);
return ret;
@@ -1464,13 +1467,11 @@ rdev_add_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_parameters *params)
{
- int ret;
-
- if (!rdev->ops->add_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_add_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->add_link_station)
+ ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1480,13 +1481,11 @@ rdev_mod_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_parameters *params)
{
- int ret;
-
- if (!rdev->ops->mod_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_mod_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->mod_link_station)
+ ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1496,13 +1495,11 @@ rdev_del_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_del_parameters *params)
{
- int ret;
-
- if (!rdev->ops->del_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_del_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->del_link_station)
+ ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1513,13 +1510,11 @@ rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev,
struct cfg80211_set_hw_timestamp *hwts)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_hw_timestamp)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_hw_timestamp(wiphy, dev, hwts);
- ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts);
+ if (rdev->ops->set_hw_timestamp)
+ ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts);
trace_rdev_return_int(wiphy, ret);
return ret;
@@ -1531,15 +1526,59 @@ rdev_set_ttlm(struct cfg80211_registered_device *rdev,
struct cfg80211_ttlm_params *params)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_ttlm)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_ttlm(wiphy, dev, params);
- ret = rdev->ops->set_ttlm(wiphy, dev, params);
+ if (rdev->ops->set_ttlm)
+ ret = rdev->ops->set_ttlm(wiphy, dev, params);
+ trace_rdev_return_int(wiphy, ret);
+
+ return ret;
+}
+
+static inline u32
+rdev_get_radio_mask(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+
+ if (!rdev->ops->get_radio_mask)
+ return 0;
+
+ return rdev->ops->get_radio_mask(wiphy, dev);
+}
+
+static inline int
+rdev_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_link *add_links,
+ u16 rem_links)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_assoc_ml_reconf(wiphy, dev, add_links, rem_links);
+ if (rdev->ops->assoc_ml_reconf)
+ ret = rdev->ops->assoc_ml_reconf(wiphy, dev, add_links,
+ rem_links);
+ trace_rdev_return_int(wiphy, ret);
+
+ return ret;
+}
+
+static inline int
+rdev_set_epcs(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool val)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_set_epcs(wiphy, dev, val);
+ if (rdev->ops->set_epcs)
+ ret = rdev->ops->set_epcs(wiphy, dev, val);
trace_rdev_return_int(wiphy, ret);
return ret;
}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2741b626919a..212e9561aae7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,6 +57,8 @@
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
+#include <linux/units.h>
+
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
@@ -405,7 +407,8 @@ static bool is_an_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
- return isalpha(alpha2[0]) && isalpha(alpha2[1]);
+ return isascii(alpha2[0]) && isalpha(alpha2[0]) &&
+ isascii(alpha2[1]) && isalpha(alpha2[1]);
}
static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
@@ -1145,7 +1148,7 @@ static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
/*
* Follow the driver's regulatory domain, if present, unless a country
- * IE has been processed or a user wants to help complaince further
+ * IE has been processed or a user wants to help compliance further
*/
if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
lr->initiator != NL80211_REGDOM_SET_BY_USER &&
@@ -1289,20 +1292,17 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
u32 freq_khz)
{
-#define ONE_GHZ_IN_KHZ 1000000
/*
* From 802.11ad: directional multi-gigabit (DMG):
* Pertaining to operation in a frequency band containing a channel
* with the Channel starting frequency above 45 GHz.
*/
- u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
return true;
return false;
-#undef ONE_GHZ_IN_KHZ
}
/*
@@ -1595,12 +1595,14 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_EHT;
if (rd_flags & NL80211_RRF_DFS_CONCURRENT)
channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT;
- if (rd_flags & NL80211_RRF_NO_UHB_VLP_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_VLP_CLIENT;
- if (rd_flags & NL80211_RRF_NO_UHB_AFC_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_AFC_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
if (rd_flags & NL80211_RRF_PSD)
channel_flags |= IEEE80211_CHAN_PSD;
+ if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP)
+ channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
return channel_flags;
}
@@ -2464,11 +2466,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
struct wireless_dev *wdev;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- wiphy_lock(wiphy);
+ guard(wiphy)(wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
if (!reg_wdev_chan_valid(wiphy, wdev))
cfg80211_leave(rdev, wdev);
- wiphy_unlock(wiphy);
}
static void reg_check_chans_work(struct work_struct *work)
@@ -2648,13 +2650,11 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
return;
rtnl_lock();
- wiphy_lock(wiphy);
-
- tmp = get_wiphy_regdom(wiphy);
- rcu_assign_pointer(wiphy->regd, new_regd);
- rcu_free_regdom(tmp);
-
- wiphy_unlock(wiphy);
+ scoped_guard(wiphy, wiphy) {
+ tmp = get_wiphy_regdom(wiphy);
+ rcu_assign_pointer(wiphy->regd, new_regd);
+ rcu_free_regdom(tmp);
+ }
rtnl_unlock();
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
@@ -2824,9 +2824,9 @@ reg_process_hint_driver(struct wiphy *wiphy,
tmp = get_wiphy_regdom(wiphy);
ASSERT_RTNL();
- wiphy_lock(wiphy);
- rcu_assign_pointer(wiphy->regd, regd);
- wiphy_unlock(wiphy);
+ scoped_guard(wiphy, wiphy) {
+ rcu_assign_pointer(wiphy->regd, regd);
+ }
rcu_free_regdom(tmp);
}
@@ -3204,9 +3204,9 @@ static void reg_process_self_managed_hints(void)
ASSERT_RTNL();
for_each_rdev(rdev) {
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
reg_process_self_managed_hint(&rdev->wiphy);
- wiphy_unlock(&rdev->wiphy);
}
reg_check_channels();
@@ -3285,7 +3285,7 @@ int regulatory_hint_user(const char *alpha2,
return 0;
}
-int regulatory_hint_indoor(bool is_indoor, u32 portid)
+void regulatory_hint_indoor(bool is_indoor, u32 portid)
{
spin_lock(&reg_indoor_lock);
@@ -3308,8 +3308,6 @@ int regulatory_hint_indoor(bool is_indoor, u32 portid)
if (!is_indoor)
reg_check_channels();
-
- return 0;
}
void regulatory_netlink_notify(u32 portid)
@@ -3601,14 +3599,12 @@ static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
struct wireless_dev *wdev;
for_each_rdev(rdev) {
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
- if (!(wdev->wiphy->regulatory_flags & flag)) {
- wiphy_unlock(&rdev->wiphy);
+ if (!(wdev->wiphy->regulatory_flags & flag))
return false;
- }
}
- wiphy_unlock(&rdev->wiphy);
}
return true;
@@ -3667,9 +3663,9 @@ static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan)
return false;
}
-int regulatory_hint_found_beacon(struct wiphy *wiphy,
- struct ieee80211_channel *beacon_chan,
- gfp_t gfp)
+void regulatory_hint_found_beacon(struct wiphy *wiphy,
+ struct ieee80211_channel *beacon_chan,
+ gfp_t gfp)
{
struct reg_beacon *reg_beacon;
bool processing;
@@ -3678,18 +3674,18 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
beacon_chan->flags & IEEE80211_CHAN_RADAR ||
(beacon_chan->band == NL80211_BAND_2GHZ &&
!freq_is_chan_12_13_14(beacon_chan->center_freq)))
- return 0;
+ return;
spin_lock_bh(&reg_pending_beacons_lock);
processing = pending_reg_beacon(beacon_chan);
spin_unlock_bh(&reg_pending_beacons_lock);
if (processing)
- return 0;
+ return;
reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
if (!reg_beacon)
- return -ENOMEM;
+ return;
pr_debug("Found new beacon on frequency: %d.%03d MHz (Ch %d) on %s\n",
beacon_chan->center_freq, beacon_chan->freq_offset,
@@ -3709,8 +3705,6 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
spin_unlock_bh(&reg_pending_beacons_lock);
schedule_work(&reg_work);
-
- return 0;
}
static void print_rd_rules(const struct ieee80211_regdomain *rd)
@@ -3886,19 +3880,18 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
if (!driver_request->intersect) {
ASSERT_RTNL();
- wiphy_lock(request_wiphy);
- if (request_wiphy->regd)
- tmp = get_wiphy_regdom(request_wiphy);
-
- regd = reg_copy_regd(rd);
- if (IS_ERR(regd)) {
- wiphy_unlock(request_wiphy);
- return PTR_ERR(regd);
+ scoped_guard(wiphy, request_wiphy) {
+ if (request_wiphy->regd)
+ tmp = get_wiphy_regdom(request_wiphy);
+
+ regd = reg_copy_regd(rd);
+ if (IS_ERR(regd))
+ return PTR_ERR(regd);
+
+ rcu_assign_pointer(request_wiphy->regd, regd);
+ rcu_free_regdom(tmp);
}
- rcu_assign_pointer(request_wiphy->regd, regd);
- rcu_free_regdom(tmp);
- wiphy_unlock(request_wiphy);
reset_regdomains(false, rd);
return 0;
}
@@ -4232,6 +4225,8 @@ EXPORT_SYMBOL(regulatory_pre_cac_allowed);
static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev;
+ unsigned int link_id;
+
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable contain 2 options:
@@ -4244,16 +4239,17 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
struct cfg80211_chan_def *chandef;
- if (!wdev->cac_started)
- continue;
+ for_each_valid_link(wdev, link_id) {
+ if (!wdev->links[link_id].cac_started)
+ continue;
- /* FIXME: radar detection is tied to link 0 for now */
- chandef = wdev_chandef(wdev, 0);
- if (!chandef)
- continue;
+ chandef = wdev_chandef(wdev, link_id);
+ if (!chandef)
+ continue;
- if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
- rdev_end_cac(rdev, wdev->netdev);
+ if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
+ rdev_end_cac(rdev, wdev->netdev, link_id);
+ }
}
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index a02ef5609f52..e1b211c4f75c 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -42,7 +42,7 @@ int regulatory_hint_user(const char *alpha2,
* device is operating in an indoor environment.
* @portid: the netlink port ID on which the hint was given.
*/
-int regulatory_hint_indoor(bool is_indoor, u32 portid);
+void regulatory_hint_indoor(bool is_indoor, u32 portid);
/**
* regulatory_netlink_notify - notify on released netlink socket
@@ -82,9 +82,9 @@ bool reg_last_request_cell_base(void);
* on a newly found BSS. If you cannot make use of this feature you can
* set the wiphy->disable_beacon_hints to true.
*/
-int regulatory_hint_found_beacon(struct wiphy *wiphy,
- struct ieee80211_channel *beacon_chan,
- gfp_t gfp);
+void regulatory_hint_found_beacon(struct wiphy *wiphy,
+ struct ieee80211_channel *beacon_chan,
+ gfp_t gfp);
/**
* regulatory_hint_country_ie - hints a country IE as a regulatory domain
@@ -137,13 +137,14 @@ void regulatory_hint_disconnect(void);
* Get a value specifying the U-NII band frequency belongs to.
* U-NII bands are defined by the FCC in C.F.R 47 part 15.
*
- * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
+ * Return: -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
* 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3.
*/
int cfg80211_get_unii(int freq);
/**
* regulatory_indoor_allowed - is indoor operation allowed
+ * Return: %true if indoor operation is allowed, %false otherwise
*/
bool regulatory_indoor_allowed(void);
@@ -173,11 +174,13 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
* reg_dfs_domain_same - Checks if both wiphy have same DFS domain configured
* @wiphy1: wiphy it's dfs_region to be checked against that of wiphy2
* @wiphy2: wiphy it's dfs_region to be checked against that of wiphy1
+ * Return: %true if both wiphys have the same DFS domain, %false otherwise
*/
bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2);
/**
* reg_reload_regdb - reload the regulatory.db firmware file
+ * Return: 0 for success, an error code otherwise
*/
int reg_reload_regdb(void);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 389a52c29bfc..cd2124329521 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -77,45 +77,6 @@ MODULE_PARM_DESC(bss_entries_limit,
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
-/**
- * struct cfg80211_colocated_ap - colocated AP information
- *
- * @list: linked list to all colocated aPS
- * @bssid: BSSID of the reported AP
- * @ssid: SSID of the reported AP
- * @ssid_len: length of the ssid
- * @center_freq: frequency the reported AP is on
- * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
- * that operate in the same channel as the reported AP and that might be
- * detected by a STA receiving this frame, are transmitting unsolicited
- * Probe Response frames every 20 TUs
- * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
- * @same_ssid: the reported AP has the same SSID as the reporting AP
- * @multi_bss: the reported AP is part of a multiple BSSID set
- * @transmitted_bssid: the reported AP is the transmitting BSSID
- * @colocated_ess: all the APs that share the same ESS as the reported AP are
- * colocated and can be discovered via legacy bands.
- * @short_ssid_valid: short_ssid is valid and can be used
- * @short_ssid: the short SSID for this SSID
- * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
- */
-struct cfg80211_colocated_ap {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
- u32 short_ssid;
- u32 center_freq;
- u8 unsolicited_probe:1,
- oct_recommended:1,
- same_ssid:1,
- multi_bss:1,
- transmitted_bssid:1,
- colocated_ess:1,
- short_ssid_valid:1;
- s8 psd_20;
-};
-
static void bss_free(struct cfg80211_internal_bss *bss)
{
struct cfg80211_bss_ies *ies;
@@ -311,12 +272,19 @@ cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
{
const struct element *non_inherit_elem, *parent, *sub;
u8 *pos = new_ie;
- u8 id, ext_id;
+ const u8 *mbssid_index_ie;
+ u8 id, ext_id, bssid_index = 255;
unsigned int match_len;
non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
subie, subie_len);
+ mbssid_index_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, subie,
+ subie_len);
+ if (mbssid_index_ie && mbssid_index_ie[1] > 0 &&
+ mbssid_index_ie[2] > 0 && mbssid_index_ie[2] <= 46)
+ bssid_index = mbssid_index_ie[2];
+
/* We copy the elements one by one from the parent to the generated
* elements.
* If they are not inherited (included in subie or in the non
@@ -355,6 +323,24 @@ cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
continue;
}
+ /* For ML probe response, match the MLE in the frame body with
+ * MLD id being 'bssid_index'
+ */
+ if (parent->id == WLAN_EID_EXTENSION && parent->datalen > 1 &&
+ parent->data[0] == WLAN_EID_EXT_EHT_MULTI_LINK &&
+ bssid_index == ieee80211_mle_get_mld_id(parent->data + 1)) {
+ if (!cfg80211_copy_elem_with_frags(parent,
+ ie, ielen,
+ &pos, new_ie,
+ new_ie_len))
+ return 0;
+
+ /* Continue here to avoid processing the MLE in the
+ * sub-element, which the AP MLD should not carry
+ */
+ continue;
+ }
+
/* Already copied if an earlier element had the same type */
if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
&ext_id, match_len, 0))
@@ -566,7 +552,8 @@ static int cfg80211_calc_short_ssid(const struct cfg80211_bss_ies *ies,
return 0;
}
-static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
+VISIBLE_IF_CFG80211_KUNIT void
+cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
{
struct cfg80211_colocated_ap *ap, *tmp_ap;
@@ -575,6 +562,7 @@ static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
kfree(ap);
}
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_free_coloc_ap_list);
static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
const u8 *pos, u8 length,
@@ -648,104 +636,151 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
return 0;
}
-static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
- struct list_head *list)
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data)
{
- struct ieee80211_neighbor_ap_info *ap_info;
- const struct element *elem, *ssid_elem;
+ const struct element *rnr;
const u8 *pos, *end;
- u32 s_ssid_tmp;
- int n_coloc = 0, ret;
- LIST_HEAD(ap_list);
- ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
- if (ret)
- return 0;
+ for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
+ elems, elems_len) {
+ const struct ieee80211_neighbor_ap_info *info;
- for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
- ies->data, ies->len) {
- pos = elem->data;
- end = elem->data + elem->datalen;
+ pos = rnr->data;
+ end = rnr->data + rnr->datalen;
/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (pos + sizeof(*ap_info) <= end) {
- enum nl80211_band band;
- int freq;
+ while (sizeof(*info) <= end - pos) {
u8 length, i, count;
+ u8 type;
- ap_info = (void *)pos;
- count = u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = ap_info->tbtt_info_len;
+ info = (void *)pos;
+ count = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_COUNT) +
+ 1;
+ length = info->tbtt_info_len;
- pos += sizeof(*ap_info);
+ pos += sizeof(*info);
- if (!ieee80211_operating_class_to_band(ap_info->op_class,
- &band))
- break;
+ if (count * length > end - pos)
+ return false;
- freq = ieee80211_channel_to_frequency(ap_info->channel,
- band);
+ type = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE);
- if (end - pos < count * length)
- break;
+ for (i = 0; i < count; i++) {
+ switch (iter(iter_data, type, info,
+ pos, length)) {
+ case RNR_ITER_CONTINUE:
+ break;
+ case RNR_ITER_BREAK:
+ return true;
+ case RNR_ITER_ERROR:
+ return false;
+ }
- if (u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE) !=
- IEEE80211_TBTT_INFO_TYPE_TBTT) {
- pos += count * length;
- continue;
+ pos += length;
}
+ }
- /* TBTT info must include bss param + BSSID +
- * (short SSID or same_ssid bit to be set).
- * ignore other options, and move to the
- * next AP info
- */
- if (band != NL80211_BAND_6GHZ ||
- !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
- bss_params) ||
- length == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
- length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
- bss_params))) {
- pos += count * length;
- continue;
- }
+ if (pos != end)
+ return false;
+ }
- for (i = 0; i < count; i++) {
- struct cfg80211_colocated_ap *entry;
+ return true;
+}
+EXPORT_SYMBOL_GPL(cfg80211_iter_rnr);
- entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
- GFP_ATOMIC);
+struct colocated_ap_data {
+ const struct element *ssid_elem;
+ struct list_head ap_list;
+ u32 s_ssid_tmp;
+ int n_coloc;
+};
- if (!entry)
- goto error;
+static enum cfg80211_rnr_iter_ret
+cfg80211_parse_colocated_ap_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct colocated_ap_data *data = _data;
+ struct cfg80211_colocated_ap *entry;
+ enum nl80211_band band;
- entry->center_freq = freq;
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
- if (!cfg80211_parse_ap_info(entry, pos, length,
- ssid_elem,
- s_ssid_tmp)) {
- n_coloc++;
- list_add_tail(&entry->list, &ap_list);
- } else {
- kfree(entry);
- }
+ if (!ieee80211_operating_class_to_band(info->op_class, &band))
+ return RNR_ITER_CONTINUE;
- pos += length;
+ /* TBTT info must include bss param + BSSID + (short SSID or
+ * same_ssid bit to be set). Ignore other options, and move to
+ * the next AP info
+ */
+ if (band != NL80211_BAND_6GHZ ||
+ !(tbtt_info_len == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+ bss_params) ||
+ tbtt_info_len == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ bss_params)))
+ return RNR_ITER_CONTINUE;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return RNR_ITER_ERROR;
+
+ entry->center_freq =
+ ieee80211_channel_to_frequency(info->channel, band);
+
+ if (!cfg80211_parse_ap_info(entry, tbtt_info, tbtt_info_len,
+ data->ssid_elem, data->s_ssid_tmp)) {
+ struct cfg80211_colocated_ap *tmp;
+
+ /* Don't add duplicate BSSIDs on the same channel. */
+ list_for_each_entry(tmp, &data->ap_list, list) {
+ if (ether_addr_equal(tmp->bssid, entry->bssid) &&
+ tmp->center_freq == entry->center_freq) {
+ kfree(entry);
+ return RNR_ITER_CONTINUE;
}
}
-error:
- if (pos != end) {
- cfg80211_free_coloc_ap_list(&ap_list);
- return 0;
- }
+ data->n_coloc++;
+ list_add_tail(&entry->list, &data->ap_list);
+ } else {
+ kfree(entry);
+ }
+
+ return RNR_ITER_CONTINUE;
+}
+
+VISIBLE_IF_CFG80211_KUNIT int
+cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list)
+{
+ struct colocated_ap_data data = {};
+ int ret;
+
+ INIT_LIST_HEAD(&data.ap_list);
+
+ ret = cfg80211_calc_short_ssid(ies, &data.ssid_elem, &data.s_ssid_tmp);
+ if (ret)
+ return 0;
+
+ if (!cfg80211_iter_rnr(ies->data, ies->len,
+ cfg80211_parse_colocated_ap_iter, &data)) {
+ cfg80211_free_coloc_ap_list(&data.ap_list);
+ return 0;
}
- list_splice_tail(&ap_list, list);
- return n_coloc;
+ list_splice_tail(&data.ap_list, list);
+ return data.n_coloc;
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_parse_colocated_ap);
static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
struct ieee80211_channel *chan,
@@ -764,12 +799,11 @@ static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
}
}
+ request->n_channels++;
request->channels[n_channels] = chan;
if (add_to_6ghz)
request->scan_6ghz_params[request->n_6ghz_params].channel_idx =
n_channels;
-
- request->n_channels++;
}
static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
@@ -813,6 +847,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
LIST_HEAD(coloc_ap_list);
bool need_scan_psc = true;
const struct ieee80211_sband_iftype_data *iftd;
+ size_t size, offs_ssids, offs_6ghz_params, offs_ies;
rdev_req->scan_6ghz = true;
@@ -858,9 +893,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
if (ret)
continue;
- entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
- GFP_ATOMIC);
-
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
continue;
@@ -878,10 +911,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
spin_unlock_bh(&rdev->bss_lock);
}
- request = kzalloc(struct_size(request, channels, n_channels) +
- sizeof(*request->scan_6ghz_params) * count +
- sizeof(*request->ssids) * rdev_req->n_ssids,
- GFP_KERNEL);
+ size = struct_size(request, channels, n_channels);
+ offs_ssids = size;
+ size += sizeof(*request->ssids) * rdev_req->n_ssids;
+ offs_6ghz_params = size;
+ size += sizeof(*request->scan_6ghz_params) * count;
+ offs_ies = size;
+ size += rdev_req->ie_len;
+
+ request = kzalloc(size, GFP_KERNEL);
if (!request) {
cfg80211_free_coloc_ap_list(&coloc_ap_list);
return -ENOMEM;
@@ -889,8 +927,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
*request = *rdev_req;
request->n_channels = 0;
- request->scan_6ghz_params =
- (void *)&request->channels[n_channels];
+ request->n_6ghz_params = 0;
+ if (rdev_req->n_ssids) {
+ /*
+ * Add the ssids from the parent scan request to the new
+ * scan request, so the driver would be able to use them
+ * in its probe requests to discover hidden APs on PSC
+ * channels.
+ */
+ request->ssids = (void *)request + offs_ssids;
+ memcpy(request->ssids, rdev_req->ssids,
+ sizeof(*request->ssids) * request->n_ssids);
+ }
+ request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+
+ if (rdev_req->ie_len) {
+ void *ie = (void *)request + offs_ies;
+
+ memcpy(ie, rdev_req->ie, rdev_req->ie_len);
+ request->ie = ie;
+ }
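[Editor's note] The reworked allocation above computes one size for the whole scan request and records running offsets (offs_ssids, offs_6ghz_params, offs_ies) so the SSIDs, 6 GHz scan parameters and IEs all live in the single kzalloc'ed buffer behind the flexible channels[] array. A minimal user-space sketch of that single-allocation layout trick follows; the struct and field names are illustrative, not the cfg80211 ones, and it relies on GNU C arithmetic on void * just as the patch does.

#include <stdlib.h>
#include <stddef.h>

struct ssid_entry { unsigned char ssid[32]; unsigned char len; };

struct request {
    size_t n_channels;
    struct ssid_entry *ssids;   /* points into the same allocation */
    unsigned char *ie;
    int channels[];             /* flexible array, like request->channels */
};

static struct request *alloc_request(size_t n_channels, size_t n_ssids,
                                     size_t ie_len)
{
    /* record running offsets first, then allocate exactly once */
    size_t size = offsetof(struct request, channels) +
                  n_channels * sizeof(int);
    size_t offs_ssids = size;
    size_t offs_ie;
    struct request *req;

    size += n_ssids * sizeof(struct ssid_entry);
    offs_ie = size;
    size += ie_len;

    req = calloc(1, size);
    if (!req)
        return NULL;

    req->n_channels = n_channels;
    if (n_ssids)
        req->ssids = (void *)req + offs_ssids;
    if (ie_len)
        req->ie = (void *)req + offs_ie;
    return req;
}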
/*
* PSC channels should not be scanned in case of direct scan with 1 SSID
@@ -933,7 +989,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
struct ieee80211_channel *chan =
ieee80211_get_channel(&rdev->wiphy, ap->center_freq);
- if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED ||
+ !cfg80211_wdev_channel_allowed(rdev_req->wdev, chan))
continue;
for (i = 0; i < rdev_req->n_channels; i++) {
@@ -979,17 +1036,8 @@ skip:
if (request->n_channels) {
struct cfg80211_scan_request *old = rdev->int_scan_req;
- rdev->int_scan_req = request;
- /*
- * Add the ssids from the parent scan request to the new scan
- * request, so the driver would be able to use them in its
- * probe requests to discover hidden APs on PSC channels.
- */
- request->ssids = (void *)&request->channels[request->n_channels];
- request->n_ssids = rdev_req->n_ssids;
- memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
- request->n_ssids);
+ rdev->int_scan_req = request;
/*
* If this scan follows a previous scan, save the scan start
@@ -1223,7 +1271,8 @@ void cfg80211_sched_scan_results_wk(struct work_struct *work)
rdev = container_of(work, struct cfg80211_registered_device,
sched_scan_res_wk);
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
if (req->report_results) {
req->report_results = false;
@@ -1238,7 +1287,6 @@ void cfg80211_sched_scan_results_wk(struct work_struct *work)
NL80211_CMD_SCHED_SCAN_RESULTS);
}
}
- wiphy_unlock(&rdev->wiphy);
}
void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
@@ -1273,9 +1321,9 @@ EXPORT_SYMBOL(cfg80211_sched_scan_stopped_locked);
void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid)
{
- wiphy_lock(wiphy);
+ guard(wiphy)(wiphy);
+
cfg80211_sched_scan_stopped_locked(wiphy, reqid);
- wiphy_unlock(wiphy);
}
EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
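[Editor's note] The conversions above replace explicit wiphy_lock()/wiphy_unlock() pairs with guard(wiphy)(...), which releases the lock automatically when the enclosing scope ends, including on early returns. A rough user-space analogue of that scope-exit mechanism is sketched below; the GUARD macro and pthread mutex are illustrative only, not the kernel's cleanup.h API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* called automatically when the guard variable goes out of scope */
static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

#define GUARD(m) \
    pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup))) = \
        (pthread_mutex_lock(m), (m))

static int do_work(int fail)
{
    GUARD(&lock);

    if (fail)
        return -1;      /* lock released here ... */

    printf("working under the lock\n");
    return 0;           /* ... and here */
}

int main(void)
{
    do_work(0);
    do_work(1);
    return 0;
}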
@@ -1590,7 +1638,7 @@ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
}
EXPORT_SYMBOL(__cfg80211_get_bss);
-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *bss)
{
struct rb_node **p = &rdev->bss_tree.rb_node;
@@ -1606,7 +1654,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
if (WARN_ON(!cmp)) {
/* will sort of leak this BSS */
- return;
+ return false;
}
if (cmp < 0)
@@ -1617,6 +1665,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
rb_link_node(&bss->rbn, parent, p);
rb_insert_color(&bss->rbn, &rdev->bss_tree);
+ return true;
}
static struct cfg80211_internal_bss *
@@ -1643,6 +1692,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
return NULL;
}
+static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *bss)
+{
+ lockdep_assert_held(&rdev->bss_lock);
+
+ if (!rb_insert_bss(rdev, bss))
+ return;
+ list_add_tail(&bss->list, &rdev->bss_list);
+ rdev->bss_entries++;
+}
+
+static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *bss)
+{
+ lockdep_assert_held(&rdev->bss_lock);
+
+ rb_erase(&bss->rbn, &rdev->bss_tree);
+ if (!rb_insert_bss(rdev, bss)) {
+ list_del(&bss->list);
+ if (!list_empty(&bss->hidden_list))
+ list_del_init(&bss->hidden_list);
+ if (!list_empty(&bss->pub.nontrans_list))
+ list_del_init(&bss->pub.nontrans_list);
+ rdev->bss_entries--;
+ }
+ rdev->bss_generation++;
+}
+
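[Editor's note] cfg80211_rehash_bss() exists because an rb-tree node whose sort key has changed (here, the channel of a BSS moved by a channel switch) cannot be left in place: it must be erased and re-inserted, and if re-insertion now collides with an existing entry the node is unlinked completely. Below is a minimal sketch of that erase-and-reinsert pattern with the kernel rbtree API; the item/key types are illustrative, not the cfg80211 structures.

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
    struct rb_node rbn;
    int key;        /* the sort key that may change */
};

/* returns false on a duplicate key, mirroring rb_insert_bss() */
static bool item_insert(struct rb_root *root, struct item *it)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p) {
        struct item *cur = rb_entry(*p, struct item, rbn);

        parent = *p;
        if (it->key == cur->key)
            return false;
        p = it->key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
    }

    rb_link_node(&it->rbn, parent, p);
    rb_insert_color(&it->rbn, root);
    return true;
}

/* re-sort an item after its key changed; false means it was dropped */
static bool item_rehash(struct rb_root *root, struct item *it, int new_key)
{
    rb_erase(&it->rbn, root);
    it->key = new_key;
    return item_insert(root, it);
}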
static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *new)
{
@@ -1867,6 +1944,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
known->pub.bssid_index = new->pub.bssid_index;
known->pub.use_for &= new->pub.use_for;
known->pub.cannot_use_reasons = new->pub.cannot_use_reasons;
+ known->bss_source = new->bss_source;
return true;
}
@@ -1955,9 +2033,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
}
- list_add_tail(&new->list, &rdev->bss_list);
- rdev->bss_entries++;
- rb_insert_bss(rdev, new);
+ cfg80211_insert_bss(rdev, new);
found = new;
}
@@ -1967,10 +2043,10 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
return found;
free_ies:
- ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.beacon_ies);
if (ies)
kfree_rcu(ies, rcu_head);
- ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.proberesp_ies);
if (ies)
kfree_rcu(ies, rcu_head);
@@ -2108,11 +2184,7 @@ struct cfg80211_inform_single_bss_data {
const u8 *ie;
size_t ielen;
- enum {
- BSS_SOURCE_DIRECT = 0,
- BSS_SOURCE_MBSSID,
- BSS_SOURCE_STA_PROFILE,
- } bss_source;
+ enum bss_source_type bss_source;
/* Set if reporting bss_source != BSS_SOURCE_DIRECT */
struct cfg80211_bss *source_bss;
u8 max_bssid_indicator;
@@ -2122,6 +2194,55 @@ struct cfg80211_inform_single_bss_data {
u64 cannot_use_reasons;
};
+enum ieee80211_ap_reg_power
+cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len)
+{
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+ struct ieee80211_he_operation *he_oper;
+ const struct element *tmp;
+
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
+ elems, elems_len);
+ if (!tmp || tmp->datalen < sizeof(*he_oper) + 1 ||
+ tmp->datalen < ieee80211_he_oper_size(tmp->data + 1))
+ return IEEE80211_REG_UNSET_AP;
+
+ he_oper = (void *)&tmp->data[1];
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+ if (!he_6ghz_oper)
+ return IEEE80211_REG_UNSET_AP;
+
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
+ return IEEE80211_REG_LPI_AP;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ return IEEE80211_REG_SP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return IEEE80211_REG_VLP_AP;
+ default:
+ return IEEE80211_REG_UNSET_AP;
+ }
+}
+
+static bool cfg80211_6ghz_power_type_valid(const u8 *elems, size_t elems_len,
+ const u32 flags)
+{
+ switch (cfg80211_get_6ghz_power_type(elems, elems_len)) {
+ case IEEE80211_REG_LPI_AP:
+ return true;
+ case IEEE80211_REG_SP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
+ case IEEE80211_REG_VLP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
+ default:
+ return false;
+ }
+}
+
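[Editor's note] Both helpers above come down to extracting the Regulatory Info subfield from the HE 6 GHz Operation control byte with u8_get_bits() and mapping it to an AP power category. A small user-space model of that field extraction is below; the 0x38 mask (bits 3..5) is an assumption used only for the illustration, the authoritative definition is IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO in ieee80211.h.

#include <stdint.h>
#include <stdio.h>

/* rough model of the kernel's u8_get_bits(): extract a contiguous field */
static uint8_t get_bits8(uint8_t val, uint8_t mask)
{
    return (uint8_t)((val & mask) >> __builtin_ctz(mask));
}

int main(void)
{
    uint8_t control = 0x28;         /* hypothetical control byte */
    uint8_t reg_info_mask = 0x38;   /* assumed position of the subfield */

    /* prints 5, i.e. (0x28 & 0x38) >> 3 */
    printf("regulatory info = %u\n", get_bits8(control, reg_info_mask));
    return 0;
}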
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *
cfg80211_inform_single_bss_data(struct wiphy *wiphy,
@@ -2154,6 +2275,14 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!channel)
return NULL;
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_6ghz_power_type_valid(data->ie, data->ielen,
+ channel->flags)) {
+ data->use_for = 0;
+ data->cannot_use_reasons =
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH;
+ }
+
memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN);
tmp.pub.channel = channel;
if (data->bss_source != BSS_SOURCE_STA_PROFILE)
@@ -2165,15 +2294,23 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.ts_boottime = drv_data->boottime_ns;
tmp.parent_tsf = drv_data->parent_tsf;
ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid);
+ tmp.pub.chains = drv_data->chains;
+ memcpy(tmp.pub.chain_signal, drv_data->chain_signal,
+ IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
+ tmp.bss_source = data->bss_source;
- if (data->bss_source != BSS_SOURCE_DIRECT) {
+ switch (data->bss_source) {
+ case BSS_SOURCE_MBSSID:
tmp.pub.transmitted_bss = data->source_bss;
+ fallthrough;
+ case BSS_SOURCE_STA_PROFILE:
ts = bss_from_pub(data->source_bss)->ts;
tmp.pub.bssid_index = data->bssid_index;
tmp.pub.max_bssid_indicator = data->max_bssid_indicator;
- } else {
+ break;
+ case BSS_SOURCE_DIRECT:
ts = jiffies;
if (channel->band == NL80211_BAND_60GHZ) {
@@ -2188,6 +2325,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
regulatory_hint_found_beacon(wiphy, channel,
gfp);
}
+ break;
}
/*
@@ -2208,6 +2346,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
switch (data->ftype) {
case CFG80211_BSS_FTYPE_BEACON:
+ case CFG80211_BSS_FTYPE_S1G_BEACON:
ies->from_beacon = true;
fallthrough;
case CFG80211_BSS_FTYPE_UNKNOWN:
@@ -2403,7 +2542,8 @@ cfg80211_parse_mbssid_data(struct wiphy *wiphy,
profile, profile_len);
if (!mbssid_index_ie || mbssid_index_ie[1] < 1 ||
mbssid_index_ie[2] == 0 ||
- mbssid_index_ie[2] > 46) {
+ mbssid_index_ie[2] > 46 ||
+ mbssid_index_ie[2] >= (1 << elem->data[0])) {
/* No valid Multiple BSSID-Index element */
continue;
}
@@ -2464,16 +2604,22 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
if (elem->id == WLAN_EID_EXTENSION) {
copied = elem->datalen - 1;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data + 1, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data + 1, copied);
+ }
} else {
copied = elem->datalen;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data, copied);
+ }
}
/* Fragmented elements must have 255 bytes */
@@ -2492,10 +2638,13 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
elem_datalen = elem->datalen;
- if (copied + elem_datalen > data_len)
- return -ENOSPC;
+ if (data) {
+ if (copied + elem_datalen > data_len)
+ return -ENOSPC;
+
+ memmove(data + copied, elem->data, elem_datalen);
+ }
- memmove(data + copied, elem->data, elem_datalen);
copied += elem_datalen;
/* Only the last fragment may be short */
@@ -2601,77 +2750,177 @@ error:
return NULL;
}
+struct tbtt_info_iter_data {
+ const struct ieee80211_neighbor_ap_info *ap_info;
+ u8 param_ch_count;
+ u32 use_for;
+ u8 mld_id, link_id;
+ bool non_tx;
+};
+
+static enum cfg80211_rnr_iter_ret
+cfg80211_mld_ap_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ const struct ieee80211_rnr_mld_params *mld_params;
+ struct tbtt_info_iter_data *data = _data;
+ u8 link_id;
+ bool non_tx = false;
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ mld_params)) {
+ const struct ieee80211_tbtt_info_ge_11 *tbtt_info_ge_11 =
+ (void *)tbtt_info;
+
+ non_tx = (tbtt_info_ge_11->bss_params &
+ (IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID |
+ IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID)) ==
+ IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ mld_params = &tbtt_info_ge_11->mld_params;
+ } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
+ mld_params = (void *)tbtt_info;
+ else
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+
+ if (data->mld_id != mld_params->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ if (data->link_id != link_id)
+ return RNR_ITER_CONTINUE;
+
+ data->ap_info = info;
+ data->param_ch_count =
+ le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+ data->non_tx = non_tx;
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
+ data->use_for = NL80211_BSS_USE_FOR_ALL;
+ else
+ data->use_for = NL80211_BSS_USE_FOR_MLD_LINK;
+ return RNR_ITER_BREAK;
+}
+
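[Editor's note] The helper above is a callback for cfg80211_iter_rnr(), which walks every TBTT information entry in the Reduced Neighbor Report and lets the callback answer CONTINUE, BREAK or ERROR per entry. The generic shape of that visitor contract, loosely modelled in plain C (the iterator, entry layout and names are stand-ins, not the cfg80211 implementation):

#include <stdbool.h>
#include <stddef.h>

/* mirrors the three-way return contract of the RNR iterator callbacks */
enum iter_ret { ITER_CONTINUE, ITER_BREAK, ITER_ERROR };

typedef enum iter_ret (*entry_cb)(void *data, const unsigned char *entry,
                                  size_t len);

/*
 * Walk fixed-size entries in a buffer; true means the walk completed or a
 * callback asked to stop early, false means an error or a truncated entry.
 */
static bool iter_entries(const unsigned char *buf, size_t buflen,
                         size_t entry_len, entry_cb cb, void *data)
{
    size_t pos = 0;

    if (!entry_len)
        return false;

    while (pos + entry_len <= buflen) {
        switch (cb(data, buf + pos, entry_len)) {
        case ITER_CONTINUE:
            break;
        case ITER_BREAK:
            return true;
        case ITER_ERROR:
            return false;
        }
        pos += entry_len;
    }

    return pos == buflen;   /* leftover bytes are treated as malformed */
}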
static u8
-cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
- const struct ieee80211_neighbor_ap_info **ap_info,
- const u8 **tbtt_info)
+cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ const struct ieee80211_neighbor_ap_info **ap_info,
+ u8 *param_ch_count, bool *non_tx)
{
- const struct ieee80211_neighbor_ap_info *info;
- const struct element *rnr;
- const u8 *pos, *end;
+ struct tbtt_info_iter_data data = {
+ .mld_id = mld_id,
+ .link_id = link_id,
+ };
- for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) {
- pos = rnr->data;
- end = rnr->data + rnr->datalen;
+ cfg80211_iter_rnr(ie, ielen, cfg80211_mld_ap_rnr_iter, &data);
- /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (sizeof(*info) <= end - pos) {
- const struct ieee80211_rnr_mld_params *mld_params;
- u16 params;
- u8 length, i, count, mld_params_offset;
- u8 type, lid;
- u32 use_for;
+ *ap_info = data.ap_info;
+ *param_ch_count = data.param_ch_count;
+ *non_tx = data.non_tx;
- info = (void *)pos;
- count = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = info->tbtt_info_len;
+ return data.use_for;
+}
- pos += sizeof(*info);
+static struct element *
+cfg80211_gen_reporter_rnr(struct cfg80211_bss *source_bss, bool is_mbssid,
+ bool same_mld, u8 link_id, u8 bss_change_count,
+ gfp_t gfp)
+{
+ const struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info ap_info;
+ struct ieee80211_tbtt_info_ge_11 tbtt_info;
+ u32 short_ssid;
+ const struct element *elem;
+ struct element *res;
- if (count * length > end - pos)
- return 0;
+ /*
+ * We only generate the RNR to permit ML lookups. For that we do not
+ * need an entry for the corresponding transmitting BSS, let's just skip
+ * it even though it would be easy to add.
+ */
+ if (!same_mld)
+ return NULL;
- type = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE);
+ /* We could use tx_data->ies if we change cfg80211_calc_short_ssid */
+ rcu_read_lock();
+ ies = rcu_dereference(source_bss->ies);
- if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
- length >=
- offsetofend(struct ieee80211_tbtt_info_ge_11,
- mld_params)) {
- mld_params_offset =
- offsetof(struct ieee80211_tbtt_info_ge_11, mld_params);
- use_for = NL80211_BSS_USE_FOR_ALL;
- } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
- length >= sizeof(struct ieee80211_rnr_mld_params)) {
- mld_params_offset = 0;
- use_for = NL80211_BSS_USE_FOR_MLD_LINK;
- } else {
- pos += count * length;
- continue;
- }
+ ap_info.tbtt_info_len = offsetofend(typeof(tbtt_info), mld_params);
+ ap_info.tbtt_info_hdr =
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_TBTT,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE) |
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT);
- for (i = 0; i < count; i++) {
- mld_params = (void *)pos + mld_params_offset;
- params = le16_to_cpu(mld_params->params);
+ ap_info.channel = ieee80211_frequency_to_channel(source_bss->channel->center_freq);
- lid = u16_get_bits(params,
- IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ /* operating class */
+ elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= 1) {
+ ap_info.op_class = elem->data[0];
+ } else {
+ struct cfg80211_chan_def chandef;
- if (mld_id == mld_params->mld_id &&
- link_id == lid) {
- *ap_info = info;
- *tbtt_info = pos;
+ /* The AP is not providing us with anything to work with. So
+ * make up a somewhat reasonable operating class, but don't
+ * bother with it too much as no one will ever use the
+ * information.
+ */
+ cfg80211_chandef_create(&chandef, source_bss->channel,
+ NL80211_CHAN_NO_HT);
- return use_for;
- }
+ if (!ieee80211_chandef_to_operating_class(&chandef,
+ &ap_info.op_class))
+ goto out_unlock;
+ }
- pos += length;
- }
- }
+ /* Just set TBTT offset and PSD 20 to invalid/unknown */
+ tbtt_info.tbtt_offset = 255;
+ tbtt_info.psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ memcpy(tbtt_info.bssid, source_bss->bssid, ETH_ALEN);
+ if (cfg80211_calc_short_ssid(ies, &elem, &short_ssid))
+ goto out_unlock;
+
+ rcu_read_unlock();
+
+ tbtt_info.short_ssid = cpu_to_le32(short_ssid);
+
+ tbtt_info.bss_params = IEEE80211_RNR_TBTT_PARAMS_SAME_SSID;
+
+ if (is_mbssid) {
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID;
}
- return 0;
+ tbtt_info.mld_params.mld_id = 0;
+ tbtt_info.mld_params.params =
+ le16_encode_bits(link_id, IEEE80211_RNR_MLD_PARAMS_LINK_ID) |
+ le16_encode_bits(bss_change_count,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ res = kzalloc(struct_size(res, data,
+ sizeof(ap_info) + ap_info.tbtt_info_len),
+ gfp);
+ if (!res)
+ return NULL;
+
+ /* Copy the data */
+ res->id = WLAN_EID_REDUCED_NEIGHBOR_REPORT;
+ res->datalen = sizeof(ap_info) + ap_info.tbtt_info_len;
+ memcpy(res->data, &ap_info, sizeof(ap_info));
+ memcpy(res->data + sizeof(ap_info), &tbtt_info, ap_info.tbtt_info_len);
+
+ return res;
+
+out_unlock:
+ rcu_read_unlock();
+ return NULL;
}
static void
@@ -2687,25 +2936,28 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
.source_bss = source_bss,
.bss_source = BSS_SOURCE_STA_PROFILE,
};
+ struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
+ const struct element *ssid_elem;
+ const u8 *ssid = NULL;
+ size_t ssid_len = 0;
u16 control;
u8 ml_common_len;
- u8 *new_ie;
+ u8 *new_ie = NULL;
struct cfg80211_bss *bss;
- int mld_id;
+ u8 mld_id, reporter_link_id, bss_change_count;
u16 seen_links = 0;
- const u8 *pos;
u8 i;
- if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ if (!ieee80211_mle_type_ok(elem->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ elem->datalen - 1))
return;
- ml_elem = (void *)elem->data + 1;
+ ml_elem = (void *)(elem->data + 1);
control = le16_to_cpu(ml_elem->control);
- if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
+ ml_common_len = ml_elem->variable[0];
/* Must be present when transmitted by an AP (in a probe response) */
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) ||
@@ -2713,18 +2965,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
return;
- ml_common_len = ml_elem->variable[0];
-
- /* length + MLD MAC address + link ID info + BSS Params Change Count */
- pos = ml_elem->variable + 1 + 6 + 1 + 1;
-
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
- pos += 2;
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA))
- pos += 2;
-
- /* MLD capabilities and operations */
- pos += 2;
+ reporter_link_id = ieee80211_mle_get_link_id(elem->data + 1);
+ bss_change_count = ieee80211_mle_get_bss_param_ch_cnt(elem->data + 1);
/*
* The MLD ID of the reporting AP is always zero. It is set if the AP
@@ -2732,33 +2974,44 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
* relating to a nontransmitted BSS (matching the Multi-BSSID Index,
* Draft P802.11be_D3.2, 35.3.4.2)
*/
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
- mld_id = *pos;
- pos += 1;
- } else {
- mld_id = 0;
- }
-
- /* Extended MLD capabilities and operations */
- pos += 2;
+ mld_id = ieee80211_mle_get_mld_id(elem->data + 1);
/* Fully defrag the ML element for sta information/profile iteration */
mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp);
if (!mle)
return;
+ /* No point in doing anything if there is no per-STA profile */
+ if (!mle->sta_prof[0])
+ goto out;
+
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
if (!new_ie)
goto out;
+ reporter_rnr = cfg80211_gen_reporter_rnr(source_bss,
+ u16_get_bits(control,
+ IEEE80211_MLC_BASIC_PRES_MLD_ID),
+ mld_id == 0, reporter_link_id,
+ bss_change_count,
+ gfp);
+
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, tx_data->ie,
+ tx_data->ielen);
+ if (ssid_elem) {
+ ssid = ssid_elem->data;
+ ssid_len = ssid_elem->datalen;
+ }
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
u32 freq;
const u8 *profile;
- const u8 *tbtt_info;
ssize_t profile_len;
+ u8 param_ch_count;
u8 link_id, use_for;
+ bool non_tx;
if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
mle->sta_prof_len[i]))
@@ -2800,13 +3053,28 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
profile_len -= 2;
/* Find in RNR to look up channel information */
- use_for = cfg80211_tbtt_info_for_mld_ap(tx_data->ie,
- tx_data->ielen,
- mld_id, link_id,
- &ap_info, &tbtt_info);
+ use_for = cfg80211_rnr_info_for_mld_ap(tx_data->ie,
+ tx_data->ielen,
+ mld_id, link_id,
+ &ap_info,
+ &param_ch_count,
+ &non_tx);
if (!use_for)
continue;
+ /*
+ * As of 802.11be_D5.0, the specification does not give us any
+ * way of discovering both the MaxBSSID and the Multiple-BSSID
+ * Index. It does seem like the Multiple-BSSID Index element
+ * may be provided, but section 9.4.2.45 explicitly forbids
+ * including a Multiple-BSSID Element (in this case without any
+ * subelements).
+ * Without both pieces of information we cannot calculate the
+ * reference BSSID, so simply ignore the BSS.
+ */
+ if (non_tx)
+ continue;
+
/* We could sanity check the BSSID is included */
if (!ieee80211_operating_class_to_band(ap_info->op_class,
@@ -2816,6 +3084,27 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
freq = ieee80211_channel_to_freq_khz(ap_info->channel, band);
data.channel = ieee80211_get_channel_khz(wiphy, freq);
+ /* Skip if RNR element specifies an unsupported channel */
+ if (!data.channel)
+ continue;
+
+ /* Skip if a BSS entry generated from MBSSID or direct source
+ * frame data is already available.
+ */
+ bss = cfg80211_get_bss(wiphy, data.channel, data.bssid, ssid,
+ ssid_len, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+
+ if (data.capability == bss->capability &&
+ ibss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ cfg80211_put_bss(wiphy, bss);
+ continue;
+ }
+ cfg80211_put_bss(wiphy, bss);
+ }
+
if (use_for == NL80211_BSS_USE_FOR_MLD_LINK &&
!(wiphy->flags & WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY)) {
use_for = 0;
@@ -2846,7 +3135,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
continue;
/* Copy the Basic Multi-Link element including the common
- * information, and then fix up the link ID.
+ * information, and then fix up the link ID and BSS param
+ * change count.
* Note that the ML element length has been verified and we
* also checked that it contains the link ID.
*/
@@ -2857,10 +3147,21 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
sizeof(*ml_elem) + ml_common_len);
new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN] = link_id;
+ new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN + 1] =
+ param_ch_count;
data.ielen += sizeof(*ml_elem) + ml_common_len;
- /* TODO: Add an RNR containing only the reporting AP */
+ if (reporter_rnr && (use_for & NL80211_BSS_USE_FOR_NORMAL)) {
+ if (data.ielen + sizeof(struct element) +
+ reporter_rnr->datalen > IEEE80211_MAX_DATA_LEN)
+ continue;
+
+ memcpy(new_ie + data.ielen, reporter_rnr,
+ sizeof(struct element) + reporter_rnr->datalen);
+ data.ielen += sizeof(struct element) +
+ reporter_rnr->datalen;
+ }
bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
if (!bss)
@@ -2869,6 +3170,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
}
out:
+ kfree(reporter_rnr);
kfree(new_ie);
kfree(mle);
}
@@ -2921,6 +3223,10 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
if (!res)
return NULL;
+ /* don't do any further MBSSID/ML handling for S1G */
+ if (ftype == CFG80211_BSS_FTYPE_S1G_BEACON)
+ return res;
+
cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
@@ -2929,59 +3235,21 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_inform_bss_data);
-static bool cfg80211_uhb_power_type_valid(const u8 *ie,
- size_t ielen,
- const u32 flags)
-{
- const struct element *tmp;
- struct ieee80211_he_operation *he_oper;
-
- tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
- he_oper = (void *)&tmp->data[1];
- he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper)
- return false;
-
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- return true;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT);
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT);
- }
- }
- return false;
-}
-
-/* cfg80211_inform_bss_width_frame helper */
-static struct cfg80211_bss *
-cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss tmp = {}, *res;
- struct cfg80211_bss_ies *ies;
- struct ieee80211_channel *channel;
- bool signal_valid;
+ size_t min_hdr_len;
struct ieee80211_ext *ext = NULL;
- u8 *bssid, *variable;
- u16 capability, beacon_int;
- size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
- int bss_type;
-
- BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
- offsetof(struct ieee80211_mgmt, u.beacon.variable));
-
- trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
+ enum cfg80211_bss_frame_type ftype;
+ u16 beacon_interval;
+ const u8 *bssid;
+ u16 capability;
+ const u8 *ie;
+ size_t ielen;
+ u64 tsf;
if (WARN_ON(!mgmt))
return NULL;
@@ -2989,48 +3257,40 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
if (WARN_ON(!wiphy))
return NULL;
- if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (data->signal < 0 || data->signal > 100)))
- return NULL;
+ BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
+ offsetof(struct ieee80211_mgmt, u.beacon.variable));
+
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
- min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ } else {
+ /* same for beacons */
+ min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
}
if (WARN_ON(len < min_hdr_len))
return NULL;
ielen = len - min_hdr_len;
- variable = mgmt->u.probe_resp.variable;
- if (ext) {
- if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
- variable = ext->u.s1g_short_beacon.variable;
- else
- variable = ext->u.s1g_beacon.variable;
- }
-
- channel = cfg80211_get_bss_channel(wiphy, variable, ielen, data->chan);
- if (!channel)
- return NULL;
-
- if (channel->band == NL80211_BAND_6GHZ &&
- !cfg80211_uhb_power_type_valid(variable, ielen, channel->flags)) {
- data->restrict_use = 1;
- data->use_for = 0;
- data->cannot_use_reasons =
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH;
- }
-
+ ie = mgmt->u.probe_resp.variable;
if (ext) {
const struct ieee80211_s1g_bcn_compat_ie *compat;
const struct element *elem;
- elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
- variable, ielen);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ ie = ext->u.s1g_short_beacon.variable;
+ else
+ ie = ext->u.s1g_beacon.variable;
+
+ elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
if (!elem)
return NULL;
if (elem->datalen < sizeof(*compat))
@@ -3038,112 +3298,26 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
compat = (void *)elem->data;
bssid = ext->u.s1g_beacon.sa;
capability = le16_to_cpu(compat->compat_info);
- beacon_int = le16_to_cpu(compat->beacon_int);
+ beacon_interval = le16_to_cpu(compat->beacon_int);
} else {
bssid = mgmt->bssid;
- beacon_int = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
}
- if (channel->band == NL80211_BAND_60GHZ) {
- bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
- if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
- bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- } else {
- if (capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- }
-
- ies = kzalloc(sizeof(*ies) + ielen, gfp);
- if (!ies)
- return NULL;
- ies->len = ielen;
- ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control) ||
- ieee80211_is_s1g_beacon(mgmt->frame_control);
- memcpy(ies->data, variable, ielen);
+ tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
if (ieee80211_is_probe_resp(mgmt->frame_control))
- rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+ else if (ext)
+ ftype = CFG80211_BSS_FTYPE_S1G_BEACON;
else
- rcu_assign_pointer(tmp.pub.beacon_ies, ies);
- rcu_assign_pointer(tmp.pub.ies, ies);
-
- memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
- tmp.pub.beacon_interval = beacon_int;
- tmp.pub.capability = capability;
- tmp.pub.channel = channel;
- tmp.pub.signal = data->signal;
- tmp.ts_boottime = data->boottime_ns;
- tmp.parent_tsf = data->parent_tsf;
- tmp.pub.chains = data->chains;
- memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
- ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
- tmp.pub.use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL;
- tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
+ ftype = CFG80211_BSS_FTYPE_BEACON;
- signal_valid = data->chan == channel;
- spin_lock_bh(&rdev->bss_lock);
- res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies);
- if (!res)
- goto drop;
-
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
-
- spin_unlock_bh(&rdev->bss_lock);
-
- trace_cfg80211_return_bss(&res->pub);
- /* __cfg80211_bss_update gives us a referenced result */
- return &res->pub;
-
-drop:
- spin_unlock_bh(&rdev->bss_lock);
- return NULL;
-}
-
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
-{
- struct cfg80211_inform_single_bss_data inform_data = {
- .drv_data = data,
- .ie = mgmt->u.probe_resp.variable,
- .ielen = len - offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable),
- .use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL,
- .cannot_use_reasons = data->cannot_use_reasons,
- };
- struct cfg80211_bss *res;
-
- res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
- len, gfp);
- if (!res)
- return NULL;
-
- /* don't do any further MBSSID/ML handling for S1G */
- if (ieee80211_is_s1g_beacon(mgmt->frame_control))
- return res;
-
- inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ?
- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
- memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN);
- inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- inform_data.beacon_interval =
- le16_to_cpu(mgmt->u.probe_resp.beacon_int);
-
- /* process each non-transmitting bss */
- cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
-
- cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
-
- return res;
+ return cfg80211_inform_bss_data(wiphy, data, ftype,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen,
+ gfp);
}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
@@ -3285,19 +3459,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
rdev->bss_generation++;
}
-
- rb_erase(&cbss->rbn, &rdev->bss_tree);
- rb_insert_bss(rdev, cbss);
- rdev->bss_generation++;
+ cfg80211_rehash_bss(rdev, cbss);
list_for_each_entry_safe(nontrans_bss, tmp,
&cbss->pub.nontrans_list,
nontrans_list) {
bss = bss_from_pub(nontrans_bss);
bss->pub.channel = chan;
- rb_erase(&bss->rbn, &rdev->bss_tree);
- rb_insert_bss(rdev, bss);
- rdev->bss_generation++;
+ cfg80211_rehash_bss(rdev, bss);
}
done:
@@ -3352,13 +3521,17 @@ int cfg80211_wext_siwscan(struct net_device *dev,
wiphy = &rdev->wiphy;
/* Determine number of channels, needed to allocate creq */
- if (wreq && wreq->num_channels)
+ if (wreq && wreq->num_channels) {
+ /* Passed from userspace so should be checked */
+ if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
+ return -EINVAL;
n_channels = wreq->num_channels;
- else
+ } else {
n_channels = ieee80211_get_num_supported_channels(wiphy);
+ }
- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
- n_channels * sizeof(void *),
+ creq = kzalloc(struct_size(creq, channels, n_channels) +
+ sizeof(struct cfg80211_ssid),
GFP_ATOMIC);
if (!creq)
return -ENOMEM;
@@ -3366,7 +3539,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->wiphy = wiphy;
creq->wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
- creq->ssids = (void *)&creq->channels[n_channels];
+ creq->ssids = (void *)creq + struct_size(creq, channels, n_channels);
creq->n_channels = n_channels;
creq->n_ssids = 1;
creq->scan_start = jiffies;
@@ -3380,9 +3553,12 @@ int cfg80211_wext_siwscan(struct net_device *dev,
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
+ struct ieee80211_channel *chan;
+
/* ignore disabled channels */
- if (wiphy->bands[band]->channels[j].flags &
- IEEE80211_CHAN_DISABLED)
+ chan = &wiphy->bands[band]->channels[j];
+ if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ !cfg80211_wdev_channel_allowed(creq->wdev, chan))
continue;
/* If we have a wireless request structure and the
@@ -3422,15 +3598,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
/* translate "Scan for SSID" request */
if (wreq) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
- err = -EINVAL;
- goto out;
- }
+ if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
creq->ssids[0].ssid_len = wreq->essid_len;
}
- if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
+ if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
+ creq->ssids = NULL;
creq->n_ssids = 0;
+ }
}
for (i = 0; i < NUM_NL80211_BANDS; i++)
@@ -3439,25 +3615,24 @@ int cfg80211_wext_siwscan(struct net_device *dev,
eth_broadcast_addr(creq->bssid);
- wiphy_lock(&rdev->wiphy);
-
- rdev->scan_req = creq;
- err = rdev_scan(rdev, creq);
- if (err) {
- rdev->scan_req = NULL;
- /* creq will be freed below */
- } else {
- nl80211_send_scan_start(rdev, dev->ieee80211_ptr);
- /* creq now owned by driver */
- creq = NULL;
- dev_hold(dev);
+ scoped_guard(wiphy, &rdev->wiphy) {
+ rdev->scan_req = creq;
+ err = rdev_scan(rdev, creq);
+ if (err) {
+ rdev->scan_req = NULL;
+ /* creq will be freed below */
+ } else {
+ nl80211_send_scan_start(rdev, dev->ieee80211_ptr);
+ /* creq now owned by driver */
+ creq = NULL;
+ dev_hold(dev);
+ }
}
- wiphy_unlock(&rdev->wiphy);
+
out:
kfree(creq);
return err;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_siwscan);
static char *ieee80211_scan_add_ies(struct iw_request_info *info,
const struct cfg80211_bss_ies *ies,
@@ -3829,5 +4004,4 @@ int cfg80211_wext_giwscan(struct net_device *dev,
return res;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwscan);
#endif
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 195c8532734b..cf998500a965 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -5,7 +5,7 @@
* (for nl80211's connect() and wext)
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved.
+ * Copyright (C) 2009, 2020, 2022-2024 Intel Corporation. All rights reserved.
* Copyright 2017 Intel Deutschland GmbH
*/
@@ -83,6 +83,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
if (!request)
return -ENOMEM;
+ request->n_channels = n_channels;
if (wdev->conn->params.channel) {
enum nl80211_band band = wdev->conn->params.channel->band;
struct ieee80211_supported_band *sband =
@@ -115,7 +116,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
n_channels = i;
}
request->n_channels = n_channels;
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = 1;
memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
@@ -130,7 +132,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
rdev->scan_req = request;
- err = rdev_scan(rdev, request);
+ err = cfg80211_scan(rdev);
if (!err) {
wdev->conn->state = CFG80211_CONN_SCANNING;
nl80211_send_scan_start(rdev, wdev);
@@ -209,7 +211,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
if (!req.bss) {
err = -ENOENT;
} else {
- err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req);
+ err = cfg80211_mlme_assoc(rdev, wdev->netdev,
+ &req, NULL);
cfg80211_put_bss(&rdev->wiphy, req.bss);
}
@@ -249,7 +252,7 @@ void cfg80211_conn_work(struct work_struct *work)
u8 bssid_buf[ETH_ALEN], *bssid = NULL;
enum nl80211_timeout_reason treason;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
@@ -277,8 +280,6 @@ void cfg80211_conn_work(struct work_struct *work)
__cfg80211_connect_result(wdev->netdev, &cr, false);
}
}
-
- wiphy_unlock(&rdev->wiphy);
}
static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
@@ -690,13 +691,13 @@ static bool cfg80211_is_all_idle(void)
* as chan dfs state, etc.
*/
for_each_rdev(rdev) {
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (wdev->conn || wdev->connected ||
cfg80211_beaconing_iface_active(wdev))
is_all_idle = false;
}
- wiphy_unlock(&rdev->wiphy);
}
return is_all_idle;
@@ -1044,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
cfg80211_hold_bss(
bss_from_pub(params->links[link].bss));
ev->cr.links[link].bss = params->links[link].bss;
+ ev->cr.links[link].status = params->links[link].status;
if (params->links[link].addr) {
ev->cr.links[link].addr = next;
@@ -1352,6 +1354,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
return;
cfg80211_wdev_release_bsses(wdev);
+ wdev->valid_links = 0;
wdev->connected = false;
wdev->u.client.ssid_len = 0;
wdev->conn_owner_nlportid = 0;
@@ -1578,7 +1581,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
container_of(work, struct wireless_dev, disconnect_wk);
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- wiphy_lock(wdev->wiphy);
+ guard(wiphy)(wdev->wiphy);
if (wdev->conn_owner_nlportid) {
switch (wdev->iftype) {
@@ -1614,6 +1617,4 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
break;
}
}
-
- wiphy_unlock(wdev->wiphy);
}
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 565511a3f461..62f26618f674 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -5,7 +5,7 @@
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
rdev->suspended = false;
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
wiphy_unlock(&rdev->wiphy);
if (ret)
diff --git a/net/wireless/tests/Makefile b/net/wireless/tests/Makefile
index 1f6622fcb758..c364e63b508e 100644
--- a/net/wireless/tests/Makefile
+++ b/net/wireless/tests/Makefile
@@ -1,3 +1,3 @@
-cfg80211-tests-y += module.o fragmentation.o scan.o util.o
+cfg80211-tests-y += module.o fragmentation.o scan.o util.o chan.o
obj-$(CONFIG_CFG80211_KUNIT_TEST) += cfg80211-tests.o
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
new file mode 100644
index 000000000000..7b97b731993c
--- /dev/null
+++ b/net/wireless/tests/chan.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static struct ieee80211_channel chan_6ghz_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan_6ghz_5 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5975,
+};
+
+static struct ieee80211_channel chan_6ghz_105 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6475,
+};
+
+static const struct chandef_compat_case {
+ const char *desc;
+ /* leave c1 empty for tests for identical */
+ struct cfg80211_chan_def c1, c2;
+ /* we test both ways around, so c2 should always be the compat one */
+ bool compat;
+} chandef_compat_cases[] = {
+ {
+ .desc = "identical non-HT",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 20 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_40,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 80 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_80,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 160 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "20 MHz in 320 MHz\n",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "different 20 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_5,
+ .center_freq1 = 5975,
+ },
+ },
+ {
+ .desc = "different primary 320 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 110,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 50,
+ },
+ },
+ {
+ /* similar to previous test but one has lower BW */
+ .desc = "matching primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 30,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 50,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured secondary 160 Mhz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 30,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 50,
+ .punctured = 0xf,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 30,
+ .punctured = 0xc0,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 50,
+ .punctured = 0xc000,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured not matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 30,
+ .punctured = 0x80,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 50,
+ .punctured = 0xc000,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef_compat, chandef_compat_cases, desc)
+
+static void test_chandef_compat(struct kunit *test)
+{
+ const struct chandef_compat_case *params = test->param_value;
+ const struct cfg80211_chan_def *ret, *expect;
+ struct cfg80211_chan_def c1 = params->c1;
+
+ /* tests with identical ones */
+ if (!params->c1.chan)
+ c1 = params->c2;
+
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&c1), true);
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&params->c2), true);
+
+ expect = params->compat ? &params->c2 : NULL;
+
+ ret = cfg80211_chandef_compatible(&c1, &params->c2);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+
+ if (!params->c1.chan)
+ expect = &c1;
+
+ ret = cfg80211_chandef_compatible(&params->c2, &c1);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+}
+
+static struct kunit_case chandef_compat_test_cases[] = {
+ KUNIT_CASE_PARAM(test_chandef_compat, chandef_compat_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_compat = {
+ .name = "cfg80211-chandef-compat",
+ .test_cases = chandef_compat_test_cases,
+};
+
+kunit_test_suite(chandef_compat);
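[Editor's note] A note on the center_freq1 arithmetic used throughout these cases: a 20 MHz channel centered at 5955 MHz sits at the bottom of its 40 MHz pair (center 5955 + 10), which sits at the bottom of its 80 MHz block (+ 20), and so on up to 320 MHz, giving the 5955 + 10 + 20 + 40 + 80 = 6105 value above. A tiny sketch of that doubling rule, valid only under the assumption (true for these test channels) that the primary stays in the lowest 20 MHz of every containing block:

#include <stdio.h>

/*
 * Center frequency (MHz) of a 'width'-MHz block whose primary 20 MHz
 * channel is centered at freq20 and lies at the bottom of every
 * containing block.
 */
static int center_of(int freq20, int width)
{
    int center = freq20;
    int w;

    for (w = 40; w <= width; w *= 2)
        center += w / 4;    /* +10, +20, +40, +80 */

    return center;
}

int main(void)
{
    printf("%d\n", center_of(5955, 320));   /* 6105 */
    return 0;
}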
diff --git a/net/wireless/tests/fragmentation.c b/net/wireless/tests/fragmentation.c
index 49a339ca8880..411fae18cd88 100644
--- a/net/wireless/tests/fragmentation.c
+++ b/net/wireless/tests/fragmentation.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for element fragmentation
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -27,7 +27,12 @@ static void defragment_0(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 253);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 253);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 253);
@@ -63,7 +68,12 @@ static void defragment_1(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
@@ -106,10 +116,15 @@ static void defragment_2(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
KUNIT_EXPECT_MEMEQ(test, data + 254, input + 257 + 2, 255);
KUNIT_EXPECT_MEMEQ(test, data + 254 + 255, input + 2 * 257 + 2, 1);
@@ -134,7 +149,12 @@ static void defragment_at_end(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
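[Editor's note] The repeated calls above exercise the new two-pass contract of cfg80211_defragment_element(): a first call with a NULL output buffer only reports the defragmented length, and a second call with a buffer of at least that size does the copy. A plain-C sketch of the general pattern (defrag()/defrag_alloc() are stand-ins, not the cfg80211 API):

#include <stdlib.h>
#include <string.h>

/* returns the required length; copies only when out is non-NULL */
static long defrag(const unsigned char *in, size_t in_len,
                   unsigned char *out, size_t out_len)
{
    if (out) {
        if (in_len > out_len)
            return -1;  /* would overflow, like -ENOSPC */
        memcpy(out, in, in_len);
    }
    return (long)in_len;
}

static unsigned char *defrag_alloc(const unsigned char *in, size_t in_len,
                                   long *out_len)
{
    long need = defrag(in, in_len, NULL, 0);    /* pass 1: size only */
    unsigned char *buf;

    if (need < 0)
        return NULL;

    buf = malloc(need);
    if (!buf)
        return NULL;

    *out_len = defrag(in, in_len, buf, need);   /* pass 2: copy */
    return buf;
}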
diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
index 77854161cd22..b1a9c1466d6c 100644
--- a/net/wireless/tests/scan.c
+++ b/net/wireless/tests/scan.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for inform_bss functions
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -14,7 +14,7 @@
/* mac80211 helpers for element building */
#include "../../mac80211/ieee80211_i.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct test_elem {
u8 id;
@@ -406,9 +406,32 @@ static struct inform_bss_ml_sta_case {
const char *desc;
int mld_id;
bool sta_prof_vendor_elems;
+ bool include_oper_class;
+ bool nstr;
} inform_bss_ml_sta_cases[] = {
- { .desc = "no_mld_id", .mld_id = 0, .sta_prof_vendor_elems = false },
- { .desc = "mld_id_eq_1", .mld_id = 1, .sta_prof_vendor_elems = true },
+ {
+ .desc = "zero_mld_id",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ }, {
+ .desc = "zero_mld_id_with_oper_class",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ .include_oper_class = true,
+ }, {
+ .desc = "mld_id_eq_1",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ }, {
+ .desc = "mld_id_eq_1_with_oper_class",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ .include_oper_class = true,
+ }, {
+ .desc = "nstr",
+ .mld_id = 0,
+ .nstr = true,
+ },
};
KUNIT_ARRAY_PARAM_DESC(inform_bss_ml_sta, inform_bss_ml_sta_cases, desc)
@@ -440,7 +463,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
struct {
struct ieee80211_neighbor_ap_info info;
struct ieee80211_tbtt_info_ge_11 ap;
- } __packed rnr = {
+ } __packed rnr_normal = {
.info = {
.tbtt_info_hdr = u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT),
.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_ge_11),
@@ -460,6 +483,28 @@ static void test_inform_bss_ml_sta(struct kunit *test)
}
};
struct {
+ struct ieee80211_neighbor_ap_info info;
+ struct ieee80211_rnr_mld_params mld_params;
+ } __packed rnr_nstr = {
+ .info = {
+ .tbtt_info_hdr =
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT) |
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_MLD,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE),
+ .tbtt_info_len = sizeof(struct ieee80211_rnr_mld_params),
+ .op_class = 81,
+ .channel = 11,
+ },
+ .mld_params = {
+ .mld_id = params->mld_id,
+ .params =
+ le16_encode_bits(link_id,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID),
+ }
+ };
+ size_t rnr_len = params->nstr ? sizeof(rnr_nstr) : sizeof(rnr_normal);
+ void *rnr = params->nstr ? (void *)&rnr_nstr : (void *)&rnr_normal;
+ struct {
__le16 control;
u8 var_len;
u8 mld_mac_addr[ETH_ALEN];
@@ -498,7 +543,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
u16_encode_bits(link_id,
IEEE80211_MLE_STA_CONTROL_LINK_ID)),
.var_len = sizeof(sta_prof) - 2 - 2,
- .bssid = { *rnr.ap.bssid },
+ .bssid = { *rnr_normal.ap.bssid },
.beacon_int = cpu_to_le16(101),
.tsf_offset = cpu_to_le64(-123ll),
.capabilities = cpu_to_le16(0xdead),
@@ -515,9 +560,15 @@ static void test_inform_bss_ml_sta(struct kunit *test)
skb_put_u8(input, 4);
skb_put_data(input, "TEST", 4);
+ if (params->include_oper_class) {
+ skb_put_u8(input, WLAN_EID_SUPPORTED_REGULATORY_CLASSES);
+ skb_put_u8(input, 1);
+ skb_put_u8(input, 81);
+ }
+
skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
- skb_put_u8(input, sizeof(rnr));
- skb_put_data(input, &rnr, sizeof(rnr));
+ skb_put_u8(input, rnr_len);
+ skb_put_data(input, rnr, rnr_len);
/* build a multi-link element */
skb_put_u8(input, WLAN_EID_EXTENSION);
@@ -563,9 +614,10 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx.inform_bss_count, 2);
/* Check link_bss *****************************************************/
- link_bss = cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ link_bss = __cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY,
+ 0);
KUNIT_ASSERT_NOT_NULL(test, link_bss);
KUNIT_EXPECT_EQ(test, link_bss->signal, 0);
KUNIT_EXPECT_EQ(test, link_bss->beacon_interval,
@@ -576,21 +628,43 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, link_bss->channel,
ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(2462)));
+ /* Test wiphy does not set WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY */
+ if (params->nstr) {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for, 0);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons,
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY);
+ KUNIT_EXPECT_NULL(test,
+ cfg80211_get_bss(wiphy, NULL, sta_prof.bssid,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY));
+ } else {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for,
+ NL80211_BSS_USE_FOR_ALL);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons, 0);
+ }
+
rcu_read_lock();
ies = rcu_dereference(link_bss->ies);
KUNIT_EXPECT_NOT_NULL(test, ies);
KUNIT_EXPECT_EQ(test, ies->tsf, tsf + le64_to_cpu(sta_prof.tsf_offset));
/* Resulting length should be:
* SSID (inherited) + RNR (inherited) + vendor element(s) +
+ * operating class (if requested) +
+ * generated RNR (if MLD ID == 0 and not NSTR) +
* MLE common info + MLE header and control
*/
if (params->sta_prof_vendor_elems)
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 160 + 2 + 165 +
+ 6 + 2 + rnr_len + 2 + 160 + 2 + 165 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
else
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 155 +
+ 6 + 2 + rnr_len + 2 + 155 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
rcu_read_unlock();
@@ -598,6 +672,174 @@ static void test_inform_bss_ml_sta(struct kunit *test)
cfg80211_put_bss(wiphy, link_bss);
}
+static struct cfg80211_parse_colocated_ap_case {
+ const char *desc;
+ u8 op_class;
+ u8 channel;
+ struct ieee80211_neighbor_ap_info info;
+ union {
+ struct ieee80211_tbtt_info_ge_11 tbtt_long;
+ struct ieee80211_tbtt_info_7_8_9 tbtt_short;
+ };
+ bool add_junk;
+ bool same_ssid;
+ bool valid;
+} cfg80211_parse_colocated_ap_cases[] = {
+ {
+ .desc = "wrong_band",
+ .info.op_class = 81,
+ .info.channel = 11,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "wrong_type",
+ /* IEEE80211_AP_INFO_TBTT_HDR_TYPE is in the least significant bits */
+ .info.tbtt_info_hdr = IEEE80211_TBTT_INFO_TYPE_MLD,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short",
+ .info.tbtt_info_len = 6,
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short_mld",
+ .info.tbtt_info_len = 10,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_non_mld",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = true,
+ },
+ {
+ .desc = "colocated_non_mld_invalid_bssid",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0xff, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = true,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .add_junk = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_disabled_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ .mld_params.params = cpu_to_le16(IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK),
+ },
+ .valid = false,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(cfg80211_parse_colocated_ap, cfg80211_parse_colocated_ap_cases, desc)
+
+static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+{
+ const struct cfg80211_parse_colocated_ap_case *params = test->param_value;
+ struct sk_buff *input = kunit_zalloc_skb(test, 1024, GFP_KERNEL);
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info info;
+ LIST_HEAD(coloc_ap_list);
+ int count;
+
+ KUNIT_ASSERT_NOT_NULL(test, input);
+
+ info = params->info;
+
+ /* Reasonable values for a colocated AP */
+ if (!info.tbtt_info_len)
+ info.tbtt_info_len = sizeof(params->tbtt_long);
+ if (!info.op_class)
+ info.op_class = 131;
+ if (!info.channel)
+ info.channel = 33;
+ /* Zero is the correct default for .tbtt_info_hdr (one entry, TBTT type) */
+
+ skb_put_u8(input, WLAN_EID_SSID);
+ skb_put_u8(input, 4);
+ skb_put_data(input, "TEST", 4);
+
+ skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
+ skb_put_u8(input, sizeof(info) + info.tbtt_info_len + (params->add_junk ? 3 : 0));
+ skb_put_data(input, &info, sizeof(info));
+ skb_put_data(input, &params->tbtt_long, info.tbtt_info_len);
+
+ if (params->add_junk)
+ skb_put_data(input, "123", 3);
+
+ ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ies);
+
+ ies->len = input->len;
+ memcpy(ies->data, input->data, input->len);
+
+ count = cfg80211_parse_colocated_ap(ies, &coloc_ap_list);
+
+ KUNIT_EXPECT_EQ(test, count, params->valid);
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&coloc_ap_list), params->valid);
+
+ if (params->valid && !list_empty(&coloc_ap_list)) {
+ struct cfg80211_colocated_ap *ap;
+
+ ap = list_first_entry(&coloc_ap_list, typeof(*ap), list);
+ if (info.tbtt_info_len <= sizeof(params->tbtt_short))
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_short.bssid, ETH_ALEN);
+ else
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_long.bssid, ETH_ALEN);
+
+ if (params->same_ssid) {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 4);
+ KUNIT_EXPECT_MEMEQ(test, ap->ssid, "TEST", 4);
+ } else {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 0);
+ }
+ }
+
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+}
+
static struct kunit_case gen_new_ie_test_cases[] = {
KUNIT_CASE_PARAM(test_gen_new_ie, gen_new_ie_gen_params),
KUNIT_CASE(test_gen_new_ie_malformed),
@@ -623,3 +865,16 @@ static struct kunit_suite inform_bss = {
};
kunit_test_suite(inform_bss);
+
+static struct kunit_case scan_6ghz_cases[] = {
+ KUNIT_CASE_PARAM(test_cfg80211_parse_colocated_ap,
+ cfg80211_parse_colocated_ap_gen_params),
+ {}
+};
+
+static struct kunit_suite scan_6ghz = {
+ .name = "cfg80211-scan-6ghz",
+ .test_cases = scan_6ghz_cases,
+};
+
+kunit_test_suite(scan_6ghz);
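
For reference, the parser helpers exercised by the new suite are internal to net/wireless/scan.c and are only reachable from this KUnit test. A minimal, purely illustrative sketch of driving them the same way the test does (the function name and the pr_debug output are assumptions, not part of the patch):

/* Illustrative sketch only: mirrors what test_cfg80211_parse_colocated_ap() does. */
static void example_walk_colocated(const struct cfg80211_bss_ies *ies)
{
	struct cfg80211_colocated_ap *ap;
	LIST_HEAD(coloc_ap_list);
	int n;

	/* Returns the number of co-located APs parsed from the RNR element(s). */
	n = cfg80211_parse_colocated_ap(ies, &coloc_ap_list);
	if (n <= 0)
		return;

	list_for_each_entry(ap, &coloc_ap_list, list)
		pr_debug("co-located AP %pM, ssid_len=%u\n",
			 ap->bssid, (unsigned int)ap->ssid_len);

	cfg80211_free_coloc_ap_list(&coloc_ap_list);
}
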
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 1f374c8a17a5..4f0abd5d49df 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
+ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg80211
@@ -135,7 +140,8 @@
__field(u32, width) \
__field(u32, center_freq1) \
__field(u32, freq1_offset) \
- __field(u32, center_freq2)
+ __field(u32, center_freq2) \
+ __field(u16, punctured)
#define CHAN_DEF_ASSIGN(chandef) \
do { \
if ((chandef) && (chandef)->chan) { \
@@ -148,6 +154,7 @@
__entry->center_freq1 = (chandef)->center_freq1;\
__entry->freq1_offset = (chandef)->freq1_offset;\
__entry->center_freq2 = (chandef)->center_freq2;\
+ __entry->punctured = (chandef)->punctured; \
} else { \
__entry->band = 0; \
__entry->control_freq = 0; \
@@ -156,14 +163,15 @@
__entry->center_freq1 = 0; \
__entry->freq1_offset = 0; \
__entry->center_freq2 = 0; \
+ __entry->punctured = 0; \
} \
} while (0)
#define CHAN_DEF_PR_FMT \
- "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u"
+ "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u, punct: 0x%x"
#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \
__entry->freq_offset, __entry->width, \
__entry->center_freq1, __entry->freq1_offset, \
- __entry->center_freq2
+ __entry->center_freq2, __entry->punctured
#define FILS_AAD_ASSIGN(fa) \
do { \
@@ -235,6 +243,80 @@
} while (0)
/*************************************************************
+ * wiphy work traces *
+ *************************************************************/
+
+DECLARE_EVENT_CLASS(wiphy_work_event,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(void *, instance)
+ __field(void *, func)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->instance = work;
+ __entry->func = work ? work->func : NULL;
+ ),
+ TP_printk(WIPHY_PR_FMT " instance=%p func=%pS",
+ WIPHY_PR_ARG, __entry->instance, __entry->func)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_queue,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_run,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_cancel,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_flush,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+TRACE_EVENT(wiphy_delayed_work_queue,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
+ unsigned long delay),
+ TP_ARGS(wiphy, work, delay),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(void *, instance)
+ __field(void *, func)
+ __field(unsigned long, delay)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->instance = work;
+ __entry->func = work->func;
+ __entry->delay = delay;
+ ),
+ TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%ld",
+ WIPHY_PR_ARG, __entry->instance, __entry->func,
+ __entry->delay)
+);
+
+TRACE_EVENT(wiphy_work_worker_start,
+ TP_PROTO(struct wiphy *wiphy),
+ TP_ARGS(wiphy),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ ),
+ TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+
+/*************************************************************
* rdev->ops traces *
*************************************************************/
@@ -364,7 +446,7 @@ TRACE_EVENT(rdev_add_virtual_intf,
),
TP_fast_assign(
WIPHY_ASSIGN;
- __assign_str(vir_intf_name, name ? name : "<noname>");
+ __assign_str(vir_intf_name);
__entry->type = type;
),
TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d",
@@ -723,9 +805,22 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
TP_ARGS(wiphy, netdev)
);
-DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
- TP_ARGS(wiphy, netdev)
+TRACE_EVENT(rdev_end_cac,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, netdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
);
DECLARE_EVENT_CLASS(station_add_change,
@@ -810,8 +905,8 @@ DECLARE_EVENT_CLASS(station_add_change,
params->link_sta_params.opmode_notif_used;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", station flags mask: %u, station flags set: %u, "
- "station modify mask: %u, listen interval: %d, aid: %u, "
+ ", station flags mask: 0x%x, station flags set: 0x%x, "
+ "station modify mask: 0x%x, listen interval: %d, aid: %u, "
"plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->sta_flags_mask, __entry->sta_flags_set,
@@ -859,6 +954,7 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ENTRY(sta_mac)
__field(u8, subtype)
__field(u16, reason_code)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -866,11 +962,13 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ASSIGN(sta_mac, params->mac);
__entry->subtype = params->subtype;
__entry->reason_code = params->reason_code;
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", subtype: %u, reason_code: %u",
+ ", subtype: %u, reason_code: %u, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
- __entry->subtype, __entry->reason_code)
+ __entry->subtype, __entry->reason_code,
+ __entry->link_id)
);
DEFINE_EVENT(station_del, rdev_del_station,
@@ -1013,7 +1111,7 @@ TRACE_EVENT(rdev_get_mpp,
TRACE_EVENT(rdev_dump_mpp,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *dst, u8 *mpp),
- TP_ARGS(wiphy, netdev, _idx, mpp, dst),
+ TP_ARGS(wiphy, netdev, _idx, dst, mpp),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -1064,7 +1162,7 @@ TRACE_EVENT(rdev_return_int_mpath_info,
),
TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, "
"filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u,"
- " discovery timeout: %u, discovery retries: %u, flags: %u",
+ " discovery timeout: %u, discovery retries: %u, flags: 0x%x",
WIPHY_PR_ARG, __entry->ret, __entry->generation,
__entry->filled, __entry->frame_qlen, __entry->sn,
__entry->metric, __entry->exptime, __entry->discovery_timeout,
@@ -1220,19 +1318,21 @@ TRACE_EVENT(rdev_libertas_set_mesh_channel,
);
TRACE_EVENT(rdev_set_monitor_channel,
- TP_PROTO(struct wiphy *wiphy,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_chan_def *chandef),
- TP_ARGS(wiphy, chandef),
+ TP_ARGS(wiphy, netdev, chandef),
TP_STRUCT__entry(
WIPHY_ENTRY
+ NETDEV_ENTRY
CHAN_DEF_ENTRY
),
TP_fast_assign(
WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
);
TRACE_EVENT(rdev_auth,
@@ -1306,7 +1406,7 @@ TRACE_EVENT(rdev_assoc,
req->fils_nonces, 2 * FILS_NONCE_LEN);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
- ", previous bssid: %pM, use mfp: %s, flags: %u",
+ ", previous bssid: %pM, use mfp: %s, flags: 0x%x",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
__entry->prev_bssid, BOOL_TO_STR(__entry->use_mfp),
__entry->flags)
@@ -1428,7 +1528,7 @@ TRACE_EVENT(rdev_connect,
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u, previous bssid: %pM",
+ "flags: 0x%x, previous bssid: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
__entry->wpa_versions, __entry->flags, __entry->prev_bssid)
@@ -1590,9 +1690,28 @@ TRACE_EVENT(rdev_set_wiphy_params,
WIPHY_PR_ARG, __entry->changed)
);
-DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev)
+DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
+);
+
+DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, wdev, link_id)
);
TRACE_EVENT(rdev_set_tx_power,
@@ -1747,7 +1866,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,
DECLARE_EVENT_CLASS(tx_rx_evt,
TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, rx, tx),
+ TP_ARGS(wiphy, tx, rx),
TP_STRUCT__entry(
WIPHY_ENTRY
__field(u32, tx)
@@ -1764,7 +1883,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, rx, tx)
+ TP_ARGS(wiphy, tx, rx)
);
DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
@@ -2092,25 +2211,6 @@ TRACE_EVENT(rdev_set_noack_map,
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
);
-
-DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id),
- TP_ARGS(wiphy, wdev, link_id),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- WDEV_ENTRY
- __field(unsigned int, link_id)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- WDEV_ASSIGN;
- __entry->link_id = link_id;
- ),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u",
- WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
-);
-
DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_channel,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
unsigned int link_id),
@@ -2324,6 +2424,7 @@ TRACE_EVENT(rdev_channel_switch,
__field(u8, count)
__dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
__dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
+ __field(u8, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -2341,11 +2442,13 @@ TRACE_EVENT(rdev_channel_switch,
memcpy(__get_dynamic_array(pres_ofs),
params->counter_offsets_presp,
params->n_counter_offsets_presp * sizeof(u16));
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", block_tx: %d, count: %u, radar_required: %d",
+ ", block_tx: %d, count: %u, radar_required: %d, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->block_tx, __entry->count, __entry->radar_required)
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->link_id)
);
TRACE_EVENT(rdev_set_qos_map,
@@ -2564,24 +2667,26 @@ TRACE_EVENT(rdev_external_auth,
TRACE_EVENT(rdev_start_radar_detection,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms),
- TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ u32 cac_time_ms, int link_id),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms, link_id),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(u32, cac_time_ms)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->cac_time_ms = cac_time_ms;
+ __entry->link_id = link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", cac_time_ms=%u",
+ ", cac_time_ms=%u, link_id=%d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->cac_time_ms)
+ __entry->cac_time_ms, __entry->link_id)
);
TRACE_EVENT(rdev_set_mcast_rate,
@@ -2828,6 +2933,7 @@ TRACE_EVENT(rdev_color_change,
__field(u8, count)
__field(u16, bcn_ofs)
__field(u16, pres_ofs)
+ __field(u8, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -2835,11 +2941,12 @@ TRACE_EVENT(rdev_color_change,
__entry->count = params->count;
__entry->bcn_ofs = params->counter_offset_beacon;
__entry->pres_ofs = params->counter_offset_presp;
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
- ", count: %u",
+ ", count: %u, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG,
- __entry->count)
+ __entry->count, __entry->link_id)
);
TRACE_EVENT(rdev_set_radar_background,
@@ -2873,6 +2980,93 @@ DEFINE_EVENT(wiphy_wdev_link_evt, rdev_del_intf_link,
TP_ARGS(wiphy, wdev, link_id)
);
+TRACE_EVENT(rdev_del_link_station,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct link_station_del_parameters *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(u8, mld_mac, 6)
+ __field(u32, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memset(__entry->mld_mac, 0, 6);
+ if (params->mld_mac)
+ memcpy(__entry->mld_mac, params->mld_mac, 6);
+ __entry->link_id = params->link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
+ ", link id: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
+ __entry->link_id)
+);
+
+TRACE_EVENT(rdev_set_hw_timestamp,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_set_hw_timestamp *hwts),
+
+ TP_ARGS(wiphy, netdev, hwts),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(macaddr)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(macaddr, hwts->macaddr);
+ __entry->enable = hwts->enable;
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr,
+ __entry->enable)
+);
+
+TRACE_EVENT(rdev_set_ttlm,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_ttlm_params *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(u8, dlink, sizeof(u16) * 8)
+ __array(u8, ulink, sizeof(u16) * 8)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memcpy(__entry->dlink, params->dlink, sizeof(params->dlink));
+ memcpy(__entry->ulink, params->ulink, sizeof(params->ulink));
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
+TRACE_EVENT(rdev_set_epcs,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ bool val),
+ TP_ARGS(wiphy, netdev, val),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(bool, val)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", config=%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->val)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3230,23 +3424,26 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
TRACE_EVENT(cfg80211_reg_can_beacon,
TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype, bool check_no_ir),
- TP_ARGS(wiphy, chandef, iftype, check_no_ir),
+ enum nl80211_iftype iftype, u32 prohibited_flags,
+ u32 permitting_flags),
+ TP_ARGS(wiphy, chandef, iftype, prohibited_flags, permitting_flags),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_DEF_ENTRY
__field(enum nl80211_iftype, iftype)
- __field(bool, check_no_ir)
+ __field(u32, prohibited_flags)
+ __field(u32, permitting_flags)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->iftype = iftype;
- __entry->check_no_ir = check_no_ir;
+ __entry->prohibited_flags = prohibited_flags;
+ __entry->permitting_flags = permitting_flags;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d prohibited_flags=0x%x permitting_flags=0x%x",
WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
- BOOL_TO_STR(__entry->check_no_ir))
+ __entry->prohibited_flags, __entry->permitting_flags)
);
TRACE_EVENT(cfg80211_chandef_dfs_required,
@@ -3267,47 +3464,39 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_radar_event,
@@ -3329,18 +3518,21 @@ TRACE_EVENT(cfg80211_radar_event,
);
TRACE_EVENT(cfg80211_cac_event,
- TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
- TP_ARGS(netdev, evt),
+ TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt,
+ unsigned int link_id),
+ TP_ARGS(netdev, evt, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
__field(enum nl80211_radar_event, evt)
+ __field(unsigned int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
__entry->evt = evt;
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", event: %d",
- NETDEV_PR_ARG, __entry->evt)
+ TP_printk(NETDEV_PR_FMT ", event: %d, link_id=%u",
+ NETDEV_PR_ARG, __entry->evt, __entry->link_id)
);
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
@@ -3915,88 +4107,79 @@ DEFINE_EVENT(link_station_add_mod, rdev_mod_link_station,
TP_ARGS(wiphy, netdev, params)
);
-TRACE_EVENT(rdev_del_link_station,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct link_station_del_parameters *params),
- TP_ARGS(wiphy, netdev, params),
+TRACE_EVENT(cfg80211_links_removed,
+ TP_PROTO(struct net_device *netdev, u16 link_mask),
+ TP_ARGS(netdev, link_mask),
TP_STRUCT__entry(
- WIPHY_ENTRY
NETDEV_ENTRY
- __array(u8, mld_mac, 6)
- __field(u32, link_id)
+ __field(u16, link_mask)
),
TP_fast_assign(
- WIPHY_ASSIGN;
NETDEV_ASSIGN;
- memset(__entry->mld_mac, 0, 6);
- if (params->mld_mac)
- memcpy(__entry->mld_mac, params->mld_mac, 6);
- __entry->link_id = params->link_id;
+ __entry->link_mask = link_mask;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", link id: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
- __entry->link_id)
+ TP_printk(NETDEV_PR_FMT ", link_mask:%u", NETDEV_PR_ARG,
+ __entry->link_mask)
);
-TRACE_EVENT(rdev_set_hw_timestamp,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_set_hw_timestamp *hwts),
-
- TP_ARGS(wiphy, netdev, hwts),
-
+TRACE_EVENT(cfg80211_mlo_reconf_add_done,
+ TP_PROTO(struct net_device *netdev, u16 link_mask,
+ const u8 *buf, size_t len),
+ TP_ARGS(netdev, link_mask, buf, len),
TP_STRUCT__entry(
- WIPHY_ENTRY
NETDEV_ENTRY
- MAC_ENTRY(macaddr)
- __field(bool, enable)
+ __field(u16, link_mask)
+ __dynamic_array(u8, buf, len)
),
-
TP_fast_assign(
- WIPHY_ASSIGN;
NETDEV_ASSIGN;
- MAC_ASSIGN(macaddr, hwts->macaddr);
- __entry->enable = hwts->enable;
+ __entry->link_mask = link_mask;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
-
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr,
- __entry->enable)
+ TP_printk(NETDEV_PR_FMT ", link_mask:0x%x",
+ NETDEV_PR_ARG, __entry->link_mask)
);
-TRACE_EVENT(cfg80211_links_removed,
- TP_PROTO(struct net_device *netdev, u16 link_mask),
- TP_ARGS(netdev, link_mask),
+TRACE_EVENT(rdev_assoc_ml_reconf,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_assoc_link *add_links,
+ u16 rem_links),
+ TP_ARGS(wiphy, netdev, add_links, rem_links),
TP_STRUCT__entry(
+ WIPHY_ENTRY
NETDEV_ENTRY
- __field(u16, link_mask)
+ __field(u16, add_links)
+ __field(u16, rem_links)
),
TP_fast_assign(
+ WIPHY_ASSIGN;
NETDEV_ASSIGN;
- __entry->link_mask = link_mask;
+ u32 i;
+
+ __entry->add_links = 0;
+ __entry->rem_links = rem_links;
+ for (i = 0; add_links && i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
+ if (add_links[i].bss)
+ __entry->add_links |= BIT(i);
),
- TP_printk(NETDEV_PR_FMT ", link_mask:%u", NETDEV_PR_ARG,
- __entry->link_mask)
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", add_links=0x%x, rem_links=0x%x",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->add_links, __entry->rem_links)
);
-TRACE_EVENT(rdev_set_ttlm,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_ttlm_params *params),
- TP_ARGS(wiphy, netdev, params),
+TRACE_EVENT(cfg80211_epcs_changed,
+ TP_PROTO(struct wireless_dev *wdev, bool enabled),
+ TP_ARGS(wdev, enabled),
TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __array(u8, dlink, sizeof(u16) * 8)
- __array(u8, ulink, sizeof(u16) * 8)
+ WDEV_ENTRY
+ __field(u32, enabled)
),
TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- memcpy(__entry->dlink, params->dlink, sizeof(params->dlink));
- memcpy(__entry->ulink, params->ulink, sizeof(params->ulink));
+ WDEV_ASSIGN;
+ __entry->enabled = enabled;
),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG)
+ TP_printk(WDEV_PR_FMT ", enabled=%u",
+ WDEV_PR_ARG, __entry->enabled)
);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
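
The wiphy_wdev_link_evt class is moved ahead of rdev_get_tx_power, its new first user, so any further per-link rdev tracepoint can reuse it with a single DEFINE_EVENT. A hedged sketch of such reuse (the event name is hypothetical, not part of the patch):

/* Hypothetical example: another per-link tracepoint reusing the class. */
DEFINE_EVENT(wiphy_wdev_link_evt, rdev_example_per_link_op,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
		 unsigned int link_id),
	TP_ARGS(wiphy, wdev, link_id)
);
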
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d1ce3bee2797..60157943d351 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -743,7 +743,7 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
return NULL;
/*
- * When reusing framents, copy some data to the head to simplify
+ * When reusing fragments, copy some data to the head to simplify
* ethernet header handling and speed up protocol header processing
* in the stack later.
*/
@@ -791,15 +791,19 @@ ieee80211_amsdu_subframe_length(void *field, u8 mesh_flags, u8 hdr_type)
bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
{
- int offset = 0, remaining, subframe_len, padding;
+ int offset = 0, subframe_len, padding;
for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
+ int remaining = skb->len - offset;
struct {
__be16 len;
u8 mesh_flags;
} hdr;
u16 len;
+ if (sizeof(hdr) > remaining)
+ return false;
+
if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
return false;
@@ -807,7 +811,6 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
mesh_hdr);
subframe_len = sizeof(struct ethhdr) + len;
padding = (4 - subframe_len) & 0x3;
- remaining = skb->len - offset;
if (subframe_len > remaining)
return false;
@@ -825,7 +828,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
- int offset = 0, remaining;
+ int offset = 0;
struct {
struct ethhdr eth;
uint8_t flags;
@@ -839,10 +842,14 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
copy_len = sizeof(hdr);
while (!last) {
+ int remaining = skb->len - offset;
unsigned int subframe_len;
int len, mesh_len = 0;
u8 padding;
+ if (copy_len > remaining)
+ goto purge;
+
skb_copy_bits(skb, offset, &hdr, copy_len);
if (iftype == NL80211_IFTYPE_MESH_POINT)
mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
@@ -852,7 +859,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
padding = (4 - subframe_len) & 0x3;
/* the last MSDU has no padding */
- remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
/* mitigate A-MSDU aggregation injection attacks */
@@ -992,10 +998,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* Diffserv Service Classes no update is needed:
* - Standard: DF
* - Low Priority Data: CS1
- * - Multimedia Streaming: AF31, AF32, AF33
* - Multimedia Conferencing: AF41, AF42, AF43
* - Network Control Traffic: CS7
* - Real-Time Interactive: CS4
+ * - Signaling: CS5
*/
switch (dscp >> 2) {
case 10:
@@ -1020,9 +1026,11 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
/* Broadcasting video: CS3 */
ret = 4;
break;
- case 40:
- /* Signaling: CS5 */
- ret = 5;
+ case 26:
+ case 28:
+ case 30:
+ /* Multimedia Streaming: AF31, AF32, AF33 */
+ ret = 4;
break;
case 44:
/* Voice Admit: VA */
@@ -1498,7 +1506,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
5120, /* 0.833333... */
};
u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
- u32 rates_969[3] = { 480388888, 453700000, 408333333 };
+ u32 rates_996[3] = { 480388888, 453700000, 408333333 };
u32 rates_484[3] = { 229411111, 216666666, 195000000 };
u32 rates_242[3] = { 114711111, 108333333, 97500000 };
u32 rates_106[3] = { 40000000, 37777777, 34000000 };
@@ -1518,12 +1526,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
return 0;
- if (rate->bw == RATE_INFO_BW_160)
+ if (rate->bw == RATE_INFO_BW_160 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
result = rates_160M[rate->he_gi];
else if (rate->bw == RATE_INFO_BW_80 ||
(rate->bw == RATE_INFO_BW_HE_RU &&
rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
- result = rates_969[rate->he_gi];
+ result = rates_996[rate->he_gi];
else if (rate->bw == RATE_INFO_BW_40 ||
(rate->bw == RATE_INFO_BW_HE_RU &&
rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
@@ -2073,6 +2083,82 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
}
EXPORT_SYMBOL(ieee80211_operating_class_to_band);
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, offset = 0;
+ enum nl80211_band band;
+
+ if (!ieee80211_operating_class_to_band(operating_class, &band) ||
+ !chan || band != chan->band)
+ return false;
+
+ control_freq = chan->center_freq;
+ chandef->chan = chan;
+
+ if (control_freq >= 5955)
+ offset = control_freq - 5955;
+ else if (control_freq >= 5745)
+ offset = control_freq - 5745;
+ else if (control_freq >= 5180)
+ offset = control_freq - 5180;
+ offset /= 20;
+
+ switch (operating_class) {
+ case 81: /* 2 GHz band; 20 MHz; channels 1..13 */
+ case 82: /* 2 GHz band; 20 MHz; channel 14 */
+ case 115: /* 5 GHz band; 20 MHz; channels 36,40,44,48 */
+ case 118: /* 5 GHz band; 20 MHz; channels 52,56,60,64 */
+ case 121: /* 5 GHz band; 20 MHz; channels 100..144 */
+ case 124: /* 5 GHz band; 20 MHz; channels 149,153,157,161 */
+ case 125: /* 5 GHz band; 20 MHz; channels 149..177 */
+ case 131: /* 6 GHz band; 20 MHz; channels 1..233 */
+ case 136: /* 6 GHz band; 20 MHz; channel 2 */
+ chandef->center_freq1 = control_freq;
+ chandef->width = NL80211_CHAN_WIDTH_20;
+ return true;
+ case 83: /* 2 GHz band; 40 MHz; channels 1..9 */
+ case 116: /* 5 GHz band; 40 MHz; channels 36,44 */
+ case 119: /* 5 GHz band; 40 MHz; channels 52,60 */
+ case 122: /* 5 GHz band; 40 MHz; channels 100,108,116,124,132,140 */
+ case 126: /* 5 GHz band; 40 MHz; channels 149,157,165,173 */
+ chandef->center_freq1 = control_freq + 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 84: /* 2 GHz band; 40 MHz; channels 5..13 */
+ case 117: /* 5 GHz band; 40 MHz; channels 40,48 */
+ case 120: /* 5 GHz band; 40 MHz; channels 56,64 */
+ case 123: /* 5 GHz band; 40 MHz; channels 104,112,120,128,136,144 */
+ case 127: /* 5 GHz band; 40 MHz; channels 153,161,169,177 */
+ chandef->center_freq1 = control_freq - 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 132: /* 6 GHz band; 40 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 10 - (offset & 1) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 128: /* 5 GHz band; 80 MHz; channels 36..64,100..144,149..177 */
+ case 133: /* 6 GHz band; 80 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 30 - (offset & 3) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ return true;
+ case 129: /* 5 GHz band; 160 MHz; channels 36..64,100..144,149..177 */
+ case 134: /* 6 GHz band; 160 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 70 - (offset & 7) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ return true;
+ case 130: /* 5 GHz band; 80+80 MHz; channels 36..64,100..144,149..177 */
+ case 135: /* 6 GHz band; 80+80 MHz; channels 1,5,..,229 */
+ /* The center_freq2 of 80+80 MHz is unknown */
+ case 137: /* 6 GHz band; 320 MHz; channels 1,5,..,229 */
+ /* 320-1 or 320-2 channelization is unknown */
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ieee80211_operating_class_to_chandef);
+
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
@@ -2223,13 +2309,16 @@ static int cfg80211_wdev_bi(struct wireless_dev *wdev)
static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
u32 *beacon_int_gcd,
- bool *beacon_int_different)
+ bool *beacon_int_different,
+ int radio_idx)
{
+ struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
*beacon_int_gcd = 0;
*beacon_int_different = false;
+ rdev = wiphy_to_rdev(wiphy);
list_for_each_entry(wdev, &wiphy->wdev_list, list) {
int wdev_bi;
@@ -2237,6 +2326,11 @@ static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
if (wdev->valid_links)
continue;
+ /* skip wdevs not active on the given wiphy radio */
+ if (radio_idx >= 0 &&
+ !(rdev_get_radio_mask(rdev, wdev->netdev) & BIT(radio_idx)))
+ continue;
+
wdev_bi = cfg80211_wdev_bi(wdev);
if (!wdev_bi)
@@ -2284,14 +2378,19 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void *data),
void *data)
{
+ const struct wiphy_radio *radio = NULL;
+ const struct ieee80211_iface_combination *c, *cs;
const struct ieee80211_regdomain *regdom;
enum nl80211_dfs_regions region = 0;
- int i, j, iftype;
+ int i, j, n, iftype;
int num_interfaces = 0;
u32 used_iftypes = 0;
u32 beacon_int_gcd;
bool beacon_int_different;
+ if (params->radio_idx >= 0)
+ radio = &wiphy->radio[params->radio_idx];
+
/*
* This is a bit strange, since the iteration used to rely only on
* the data given by the driver, but here it now relies on context,
@@ -2303,7 +2402,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
* interfaces (while being brought up) and channel/radar data.
*/
cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
- &beacon_int_gcd, &beacon_int_different);
+ &beacon_int_gcd, &beacon_int_different,
+ params->radio_idx);
if (params->radar_detect) {
rcu_read_lock();
@@ -2320,20 +2420,25 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
used_iftypes |= BIT(iftype);
}
- for (i = 0; i < wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
+ if (radio) {
+ cs = radio->iface_combinations;
+ n = radio->n_iface_combinations;
+ } else {
+ cs = wiphy->iface_combinations;
+ n = wiphy->n_iface_combinations;
+ }
+ for (i = 0; i < n; i++) {
struct ieee80211_iface_limit *limits;
u32 all_iftypes = 0;
- c = &wiphy->iface_combinations[i];
-
+ c = &cs[i];
if (num_interfaces > c->max_interfaces)
continue;
if (params->num_different_channels > c->num_different_channels)
continue;
- limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
- GFP_KERNEL);
+ limits = kmemdup_array(c->limits, c->n_limits, sizeof(*limits),
+ GFP_KERNEL);
if (!limits)
return -ENOMEM;
@@ -2478,6 +2583,8 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
memset(sinfo, 0, sizeof(*sinfo));
+ guard(wiphy)(&rdev->wiphy);
+
return rdev_get_station(rdev, dev, mac_addr, sinfo);
}
EXPORT_SYMBOL(cfg80211_get_station);
@@ -2733,10 +2840,9 @@ void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id)
break;
}
- wdev->valid_links &= ~BIT(link_id);
-
rdev_del_intf_link(rdev, wdev, link_id);
+ wdev->valid_links &= ~BIT(link_id);
eth_zero_addr(wdev->links[link_id].addr);
}
@@ -2778,3 +2884,67 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type)
return NULL;
}
EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa);
+
+static bool
+ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
+ u32 freq, u32 width)
+{
+ const struct wiphy_radio_freq_range *r;
+ int i;
+
+ for (i = 0; i < radio->n_freq_range; i++) {
+ r = &radio->freq_range[i];
+ if (freq - width / 2 >= r->start_freq &&
+ freq + width / 2 <= r->end_freq)
+ return true;
+ }
+
+ return false;
+}
+
+bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
+ const struct cfg80211_chan_def *chandef)
+{
+ u32 freq, width;
+
+ freq = ieee80211_chandef_to_khz(chandef);
+ width = nl80211_chan_width_to_mhz(chandef->width);
+ if (!ieee80211_radio_freq_range_valid(radio, freq, width))
+ return false;
+
+ freq = MHZ_TO_KHZ(chandef->center_freq2);
+ if (freq && !ieee80211_radio_freq_range_valid(radio, freq, width))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(cfg80211_radio_chandef_valid);
+
+bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ const struct wiphy_radio *radio;
+ struct cfg80211_chan_def chandef;
+ u32 radio_mask;
+ int i;
+
+ radio_mask = wdev->radio_mask;
+ if (!wiphy->n_radio || radio_mask == BIT(wiphy->n_radio) - 1)
+ return true;
+
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ for (i = 0; i < wiphy->n_radio; i++) {
+ if (!(radio_mask & BIT(i)))
+ continue;
+
+ radio = &wiphy->radio[i];
+ if (!cfg80211_radio_chandef_valid(radio, &chandef))
+ continue;
+
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(cfg80211_wdev_channel_allowed);
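
The new ieee80211_operating_class_to_chandef() exported above derives a complete chandef from an operating class and its control channel, and deliberately fails for the 80+80 MHz and 320 MHz classes, where the second segment or the 320-1/320-2 channelization cannot be inferred from the class alone. A minimal caller sketch (the function name and the fixed operating class 133 are assumptions for illustration):

/* Illustrative only: build an 80 MHz 6 GHz chandef from operating class 133. */
static bool example_opclass_to_chandef(struct wiphy *wiphy, int control_freq,
				       struct cfg80211_chan_def *chandef)
{
	struct ieee80211_channel *chan;

	chan = ieee80211_get_channel(wiphy, control_freq);
	if (!chan)
		return false;

	return ieee80211_operating_class_to_chandef(133, chan, chandef);
}
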
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 2371069f3c43..a74b1afc594e 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -30,7 +30,6 @@ int cfg80211_wext_giwname(struct net_device *dev,
strcpy(wrqu->name, "IEEE 802.11");
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwname);
int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -40,7 +39,6 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
struct cfg80211_registered_device *rdev;
struct vif_params vifparams;
enum nl80211_iftype type;
- int ret;
rdev = wiphy_to_rdev(wdev->wiphy);
@@ -63,13 +61,10 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
memset(&vifparams, 0, sizeof(vifparams));
- wiphy_lock(wdev->wiphy);
- ret = cfg80211_change_iface(rdev, dev, type, &vifparams);
- wiphy_unlock(wdev->wiphy);
+ guard(wiphy)(wdev->wiphy);
- return ret;
+ return cfg80211_change_iface(rdev, dev, type, &vifparams);
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode);
int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -105,7 +100,6 @@ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
}
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwmode);
int cfg80211_wext_giwrange(struct net_device *dev,
@@ -220,7 +214,6 @@ int cfg80211_wext_giwrange(struct net_device *dev,
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange);
/**
@@ -262,26 +255,19 @@ int cfg80211_wext_siwrts(struct net_device *dev,
u32 orts = wdev->wiphy->rts_threshold;
int err;
- wiphy_lock(&rdev->wiphy);
- if (rts->disabled || !rts->fixed) {
+ guard(wiphy)(&rdev->wiphy);
+ if (rts->disabled || !rts->fixed)
wdev->wiphy->rts_threshold = (u32) -1;
- } else if (rts->value < 0) {
- err = -EINVAL;
- goto out;
- } else {
+ else if (rts->value < 0)
+ return -EINVAL;
+ else
wdev->wiphy->rts_threshold = rts->value;
- }
err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD);
-
if (err)
wdev->wiphy->rts_threshold = orts;
-
-out:
- wiphy_unlock(&rdev->wiphy);
return err;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_siwrts);
int cfg80211_wext_giwrts(struct net_device *dev,
struct iw_request_info *info,
@@ -296,7 +282,6 @@ int cfg80211_wext_giwrts(struct net_device *dev,
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwrts);
int cfg80211_wext_siwfrag(struct net_device *dev,
struct iw_request_info *info,
@@ -308,12 +293,12 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
u32 ofrag = wdev->wiphy->frag_threshold;
int err;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
if (frag->disabled || !frag->fixed) {
wdev->wiphy->frag_threshold = (u32) -1;
} else if (frag->value < 256) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
} else {
/* Fragment length must be even, so strip LSB. */
wdev->wiphy->frag_threshold = frag->value & ~0x1;
@@ -322,12 +307,8 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD);
if (err)
wdev->wiphy->frag_threshold = ofrag;
-out:
- wiphy_unlock(&rdev->wiphy);
-
return err;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_siwfrag);
int cfg80211_wext_giwfrag(struct net_device *dev,
struct iw_request_info *info,
@@ -342,7 +323,6 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwfrag);
static int cfg80211_wext_siwretry(struct net_device *dev,
struct iw_request_info *info,
@@ -360,7 +340,8 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
(retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
return -EINVAL;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
if (retry->flags & IW_RETRY_LONG) {
wdev->wiphy->retry_long = retry->value;
changed |= WIPHY_PARAM_RETRY_LONG;
@@ -379,7 +360,6 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
wdev->wiphy->retry_short = oshort;
wdev->wiphy->retry_long = olong;
}
- wiphy_unlock(&rdev->wiphy);
return err;
}
@@ -413,7 +393,6 @@ int cfg80211_wext_giwretry(struct net_device *dev,
return 0;
}
-EXPORT_WEXT_HANDLER(cfg80211_wext_giwretry);
static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool pairwise,
@@ -587,9 +566,9 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
struct iw_point *erq = &wrqu->encoding;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int idx, err;
- bool remove = false;
struct key_params params;
+ bool remove = false;
+ int idx;
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_ADHOC)
@@ -601,11 +580,9 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
!rdev->ops->set_default_key)
return -EOPNOTSUPP;
- wiphy_lock(&rdev->wiphy);
- if (wdev->valid_links) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ guard(wiphy)(&rdev->wiphy);
+ if (wdev->valid_links)
+ return -EOPNOTSUPP;
idx = erq->flags & IW_ENCODE_INDEX;
if (idx == 0) {
@@ -613,8 +590,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
if (idx < 0)
idx = 0;
} else if (idx < 1 || idx > 4) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
} else {
idx--;
}
@@ -623,7 +599,8 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
remove = true;
else if (erq->length == 0) {
/* No key data - just set the default TX key index */
- err = 0;
+ int err = 0;
+
if (wdev->connected ||
(wdev->iftype == NL80211_IFTYPE_ADHOC &&
wdev->u.ibss.current_bss))
@@ -631,28 +608,22 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
true);
if (!err)
wdev->wext.default_key = idx;
- goto out;
+ return err;
}
memset(&params, 0, sizeof(params));
params.key = keybuf;
params.key_len = erq->length;
- if (erq->length == 5) {
+ if (erq->length == 5)
params.cipher = WLAN_CIPHER_SUITE_WEP40;
- } else if (erq->length == 13) {
+ else if (erq->length == 13)
params.cipher = WLAN_CIPHER_SUITE_WEP104;
- } else if (!remove) {
- err = -EINVAL;
- goto out;
- }
-
- err = cfg80211_set_encryption(rdev, dev, false, NULL, remove,
- wdev->wext.default_key == -1,
- idx, &params);
-out:
- wiphy_unlock(&rdev->wiphy);
+ else if (!remove)
+ return -EINVAL;
- return err;
+ return cfg80211_set_encryption(rdev, dev, false, NULL, remove,
+ wdev->wext.default_key == -1,
+ idx, &params);
}
static int cfg80211_wext_siwencodeext(struct net_device *dev,
@@ -668,7 +639,6 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
bool remove = false;
struct key_params params;
u32 cipher;
- int ret;
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_ADHOC)
@@ -743,16 +713,13 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
params.seq_len = 6;
}
- wiphy_lock(wdev->wiphy);
- ret = cfg80211_set_encryption(
- rdev, dev,
- !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY),
- addr, remove,
- ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
- idx, &params);
- wiphy_unlock(wdev->wiphy);
+ guard(wiphy)(wdev->wiphy);
- return ret;
+ return cfg80211_set_encryption(rdev, dev,
+ !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY),
+ addr, remove,
+ ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
+ idx, &params);
}
static int cfg80211_wext_giwencode(struct net_device *dev,
@@ -803,61 +770,41 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
struct cfg80211_chan_def chandef = {
.width = NL80211_CHAN_WIDTH_20_NOHT,
};
- int freq, ret;
+ int freq;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
- break;
+ return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
- break;
+ return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_MONITOR:
freq = cfg80211_wext_freq(wextfreq);
- if (freq < 0) {
- ret = freq;
- break;
- }
- if (freq == 0) {
- ret = -EINVAL;
- break;
- }
+ if (freq < 0)
+ return freq;
+ if (freq == 0)
+ return -EINVAL;
+
chandef.center_freq1 = freq;
chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
- if (!chandef.chan) {
- ret = -EINVAL;
- break;
- }
- ret = cfg80211_set_monitor_channel(rdev, &chandef);
- break;
+ if (!chandef.chan)
+ return -EINVAL;
+ return cfg80211_set_monitor_channel(rdev, dev, &chandef);
case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wextfreq);
- if (freq < 0) {
- ret = freq;
- break;
- }
- if (freq == 0) {
- ret = -EINVAL;
- break;
- }
+ if (freq < 0)
+ return freq;
+ if (freq == 0)
+ return -EINVAL;
chandef.center_freq1 = freq;
chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
- if (!chandef.chan) {
- ret = -EINVAL;
- break;
- }
- ret = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
- break;
+ if (!chandef.chan)
+ return -EINVAL;
+ return cfg80211_set_mesh_channel(rdev, wdev, &chandef);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_giwfreq(struct net_device *dev,
@@ -870,35 +817,26 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
struct cfg80211_chan_def chandef = {};
int ret;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
- break;
+ return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
- break;
+ return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_MONITOR:
- if (!rdev->ops->get_channel) {
- ret = -EINVAL;
- break;
- }
+ if (!rdev->ops->get_channel)
+ return -EINVAL;
ret = rdev_get_channel(rdev, wdev, 0, &chandef);
if (ret)
- break;
+ return ret;
freq->m = chandef.chan->center_freq;
freq->e = 6;
- ret = 0;
- break;
+ return ret;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_siwtxpower(struct net_device *dev,
@@ -909,7 +847,6 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
enum nl80211_tx_power_setting type;
int dbm = 0;
- int ret;
if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
return -EINVAL;
@@ -951,11 +888,9 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
return 0;
}
- wiphy_lock(&rdev->wiphy);
- ret = rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
- wiphy_unlock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
- return ret;
+ return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
}
static int cfg80211_wext_giwtxpower(struct net_device *dev,
@@ -974,9 +909,9 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
if (!rdev->ops->get_tx_power)
return -EOPNOTSUPP;
- wiphy_lock(&rdev->wiphy);
- err = rdev_get_tx_power(rdev, wdev, &val);
- wiphy_unlock(&rdev->wiphy);
+ scoped_guard(wiphy, &rdev->wiphy) {
+ err = rdev_get_tx_power(rdev, wdev, 0, &val);
+ }
if (err)
return err;
@@ -1204,7 +1139,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_ON: /* If not specified */
case IW_POWER_MODE: /* If set all mask */
- case IW_POWER_ALL_R: /* If explicitely state all */
+ case IW_POWER_ALL_R: /* If explicitly state all */
ps = true;
break;
default: /* Otherwise we ignore */
@@ -1218,9 +1153,9 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
timeout = wrq->value / 1000;
}
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
err = rdev_set_power_mgmt(rdev, dev, ps, timeout);
- wiphy_unlock(&rdev->wiphy);
if (err)
return err;
@@ -1253,8 +1188,8 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
struct cfg80211_bitrate_mask mask;
u32 fixed, maxrate;
struct ieee80211_supported_band *sband;
- int band, ridx, ret;
bool match = false;
+ int band, ridx;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
@@ -1292,14 +1227,12 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
if (!match)
return -EINVAL;
- wiphy_lock(&rdev->wiphy);
- if (dev->ieee80211_ptr->valid_links)
- ret = -EOPNOTSUPP;
- else
- ret = rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask);
- wiphy_unlock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
- return ret;
+ if (dev->ieee80211_ptr->valid_links)
+ return -EOPNOTSUPP;
+
+ return rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask);
}
static int cfg80211_wext_giwrate(struct net_device *dev,
@@ -1328,9 +1261,9 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
if (err)
return err;
- wiphy_lock(&rdev->wiphy);
- err = rdev_get_station(rdev, dev, addr, &sinfo);
- wiphy_unlock(&rdev->wiphy);
+ scoped_guard(wiphy, &rdev->wiphy) {
+ err = rdev_get_station(rdev, dev, addr, &sinfo);
+ }
if (err)
return err;
@@ -1429,23 +1362,17 @@ static int cfg80211_wext_siwap(struct net_device *dev,
struct sockaddr *ap_addr = &wrqu->ap_addr;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int ret;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
- break;
+ return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
- break;
+ return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_giwap(struct net_device *dev,
@@ -1455,23 +1382,17 @@ static int cfg80211_wext_giwap(struct net_device *dev,
struct sockaddr *ap_addr = &wrqu->ap_addr;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int ret;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
- break;
+ return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
- break;
+ return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_siwessid(struct net_device *dev,
@@ -1481,23 +1402,17 @@ static int cfg80211_wext_siwessid(struct net_device *dev,
struct iw_point *data = &wrqu->data;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int ret;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
- break;
+ return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_siwessid(dev, info, data, ssid);
- break;
+ return cfg80211_mgd_wext_siwessid(dev, info, data, ssid);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_giwessid(struct net_device *dev,
@@ -1507,26 +1422,20 @@ static int cfg80211_wext_giwessid(struct net_device *dev,
struct iw_point *data = &wrqu->data;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int ret;
data->flags = 0;
data->length = 0;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- ret = cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
- break;
+ return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
case NL80211_IFTYPE_STATION:
- ret = cfg80211_mgd_wext_giwessid(dev, info, data, ssid);
- break;
+ return cfg80211_mgd_wext_giwessid(dev, info, data, ssid);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static int cfg80211_wext_siwpmksa(struct net_device *dev,
@@ -1537,7 +1446,6 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_pmksa cfg_pmksa;
struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
- int ret;
memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa));
@@ -1547,39 +1455,27 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
cfg_pmksa.bssid = pmksa->bssid.sa_data;
cfg_pmksa.pmkid = pmksa->pmkid;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (pmksa->cmd) {
case IW_PMKSA_ADD:
- if (!rdev->ops->set_pmksa) {
- ret = -EOPNOTSUPP;
- break;
- }
+ if (!rdev->ops->set_pmksa)
+ return -EOPNOTSUPP;
- ret = rdev_set_pmksa(rdev, dev, &cfg_pmksa);
- break;
+ return rdev_set_pmksa(rdev, dev, &cfg_pmksa);
case IW_PMKSA_REMOVE:
- if (!rdev->ops->del_pmksa) {
- ret = -EOPNOTSUPP;
- break;
- }
+ if (!rdev->ops->del_pmksa)
+ return -EOPNOTSUPP;
- ret = rdev_del_pmksa(rdev, dev, &cfg_pmksa);
- break;
+ return rdev_del_pmksa(rdev, dev, &cfg_pmksa);
case IW_PMKSA_FLUSH:
- if (!rdev->ops->flush_pmksa) {
- ret = -EOPNOTSUPP;
- break;
- }
+ if (!rdev->ops->flush_pmksa)
+ return -EOPNOTSUPP;
- ret = rdev_flush_pmksa(rdev, dev);
- break;
+ return rdev_flush_pmksa(rdev, dev);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return ret;
}
static const iw_handler cfg80211_handlers[] = {
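
Most of the wext-compat.c churn above is a mechanical conversion from explicit wiphy_lock()/wiphy_unlock() pairs to the scope-based guard(wiphy)() and scoped_guard(wiphy, ...) helpers, which release the lock automatically on every return path and let error paths return directly. A hedged sketch of the resulting pattern (the function itself is hypothetical):

/* Hypothetical illustration of the scope-based locking pattern used above. */
static int example_locked_op(struct cfg80211_registered_device *rdev,
			     struct wireless_dev *wdev,
			     struct cfg80211_chan_def *chandef)
{
	guard(wiphy)(&rdev->wiphy);	/* dropped automatically on return */

	if (!rdev->ops->get_channel)
		return -EOPNOTSUPP;	/* no goto/unlock bookkeeping needed */

	return rdev_get_channel(rdev, wdev, 0, chandef);
}
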
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index c02eb789e676..8251ca5df8ae 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -5,12 +5,6 @@
#include <net/iw_handler.h>
#include <linux/wireless.h>
-#ifdef CONFIG_CFG80211_WEXT_EXPORT
-#define EXPORT_WEXT_HANDLER(h) EXPORT_SYMBOL_GPL(h)
-#else
-#define EXPORT_WEXT_HANDLER(h)
-#endif /* CONFIG_CFG80211_WEXT_EXPORT */
-
int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *wextfreq, char *extra);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index a161c64d1765..bea70eb6f034 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -4,6 +4,7 @@
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
* Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2024 Intel Corporation
*
* (As all part of the Linux kernel, this file is GPL)
*/
@@ -639,10 +640,8 @@ EXPORT_SYMBOL(wireless_send_event);
#ifdef CONFIG_CFG80211_WEXT
static void wireless_warn_cfg80211_wext(void)
{
- char name[sizeof(current->comm)];
-
pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n",
- get_task_comm(name, current));
+ current->comm);
}
#endif
@@ -662,7 +661,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
dev->ieee80211_ptr->wiphy->wext &&
dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev);
}
@@ -704,7 +704,8 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
#ifdef CONFIG_CFG80211_WEXT
if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
handlers = dev->ieee80211_ptr->wiphy->wext;
}
@@ -1156,7 +1157,7 @@ char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
/* Check if it's possible */
if (likely((stream + event_len) < ends)) {
iwe->len = event_len;
- /* Beware of alignement issues on 64 bits */
+ /* Beware of alignment issues on 64 bits */
memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
memcpy(stream + lcp_len, &iwe->u,
event_len - lcp_len);
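
The get_wireless_stats()/get_handler() changes above add WIPHY_FLAG_DISABLE_WEXT, letting a wiphy opt out of wireless-extension handling even when CONFIG_CFG80211_WEXT is built in. A hedged driver-side sketch (the setup function is made up for illustration):

/* Hypothetical driver snippet: refuse all wext handling for this wiphy. */
static void example_driver_setup_wiphy(struct wiphy *wiphy)
{
	wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
}
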
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 8edd9ada69d0..573b6b15a446 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -302,8 +302,8 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
struct iw_point *data = &wrqu->data;
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ int ie_len = data->length;
u8 *ie = extra;
- int ie_len = data->length, err;
if (wdev->iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
@@ -311,39 +311,31 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
if (!ie_len)
ie = NULL;
- wiphy_lock(wdev->wiphy);
+ guard(wiphy)(wdev->wiphy);
/* no change */
- err = 0;
if (wdev->wext.ie_len == ie_len &&
memcmp(wdev->wext.ie, ie, ie_len) == 0)
- goto out;
+ return 0;
if (ie_len) {
ie = kmemdup(extra, ie_len, GFP_KERNEL);
- if (!ie) {
- err = -ENOMEM;
- goto out;
- }
- } else
+ if (!ie)
+ return -ENOMEM;
+ } else {
ie = NULL;
+ }
kfree(wdev->wext.ie);
wdev->wext.ie = ie;
wdev->wext.ie_len = ie_len;
- if (wdev->conn) {
- err = cfg80211_disconnect(rdev, dev,
- WLAN_REASON_DEAUTH_LEAVING, false);
- if (err)
- goto out;
- }
+ if (wdev->conn)
+ return cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, false);
/* userspace better not think we'll reconnect */
- err = 0;
- out:
- wiphy_unlock(wdev->wiphy);
- return err;
+ return 0;
}
int cfg80211_wext_siwmlme(struct net_device *dev,
@@ -353,7 +345,6 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct iw_mlme *mlme = (struct iw_mlme *)extra;
struct cfg80211_registered_device *rdev;
- int err;
if (!wdev)
return -EOPNOTSUPP;
@@ -366,17 +357,13 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
if (mlme->addr.sa_family != ARPHRD_ETHER)
return -EINVAL;
- wiphy_lock(&rdev->wiphy);
+ guard(wiphy)(&rdev->wiphy);
+
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
- err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true);
- break;
+ return cfg80211_disconnect(rdev, dev, mlme->reason_code, true);
default:
- err = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
- wiphy_unlock(&rdev->wiphy);
-
- return err;
}
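
The wext-sme.c and wext-compat.c hunks above replace open-coded wiphy_lock()/wiphy_unlock() pairs with the scoped guard() helper from <linux/cleanup.h>, which is what allows the error paths to become plain returns. A minimal sketch of the pattern, assuming cfg80211's DEFINE_GUARD()-based "wiphy" guard (the function and condition names below are illustrative only, not part of the patch):

	static int example_op(struct wiphy *wiphy, bool supported)
	{
		guard(wiphy)(wiphy);	/* wiphy_lock() here, wiphy_unlock() on every exit */

		if (!supported)
			return -EOPNOTSUPP;	/* lock dropped automatically */

		return 0;			/* ... and here as well */
	}
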
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
deleted file mode 100644
index b379a0371653..000000000000
--- a/net/wireless/wext-spy.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * This file implement the Wireless Extensions spy API.
- *
- * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
- * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
- *
- * (As all part of the Linux kernel, this file is GPL)
- */
-
-#include <linux/wireless.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/iw_handler.h>
-#include <net/arp.h>
-#include <net/wext.h>
-
-static inline struct iw_spy_data *get_spydata(struct net_device *dev)
-{
- /* This is the new way */
- if (dev->wireless_data)
- return dev->wireless_data->spy_data;
- return NULL;
-}
-
-int iw_handler_set_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra)
-{
- struct iw_spy_data * spydata = get_spydata(dev);
- struct sockaddr * address = (struct sockaddr *) extra;
-
- /* Make sure driver is not buggy or using the old API */
- if (!spydata)
- return -EOPNOTSUPP;
-
- /* Disable spy collection while we copy the addresses.
- * While we copy addresses, any call to wireless_spy_update()
- * will NOP. This is OK, as anyway the addresses are changing. */
- spydata->spy_number = 0;
-
- /* We want to operate without locking, because wireless_spy_update()
- * most likely will happen in the interrupt handler, and therefore
- * have its own locking constraints and needs performance.
- * The rtnl_lock() make sure we don't race with the other iw_handlers.
- * This make sure wireless_spy_update() "see" that the spy list
- * is temporarily disabled. */
- smp_wmb();
-
- /* Are there are addresses to copy? */
- if (wrqu->data.length > 0) {
- int i;
-
- /* Copy addresses */
- for (i = 0; i < wrqu->data.length; i++)
- memcpy(spydata->spy_address[i], address[i].sa_data,
- ETH_ALEN);
- /* Reset stats */
- memset(spydata->spy_stat, 0,
- sizeof(struct iw_quality) * IW_MAX_SPY);
- }
-
- /* Make sure above is updated before re-enabling */
- smp_wmb();
-
- /* Enable addresses */
- spydata->spy_number = wrqu->data.length;
-
- return 0;
-}
-EXPORT_SYMBOL(iw_handler_set_spy);
-
-int iw_handler_get_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra)
-{
- struct iw_spy_data * spydata = get_spydata(dev);
- struct sockaddr * address = (struct sockaddr *) extra;
- int i;
-
- /* Make sure driver is not buggy or using the old API */
- if (!spydata)
- return -EOPNOTSUPP;
-
- wrqu->data.length = spydata->spy_number;
-
- /* Copy addresses. */
- for (i = 0; i < spydata->spy_number; i++) {
- memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN);
- address[i].sa_family = AF_UNIX;
- }
- /* Copy stats to the user buffer (just after). */
- if (spydata->spy_number > 0)
- memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number),
- spydata->spy_stat,
- sizeof(struct iw_quality) * spydata->spy_number);
- /* Reset updated flags. */
- for (i = 0; i < spydata->spy_number; i++)
- spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED;
- return 0;
-}
-EXPORT_SYMBOL(iw_handler_get_spy);
-
-/*------------------------------------------------------------------*/
-/*
- * Standard Wireless Handler : set spy threshold
- */
-int iw_handler_set_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra)
-{
- struct iw_spy_data * spydata = get_spydata(dev);
- struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
-
- /* Make sure driver is not buggy or using the old API */
- if (!spydata)
- return -EOPNOTSUPP;
-
- /* Just do it */
- spydata->spy_thr_low = threshold->low;
- spydata->spy_thr_high = threshold->high;
-
- /* Clear flag */
- memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
-
- return 0;
-}
-EXPORT_SYMBOL(iw_handler_set_thrspy);
-
-/*------------------------------------------------------------------*/
-/*
- * Standard Wireless Handler : get spy threshold
- */
-int iw_handler_get_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra)
-{
- struct iw_spy_data * spydata = get_spydata(dev);
- struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
-
- /* Make sure driver is not buggy or using the old API */
- if (!spydata)
- return -EOPNOTSUPP;
-
- /* Just do it */
- threshold->low = spydata->spy_thr_low;
- threshold->high = spydata->spy_thr_high;
-
- return 0;
-}
-EXPORT_SYMBOL(iw_handler_get_thrspy);
-
-/*------------------------------------------------------------------*/
-/*
- * Prepare and send a Spy Threshold event
- */
-static void iw_send_thrspy_event(struct net_device * dev,
- struct iw_spy_data * spydata,
- unsigned char * address,
- struct iw_quality * wstats)
-{
- union iwreq_data wrqu;
- struct iw_thrspy threshold;
-
- /* Init */
- wrqu.data.length = 1;
- wrqu.data.flags = 0;
- /* Copy address */
- memcpy(threshold.addr.sa_data, address, ETH_ALEN);
- threshold.addr.sa_family = ARPHRD_ETHER;
- /* Copy stats */
- threshold.qual = *wstats;
- /* Copy also thresholds */
- threshold.low = spydata->spy_thr_low;
- threshold.high = spydata->spy_thr_high;
-
- /* Send event to user space */
- wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
-}
-
-/* ---------------------------------------------------------------- */
-/*
- * Call for the driver to update the spy data.
- * For now, the spy data is a simple array. As the size of the array is
- * small, this is good enough. If we wanted to support larger number of
- * spy addresses, we should use something more efficient...
- */
-void wireless_spy_update(struct net_device * dev,
- unsigned char * address,
- struct iw_quality * wstats)
-{
- struct iw_spy_data * spydata = get_spydata(dev);
- int i;
- int match = -1;
-
- /* Make sure driver is not buggy or using the old API */
- if (!spydata)
- return;
-
- /* Update all records that match */
- for (i = 0; i < spydata->spy_number; i++)
- if (ether_addr_equal(address, spydata->spy_address[i])) {
- memcpy(&(spydata->spy_stat[i]), wstats,
- sizeof(struct iw_quality));
- match = i;
- }
-
- /* Generate an event if we cross the spy threshold.
- * To avoid event storms, we have a simple hysteresis : we generate
- * event only when we go under the low threshold or above the
- * high threshold. */
- if (match >= 0) {
- if (spydata->spy_thr_under[match]) {
- if (wstats->level > spydata->spy_thr_high.level) {
- spydata->spy_thr_under[match] = 0;
- iw_send_thrspy_event(dev, spydata,
- address, wstats);
- }
- } else {
- if (wstats->level < spydata->spy_thr_low.level) {
- spydata->spy_thr_under[match] = 1;
- iw_send_thrspy_event(dev, spydata,
- address, wstats);
- }
- }
- }
-}
-EXPORT_SYMBOL(wireless_spy_update);
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index 68729aa3a5d5..dc72302cbd07 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -17,8 +17,6 @@ config X25
if you want that) and the lower level data link layer protocol LAPB
(say Y to "LAPB Data Link Driver" below if you want that).
- You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
- <http://docwiki.cisco.com/wiki/X.25>.
Information about X.25 for Linux is contained in the files
<file:Documentation/networking/x25.rst> and
<file:Documentation/networking/x25-iface.rst>.
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7a7c7798c3b..8dda4178497c 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
goto out;
- len = min_t(unsigned int, len, sizeof(int));
-
rc = -EINVAL;
if (len < 0)
goto out;
+ len = min_t(unsigned int, len, sizeof(int));
+
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
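
The reordering above matters because min_t(unsigned int, ...) discards the sign: a negative optlen becomes a large unsigned value, gets clamped to sizeof(int), and the subsequent "len < 0" test can then never fire. Checking the sign first rejects the bogus length. A standalone sketch of the old behaviour (plain userspace C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int len = -1;
		/* unsigned clamp first, as the old code did */
		unsigned int clamped = (unsigned int)len < sizeof(int)
				       ? (unsigned int)len : (unsigned int)sizeof(int);

		printf("clamped = %u\n", clamped);	/* prints 4: the sign is lost */
		return 0;
	}
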
@@ -871,8 +871,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
return rc;
}
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
+static int x25_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk;
struct sock *newsk;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index e9802afa43d0..643f50874dfe 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -71,7 +71,6 @@ static struct ctl_table x25_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { },
};
int __init x25_register_sysctl(void)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index caa340134b0e..9f76ca591d54 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -151,6 +151,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
#define XDP_UMEM_FLAGS_VALID ( \
XDP_UMEM_UNALIGNED_CHUNK_FLAG | \
XDP_UMEM_TX_SW_CSUM | \
+ XDP_UMEM_TX_METADATA_LEN | \
0)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
@@ -204,8 +205,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
return -EINVAL;
- if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
- return -EINVAL;
+ if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {
+ if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
+ return -EINVAL;
+ umem->tx_metadata_len = mr->tx_metadata_len;
+ }
umem->size = size;
umem->headroom = headroom;
@@ -215,7 +219,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
- umem->tx_metadata_len = mr->tx_metadata_len;
INIT_LIST_HEAD(&umem->xsk_dma_list);
refcount_set(&umem->users, 1);
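
With this change, tx_metadata_len is only honoured when userspace explicitly opts in via the new XDP_UMEM_TX_METADATA_LEN flag; a registration without the flag keeps the old behaviour. A hedged userspace sketch of the opt-in path (field values and the helper name are illustrative; error handling omitted):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>

	static int register_umem(int xsk_fd, void *area, __u64 size)
	{
		struct xdp_umem_reg mr = {
			.addr = (__u64)(unsigned long)area,
			.len = size,
			.chunk_size = 2048,
			.headroom = 0,
			.flags = XDP_UMEM_TX_METADATA_LEN,	/* explicit opt-in */
			.tx_metadata_len = 16,			/* multiple of 8, below 256 */
		};

		return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	}
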
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b78c0e095e22..89d2bef96469 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -35,8 +35,6 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -143,7 +141,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
u64 addr;
int err;
- addr = xp_get_handle(xskb);
+ addr = xp_get_handle(xskb, xskb->pool);
err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
if (err) {
xs->rx_queue_full++;
@@ -173,14 +171,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return 0;
xskb_list = &xskb->pool->xskb_list;
- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
if (list_is_singular(xskb_list))
contd = 0;
len = pos->xdp.data_end - pos->xdp.data;
err = __xsk_rcv_zc(xs, pos, len, contd);
if (err)
goto err;
- list_del(&pos->xskb_list_node);
+ list_del(&pos->list_node);
}
return 0;
@@ -324,7 +322,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return -ENOSPC;
}
- sk_mark_napi_id_once_xdp(&xs->sk, xdp);
return 0;
}
@@ -372,22 +369,23 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
int err;
err = xsk_rcv(xs, xdp);
if (err)
return err;
- if (!xs->flush_node.prev)
+ if (!xs->flush_node.prev) {
+ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+
list_add(&xs->flush_node, flush_list);
+ }
return 0;
}
-void __xsk_map_flush(void)
+void __xsk_map_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
struct xdp_sock *xs, *tmp;
list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -396,16 +394,6 @@ void __xsk_map_flush(void)
}
}
-#ifdef CONFIG_DEBUG_NET
-bool xsk_map_check_flush(void)
-{
- if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
- return false;
- __xsk_map_flush();
- return true;
-}
-#endif
-
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
xskq_prod_submit_n(pool->cq, nb_entries);
@@ -538,34 +526,34 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
+static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
{
unsigned long flags;
int ret;
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ spin_lock_irqsave(&pool->cq_lock, flags);
+ ret = xskq_prod_reserve_addr(pool->cq, addr);
+ spin_unlock_irqrestore(&pool->cq_lock, flags);
return ret;
}
-static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
+static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
{
unsigned long flags;
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- xskq_prod_submit_n(xs->pool->cq, n);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ spin_lock_irqsave(&pool->cq_lock, flags);
+ xskq_prod_submit_n(pool->cq, n);
+ spin_unlock_irqrestore(&pool->cq_lock, flags);
}
-static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
+static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
unsigned long flags;
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- xskq_prod_cancel_n(xs->pool->cq, n);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ spin_lock_irqsave(&pool->cq_lock, flags);
+ xskq_prod_cancel_n(pool->cq, n);
+ spin_unlock_irqrestore(&pool->cq_lock, flags);
}
static u32 xsk_get_num_desc(struct sk_buff *skb)
@@ -582,7 +570,7 @@ static void xsk_destruct_skb(struct sk_buff *skb)
*compl->tx_timestamp = ktime_get_tai_fast_ns();
}
- xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
+ xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
sock_wfree(skb);
}
@@ -598,7 +586,7 @@ static void xsk_consume_skb(struct sk_buff *skb)
struct xdp_sock *xs = xdp_sk(skb->sk);
skb->destructor = sock_wfree;
- xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
+ xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
xs->skb = NULL;
@@ -686,6 +674,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
len = desc->len;
if (!skb) {
+ first_frag = true;
+
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
tr = dev->needed_tailroom;
skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
@@ -696,12 +686,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_put(skb, len);
err = skb_store_bits(skb, 0, buffer, len);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_err;
- }
-
- first_frag = true;
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
struct page *page;
@@ -769,6 +755,9 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
return skb;
free_err:
+ if (first_frag && skb)
+ kfree_skb(skb);
+
if (err == -EOVERFLOW) {
/* Drop the packet */
xsk_set_destructor_arg(xs->skb);
@@ -776,7 +765,7 @@ free_err:
xskq_cons_release(xs->tx);
} else {
/* Let application retry */
- xsk_cq_cancel_locked(xs, 1);
+ xsk_cq_cancel_locked(xs->pool, 1);
}
return ERR_PTR(err);
@@ -813,7 +802,7 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (xsk_cq_reserve_addr_locked(xs, desc.addr))
+ if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
goto out;
skb = xsk_build_skb(xs, &desc);
@@ -918,11 +907,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
if (unlikely(!xs->tx))
return -ENOBUFS;
- if (sk_can_busy_loop(sk)) {
- if (xs->zc)
- __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
+ if (sk_can_busy_loop(sk))
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
- }
if (xs->zc && xsk_no_wakeup(sk))
return 0;
@@ -1308,6 +1294,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);
+ if (xs->zc && qid < dev->real_num_rx_queues) {
+ struct netdev_rx_queue *rxq;
+
+ rxq = __netif_get_rx_queue(dev, qid);
+ if (rxq->napi)
+ __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
+ }
+
out_unlock:
if (err) {
dev_put(dev);
@@ -1331,14 +1325,6 @@ struct xdp_umem_reg_v1 {
__u32 headroom;
};
-struct xdp_umem_reg_v2 {
- __u64 addr; /* Start of packet data area */
- __u64 len; /* Length of packet data area */
- __u32 chunk_size;
- __u32 headroom;
- __u32 flags;
-};
-
static int xsk_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -1382,10 +1368,19 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(struct xdp_umem_reg_v1))
return -EINVAL;
- else if (optlen < sizeof(struct xdp_umem_reg_v2))
- mr_size = sizeof(struct xdp_umem_reg_v1);
else if (optlen < sizeof(mr))
- mr_size = sizeof(struct xdp_umem_reg_v2);
+ mr_size = sizeof(struct xdp_umem_reg_v1);
+
+ BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
+
+ /* Make sure the last field of the struct doesn't have
+ * uninitialized padding. All padding has to be explicit
+ * and has to be set to zero by the userspace to make
+ * struct xdp_umem_reg extensible in the future.
+ */
+ BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
+ sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
+ sizeof(struct xdp_umem_reg));
if (copy_from_sockptr(&mr, optval, mr_size))
return -EFAULT;
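
The two BUILD_BUG_ON()s above encode the extensibility rule spelled out in the comment: every older accepted layout must be strictly shorter than the current struct, and the current struct must end exactly at its last field so no implicit tail padding can carry uninitialized bytes. A hypothetical counter-example (illustrative only) of a layout the second assertion would reject on common 64-bit ABIs:

	struct example_reg {
		__u64 addr;
		__u32 flags;
		/* 4 bytes of implicit tail padding here, so
		 * offsetof(flags) + sizeof(flags) != sizeof(struct example_reg)
		 * and the equivalent BUILD_BUG_ON() would fire at build time.
		 */
	};
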
@@ -1414,6 +1409,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
struct xsk_queue **q;
int entries;
+ if (optlen < sizeof(entries))
+ return -EINVAL;
if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
@@ -1770,7 +1767,7 @@ static struct pernet_operations xsk_net_ops = {
static int __init xsk_init(void)
{
- int err, cpu;
+ int err;
err = proto_register(&xsk_proto, 0 /* no slab */);
if (err)
@@ -1788,8 +1785,6 @@ static int __init xsk_init(void)
if (err)
goto out_pernet;
- for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
return 0;
out_pernet:
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ce60ecd48a4d..d158cb6dd391 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -101,12 +101,11 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
xskb = &pool->heads[i];
xskb->pool = pool;
xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
- INIT_LIST_HEAD(&xskb->free_list_node);
- INIT_LIST_HEAD(&xskb->xskb_list_node);
+ INIT_LIST_HEAD(&xskb->list_node);
if (pool->unaligned)
pool->free_heads[i] = xskb;
else
- xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
+ xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
}
return pool;
@@ -211,6 +210,11 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
goto err_unreg_pool;
}
+ if (dev_get_min_mp_channel_count(netdev)) {
+ err = -EBUSY;
+ goto err_unreg_pool;
+ }
+
bpf.command = XDP_SETUP_XSK_POOL;
bpf.xsk.pool = pool;
bpf.xsk.queue_id = queue_id;
@@ -225,6 +229,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
goto err_unreg_xsk;
}
pool->umem->zc = true;
+ pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs;
return 0;
err_unreg_xsk:
@@ -338,7 +343,6 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
dma_map->netdev = netdev;
dma_map->dev = dev;
- dma_map->dma_need_sync = false;
dma_map->dma_pages_cnt = nr_pages;
refcount_set(&dma_map->users, 1);
list_add(&dma_map->list, &umem->xsk_dma_list);
@@ -383,10 +387,9 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
return;
}
- if (!refcount_dec_and_test(&dma_map->users))
- return;
+ if (refcount_dec_and_test(&dma_map->users))
+ __xp_dma_unmap(dma_map, attrs);
- __xp_dma_unmap(dma_map, attrs);
kvfree(pool->dma_pages);
pool->dma_pages = NULL;
pool->dma_pages_cnt = 0;
@@ -413,8 +416,10 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
for (i = 0; i < pool->heads_cnt; i++) {
struct xdp_buff_xsk *xskb = &pool->heads[i];
+ u64 orig_addr;
- xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+ orig_addr = xskb->xdp.data_hard_start - pool->addrs - pool->headroom;
+ xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, orig_addr);
}
}
@@ -424,7 +429,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
pool->dev = dma_map->dev;
pool->dma_pages_cnt = dma_map->dma_pages_cnt;
- pool->dma_need_sync = dma_map->dma_need_sync;
memcpy(pool->dma_pages, dma_map->dma_pages,
pool->dma_pages_cnt * sizeof(*pool->dma_pages));
@@ -460,8 +464,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
__xp_dma_unmap(dma_map, attrs);
return -ENOMEM;
}
- if (dma_need_sync(dev, dma))
- dma_map->dma_need_sync = true;
dma_map->dma_pages[i] = dma;
}
@@ -500,6 +502,22 @@ static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
return *addr < pool->addrs_cnt;
}
+static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
+{
+ struct xdp_buff_xsk *xskb;
+
+ if (pool->unaligned) {
+ xskb = pool->free_heads[--pool->free_heads_cnt];
+ xp_init_xskb_addr(xskb, pool, addr);
+ if (pool->dma_pages)
+ xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+ } else {
+ xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
+ }
+
+ return xskb;
+}
+
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb;
@@ -525,14 +543,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
break;
}
- if (pool->unaligned) {
- xskb = pool->free_heads[--pool->free_heads_cnt];
- xp_init_xskb_addr(xskb, pool, addr);
- if (pool->dma_pages)
- xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
- } else {
- xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
- }
+ xskb = xp_get_xskb(pool, addr);
xskq_cons_release(pool->fq);
return xskb;
@@ -549,19 +560,17 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
} else {
pool->free_list_cnt--;
xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
- free_list_node);
- list_del_init(&xskb->free_list_node);
+ list_node);
+ list_del_init(&xskb->list_node);
}
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
xskb->xdp.data_meta = xskb->xdp.data;
xskb->xdp.flags = 0;
- if (pool->dma_need_sync) {
- dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
- pool->frame_len,
- DMA_BIDIRECTIONAL);
- }
+ if (pool->dev)
+ xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);
+
return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);
@@ -592,14 +601,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
continue;
}
- if (pool->unaligned) {
- xskb = pool->free_heads[--pool->free_heads_cnt];
- xp_init_xskb_addr(xskb, pool, addr);
- if (pool->dma_pages)
- xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
- } else {
- xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
- }
+ xskb = xp_get_xskb(pool, addr);
*xdp = &xskb->xdp;
xdp++;
@@ -618,8 +620,8 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
i = nb_entries;
while (i--) {
- xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
- list_del_init(&xskb->free_list_node);
+ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
+ list_del_init(&xskb->list_node);
*xdp = &xskb->xdp;
xdp++;
@@ -629,20 +631,31 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
return nb_entries;
}
-u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ u32 max)
{
- u32 nb_entries1 = 0, nb_entries2;
+ int i;
- if (unlikely(pool->dma_need_sync)) {
+ for (i = 0; i < max; i++) {
struct xdp_buff *buff;
- /* Slow path */
buff = xp_alloc(pool);
- if (buff)
- *xdp = buff;
- return !!buff;
+ if (unlikely(!buff))
+ return i;
+ *xdp = buff;
+ xdp++;
}
+ return max;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 nb_entries1 = 0, nb_entries2;
+
+ if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
+ return xp_alloc_slow(pool, xdp, max);
+
if (unlikely(pool->free_list_cnt)) {
nb_entries1 = xp_alloc_reused(pool, xdp, max);
if (nb_entries1 == max)
@@ -662,19 +675,27 @@ EXPORT_SYMBOL(xp_alloc_batch);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
+ u32 req_count, avail_count;
+
if (pool->free_list_cnt >= count)
return true;
- return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
+
+ req_count = count - pool->free_list_cnt;
+ avail_count = xskq_cons_nb_entries(pool->fq, req_count);
+ if (!avail_count)
+ pool->fq->queue_empty_descs++;
+
+ return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);
void xp_free(struct xdp_buff_xsk *xskb)
{
- if (!list_empty(&xskb->free_list_node))
+ if (!list_empty(&xskb->list_node))
return;
xskb->pool->free_list_cnt++;
- list_add(&xskb->free_list_node, &xskb->pool->free_list);
+ list_add(&xskb->list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);
@@ -693,18 +714,3 @@ dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);
-
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
-{
- dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
- xskb->pool->frame_len, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
-
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
- size_t size)
-{
- dma_sync_single_range_for_device(pool->dev, dma, 0,
- size, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_device_slow);
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 9f8955367275..09dcea0cbbed 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -194,6 +194,7 @@ static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
}
static const struct sock_diag_handler xsk_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_XDP,
.dump = xsk_diag_handler_dump,
};
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 6f2d1621c992..46d87e961ad6 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -260,7 +260,7 @@ u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
nr_frags = 0;
} else {
nr_frags++;
- if (nr_frags == pool->netdev->xdp_zc_max_segs) {
+ if (nr_frags == pool->xdp_zc_max_segs) {
nr_frags = 0;
break;
}
@@ -306,11 +306,6 @@ static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
return entries >= max ? max : entries;
}
-static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-{
- return xskq_cons_nb_entries(q, cnt) >= cnt;
-}
-
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
if (q->cached_prod == q->cached_cons)
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index e1c526f97ce3..afa457506274 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -224,7 +224,7 @@ static long xsk_map_delete_elem(struct bpf_map *map, void *key)
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct xdp_sock __rcu **map_entry;
struct xdp_sock *old_xs;
- int k = *(u32 *)key;
+ u32 k = *(u32 *)key;
if (k >= map->max_entries)
return -EINVAL;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index d7b16f2c23e9..f0157702718f 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -135,6 +135,22 @@ config NET_KEY_MIGRATE
If unsure, say N.
+config XFRM_IPTFS
+ tristate "IPsec IP-TFS/AGGFRAG (RFC 9347) encapsulation support"
+ depends on XFRM
+ help
+ Information on the IP-TFS/AGGFRAG encapsulation can be found
+ in RFC 9347. This feature supports demand driven (i.e.,
+ non-constant send rate) IP-TFS to take advantage of the
+ AGGFRAG ESP payload encapsulation. This payload type
+ supports aggregation and fragmentation of the inner IP
+ packet stream which in turn yields higher small-packet
+ bandwidth as well as reducing MTU/PMTU issues. Congestion
+ control is unimplemented as the send rate is demand driven
+ rather than constant.
+
+ If unsure, say N.
+
config XFRM_ESPINTCP
bool
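
For completeness, the new XFRM_IPTFS option added above is a plain tristate, so a hedged example .config fragment for building it as a module (assuming the usual XFRM prerequisites are already enabled) is simply:

	CONFIG_XFRM_IPTFS=m
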
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index 547cec77ba03..5a1787587cb3 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -13,12 +13,14 @@ endif
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
xfrm_input.o xfrm_output.o \
- xfrm_sysctl.o xfrm_replay.o xfrm_device.o
+ xfrm_sysctl.o xfrm_replay.o xfrm_device.o \
+ xfrm_nat_keepalive.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-$(CONFIG_XFRM_USER_COMPAT) += xfrm_compat.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o
+obj-$(CONFIG_XFRM_IPTFS) += xfrm_iptfs.o
obj-$(CONFIG_XFRM_ESPINTCP) += espintcp.o
obj-$(CONFIG_DEBUG_INFO_BTF) += xfrm_state_bpf.o
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index d3b3f9e720b3..fe82e2d07300 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
+#include <net/hotdata.h>
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
@@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&ctx->out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/trace_iptfs.h b/net/xfrm/trace_iptfs.h
new file mode 100644
index 000000000000..74391ba24445
--- /dev/null
+++ b/net/xfrm/trace_iptfs.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* xfrm_trace_iptfs.h
+ *
+ * August 12 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iptfs
+
+#if !defined(_TRACE_IPTFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IPTFS_H
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+#include <net/ip.h>
+
+struct xfrm_iptfs_data;
+
+TRACE_EVENT(iptfs_egress_recv,
+ TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u16 blkoff),
+ TP_ARGS(skb, xtfs, blkoff),
+ TP_STRUCT__entry(__field(struct sk_buff *, skb)
+ __field(void *, head)
+ __field(void *, head_pg_addr)
+ __field(void *, pg0addr)
+ __field(u32, skb_len)
+ __field(u32, data_len)
+ __field(u32, headroom)
+ __field(u32, tailroom)
+ __field(u32, tail)
+ __field(u32, end)
+ __field(u32, pg0off)
+ __field(u8, head_frag)
+ __field(u8, frag_list)
+ __field(u8, nr_frags)
+ __field(u16, blkoff)),
+ TP_fast_assign(__entry->skb = skb;
+ __entry->head = skb->head;
+ __entry->skb_len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->headroom = skb_headroom(skb);
+ __entry->tailroom = skb_tailroom(skb);
+ __entry->tail = (u32)skb->tail;
+ __entry->end = (u32)skb->end;
+ __entry->head_frag = skb->head_frag;
+ __entry->frag_list = (bool)skb_shinfo(skb)->frag_list;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->blkoff = blkoff;
+ __entry->head_pg_addr = page_address(virt_to_head_page(skb->head));
+ __entry->pg0addr = (__entry->nr_frags
+ ? page_address(netmem_to_page(skb_shinfo(skb)->frags[0].netmem))
+ : NULL);
+ __entry->pg0off = (__entry->nr_frags
+ ? skb_shinfo(skb)->frags[0].offset
+ : 0);
+ ),
+ TP_printk("EGRESS: skb=%p len=%u data_len=%u headroom=%u head_frag=%u frag_list=%u nr_frags=%u blkoff=%u\n\t\ttailroom=%u tail=%u end=%u head=%p hdpgaddr=%p pg0->addr=%p pg0->data=%p pg0->off=%u",
+ __entry->skb, __entry->skb_len, __entry->data_len, __entry->headroom,
+ __entry->head_frag, __entry->frag_list, __entry->nr_frags, __entry->blkoff,
+ __entry->tailroom, __entry->tail, __entry->end, __entry->head,
+ __entry->head_pg_addr, __entry->pg0addr, __entry->pg0addr + __entry->pg0off,
+ __entry->pg0off)
+ )
+
+DECLARE_EVENT_CLASS(iptfs_ingress_preq_event,
+ TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs,
+ u32 pmtu, u8 was_gso),
+ TP_ARGS(skb, xtfs, pmtu, was_gso),
+ TP_STRUCT__entry(__field(struct sk_buff *, skb)
+ __field(u32, skb_len)
+ __field(u32, data_len)
+ __field(u32, pmtu)
+ __field(u32, queue_size)
+ __field(u32, proto_seq)
+ __field(u8, proto)
+ __field(u8, was_gso)
+ ),
+ TP_fast_assign(__entry->skb = skb;
+ __entry->skb_len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->queue_size =
+ xtfs->cfg.max_queue_size - xtfs->queue_size;
+ __entry->proto = __trace_ip_proto(ip_hdr(skb));
+ __entry->proto_seq = __trace_ip_proto_seq(ip_hdr(skb));
+ __entry->pmtu = pmtu;
+ __entry->was_gso = was_gso;
+ ),
+ TP_printk("INGRPREQ: skb=%p len=%u data_len=%u qsize=%u proto=%u proto_seq=%u pmtu=%u was_gso=%u",
+ __entry->skb, __entry->skb_len, __entry->data_len,
+ __entry->queue_size, __entry->proto, __entry->proto_seq,
+ __entry->pmtu, __entry->was_gso));
+
+DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_enqueue,
+ TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
+ TP_ARGS(skb, xtfs, pmtu, was_gso));
+
+DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_no_queue_space,
+ TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
+ TP_ARGS(skb, xtfs, pmtu, was_gso));
+
+DEFINE_EVENT(iptfs_ingress_preq_event, iptfs_too_big,
+ TP_PROTO(struct sk_buff *skb, struct xfrm_iptfs_data *xtfs, u32 pmtu, u8 was_gso),
+ TP_ARGS(skb, xtfs, pmtu, was_gso));
+
+DECLARE_EVENT_CLASS(iptfs_ingress_postq_event,
+ TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff, struct iphdr *iph),
+ TP_ARGS(skb, mtu, blkoff, iph),
+ TP_STRUCT__entry(__field(struct sk_buff *, skb)
+ __field(u32, skb_len)
+ __field(u32, data_len)
+ __field(u32, mtu)
+ __field(u32, proto_seq)
+ __field(u16, blkoff)
+ __field(u8, proto)),
+ TP_fast_assign(__entry->skb = skb;
+ __entry->skb_len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->mtu = mtu;
+ __entry->blkoff = blkoff;
+ __entry->proto = iph ? __trace_ip_proto(iph) : 0;
+ __entry->proto_seq = iph ? __trace_ip_proto_seq(iph) : 0;
+ ),
+ TP_printk("INGRPSTQ: skb=%p len=%u data_len=%u mtu=%u blkoff=%u proto=%u proto_seq=%u",
+ __entry->skb, __entry->skb_len, __entry->data_len, __entry->mtu,
+ __entry->blkoff, __entry->proto, __entry->proto_seq));
+
+DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_dequeue,
+ TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
+ struct iphdr *iph),
+ TP_ARGS(skb, mtu, blkoff, iph));
+
+DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_fragmenting,
+ TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
+ struct iphdr *iph),
+ TP_ARGS(skb, mtu, blkoff, iph));
+
+DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_final_fragment,
+ TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
+ struct iphdr *iph),
+ TP_ARGS(skb, mtu, blkoff, iph));
+
+DEFINE_EVENT(iptfs_ingress_postq_event, iptfs_first_toobig,
+ TP_PROTO(struct sk_buff *skb, u32 mtu, u16 blkoff,
+ struct iphdr *iph),
+ TP_ARGS(skb, mtu, blkoff, iph));
+
+TRACE_EVENT(iptfs_ingress_nth_peek,
+ TP_PROTO(struct sk_buff *skb, u32 remaining),
+ TP_ARGS(skb, remaining),
+ TP_STRUCT__entry(__field(struct sk_buff *, skb)
+ __field(u32, skb_len)
+ __field(u32, remaining)),
+ TP_fast_assign(__entry->skb = skb;
+ __entry->skb_len = skb->len;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("INGRPSTQ: NTHPEEK: skb=%p len=%u remaining=%u",
+ __entry->skb, __entry->skb_len, __entry->remaining));
+
+TRACE_EVENT(iptfs_ingress_nth_add, TP_PROTO(struct sk_buff *skb, u8 share_ok),
+ TP_ARGS(skb, share_ok),
+ TP_STRUCT__entry(__field(struct sk_buff *, skb)
+ __field(u32, skb_len)
+ __field(u32, data_len)
+ __field(u8, share_ok)
+ __field(u8, head_frag)
+ __field(u8, pp_recycle)
+ __field(u8, cloned)
+ __field(u8, shared)
+ __field(u8, nr_frags)
+ __field(u8, frag_list)
+ ),
+ TP_fast_assign(__entry->skb = skb;
+ __entry->skb_len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->share_ok = share_ok;
+ __entry->head_frag = skb->head_frag;
+ __entry->pp_recycle = skb->pp_recycle;
+ __entry->cloned = skb_cloned(skb);
+ __entry->shared = skb_shared(skb);
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->frag_list = (bool)skb_shinfo(skb)->frag_list;
+ ),
+ TP_printk("INGRPSTQ: NTHADD: skb=%p len=%u data_len=%u share_ok=%u head_frag=%u pp_recycle=%u cloned=%u shared=%u nr_frags=%u frag_list=%u",
+ __entry->skb, __entry->skb_len, __entry->data_len, __entry->share_ok,
+ __entry->head_frag, __entry->pp_recycle, __entry->cloned, __entry->shared,
+ __entry->nr_frags, __entry->frag_list));
+
+DECLARE_EVENT_CLASS(iptfs_timer_event,
+ TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
+ TP_ARGS(xtfs, time_val),
+ TP_STRUCT__entry(__field(u64, time_val)
+ __field(u64, set_time)),
+ TP_fast_assign(__entry->time_val = time_val;
+ __entry->set_time = xtfs->iptfs_settime;
+ ),
+ TP_printk("TIMER: set_time=%llu time_val=%llu",
+ __entry->set_time, __entry->time_val));
+
+DEFINE_EVENT(iptfs_timer_event, iptfs_timer_start,
+ TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
+ TP_ARGS(xtfs, time_val));
+
+DEFINE_EVENT(iptfs_timer_event, iptfs_timer_expire,
+ TP_PROTO(struct xfrm_iptfs_data *xtfs, u64 time_val),
+ TP_ARGS(xtfs, time_val));
+
+#endif /* _TRACE_IPTFS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../net/xfrm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_iptfs
+#include <trace/define_trace.h>
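
The trailer above is the standard tracepoint-header epilogue (TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE plus <trace/define_trace.h>). As a hedged reminder of how such a header is consumed, exactly one compilation unit in net/xfrm is expected to define CREATE_TRACE_POINTS before including it (a kernel-wide convention, not something visible in this hunk), and callers then fire the generated trace_*() helpers:

	/* in exactly one .c file */
	#define CREATE_TRACE_POINTS
	#include "trace_iptfs.h"

	/* elsewhere, emitting one of the events defined above */
	trace_iptfs_timer_start(xtfs, time_val);
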
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index 655fe4ff8621..b8d2e6930041 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -98,6 +98,7 @@ static const int compat_msg_min[XFRM_NR_MSGTYPES] = {
};
static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ [XFRMA_UNSPEC] = { .strict_start_type = XFRMA_SA_DIR },
[XFRMA_SA] = { .len = XMSGSIZE(compat_xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = XMSGSIZE(compat_xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
@@ -129,6 +130,9 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
+ [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
+ [XFRMA_SA_PCPU] = { .type = NLA_U32 },
};
static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
@@ -277,9 +281,18 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
case XFRMA_SET_MARK_MASK:
case XFRMA_IF_ID:
case XFRMA_MTIMER_THRESH:
+ case XFRMA_SA_DIR:
+ case XFRMA_NAT_KEEPALIVE_INTERVAL:
+ case XFRMA_SA_PCPU:
+ case XFRMA_IPTFS_DROP_TIME:
+ case XFRMA_IPTFS_REORDER_WINDOW:
+ case XFRMA_IPTFS_DONT_FRAG:
+ case XFRMA_IPTFS_INIT_DELAY:
+ case XFRMA_IPTFS_MAX_QSIZE:
+ case XFRMA_IPTFS_PKT_SIZE:
return xfrm_nla_cpy(dst, src, nla_len(src));
default:
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IPTFS_PKT_SIZE);
pr_warn_once("unsupported nla_type %d\n", src->nla_type);
return -EOPNOTSUPP;
}
@@ -434,7 +447,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
int err;
if (type > XFRMA_MAX) {
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IPTFS_PKT_SIZE);
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 653e51ae3964..d1fa94e52cea 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -42,7 +42,8 @@ static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
skb->transport_header = skb->network_header + hsize;
skb_reset_mac_len(skb);
- pskb_pull(skb, skb->mac_len + x->props.header_len);
+ pskb_pull(skb,
+ skb->mac_len + x->props.header_len - x->props.enc_hdr_len);
}
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
@@ -68,6 +69,7 @@ static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
switch (x->outer_mode.encap) {
+ case XFRM_MODE_IPTFS:
case XFRM_MODE_TUNNEL:
if (x->outer_mode.family == AF_INET)
return __xfrm_mode_tunnel_prep(x, skb,
@@ -253,16 +255,24 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
return -EINVAL;
}
+ if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) ||
+ (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) {
+ NL_SET_ERR_MSG(extack, "Mismatched SA and offload direction");
+ return -EINVAL;
+ }
+
is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
- /* We don't yet support UDP encapsulation and TFC padding. */
- if ((!is_packet_offload && x->encap) || x->tfcpad) {
- NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
+ /* We don't yet support TFC padding. */
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded");
return -EINVAL;
}
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
+ struct xfrm_dst_lookup_params params;
+
if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
saddr = &x->props.saddr;
daddr = &x->id.daddr;
@@ -271,9 +281,12 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
daddr = &x->props.saddr;
}
- dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
- x->props.family,
- xfrm_smark_get(0, x));
+ memset(&params, 0, sizeof(params));
+ params.net = net;
+ params.saddr = saddr;
+ params.daddr = daddr;
+ params.mark = xfrm_smark_get(0, x);
+ dst = __xfrm_dst_lookup(x->props.family, &params);
if (IS_ERR(dst))
return (is_packet_offload) ? -EINVAL : 0;
@@ -322,12 +335,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
/* User explicitly requested packet offload mode and configured
* policy in addition to the XFRM state. So be civil to users,
* and return an error instead of taking fallback path.
- *
- * This WARN_ON() can be seen as a documentation for driver
- * authors to do not return -EOPNOTSUPP in packet offload mode.
*/
- WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
- if (err != -EOPNOTSUPP || is_packet_offload) {
+ if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) {
NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
return err;
}
@@ -407,7 +416,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct net_device *dev = x->xso.dev;
- if (!x->type_offload)
+ if (!x->type_offload ||
+ (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
return false;
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index bd4ce21d76d7..7e6a71b9d6a3 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -21,6 +21,7 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
+#include <net/hotdata.h>
#include "xfrm_inout.h"
@@ -47,7 +48,7 @@ static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];
static struct gro_cells gro_cells;
-static struct net_device xfrm_napi_dev;
+static struct net_device *xfrm_napi_dev;
static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
@@ -388,11 +389,15 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
*/
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
+ struct xfrm_offload *xo = xfrm_offload(skb);
int ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);
+ if (xo)
+ xo->orig_mac_len =
+ skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
@@ -403,11 +408,15 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
+ struct xfrm_offload *xo = xfrm_offload(skb);
int ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);
+ if (xo)
+ xo->orig_mac_len =
+ skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
skb->network_header = skb->transport_header;
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
@@ -437,6 +446,9 @@ static int xfrm_inner_mode_input(struct xfrm_state *x,
WARN_ON_ONCE(1);
break;
default:
+ if (x->mode_cbs && x->mode_cbs->input)
+ return x->mode_cbs->input(x, skb);
+
WARN_ON_ONCE(1);
break;
}
@@ -444,6 +456,10 @@ static int xfrm_inner_mode_input(struct xfrm_state *x,
return -EOPNOTSUPP;
}
+/* NOTE: encap_type - In addition to the normal (non-negative) values for
+ * encap_type, a negative value of -1 or -2 can be used to resume/restart this
+ * function after a previous invocation that terminated early for async operation.
+ */
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
const struct xfrm_state_afinfo *afinfo;
@@ -462,7 +478,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;
- if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) {
+ if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
+ encap_type == UDP_ENCAP_ESPINUDP))) {
x = xfrm_input_state(skb);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
@@ -479,6 +496,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
family = x->props.family;
+ /* An encap_type of -2 indicates reconstructed inner packet */
+ if (encap_type == -2)
+ goto resume_decapped;
+
/* An encap_type of -1 indicates async resumption. */
if (encap_type == -1) {
async = 1;
@@ -562,7 +583,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
- x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
+ x = xfrm_input_state_lookup(net, mark, daddr, spi, nexthdr, family);
if (x == NULL) {
secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
@@ -570,6 +591,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
+ if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ secpath_reset(skb);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
+ xfrm_audit_state_notfound(skb, family, spi, seq);
+ xfrm_state_put(x);
+ x = NULL;
+ goto drop;
+ }
+
skb->mark = xfrm_smark_get(skb->mark, x);
sp->xvec[sp->len++] = x;
@@ -660,11 +690,14 @@ resume:
XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
- if (xfrm_inner_mode_input(x, skb)) {
+ err = xfrm_inner_mode_input(x, skb);
+ if (err == -EINPROGRESS)
+ return 0;
+ else if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
goto drop;
}
-
+resume_decapped:
if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
decaps = 1;
break;
@@ -764,7 +797,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
- if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
@@ -792,8 +825,11 @@ void __init xfrm_input_init(void)
int err;
int i;
- init_dummy_netdev(&xfrm_napi_dev);
- err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
+ xfrm_napi_dev = alloc_netdev_dummy(0);
+ if (!xfrm_napi_dev)
+ panic("Failed to allocate XFRM dummy netdev\n");
+
+ err = gro_cells_init(&gro_cells, xfrm_napi_dev);
if (err)
gro_cells.cells = NULL;
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 7d5e920141e9..5ea15037ebd1 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -93,10 +93,10 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
-BTF_SET8_END(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_END(xfrm_ifc_kfunc_set)
static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 21d50d75c260..c397eb99d867 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -240,7 +240,6 @@ static void xfrmi_dev_free(struct net_device *dev)
struct xfrm_if *xi = netdev_priv(dev);
gro_cells_destroy(&xi->gro_cells);
- free_percpu(dev->tstats);
}
static int xfrmi_create(struct net_device *dev)
@@ -507,7 +506,7 @@ xmit:
skb_dst_set(skb, dst);
skb->dev = tdev;
- err = dst_output(xi->net, skb->sk, skb);
+ err = dst_output(xi->net, skb_to_full_sk(skb), skb);
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
@@ -727,7 +726,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->p.link;
+ return READ_ONCE(xi->p.link);
}
static const struct net_device_ops xfrmi_netdev_ops = {
@@ -749,6 +748,7 @@ static void xfrmi_dev_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
eth_broadcast_addr(dev->broadcast);
@@ -765,17 +765,11 @@ static int xfrmi_dev_init(struct net_device *dev)
struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&xi->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->features |= XFRMI_FEATURES;
dev->hw_features |= XFRMI_FEATURES;
@@ -932,7 +926,7 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->net;
+ return READ_ONCE(xi->net);
}
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
@@ -957,12 +951,12 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
@@ -973,18 +967,16 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations xfrmi_net_ops = {
- .exit_batch = xfrmi_exit_batch_net,
+ .exit_batch_rtnl = xfrmi_exit_batch_rtnl,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
diff --git a/net/xfrm/xfrm_iptfs.c b/net/xfrm/xfrm_iptfs.c
new file mode 100644
index 000000000000..755f1eea8bfa
--- /dev/null
+++ b/net/xfrm/xfrm_iptfs.c
@@ -0,0 +1,2764 @@
+// SPDX-License-Identifier: GPL-2.0
+/* xfrm_iptfs: IPTFS encapsulation support
+ *
+ * April 21 2022, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2022, LabN Consulting, L.L.C.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/icmpv6.h>
+#include <linux/skbuff_ref.h>
+#include <net/gro.h>
+#include <net/icmp.h>
+#include <net/ip6_route.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+
+#include <crypto/aead.h>
+
+#include "xfrm_inout.h"
+#include "trace_iptfs.h"
+
+/* IPTFS encap (header) values. */
+#define IPTFS_SUBTYPE_BASIC 0
+#define IPTFS_SUBTYPE_CC 1
+
+/* ----------------------------------------------- */
+/* IP-TFS default SA values (tunnel egress/dir-in) */
+/* ----------------------------------------------- */
+
+/**
+ * define IPTFS_DEFAULT_DROP_TIME_USECS - default drop time
+ *
+ * The default IPTFS drop time in microseconds. The drop time is the amount of
+ * time before a missing out-of-order IPTFS tunnel packet is considered lost.
+ * See also the reorder window.
+ *
+ * Default 1s.
+ */
+#define IPTFS_DEFAULT_DROP_TIME_USECS 1000000
+
+/**
+ * define IPTFS_DEFAULT_REORDER_WINDOW - default reorder window size
+ *
+ * The default IPTFS reorder window size. The reorder window size dictates the
+ * maximum number of IPTFS tunnel packets in a sequence that may arrive out of
+ * order.
+ *
+ * Default 3. (tcp folks suggested)
+ */
+#define IPTFS_DEFAULT_REORDER_WINDOW 3
+
+/* ------------------------------------------------ */
+/* IPTFS default SA values (tunnel ingress/dir-out) */
+/* ------------------------------------------------ */
+
+/**
+ * define IPTFS_DEFAULT_INIT_DELAY_USECS - default initial output delay
+ *
+ * The initial output delay is the amount of time prior to servicing the output
+ * queue after queueing the first packet on said queue. This applies anytime the
+ * output queue was previously empty.
+ *
+ * Default 0.
+ */
+#define IPTFS_DEFAULT_INIT_DELAY_USECS 0
+
+/**
+ * define IPTFS_DEFAULT_MAX_QUEUE_SIZE - default max output queue size.
+ *
+ * The default IPTFS max output queue size in octets. The output queue is where
+ * received packets destined for output over an IPTFS tunnel are stored prior to
+ * being output in aggregated/fragmented form over the IPTFS tunnel.
+ *
+ * Default 10M (1024 * 10240 octets).
+ */
+#define IPTFS_DEFAULT_MAX_QUEUE_SIZE (1024 * 10240)
+
+/* Assumed: skb->head is cache aligned.
+ *
+ * L2 Header resv: Arrange for cacheline to start at skb->data - 16 to keep the
+ * to-be-pushed L2 header in the same cacheline as the resulting `skb->data` (i.e.,
+ * the L3 header). If cacheline size is > 64 then skb->data + pushed L2 will all
+ * be in a single cacheline if we simply reserve 64 bytes.
+ *
+ * L3 Header resv: For L3+L2 headers (i.e., skb->data points at the IPTFS payload)
+ * we want `skb->data` to be cacheline aligned and all pushed L2L3 headers will
+ * be in their own cacheline[s]. 128 works for cachelines up to 128 bytes, for
+ * any larger cacheline sizes the pushed headers will simply share the cacheline
+ * with the start of the IPTFS payload (skb->data).
+ */
+#define XFRM_IPTFS_MIN_L3HEADROOM 128
+#define XFRM_IPTFS_MIN_L2HEADROOM (L1_CACHE_BYTES > 64 ? 64 : 64 + 16)
+
+/* Min to try to share outer iptfs skb data vs copying into new skb */
+#define IPTFS_PKT_SHARE_MIN 129
+
+#define NSECS_IN_USEC 1000
+
+#define IPTFS_HRTIMER_MODE HRTIMER_MODE_REL_SOFT
+
+/**
+ * struct xfrm_iptfs_config - configuration for the IPTFS tunnel.
+ * @pkt_size: size of the outer IP packet. 0 to use interface and MTU discovery,
+ * otherwise the user specified value.
+ * @max_queue_size: The maximum number of octets allowed to be queued to be sent
+ * over the IPTFS SA. The queue size is measured as the size of all the
+ * packets enqueued.
+ * @reorder_win_size: the number of slots in the reorder window, thus the number of
+ * packets that may arrive out of order.
+ * @dont_frag: true to inhibit fragmenting across IPTFS outer packets.
+ */
+struct xfrm_iptfs_config {
+ u32 pkt_size; /* outer_packet_size or 0 */
+ u32 max_queue_size; /* octets */
+ u16 reorder_win_size;
+ u8 dont_frag : 1;
+};
+
+struct skb_wseq {
+ struct sk_buff *skb;
+ u64 drop_time;
+};
+
+/**
+ * struct xfrm_iptfs_data - mode specific xfrm state.
+ * @cfg: IPTFS tunnel config.
+ * @x: owning SA (xfrm_state).
+ * @queue: queued user packets to send.
+ * @queue_size: number of octets on queue (sum of packet sizes).
+ * @ecn_queue_size: octets above which to ECN mark.
+ * @init_delay_ns: nanoseconds to wait to send initial IPTFS packet.
+ * @iptfs_timer: output timer.
+ * @iptfs_settime: time the output timer was set.
+ * @payload_mtu: max payload size.
+ * @w_seq_set: true after first seq received.
+ * @w_wantseq: waiting for this seq number as next to process (in order).
+ * @w_saved: the saved buf array (reorder window).
+ * @w_savedlen: the saved len (not size).
+ * @drop_lock: lock to protect reorder queue.
+ * @drop_timer: timer for considering next packet lost.
+ * @drop_time_ns: timer interval in nanoseconds.
+ * @ra_newskb: new pkt being reassembled.
+ * @ra_wantseq: expected next sequence for reassembly.
+ * @ra_runt: last pkt bytes from very end of last skb.
+ * @ra_runtlen: size of ra_runt.
+ */
+struct xfrm_iptfs_data {
+ struct xfrm_iptfs_config cfg;
+
+ /* Ingress User Input */
+ struct xfrm_state *x; /* owning state */
+ struct sk_buff_head queue; /* output queue */
+
+ u32 queue_size; /* octets */
+ u32 ecn_queue_size; /* octets above which ECN mark */
+ u64 init_delay_ns; /* nanoseconds */
+ struct hrtimer iptfs_timer; /* output timer */
+ time64_t iptfs_settime; /* time timer was set */
+ u32 payload_mtu; /* max payload size */
+
+ /* Tunnel input reordering */
+ bool w_seq_set; /* true after first seq received */
+ u64 w_wantseq; /* expected next sequence */
+ struct skb_wseq *w_saved; /* the saved buf array */
+ u32 w_savedlen; /* the saved len (not size) */
+ spinlock_t drop_lock;
+ struct hrtimer drop_timer;
+ u64 drop_time_ns;
+
+ /* Tunnel input reassembly */
+ struct sk_buff *ra_newskb; /* new pkt being reassembled */
+ u64 ra_wantseq; /* expected next sequence */
+ u8 ra_runt[6]; /* last pkt bytes from last skb */
+ u8 ra_runtlen; /* count of ra_runt */
+};
+
+static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu);
+static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me);
+static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me);
+
+/* ================= */
+/* Utility Functions */
+/* ================= */
+
+#ifdef TRACEPOINTS_ENABLED
+static u32 __trace_ip_proto(struct iphdr *iph)
+{
+ if (iph->version == 4)
+ return iph->protocol;
+ return ((struct ipv6hdr *)iph)->nexthdr;
+}
+
+static u32 __trace_ip_proto_seq(struct iphdr *iph)
+{
+ void *nexthdr;
+ u32 protocol = 0;
+
+ if (iph->version == 4) {
+ nexthdr = (void *)(iph + 1);
+ protocol = iph->protocol;
+ } else if (iph->version == 6) {
+ nexthdr = (void *)(((struct ipv6hdr *)(iph)) + 1);
+ protocol = ((struct ipv6hdr *)(iph))->nexthdr;
+ }
+ switch (protocol) {
+ case IPPROTO_ICMP:
+ return ntohs(((struct icmphdr *)nexthdr)->un.echo.sequence);
+ case IPPROTO_ICMPV6:
+ return ntohs(((struct icmp6hdr *)nexthdr)->icmp6_sequence);
+ case IPPROTO_TCP:
+ return ntohl(((struct tcphdr *)nexthdr)->seq);
+ case IPPROTO_UDP:
+ return ntohs(((struct udphdr *)nexthdr)->source);
+ default:
+ return 0;
+ }
+}
+#endif /*TRACEPOINTS_ENABLED*/
+
+static u64 __esp_seq(struct sk_buff *skb)
+{
+ u64 seq = ntohl(XFRM_SKB_CB(skb)->seq.input.low);
+
+ return seq | (u64)ntohl(XFRM_SKB_CB(skb)->seq.input.hi) << 32;
+}
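+
+/* A quick sketch of the above: the hi/low fields carry the upper and lower
+ * 32 bits of the 64-bit ESP sequence number, so e.g. hi == htonl(0x1) and
+ * low == htonl(0x2) combine to 0x0000000100000002.
+ */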
+
+/* ======================= */
+/* IPTFS SK_BUFF Functions */
+/* ======================= */
+
+/**
+ * iptfs_alloc_skb() - Allocate a new `skb`.
+ * @tpl: the skb to copy required meta-data from.
+ * @len: the linear length of the head data, zero is fine.
+ * @l3resv: true if skb reserve needs to support pushing L3 headers
+ *
+ * A new `skb` is allocated and required meta-data is copied from `tpl`, the
+ * head data is sized to `len` + reserved space set according to the @l3resv
+ * boolean.
+ *
+ * When @l3resv is false, resv is XFRM_IPTFS_MIN_L2HEADROOM, which arranges for
+ * `skb->data - 16`, a good guess for cache alignment (placing the to-be-pushed
+ * L2 header at the start of a cacheline).
+ *
+ * Otherwise, @l3resv is true and resv is set to the correct reserved space for
+ * dst->dev plus the calculated L3 overhead for the xfrm dst, or
+ * XFRM_IPTFS_MIN_L3HEADROOM, whichever is larger. This is then cache aligned so
+ * that all the headers will commonly fall in a cacheline when possible.
+ *
+ * l3resv=true is used on tunnel ingress (tx), because we need to reserve for
+ * the new IPTFS packet (i.e., L2+L3 headers). On tunnel egress (rx) the data
+ * being copied into the skb includes the user L3 headers already so we only
+ * need to reserve for L2.
+ *
+ * Return: the new skb or NULL.
+ */
+static struct sk_buff *iptfs_alloc_skb(struct sk_buff *tpl, u32 len, bool l3resv)
+{
+ struct sk_buff *skb;
+ u32 resv;
+
+ if (!l3resv) {
+ resv = XFRM_IPTFS_MIN_L2HEADROOM;
+ } else {
+ struct dst_entry *dst = skb_dst(tpl);
+
+ resv = LL_RESERVED_SPACE(dst->dev) + dst->header_len;
+ resv = max(resv, XFRM_IPTFS_MIN_L3HEADROOM);
+ resv = L1_CACHE_ALIGN(resv);
+ }
+
+ skb = alloc_skb(len + resv, GFP_ATOMIC | __GFP_NOWARN);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, resv);
+
+ if (!l3resv) {
+ /* xfrm_input resume needs dev and xfrm ext from tunnel pkt */
+ skb->dev = tpl->dev;
+ __skb_ext_copy(skb, tpl);
+ }
+
+ /* dropped by xfrm_input, used by xfrm_output */
+ skb_dst_copy(skb, tpl);
+
+ return skb;
+}
+
+/**
+ * iptfs_skb_head_to_frag() - initialize a skb_frag_t based on skb head data
+ * @skb: skb with the head data
+ * @frag: frag to initialize
+ */
+static void iptfs_skb_head_to_frag(const struct sk_buff *skb, skb_frag_t *frag)
+{
+ struct page *page = virt_to_head_page(skb->data);
+ unsigned char *addr = (unsigned char *)page_address(page);
+
+ skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb));
+}
+
+/**
+ * struct iptfs_skb_frag_walk - use to track a walk through fragments
+ * @fragi: current fragment index
+ * @past: length of data in fragments before @fragi
+ * @total: length of data in all fragments
+ * @nr_frags: number of fragments present in array
+ * @initial_offset: the value passed in to skb_prepare_frag_walk()
+ * @frags: the page fragments inc. room for head page
+ * @pp_recycle: copy of skb->pp_recycle
+ */
+struct iptfs_skb_frag_walk {
+ u32 fragi;
+ u32 past;
+ u32 total;
+ u32 nr_frags;
+ u32 initial_offset;
+ skb_frag_t frags[MAX_SKB_FRAGS + 1];
+ bool pp_recycle;
+};
+
+/**
+ * iptfs_skb_prepare_frag_walk() - initialize a frag walk over an skb.
+ * @skb: the skb to walk.
+ * @initial_offset: start the walk @initial_offset into the skb.
+ * @walk: the walk to initialize
+ *
+ * Future calls to iptfs_skb_add_frags() will expect the @offset value to be
+ * at least @initial_offset.
+ */
+static void iptfs_skb_prepare_frag_walk(struct sk_buff *skb, u32 initial_offset,
+ struct iptfs_skb_frag_walk *walk)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frag, *from;
+ u32 i;
+
+ walk->initial_offset = initial_offset;
+ walk->fragi = 0;
+ walk->past = 0;
+ walk->total = 0;
+ walk->nr_frags = 0;
+ walk->pp_recycle = skb->pp_recycle;
+
+ if (skb->head_frag) {
+ if (initial_offset >= skb_headlen(skb)) {
+ initial_offset -= skb_headlen(skb);
+ } else {
+ frag = &walk->frags[walk->nr_frags++];
+ iptfs_skb_head_to_frag(skb, frag);
+ frag->offset += initial_offset;
+ frag->len -= initial_offset;
+ walk->total += frag->len;
+ initial_offset = 0;
+ }
+ } else {
+ initial_offset -= skb_headlen(skb);
+ }
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ from = &shinfo->frags[i];
+ if (initial_offset >= from->len) {
+ initial_offset -= from->len;
+ continue;
+ }
+ frag = &walk->frags[walk->nr_frags++];
+ *frag = *from;
+ if (initial_offset) {
+ frag->offset += initial_offset;
+ frag->len -= initial_offset;
+ initial_offset = 0;
+ }
+ walk->total += frag->len;
+ }
+}
+
+static u32 iptfs_skb_reset_frag_walk(struct iptfs_skb_frag_walk *walk,
+ u32 offset)
+{
+ /* Adjust offset to refer to internal walk values */
+ offset -= walk->initial_offset;
+
+ /* Get to the correct fragment for offset */
+ while (offset < walk->past) {
+ walk->past -= walk->frags[--walk->fragi].len;
+ if (offset >= walk->past)
+ break;
+ }
+ while (offset >= walk->past + walk->frags[walk->fragi].len)
+ walk->past += walk->frags[walk->fragi++].len;
+
+ /* offset now relative to this current frag */
+ offset -= walk->past;
+ return offset;
+}
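+
+/* Worked example (hypothetical sizes): with initial_offset == 10 and two frags
+ * of lengths 100 and 200, a request for offset 150 becomes 140 internally,
+ * lands in the second frag (fragi == 1), and returns 40 as the frag-relative
+ * offset.
+ */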
+
+/**
+ * iptfs_skb_can_add_frags() - check if ok to add frags from walk to skb
+ * @skb: skb to check for adding frags to
+ * @walk: the walk that will be used as source for frags.
+ * @offset: offset from beginning of original skb to start from.
+ * @len: amount of data to add frag references to in @skb.
+ *
+ * Return: true if ok to add frags.
+ */
+static bool iptfs_skb_can_add_frags(const struct sk_buff *skb,
+ struct iptfs_skb_frag_walk *walk,
+ u32 offset, u32 len)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 fragi, nr_frags, fraglen;
+
+ if (skb_has_frag_list(skb) || skb->pp_recycle != walk->pp_recycle)
+ return false;
+
+ /* Make offset relative to current frag after setting that */
+ offset = iptfs_skb_reset_frag_walk(walk, offset);
+
+ /* Verify we have array space for the fragments we need to add */
+ fragi = walk->fragi;
+ nr_frags = shinfo->nr_frags;
+ while (len && fragi < walk->nr_frags) {
+ skb_frag_t *frag = &walk->frags[fragi];
+
+ fraglen = frag->len;
+ if (offset) {
+ fraglen -= offset;
+ offset = 0;
+ }
+ if (++nr_frags > MAX_SKB_FRAGS)
+ return false;
+ if (len <= fraglen)
+ return true;
+ len -= fraglen;
+ fragi++;
+ }
+ /* We may not copy all @len but what we have will fit. */
+ return true;
+}
+
+/**
+ * iptfs_skb_add_frags() - add a range of fragment references into an skb
+ * @skb: skb to add references into
+ * @walk: the walk to add referenced fragments from.
+ * @offset: offset from beginning of original skb to start from.
+ * @len: amount of data to add frag references to in @skb.
+ *
+ * iptfs_skb_can_add_frags() should be called before this function to verify
+ * that the destination @skb is compatible with the walk and has space in the
+ * array for the to be added frag references.
+ *
+ * Return: The number of bytes not added to @skb b/c we reached the end of the
+ * walk before adding all of @len.
+ */
+static int iptfs_skb_add_frags(struct sk_buff *skb,
+ struct iptfs_skb_frag_walk *walk, u32 offset,
+ u32 len)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 fraglen;
+
+ if (!walk->nr_frags || offset >= walk->total + walk->initial_offset)
+ return len;
+
+ /* make offset relative to current frag after setting that */
+ offset = iptfs_skb_reset_frag_walk(walk, offset);
+
+ while (len && walk->fragi < walk->nr_frags) {
+ skb_frag_t *frag = &walk->frags[walk->fragi];
+ skb_frag_t *tofrag = &shinfo->frags[shinfo->nr_frags];
+
+ *tofrag = *frag;
+ if (offset) {
+ tofrag->offset += offset;
+ tofrag->len -= offset;
+ offset = 0;
+ }
+ __skb_frag_ref(tofrag);
+ shinfo->nr_frags++;
+
+ /* see if we are done */
+ fraglen = tofrag->len;
+ if (len < fraglen) {
+ tofrag->len = len;
+ skb->len += len;
+ skb->data_len += len;
+ return 0;
+ }
+ /* advance to next source fragment */
+ len -= fraglen; /* careful, use dst frag len */
+ skb->len += fraglen; /* careful, " " " */
+ skb->data_len += fraglen; /* careful, " " " */
+ walk->past += frag->len; /* careful, use src frag len */
+ walk->fragi++;
+ }
+ return len;
+}
+
+/* ================================== */
+/* IPTFS Trace Event Definitions */
+/* ================================== */
+
+#define CREATE_TRACE_POINTS
+#include "trace_iptfs.h"
+
+/* ================================== */
+/* IPTFS Receiving (egress) Functions */
+/* ================================== */
+
+/**
+ * iptfs_pskb_add_frags() - Create and add frags into a new sk_buff.
+ * @tpl: template to create new skb from.
+ * @walk: The source for fragments to add.
+ * @off: The offset into @walk to add frags from, also used with @st and
+ * @copy_len.
+ * @len: The length of data to add covering frags from @walk into the new skb;
+ * must be >= @copy_len.
+ * @st: The sequence state to copy from into the new head skb.
+ * @copy_len: Copy @copy_len bytes from @st at offset @off into the new skb
+ * linear space.
+ *
+ * Create a new sk_buff `skb` using the template @tpl. Copy @copy_len bytes from
+ * @st into the new skb linear space, and then add shared fragments from the
+ * frag walk for the remaining @len of data (i.e., @len - @copy_len bytes).
+ *
+ * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
+ */
+static struct sk_buff *
+iptfs_pskb_add_frags(struct sk_buff *tpl, struct iptfs_skb_frag_walk *walk,
+ u32 off, u32 len, struct skb_seq_state *st, u32 copy_len)
+{
+ struct sk_buff *skb;
+
+ skb = iptfs_alloc_skb(tpl, copy_len, false);
+ if (!skb)
+ return NULL;
+
+ /* this should not normally happen */
+ if (!iptfs_skb_can_add_frags(skb, walk, off + copy_len,
+ len - copy_len)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (copy_len &&
+ skb_copy_seq_read(st, off, skb_put(skb, copy_len), copy_len)) {
+ XFRM_INC_STATS(dev_net(st->root_skb->dev),
+ LINUX_MIB_XFRMINERROR);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ iptfs_skb_add_frags(skb, walk, off + copy_len, len - copy_len);
+ return skb;
+}
+
+/**
+ * iptfs_pskb_extract_seq() - Create and load data into a new sk_buff.
+ * @skblen: the total data size for `skb`.
+ * @st: The source for the rest of the data to copy into `skb`.
+ * @off: The offset into @st to copy data from.
+ * @len: The length of data to copy from @st into `skb`. This must be <=
+ * @skblen.
+ *
+ * Create a new sk_buff `skb` with @skblen of packet data space. Then, using
+ * the skb seq read functions, copy @len bytes from @st into `skb` starting
+ * from @off.
+ *
+ * It is an error for @len to be greater than the amount of data left in @st.
+ *
+ * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
+ */
+static struct sk_buff *
+iptfs_pskb_extract_seq(u32 skblen, struct skb_seq_state *st, u32 off, int len)
+{
+ struct sk_buff *skb = iptfs_alloc_skb(st->root_skb, skblen, false);
+
+ if (!skb)
+ return NULL;
+ if (skb_copy_seq_read(st, off, skb_put(skb, len), len)) {
+ XFRM_INC_STATS(dev_net(st->root_skb->dev), LINUX_MIB_XFRMINERROR);
+ kfree_skb(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+/**
+ * iptfs_input_save_runt() - save data in xtfs runt space.
+ * @xtfs: xtfs state
+ * @seq: the current sequence
+ * @buf: packet data
+ * @len: length of packet data
+ *
+ * Save the small (`len`) start of a fragmented packet in `buf` in the xtfs data
+ * runt space.
+ */
+static void iptfs_input_save_runt(struct xfrm_iptfs_data *xtfs, u64 seq,
+ u8 *buf, int len)
+{
+ memcpy(xtfs->ra_runt, buf, len);
+
+ xtfs->ra_runtlen = len;
+ xtfs->ra_wantseq = seq + 1;
+}
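+
+/* For example (hypothetical numbers): if only 3 octets of a new inner packet
+ * fit at the tail of an outer IPTFS payload, those 3 octets are stashed in
+ * ra_runt here and the inner packet is completed from the next in-order
+ * payload in iptfs_reassem_cont().
+ */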
+
+/**
+ * __iptfs_iphlen() - return the v4/v6 header length using packet data.
+ * @data: pointer at octet with version nibble
+ *
+ * The version data has been checked to be valid (i.e., either 4 or 6).
+ *
+ * Return: the IP header size based on the IP version.
+ */
+static u32 __iptfs_iphlen(u8 *data)
+{
+ struct iphdr *iph = (struct iphdr *)data;
+
+ if (iph->version == 0x4)
+ return sizeof(*iph);
+ return sizeof(struct ipv6hdr);
+}
+
+/**
+ * __iptfs_iplen() - return the v4/v6 length using packet data.
+ * @data: pointer to ip (v4/v6) packet header
+ *
+ * Grab the IPv4 or IPv6 length value in the start of the inner packet header
+ * pointed to by `data`. Assumes data len is enough for the length field only.
+ *
+ * The version data has been checked to be valid (i.e., either 4 or 6).
+ *
+ * Return: the length value.
+ */
+static u32 __iptfs_iplen(u8 *data)
+{
+ struct iphdr *iph = (struct iphdr *)data;
+
+ if (iph->version == 0x4)
+ return ntohs(iph->tot_len);
+ return ntohs(((struct ipv6hdr *)iph)->payload_len) +
+ sizeof(struct ipv6hdr);
+}
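+
+/* For example: IPv4 returns tot_len directly, while for IPv6 the 40-octet
+ * fixed header is not included in payload_len, so payload_len == 1000 yields
+ * a total length of 1040.
+ */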
+
+/**
+ * iptfs_complete_inner_skb() - finish preparing the inner packet for gro recv.
+ * @x: xfrm state
+ * @skb: the inner packet
+ *
+ * Finish the standard xfrm processing on the inner packet prior to sending back
+ * through gro_cells_receive. We do this separately b/c we are building a list
+ * of packets in the hopes that one day a list will be taken by
+ * xfrm_input.
+ */
+static void iptfs_complete_inner_skb(struct xfrm_state *x, struct sk_buff *skb)
+{
+ skb_reset_network_header(skb);
+
+ /* The packet is going back through gro_cells_receive(); no need to
+ * set this.
+ */
+ skb_reset_transport_header(skb);
+
+ /* Packet already has checksum value set. */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Our skb will contain the header data copied from the outer packet
+ * which contained the start of this inner packet. This is true both
+ * when we allocate a new skb and when we reuse the existing skb.
+ */
+ if (ip_hdr(skb)->version == 0x4) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (x->props.flags & XFRM_STATE_DECAP_DSCP)
+ ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
+ IP_ECN_set_ce(iph);
+
+ skb->protocol = htons(ETH_P_IP);
+ } else {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ if (x->props.flags & XFRM_STATE_DECAP_DSCP)
+ ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
+ IP6_ECN_set_ce(skb, iph);
+
+ skb->protocol = htons(ETH_P_IPV6);
+ }
+}
+
+static void __iptfs_reassem_done(struct xfrm_iptfs_data *xtfs, bool free)
+{
+ assert_spin_locked(&xtfs->drop_lock);
+
+ /* We don't care whether the cancel succeeds; locking takes care of things */
+ hrtimer_try_to_cancel(&xtfs->drop_timer);
+ if (free)
+ kfree_skb(xtfs->ra_newskb);
+ xtfs->ra_newskb = NULL;
+}
+
+/**
+ * iptfs_reassem_abort() - Abort the in-progress packet and free the state.
+ * @xtfs: xtfs state
+ */
+static void iptfs_reassem_abort(struct xfrm_iptfs_data *xtfs)
+{
+ __iptfs_reassem_done(xtfs, true);
+}
+
+/**
+ * iptfs_reassem_done() - In-progress packet is complete, clear the state.
+ * @xtfs: xtfs state
+ */
+static void iptfs_reassem_done(struct xfrm_iptfs_data *xtfs)
+{
+ __iptfs_reassem_done(xtfs, false);
+}
+
+/**
+ * iptfs_reassem_cont() - Continue the reassembly of an inner packet.
+ * @xtfs: xtfs state
+ * @seq: sequence of current packet
+ * @st: seq read state for current packet
+ * @skb: current packet
+ * @data: offset into sequential packet data
+ * @blkoff: packet blkoff value
+ * @list: list of skbs to enqueue completed packet on
+ *
+ * Process an IPTFS payload that has a non-zero `blkoff` or when we are
+ * expecting the continuation b/c we have a runt or in-progress packet.
+ *
+ * Return: the new data offset to continue processing from.
+ */
+static u32 iptfs_reassem_cont(struct xfrm_iptfs_data *xtfs, u64 seq,
+ struct skb_seq_state *st, struct sk_buff *skb,
+ u32 data, u32 blkoff, struct list_head *list)
+{
+ struct iptfs_skb_frag_walk _fragwalk;
+ struct iptfs_skb_frag_walk *fragwalk = NULL;
+ struct sk_buff *newskb = xtfs->ra_newskb;
+ u32 remaining = skb->len - data;
+ u32 runtlen = xtfs->ra_runtlen;
+ u32 copylen, fraglen, ipremain, iphlen, iphremain, rrem;
+
+ /* Handle packet fragment we aren't expecting */
+ if (!runtlen && !xtfs->ra_newskb)
+ return data + min(blkoff, remaining);
+
+ /* Important to remember that input to this function is an ordered
+ * packet stream (unless the user disabled the reorder window). Thus, if
+ * we are waiting for and expecting the next packet so we can continue
+ * assembly, a newer sequence number indicates older ones are not coming
+ * (or should be ignored if they do arrive). Technically we can receive
+ * older ones when the reorder window is disabled; however, the user
+ * should have disabled fragmentation in this case, and regardless we
+ * don't deal with it.
+ *
+ * blkoff could be zero if the stream is messed up (or it's an all pad
+ * insertion); be careful to handle that case in each of the cases below.
+ */
+
+ /* Too old case: This can happen when the reorder window is disabled so
+ * ordering isn't actually guaranteed.
+ */
+ if (seq < xtfs->ra_wantseq)
+ return data + remaining;
+
+ /* Too new case: We missed what we wanted; clean up. */
+ if (seq > xtfs->ra_wantseq) {
+ XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+
+ if (blkoff == 0) {
+ if ((*skb->data & 0xF0) != 0) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+ /* Handle all pad case, advance expected sequence number.
+ * (RFC 9347 S2.2.3)
+ */
+ xtfs->ra_wantseq++;
+ /* will end parsing */
+ return data + remaining;
+ }
+
+ if (runtlen) {
+ /* Regardless of what happens we're done with the runt */
+ xtfs->ra_runtlen = 0;
+
+ /* The start of this inner packet was at the very end of the last
+ * iptfs payload which didn't include enough for the ip header
+ * length field. We must have *at least* that now.
+ */
+ rrem = sizeof(xtfs->ra_runt) - runtlen;
+ if (remaining < rrem || blkoff < rrem) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+
+ /* fill in the runt data */
+ if (skb_copy_seq_read(st, data, &xtfs->ra_runt[runtlen],
+ rrem)) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINBUFFERERROR);
+ goto abandon;
+ }
+
+ /* We have enough data to get the ip length value now,
+ * allocate an in progress skb
+ */
+ ipremain = __iptfs_iplen(xtfs->ra_runt);
+ if (ipremain < sizeof(xtfs->ra_runt)) {
+ /* the length must be at least as large as the runt buffer */
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+
+ /* For the runt case we don't attempt sharing currently. NOTE:
+ * Currently, this IPTFS implementation will not create runts.
+ */
+
+ newskb = iptfs_alloc_skb(skb, ipremain, false);
+ if (!newskb) {
+ XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINERROR);
+ goto abandon;
+ }
+ xtfs->ra_newskb = newskb;
+
+ /* Copy the runt data into the buffer, but leave data
+ * pointers the same as normal non-runt case. The extra `rrem`
+ * recopied bytes are basically cacheline free. Allows using
+ * same logic below to complete.
+ */
+ memcpy(skb_put(newskb, runtlen), xtfs->ra_runt,
+ sizeof(xtfs->ra_runt));
+ }
+
+ /* Continue reassembling the packet */
+ ipremain = __iptfs_iplen(newskb->data);
+ iphlen = __iptfs_iphlen(newskb->data);
+
+ ipremain -= newskb->len;
+ if (blkoff < ipremain) {
+ /* Corrupt data, we don't have enough to complete the packet */
+ XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+
+ /* We want the IP header in linear space */
+ if (newskb->len < iphlen) {
+ iphremain = iphlen - newskb->len;
+ if (blkoff < iphremain) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINIPTFSERROR);
+ goto abandon;
+ }
+ fraglen = min(blkoff, remaining);
+ copylen = min(fraglen, iphremain);
+ if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
+ copylen)) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINBUFFERERROR);
+ goto abandon;
+ }
+ /* Unlikely, but we still don't have the full IP header; wait for more */
+ if (copylen < iphremain) {
+ xtfs->ra_wantseq++;
+ return data + fraglen;
+ }
+ /* update data and things derived from it */
+ data += copylen;
+ blkoff -= copylen;
+ remaining -= copylen;
+ ipremain -= copylen;
+ }
+
+ fraglen = min(blkoff, remaining);
+ copylen = min(fraglen, ipremain);
+
+ /* If we may have the opportunity to share prepare a fragwalk. */
+ if (!skb_has_frag_list(skb) && !skb_has_frag_list(newskb) &&
+ (skb->head_frag || skb->len == skb->data_len) &&
+ skb->pp_recycle == newskb->pp_recycle) {
+ fragwalk = &_fragwalk;
+ iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
+ }
+
+ /* Try share then copy. */
+ if (fragwalk &&
+ iptfs_skb_can_add_frags(newskb, fragwalk, data, copylen)) {
+ iptfs_skb_add_frags(newskb, fragwalk, data, copylen);
+ } else {
+ /* copy fragment data into newskb */
+ if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
+ copylen)) {
+ XFRM_INC_STATS(xs_net(xtfs->x),
+ LINUX_MIB_XFRMINBUFFERERROR);
+ goto abandon;
+ }
+ }
+
+ if (copylen < ipremain) {
+ xtfs->ra_wantseq++;
+ } else {
+ /* We are done with packet reassembly! */
+ iptfs_reassem_done(xtfs);
+ iptfs_complete_inner_skb(xtfs->x, newskb);
+ list_add_tail(&newskb->list, list);
+ }
+
+ /* will continue on to new data block or end */
+ return data + fraglen;
+
+abandon:
+ if (xtfs->ra_newskb) {
+ iptfs_reassem_abort(xtfs);
+ } else {
+ xtfs->ra_runtlen = 0;
+ xtfs->ra_wantseq = 0;
+ }
+ /* skip past fragment, maybe to end */
+ return data + min(blkoff, remaining);
+}
+
+static bool __input_process_payload(struct xfrm_state *x, u32 data,
+ struct skb_seq_state *skbseq,
+ struct list_head *sublist)
+{
+ u8 hbytes[sizeof(struct ipv6hdr)];
+ struct iptfs_skb_frag_walk _fragwalk;
+ struct iptfs_skb_frag_walk *fragwalk = NULL;
+ struct sk_buff *defer, *first_skb, *next, *skb;
+ const unsigned char *old_mac;
+ struct xfrm_iptfs_data *xtfs;
+ struct iphdr *iph;
+ struct net *net;
+ u32 first_iplen, iphlen, iplen, remaining, tail;
+ u32 capturelen;
+ u64 seq;
+
+ xtfs = x->mode_data;
+ net = xs_net(x);
+ skb = skbseq->root_skb;
+ first_skb = NULL;
+ defer = NULL;
+
+ seq = __esp_seq(skb);
+
+ /* Save the old mac header if set */
+ old_mac = skb_mac_header_was_set(skb) ? skb_mac_header(skb) : NULL;
+
+ /* New packets */
+
+ tail = skb->len;
+ while (data < tail) {
+ __be16 protocol = 0;
+
+ /* Gather information on the next data block.
+ * `data` points to the start of the data block.
+ */
+ remaining = tail - data;
+
+ /* try and copy enough bytes to read length from ipv4/ipv6 */
+ iphlen = min_t(u32, remaining, 6);
+ if (skb_copy_seq_read(skbseq, data, hbytes, iphlen)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+ goto done;
+ }
+
+ iph = (struct iphdr *)hbytes;
+ if (iph->version == 0x4) {
+ /* must have at least tot_len field present */
+ if (remaining < 4) {
+ /* save the bytes we have, advance data and exit */
+ iptfs_input_save_runt(xtfs, seq, hbytes,
+ remaining);
+ data += remaining;
+ break;
+ }
+
+ iplen = be16_to_cpu(iph->tot_len);
+ iphlen = iph->ihl << 2;
+ protocol = cpu_to_be16(ETH_P_IP);
+ XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = iph->tos;
+ } else if (iph->version == 0x6) {
+ /* must have at least payload_len field present */
+ if (remaining < 6) {
+ /* save the bytes we have, advance data and exit */
+ iptfs_input_save_runt(xtfs, seq, hbytes,
+ remaining);
+ data += remaining;
+ break;
+ }
+
+ iplen = be16_to_cpu(((struct ipv6hdr *)hbytes)->payload_len);
+ iplen += sizeof(struct ipv6hdr);
+ iphlen = sizeof(struct ipv6hdr);
+ protocol = cpu_to_be16(ETH_P_IPV6);
+ XFRM_MODE_SKB_CB(skbseq->root_skb)->tos =
+ ipv6_get_dsfield((struct ipv6hdr *)iph);
+ } else if (iph->version == 0x0) {
+ /* pad */
+ data = tail;
+ break;
+ } else {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+ goto done;
+ }
+
+ if (unlikely(skbseq->stepped_offset)) {
+ /* We need to reset our seq read; it can't back up at
+ * this point.
+ */
+ struct sk_buff *save = skbseq->root_skb;
+
+ skb_abort_seq_read(skbseq);
+ skb_prepare_seq_read(save, data, tail, skbseq);
+ }
+
+ if (first_skb) {
+ skb = NULL;
+ } else {
+ first_skb = skb;
+ first_iplen = iplen;
+ fragwalk = NULL;
+
+ /* We are going to skip over `data` bytes to reach the
+ * start of the IP header of `iphlen` len for `iplen`
+ * inner packet.
+ */
+
+ if (skb_has_frag_list(skb)) {
+ defer = skb;
+ skb = NULL;
+ } else if (data + iphlen <= skb_headlen(skb) &&
+ /* make sure our header is 32-bit aligned? */
+ /* ((uintptr_t)(skb->data + data) & 0x3) == 0 && */
+ skb_tailroom(skb) + tail - data >= iplen) {
+ /* Reuse the received skb.
+ *
+ * We have enough headlen to pull past any
+ * initial fragment data, leaving at least the
+ * IP header in the linear buffer space.
+ *
+ * For linear buffer space we only require that
+ * linear buffer space is large enough to
+ * eventually hold the entire reassembled
+ * packet (by including tailroom in the check).
+ *
+ * For non-linear skbs tailroom is 0, so we only
+ * re-use if the entire packet is present
+ * already.
+ *
+ * NOTE: there are many more options for
+ * sharing, KISS for now. Also, this can produce
+ * skb's with the IP header unaligned to 32
+ * bits. If that ends up being a problem then a
+ * check should be added to the conditional
+ * above that the header lies on a 32-bit
+ * boundary as well.
+ */
+ skb_pull(skb, data);
+
+ /* our range just changed */
+ data = 0;
+ tail = skb->len;
+ remaining = skb->len;
+
+ skb->protocol = protocol;
+ skb_mac_header_rebuild(skb);
+ if (skb->mac_len)
+ eth_hdr(skb)->h_proto = skb->protocol;
+
+ /* all pointers could be changed now reset walk */
+ skb_abort_seq_read(skbseq);
+ skb_prepare_seq_read(skb, data, tail, skbseq);
+ } else if (skb->head_frag &&
+ /* We have the IP header right now */
+ remaining >= iphlen) {
+ fragwalk = &_fragwalk;
+ iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
+ defer = skb;
+ skb = NULL;
+ } else {
+ /* We couldn't reuse the input skb so allocate a
+ * new one.
+ */
+ defer = skb;
+ skb = NULL;
+ }
+
+ /* Don't trim `first_skb` until the end as we are
+ * walking that data now.
+ */
+ }
+
+ capturelen = min(iplen, remaining);
+ if (!skb) {
+ if (!fragwalk ||
+ /* Large enough to be worth sharing */
+ iplen < IPTFS_PKT_SHARE_MIN ||
+ /* Have IP header + some data to share. */
+ capturelen <= iphlen ||
+ /* Try creating skb and adding frags */
+ !(skb = iptfs_pskb_add_frags(first_skb, fragwalk,
+ data, capturelen,
+ skbseq, iphlen))) {
+ skb = iptfs_pskb_extract_seq(iplen, skbseq, data, capturelen);
+ }
+ if (!skb) {
+ /* skip to next packet or done */
+ data += capturelen;
+ continue;
+ }
+
+ skb->protocol = protocol;
+ if (old_mac) {
+ /* rebuild the mac header */
+ skb_set_mac_header(skb, -first_skb->mac_len);
+ memcpy(skb_mac_header(skb), old_mac, first_skb->mac_len);
+ eth_hdr(skb)->h_proto = skb->protocol;
+ }
+ }
+
+ data += capturelen;
+
+ if (skb->len < iplen) {
+ /* Start reassembly */
+ spin_lock(&xtfs->drop_lock);
+
+ xtfs->ra_newskb = skb;
+ xtfs->ra_wantseq = seq + 1;
+ if (!hrtimer_is_queued(&xtfs->drop_timer)) {
+ /* softirq blocked lest the timer fire and interrupt us */
+ hrtimer_start(&xtfs->drop_timer,
+ xtfs->drop_time_ns,
+ IPTFS_HRTIMER_MODE);
+ }
+
+ spin_unlock(&xtfs->drop_lock);
+
+ break;
+ }
+
+ iptfs_complete_inner_skb(x, skb);
+ list_add_tail(&skb->list, sublist);
+ }
+
+ if (data != tail)
+ /* this should not happen from the above code */
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINIPTFSERROR);
+
+ if (first_skb && first_iplen && !defer && first_skb != xtfs->ra_newskb) {
+ /* first_skb is queued b/c !defer and not partial */
+ if (pskb_trim(first_skb, first_iplen)) {
+ /* error trimming */
+ list_del(&first_skb->list);
+ defer = first_skb;
+ }
+ first_skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ /* Send the packets! */
+ list_for_each_entry_safe(skb, next, sublist, list) {
+ skb_list_del_init(skb);
+ if (xfrm_input(skb, 0, 0, -2))
+ kfree_skb(skb);
+ }
+done:
+ skb = skbseq->root_skb;
+ skb_abort_seq_read(skbseq);
+
+ if (defer) {
+ consume_skb(defer);
+ } else if (!first_skb) {
+ /* skb is the original passed in skb, but we didn't get far
+ * enough to process it as the first_skb; if we had, it would
+ * either be saved in ra_newskb, trimmed and sent on as an skb,
+ * or placed in defer to be freed.
+ */
+ kfree_skb(skb);
+ }
+ return true;
+}
+
+/**
+ * iptfs_input_ordered() - handle next in order IPTFS payload.
+ * @x: xfrm state
+ * @skb: current packet
+ *
+ * Process the IPTFS payload in `skb` and consume it afterwards.
+ */
+static void iptfs_input_ordered(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ip_iptfs_cc_hdr iptcch;
+ struct skb_seq_state skbseq;
+ struct list_head sublist; /* rename this; it's just a list */
+ struct xfrm_iptfs_data *xtfs;
+ struct ip_iptfs_hdr *ipth;
+ struct net *net;
+ u32 blkoff, data, remaining;
+ bool consumed = false;
+ u64 seq;
+
+ xtfs = x->mode_data;
+ net = xs_net(x);
+
+ seq = __esp_seq(skb);
+
+ /* Large enough to hold both types of header */
+ ipth = (struct ip_iptfs_hdr *)&iptcch;
+
+ skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
+
+ /* Get the IPTFS header and validate it */
+
+ if (skb_copy_seq_read(&skbseq, 0, ipth, sizeof(*ipth))) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+ goto done;
+ }
+ data = sizeof(*ipth);
+
+ trace_iptfs_egress_recv(skb, xtfs, be16_to_cpu(ipth->block_offset));
+
+ /* Set data past the basic header */
+ if (ipth->subtype == IPTFS_SUBTYPE_CC) {
+ /* Copy the rest of the CC header */
+ remaining = sizeof(iptcch) - sizeof(*ipth);
+ if (skb_copy_seq_read(&skbseq, data, ipth + 1, remaining)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+ goto done;
+ }
+ data += remaining;
+ } else if (ipth->subtype != IPTFS_SUBTYPE_BASIC) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ goto done;
+ }
+
+ if (ipth->flags != 0) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ goto done;
+ }
+
+ INIT_LIST_HEAD(&sublist);
+
+ /* Handle fragment at start of payload, and/or waiting reassembly. */
+
+ blkoff = ntohs(ipth->block_offset);
+ /* unlocked pre-check to avoid taking the lock when not needed */
+ if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
+ spin_lock(&xtfs->drop_lock);
+
+ /* check again after lock */
+ if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
+ data = iptfs_reassem_cont(xtfs, seq, &skbseq, skb, data,
+ blkoff, &sublist);
+ }
+
+ spin_unlock(&xtfs->drop_lock);
+ }
+
+ /* New packets */
+ consumed = __input_process_payload(x, data, &skbseq, &sublist);
+done:
+ if (!consumed) {
+ skb = skbseq.root_skb;
+ skb_abort_seq_read(&skbseq);
+ kfree_skb(skb);
+ }
+}
+
+/* ------------------------------- */
+/* Input (Egress) Re-ordering Code */
+/* ------------------------------- */
+
+static void __vec_shift(struct xfrm_iptfs_data *xtfs, u32 shift)
+{
+ u32 savedlen = xtfs->w_savedlen;
+
+ if (shift > savedlen)
+ shift = savedlen;
+ if (shift != savedlen)
+ memcpy(xtfs->w_saved, xtfs->w_saved + shift,
+ (savedlen - shift) * sizeof(*xtfs->w_saved));
+ memset(xtfs->w_saved + savedlen - shift, 0,
+ shift * sizeof(*xtfs->w_saved));
+ xtfs->w_savedlen -= shift;
+}
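+
+/* Shift example (illustrative): with w_savedlen == 3 and shift == 2, the skb
+ * in slot 2 moves to slot 0, the vacated tail slots are zeroed, and
+ * w_savedlen becomes 1.
+ */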
+
+static void __reorder_past(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
+ struct list_head *freelist)
+{
+ list_add_tail(&inskb->list, freelist);
+}
+
+static u32 __reorder_drop(struct xfrm_iptfs_data *xtfs, struct list_head *list)
+
+{
+ struct skb_wseq *s, *se;
+ const u32 savedlen = xtfs->w_savedlen;
+ time64_t now = ktime_get_raw_fast_ns();
+ u32 count = 0;
+ u32 scount = 0;
+
+ if (xtfs->w_saved[0].drop_time > now)
+ goto set_timer;
+
+ ++xtfs->w_wantseq;
+
+ /* Keep flushing packets until we reach a drop time greater than now. */
+ s = xtfs->w_saved;
+ se = s + savedlen;
+ do {
+ /* Walking past empty slots until we reach a packet */
+ for (; s < se && !s->skb; s++) {
+ if (s->drop_time > now)
+ goto outerdone;
+ }
+ /* Sending packets until we hit another empty slot. */
+ for (; s < se && s->skb; scount++, s++)
+ list_add_tail(&s->skb->list, list);
+ } while (s < se);
+outerdone:
+
+ count = s - xtfs->w_saved;
+ if (count) {
+ xtfs->w_wantseq += count;
+
+ /* Shift handled slots plus final empty slot into slot 0. */
+ __vec_shift(xtfs, count);
+ }
+
+ if (xtfs->w_savedlen) {
+set_timer:
+ /* Drifting is OK */
+ hrtimer_start(&xtfs->drop_timer,
+ xtfs->w_saved[0].drop_time - now,
+ IPTFS_HRTIMER_MODE);
+ }
+ return scount;
+}
+
+static void __reorder_this(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
+ struct list_head *list)
+{
+ struct skb_wseq *s, *se;
+ const u32 savedlen = xtfs->w_savedlen;
+ u32 count = 0;
+
+ /* Got what we wanted. */
+ list_add_tail(&inskb->list, list);
+ ++xtfs->w_wantseq;
+ if (!savedlen)
+ return;
+
+ /* Flush remaining consecutive packets. */
+
+ /* Keep sending until we hit another missed pkt. */
+ for (s = xtfs->w_saved, se = s + savedlen; s < se && s->skb; s++)
+ list_add_tail(&s->skb->list, list);
+ count = s - xtfs->w_saved;
+ if (count)
+ xtfs->w_wantseq += count;
+
+ /* Shift handled slots plus final empty slot into slot 0. */
+ __vec_shift(xtfs, count + 1);
+}
+
+/* Set the slot's drop time and all the empty slots below it until reaching a
+ * filled slot which will already be set.
+ */
+static void iptfs_set_window_drop_times(struct xfrm_iptfs_data *xtfs, int index)
+{
+ const u32 savedlen = xtfs->w_savedlen;
+ struct skb_wseq *s = xtfs->w_saved;
+ time64_t drop_time;
+
+ assert_spin_locked(&xtfs->drop_lock);
+
+ if (savedlen > index + 1) {
+ /* we are below another, our drop time and the timer are already set */
+ return;
+ }
+ /* we are the most future so get a new drop time. */
+ drop_time = ktime_get_raw_fast_ns();
+ drop_time += xtfs->drop_time_ns;
+
+ /* Walk back through the array setting drop times as we go */
+ s[index].drop_time = drop_time;
+ while (index-- > 0 && !s[index].skb)
+ s[index].drop_time = drop_time;
+
+ /* If we walked all the way back, schedule the drop timer if needed */
+ if (index == -1 && !hrtimer_is_queued(&xtfs->drop_timer))
+ hrtimer_start(&xtfs->drop_timer, xtfs->drop_time_ns,
+ IPTFS_HRTIMER_MODE);
+}
+
+static void __reorder_future_fits(struct xfrm_iptfs_data *xtfs,
+ struct sk_buff *inskb,
+ struct list_head *freelist)
+{
+ const u64 inseq = __esp_seq(inskb);
+ const u64 wantseq = xtfs->w_wantseq;
+ const u64 distance = inseq - wantseq;
+ const u32 savedlen = xtfs->w_savedlen;
+ const u32 index = distance - 1;
+
+ /* Handle future sequence number received which fits in the window.
+ *
+ * We know we don't have the seq we want so we won't be able to flush
+ * anything.
+ */
+
+ /* slot count is 4, saved size is 3, savedlen is 2
+ *
+ * "window boundary" is based on the fixed window size
+ * distance is also slot number
+ * index is an array index (i.e., - 1 of slot)
+ * : : - implicit NULL after array len
+ *
+ * +--------- used length (savedlen == 2)
+ * | +----- array size (nslots - 1 == 3)
+ * | | + window boundary (nslots == 4)
+ * V V | V
+ * |
+ * 0 1 2 3 | slot number
+ * --- 0 1 2 | array index
+ * [-] [b] : :| array
+ *
+ * "2" "3" "4" *5*| seq numbers
+ *
+ * We receive seq number 5
+ * distance == 3 [inseq(5) - w_wantseq(2)]
+ * index == 2 [distance(6) - 1]
+ */
+
+ if (xtfs->w_saved[index].skb) {
+ /* a dup of a future */
+ list_add_tail(&inskb->list, freelist);
+ return;
+ }
+
+ xtfs->w_saved[index].skb = inskb;
+ xtfs->w_savedlen = max(savedlen, index + 1);
+ iptfs_set_window_drop_times(xtfs, index);
+}
+
+static void __reorder_future_shifts(struct xfrm_iptfs_data *xtfs,
+ struct sk_buff *inskb,
+ struct list_head *list)
+{
+ const u32 nslots = xtfs->cfg.reorder_win_size + 1;
+ const u64 inseq = __esp_seq(inskb);
+ u32 savedlen = xtfs->w_savedlen;
+ u64 wantseq = xtfs->w_wantseq;
+ struct skb_wseq *wnext;
+ struct sk_buff *slot0;
+ u32 beyond, shifting, slot;
+ u64 distance;
+
+ /* Handle future sequence number received.
+ *
+ * IMPORTANT: we are at least advancing w_wantseq (i.e., wantseq) by 1
+ * b/c we are beyond the window boundary.
+ *
+ * We know we don't have the wantseq so that counts as a drop.
+ */
+
+ /* example: slot count is 4, array size is 3, savedlen is 2, slot 0 is
+ * the missing sequence number.
+ *
+ * the final slot at savedlen (index savedlen - 1) is always occupied.
+ *
+ * beyond is "beyond array size" not savedlen.
+ *
+ * +--------- array length (savedlen == 2)
+ * | +----- array size (nslots - 1 == 3)
+ * | | +- window boundary (nslots == 4)
+ * V V |
+ * |
+ * 0 1 2 3 | slot number
+ * --- 0 1 2 | array index
+ * [b] [c] : :| array
+ * |
+ * "2" "3" "4" "5"|*6* seq numbers
+ *
+ * We receive seq number 6
+ * distance == 4 [inseq(6) - w_wantseq(2)]
+ * newslot == distance
+ * index == 3 [distance(4) - 1]
+ * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
+ * shifting == 1 [min(savedlen(2), beyond(1))]
+ * slot0_skb == [b], and should match w_wantseq
+ *
+ * +--- window boundary (nslots == 4)
+ * 0 1 2 3 | 4 slot number
+ * --- 0 1 2 | 3 array index
+ * [b] : : : :| array
+ * "2" "3" "4" "5" *6* seq numbers
+ *
+ * We receive seq number 6
+ * distance == 4 [inseq(6) - w_wantseq(2)]
+ * newslot == distance
+ * index == 3 [distance(4) - 1]
+ * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
+ * shifting == 1 [min(savedlen(1), beyond(1))]
+ * slot0_skb == [b] and should match w_wantseq
+ *
+ * +-- window boundary (nslots == 4)
+ * 0 1 2 3 | 4 5 6 slot number
+ * --- 0 1 2 | 3 4 5 array index
+ * [-] [c] : :| array
+ * "2" "3" "4" "5" "6" "7" *8* seq numbers
+ *
+ * savedlen = 2, beyond = 3
+ * iter 1: slot0 == NULL, missed++, lastdrop = 2 (2+1-1), slot0 = [-]
+ * iter 2: slot0 == NULL, missed++, lastdrop = 3 (2+2-1), slot0 = [c]
+ * 2 < 3, extra = 1 (3-2), missed += extra, lastdrop = 4 (2+2+1-1)
+ *
+ * We receive seq number 8
+ * distance == 6 [inseq(8) - w_wantseq(2)]
+ * newslot == distance
+ * index == 5 [distance(6) - 1]
+ * beyond == 3 [newslot(6) - lastslot((nslots(4) - 1))]
+ * shifting == 2 [min(savedlen(2), beyond(3))]
+ *
+ * slot0_skb == NULL changed from [b] when "savedlen < beyond" is true.
+ */
+
+ /* Now send any packets that are being shifted out of saved, and account
+ * for missing packets that are exiting the window as we shift it.
+ */
+
+ distance = inseq - wantseq;
+ beyond = distance - (nslots - 1);
+
+ /* If savedlen > beyond we are shifting some, else all. */
+ shifting = min(savedlen, beyond);
+
+ /* slot0 is the buf that just shifted out and into slot0 */
+ slot0 = NULL;
+ wnext = xtfs->w_saved;
+ for (slot = 1; slot <= shifting; slot++, wnext++) {
+ /* handle what was in slot0 before we occupy it */
+ if (slot0)
+ list_add_tail(&slot0->list, list);
+ slot0 = wnext->skb;
+ wnext->skb = NULL;
+ }
+
+ /* slot0 is now either NULL (in which case it's what we are now waiting
+ * for), or a buf which we need to handle as if we had just received it;
+ * however, we may be advancing past that buffer as well.
+ */
+
+ /* Handle case where we need to shift more than we had saved, slot0 will
+ * be NULL iff savedlen is 0, otherwise slot0 will always be
+ * non-NULL b/c we shifted the final element, which is always set if
+ * there is any saved, into slot0.
+ */
+ if (savedlen < beyond) {
+ if (savedlen != 0)
+ list_add_tail(&slot0->list, list);
+ slot0 = NULL;
+ /* slot0 has had an empty slot pushed into it */
+ }
+
+ /* Remove the entries */
+ __vec_shift(xtfs, beyond);
+
+ /* Advance want seq */
+ xtfs->w_wantseq += beyond;
+
+ /* Process drops here when implementing congestion control */
+
+ /* We've shifted. plug the packet in at the end. */
+ xtfs->w_savedlen = nslots - 1;
+ xtfs->w_saved[xtfs->w_savedlen - 1].skb = inskb;
+ iptfs_set_window_drop_times(xtfs, xtfs->w_savedlen - 1);
+
+ /* if we don't have a slot0 then we must wait for it */
+ if (!slot0)
+ return;
+
+ /* If slot0, seq must match new want seq */
+
+ /* slot0 is valid, treat like we received expected. */
+ __reorder_this(xtfs, slot0, list);
+}
+
+/* Receive a new packet into the reorder window. Return a list of ordered
+ * packets from the window.
+ */
+static void iptfs_input_reorder(struct xfrm_iptfs_data *xtfs,
+ struct sk_buff *inskb, struct list_head *list,
+ struct list_head *freelist)
+{
+ const u32 nslots = xtfs->cfg.reorder_win_size + 1;
+ u64 inseq = __esp_seq(inskb);
+ u64 wantseq;
+
+ assert_spin_locked(&xtfs->drop_lock);
+
+ if (unlikely(!xtfs->w_seq_set)) {
+ xtfs->w_seq_set = true;
+ xtfs->w_wantseq = inseq;
+ }
+ wantseq = xtfs->w_wantseq;
+
+ if (likely(inseq == wantseq))
+ __reorder_this(xtfs, inskb, list);
+ else if (inseq < wantseq)
+ __reorder_past(xtfs, inskb, freelist);
+ else if ((inseq - wantseq) < nslots)
+ __reorder_future_fits(xtfs, inskb, freelist);
+ else
+ __reorder_future_shifts(xtfs, inskb, list);
+}
+
+/**
+ * iptfs_drop_timer() - Handle drop timer expiry.
+ * @me: the timer
+ *
+ * This is similar to our input function.
+ *
+ * The drop timer is set when we start an in progress reassembly, and also when
+ * we save a future packet in the window saved array.
+ *
+ * NOTE: packets in the saved window always have newer drop times the
+ * further they are in the future, i.e. for:
+ *
+ * if slots (S0, S1, ... Sn) and `Dn` is the drop time for slot `Sn`,
+ * then D(n-1) <= D(n).
+ *
+ * So, regardless of why the timer is firing, we can always discard any
+ * in-progress fragment: either it's the reassembly timer, or slot 0 is going
+ * to be dropped since S0 has the earliest drop time, and slot 0 holds the
+ * continuation fragment of the in-progress packet.
+ *
+ * Return: HRTIMER_NORESTART.
+ */
+static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me)
+{
+ struct sk_buff *skb, *next;
+ struct list_head list;
+ struct xfrm_iptfs_data *xtfs;
+ struct xfrm_state *x;
+ u32 count;
+
+ xtfs = container_of(me, typeof(*xtfs), drop_timer);
+ x = xtfs->x;
+
+ INIT_LIST_HEAD(&list);
+
+ spin_lock(&xtfs->drop_lock);
+
+ /* Drop any in progress packet */
+ skb = xtfs->ra_newskb;
+ xtfs->ra_newskb = NULL;
+
+ /* Now drop as many packets as we should from the reordering window
+ * saved array
+ */
+ count = xtfs->w_savedlen ? __reorder_drop(xtfs, &list) : 0;
+
+ spin_unlock(&xtfs->drop_lock);
+
+ if (skb)
+ kfree_skb_reason(skb, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
+
+ if (count) {
+ list_for_each_entry_safe(skb, next, &list, list) {
+ skb_list_del_init(skb);
+ iptfs_input_ordered(x, skb);
+ }
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * iptfs_input() - handle receipt of iptfs payload
+ * @x: xfrm state
+ * @skb: the packet
+ *
+ * We have an IPTFS payload; order it if needed, then process newly in-order
+ * packets.
+ *
+ * Return: -EINPROGRESS to inform xfrm_input to stop processing the skb.
+ */
+static int iptfs_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct list_head freelist, list;
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct sk_buff *next;
+
+ /* Fast path for no reorder window. */
+ if (xtfs->cfg.reorder_win_size == 0) {
+ iptfs_input_ordered(x, skb);
+ goto done;
+ }
+
+ /* Fetch list of in-order packets from the reordering window as well as
+ * a list of buffers we need to now free.
+ */
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&freelist);
+
+ spin_lock(&xtfs->drop_lock);
+ iptfs_input_reorder(xtfs, skb, &list, &freelist);
+ spin_unlock(&xtfs->drop_lock);
+
+ list_for_each_entry_safe(skb, next, &list, list) {
+ skb_list_del_init(skb);
+ iptfs_input_ordered(x, skb);
+ }
+
+ list_for_each_entry_safe(skb, next, &freelist, list) {
+ skb_list_del_init(skb);
+ kfree_skb(skb);
+ }
+done:
+ /* We have always dealt with the input SKB: either we are re-using it
+ * or we have freed it. Return -EINPROGRESS so that xfrm_input stops
+ * processing it.
+ */
+ return -EINPROGRESS;
+}
+
+/* ================================= */
+/* IPTFS Sending (ingress) Functions */
+/* ================================= */
+
+/* ------------------------- */
+/* Enqueue to send functions */
+/* ------------------------- */
+
+/**
+ * iptfs_enqueue() - enqueue packet if ok to send.
+ * @xtfs: xtfs state
+ * @skb: the packet
+ *
+ * Return: true if packet enqueued.
+ */
+static bool iptfs_enqueue(struct xfrm_iptfs_data *xtfs, struct sk_buff *skb)
+{
+ u64 newsz = xtfs->queue_size + skb->len;
+ struct iphdr *iph;
+
+ assert_spin_locked(&xtfs->x->lock);
+
+ if (newsz > xtfs->cfg.max_queue_size)
+ return false;
+
+ /* Set ECN CE if we are above our ECN queue threshold */
+ if (newsz > xtfs->ecn_queue_size) {
+ iph = ip_hdr(skb);
+ if (iph->version == 4)
+ IP_ECN_set_ce(iph);
+ else if (iph->version == 6)
+ IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ }
+
+ __skb_queue_tail(&xtfs->queue, skb);
+ xtfs->queue_size += skb->len;
+ return true;
+}
+
+static int iptfs_get_cur_pmtu(struct xfrm_state *x, struct xfrm_iptfs_data *xtfs,
+ struct sk_buff *skb)
+{
+ struct xfrm_dst *xdst = (struct xfrm_dst *)skb_dst(skb);
+ u32 payload_mtu = xtfs->payload_mtu;
+ u32 pmtu = __iptfs_get_inner_mtu(x, xdst->child_mtu_cached);
+
+ if (payload_mtu && payload_mtu < pmtu)
+ pmtu = payload_mtu;
+
+ return pmtu;
+}
+
+static int iptfs_is_too_big(struct sock *sk, struct sk_buff *skb, u32 pmtu)
+{
+ if (skb->len <= pmtu)
+ return 0;
+
+ /* We only send ICMP too big if the user has configured us as
+ * dont-fragment.
+ */
+ if (skb->dev)
+ XFRM_INC_STATS(dev_net(skb->dev), LINUX_MIB_XFRMOUTERROR);
+
+ if (sk)
+ xfrm_local_error(skb, pmtu);
+ else if (ip_hdr(skb)->version == 4)
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(pmtu));
+ else
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, pmtu);
+
+ return 1;
+}
+
+/* IPv4/IPv6 packet ingress to IPTFS tunnel, arrange to send in IPTFS payload
+ * (i.e., aggregating or fragmenting as appropriate).
+ * This is set in dst->output for an SA.
+ */
+static int iptfs_output_collect(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct sk_buff *segs, *nskb;
+ u32 pmtu = 0;
+ bool ok = true;
+ bool was_gso;
+
+ /* We have hooked into dst_entry->output which means we have skipped the
+ * protocol specific netfilter (see xfrm4_output, xfrm6_output).
+ * When our timer runs we will end up calling xfrm_output directly on
+ * the encapsulated traffic.
+ *
+ * For both cases this is the NF_INET_POST_ROUTING hook which allows
+ * changing the skb->dst entry, which then may not be xfrm based anymore,
+ * in which case a REROUTED flag is set and dst_output is called.
+ *
+ * For IPv6 we are also skipping fragmentation handling for local
+ * sockets, which may or may not be good depending on our tunnel DF
+ * setting. Normally with fragmentation supported we want to skip this
+ * fragmentation.
+ */
+
+ if (xtfs->cfg.dont_frag)
+ pmtu = iptfs_get_cur_pmtu(x, xtfs, skb);
+
+ /* Break apart GSO skbs. If the queue is nearing full then we want the
+ * accounting and queuing to be based on the individual packets not on the
+ * aggregate GSO buffer.
+ */
+ was_gso = skb_is_gso(skb);
+ if (!was_gso) {
+ segs = skb;
+ } else {
+ segs = skb_gso_segment(skb, 0);
+ if (IS_ERR_OR_NULL(segs)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ kfree_skb(skb);
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
+ return -EINVAL;
+ }
+ consume_skb(skb);
+ skb = NULL;
+ }
+
+ /* We can be running on multiple cores and from the network softirq or
+ * from user context depending on where the packet is coming from.
+ */
+ spin_lock_bh(&x->lock);
+
+ skb_list_walk_safe(segs, skb, nskb) {
+ skb_mark_not_on_list(skb);
+
+ /* Once we drop due to no queue space we continue to drop the
+ * rest of the packets from that GSO.
+ */
+ if (!ok) {
+nospace:
+ trace_iptfs_no_queue_space(skb, xtfs, pmtu, was_gso);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOQSPACE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_FULL_RING);
+ continue;
+ }
+
+ /* If the user indicated no iptfs fragmenting check before
+ * enqueue.
+ */
+ if (xtfs->cfg.dont_frag && iptfs_is_too_big(sk, skb, pmtu)) {
+ trace_iptfs_too_big(skb, xtfs, pmtu, was_gso);
+ kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
+ continue;
+ }
+
+ /* Enqueue to send in tunnel */
+ ok = iptfs_enqueue(xtfs, skb);
+ if (!ok)
+ goto nospace;
+
+ trace_iptfs_enqueue(skb, xtfs, pmtu, was_gso);
+ }
+
+ /* Start a delay timer if we don't have one yet */
+ if (!hrtimer_is_queued(&xtfs->iptfs_timer)) {
+ hrtimer_start(&xtfs->iptfs_timer, xtfs->init_delay_ns, IPTFS_HRTIMER_MODE);
+ xtfs->iptfs_settime = ktime_get_raw_fast_ns();
+ trace_iptfs_timer_start(xtfs, xtfs->init_delay_ns);
+ }
+
+ spin_unlock_bh(&x->lock);
+ return 0;
+}
+
+/* -------------------------- */
+/* Dequeue and send functions */
+/* -------------------------- */
+
+static void iptfs_output_prepare_skb(struct sk_buff *skb, u32 blkoff)
+{
+ struct ip_iptfs_hdr *h;
+ size_t hsz = sizeof(*h);
+
+ /* push the IPTFS header onto the front of the packet */
+ h = skb_push(skb, hsz);
+ memset(h, 0, hsz);
+ if (blkoff)
+ h->block_offset = htons(blkoff);
+
+ /* network_header currently points at the inner IP packet;
+ * move it to the iptfs header
+ */
+ skb->transport_header = skb->network_header;
+ skb->network_header -= hsz;
+
+ IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+}
+
+/**
+ * iptfs_copy_create_frag() - create an inner fragment skb.
+ * @st: The source packet data.
+ * @offset: offset in @st of the new fragment data.
+ * @copy_len: the amount of data to copy from @st.
+ *
+ * Create a new skb holding a single IPTFS inner packet fragment. @copy_len must
+ * not be greater than the max fragment size.
+ *
+ * Return: the new fragment skb or an ERR_PTR().
+ */
+static struct sk_buff *iptfs_copy_create_frag(struct skb_seq_state *st, u32 offset, u32 copy_len)
+{
+ struct sk_buff *src = st->root_skb;
+ struct sk_buff *skb;
+ int err;
+
+ skb = iptfs_alloc_skb(src, copy_len, true);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* Now copy `copy_len` data from src */
+ err = skb_copy_seq_read(st, offset, skb_put(skb, copy_len), copy_len);
+ if (err) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+
+/**
+ * iptfs_copy_create_frags() - create and send N-1 fragments of a larger skb.
+ * @skbp: the source packet skb (IN), skb holding the last fragment in
+ * the fragment stream (OUT).
+ * @xtfs: IPTFS SA state.
+ * @mtu: the max IPTFS fragment size.
+ *
+ * This function is responsible for fragmenting a larger inner packet into a
+ * sequence of IPTFS payload packets. The last fragment is returned rather than
+ * being sent so that the caller can append more inner packets (aggregation) if
+ * there is room.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int iptfs_copy_create_frags(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
+{
+ struct skb_seq_state skbseq;
+ struct list_head sublist;
+ struct sk_buff *skb = *skbp;
+ struct sk_buff *nskb = *skbp;
+ u32 copy_len, offset;
+ u32 to_copy = skb->len - mtu;
+ u32 blkoff = 0;
+ int err = 0;
+
+ INIT_LIST_HEAD(&sublist);
+
+ skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
+
+ /* A trimmed `skb` will be sent as the first fragment, later. */
+ offset = mtu;
+ to_copy = skb->len - offset;
+ while (to_copy) {
+ /* Send all but last fragment to allow agg. append */
+ trace_iptfs_first_fragmenting(nskb, mtu, to_copy, NULL);
+ list_add_tail(&nskb->list, &sublist);
+
+ /* FUTURE: if the packet has an odd/non-aligning length we could
+ * send less data in the penultimate fragment so that the last
+ * fragment then ends on an aligned boundary.
+ */
+ copy_len = min(to_copy, mtu);
+ nskb = iptfs_copy_create_frag(&skbseq, offset, copy_len);
+ if (IS_ERR(nskb)) {
+ XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMOUTERROR);
+ skb_abort_seq_read(&skbseq);
+ err = PTR_ERR(nskb);
+ nskb = NULL;
+ break;
+ }
+ iptfs_output_prepare_skb(nskb, to_copy);
+ offset += copy_len;
+ to_copy -= copy_len;
+ blkoff = to_copy;
+ }
+ skb_abort_seq_read(&skbseq);
+
+ /* return last fragment that will be unsent (or NULL) */
+ *skbp = nskb;
+ if (nskb)
+ trace_iptfs_first_final_fragment(nskb, mtu, blkoff, NULL);
+
+ /* trim the original skb to MTU */
+ if (!err)
+ err = pskb_trim(skb, mtu);
+
+ if (err) {
+ /* Free all frags. Don't bother sending a partial packet we will
+ * never complete.
+ */
+ kfree_skb(nskb);
+ list_for_each_entry_safe(skb, nskb, &sublist, list) {
+ skb_list_del_init(skb);
+ kfree_skb(skb);
+ }
+ return err;
+ }
+
+ /* prepare the initial fragment with an iptfs header */
+ iptfs_output_prepare_skb(skb, 0);
+
+ /* Send all but last fragment, if we fail to send a fragment then free
+ * the rest -- no point in sending a packet that can't be reassembled.
+ */
+ list_for_each_entry_safe(skb, nskb, &sublist, list) {
+ skb_list_del_init(skb);
+ if (!err)
+ err = xfrm_output(NULL, skb);
+ else
+ kfree_skb(skb);
+ }
+ if (err)
+ kfree_skb(*skbp);
+ return err;
+}
+
+/**
+ * iptfs_first_skb() - handle the first dequeued inner packet for output
+ * @skbp: the source packet skb (IN), skb holding the last fragment in
+ * the fragment stream (OUT).
+ * @xtfs: IPTFS SA state.
+ * @mtu: the max IPTFS fragment size.
+ *
+ * This function is responsible for fragmenting a larger inner packet into a
+ * sequence of IPTFS payload packets.
+ *
+ * The last fragment is returned rather than being sent so that the caller can
+ * append more inner packets (aggregation) if there is room.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
+{
+ struct sk_buff *skb = *skbp;
+ int err;
+
+ /* Classic ESP skips the don't fragment ICMP error if DF is clear on
+ * the inner packet or ignore_df is set. Otherwise it will send an ICMP
+ * or local error if the inner packet won't fit its MTU.
+ *
+ * With IPTFS we do not care about the inner packet DF bit. If the
+ * tunnel is configured to "don't fragment" we error back if things
+ * don't fit in our max packet size. Otherwise we iptfs-fragment as
+ * normal.
+ */
+
+ /* The opportunity for HW offload has ended */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ return err;
+ }
+
+ /* We've split gso up before queuing */
+
+ trace_iptfs_first_dequeue(skb, mtu, 0, ip_hdr(skb));
+
+ /* Consider the buffer Tx'd and no longer owned */
+ skb_orphan(skb);
+
+ /* Simple case -- it fits. `mtu` accounted for all the overhead
+ * including the basic IPTFS header.
+ */
+ if (skb->len <= mtu) {
+ iptfs_output_prepare_skb(skb, 0);
+ return 0;
+ }
+
+ return iptfs_copy_create_frags(skbp, xtfs, mtu);
+}
+
+static struct sk_buff **iptfs_rehome_fraglist(struct sk_buff **nextp, struct sk_buff *child)
+{
+ u32 fllen = 0;
+
+ /* It might be possible to account for a frag list in addition to page
+ * fragments if that is a valid state to be in. The page fragments'
+ * size should be kept as data_len so only the frag_list size is
+ * removed; this must be done above as well.
+ */
+ *nextp = skb_shinfo(child)->frag_list;
+ while (*nextp) {
+ fllen += (*nextp)->len;
+ nextp = &(*nextp)->next;
+ }
+ skb_frag_list_init(child);
+ child->len -= fllen;
+ child->data_len -= fllen;
+
+ return nextp;
+}
+
+static void iptfs_consume_frags(struct sk_buff *to, struct sk_buff *from)
+{
+ struct skb_shared_info *fromi = skb_shinfo(from);
+ struct skb_shared_info *toi = skb_shinfo(to);
+ unsigned int new_truesize;
+
+ /* If we have data in a head page, grab it */
+ if (!skb_headlen(from)) {
+ new_truesize = SKB_TRUESIZE(skb_end_offset(from));
+ } else {
+ iptfs_skb_head_to_frag(from, &toi->frags[toi->nr_frags]);
+ skb_frag_ref(to, toi->nr_frags++);
+ new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ }
+
+ /* Move any other page fragments rather than copy */
+ memcpy(&toi->frags[toi->nr_frags], fromi->frags,
+ sizeof(fromi->frags[0]) * fromi->nr_frags);
+ toi->nr_frags += fromi->nr_frags;
+ fromi->nr_frags = 0;
+ from->data_len = 0;
+ from->len = 0;
+ to->truesize += from->truesize - new_truesize;
+ from->truesize = new_truesize;
+
+ /* We are done with this SKB */
+ consume_skb(from);
+}
+
+static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct sk_buff *skb, *skb2, **nextp;
+ struct skb_shared_info *shi, *shi2;
+
+ /* If we are fragmenting due to a large inner packet we will output all
+ * the outer IPTFS packets required to contain the fragments of the
+ * single large inner packet. These outer packets need to be sent
+ * consecutively (ESP seq-wise). Since this output function is always
+ * running from a timer we do not need a lock to provide this guarantee.
+ * We will output our packets consecutively before the timer is allowed
+ * to run again on some other CPU.
+ */
+
+ while ((skb = __skb_dequeue(list))) {
+ u32 mtu = iptfs_get_cur_pmtu(x, xtfs, skb);
+ bool share_ok = true;
+ int remaining;
+
+ /* protocol comes to us cleared sometimes */
+ skb->protocol = x->outer_mode.family == AF_INET ? htons(ETH_P_IP) :
+ htons(ETH_P_IPV6);
+
+ if (skb->len > mtu && xtfs->cfg.dont_frag) {
+ /* We handle this case before enqueueing so we are only
+ * here b/c the MTU changed after enqueue but before
+ * dequeue; just drop these.
+ */
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
+
+ trace_iptfs_first_toobig(skb, mtu, 0, ip_hdr(skb));
+ kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
+ continue;
+ }
+
+ /* Convert first inner packet into an outer IPTFS packet,
+ * dealing with any fragmentation into multiple outer packets
+ * if necessary.
+ */
+ if (iptfs_first_skb(&skb, xtfs, mtu))
+ continue;
+
+ /* If fragmentation was required the returned skb is the last
+ * IPTFS fragment in the chain, and its IPTFS header blkoff has
+ * been set just past the end of the fragment data.
+ *
+ * In either case the space remaining to send more inner packet
+ * data is `mtu` - (skb->len - sizeof iptfs header). This is b/c
+ * the `mtu` value has the basic IPTFS header len accounted for,
+ * and we added that header to the skb so it is a part of
+ * skb->len, thus we subtract it from the skb length.
+ */
+ remaining = mtu - (skb->len - sizeof(struct ip_iptfs_hdr));
+
+ /* Re-home (un-nest) nested fragment lists. We need to do this
+ * b/c we will simply be appending any following aggregated
+ * inner packets using the frag list.
+ */
+ shi = skb_shinfo(skb);
+ nextp = &shi->frag_list;
+ while (*nextp) {
+ if (skb_has_frag_list(*nextp))
+ nextp = iptfs_rehome_fraglist(&(*nextp)->next, *nextp);
+ else
+ nextp = &(*nextp)->next;
+ }
+
+ if (shi->frag_list || skb_cloned(skb) || skb_shared(skb))
+ share_ok = false;
+
+ /* See if we have enough space to simply append.
+ *
+ * NOTE: Maybe do not append if we will be mis-aligned;
+ * SW-based endpoints will probably have to copy in this
+ * case.
+ */
+ while ((skb2 = skb_peek(list))) {
+ trace_iptfs_ingress_nth_peek(skb2, remaining);
+ if (skb2->len > remaining)
+ break;
+
+ __skb_unlink(skb2, list);
+
+ /* Consider the buffer Tx'd and no longer owned */
+ skb_orphan(skb);
+
+ /* If we don't have a cksum in the packet we need to add
+ * one before encapsulation.
+ */
+ if (skb2->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_help(skb2)) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
+ kfree_skb(skb2);
+ continue;
+ }
+ }
+
+ /* skb->pp_recycle is passed to __skb_frag_unref for all
+ * frag pages so we can only share pages with skbs that
+ * match ourselves.
+ */
+ shi2 = skb_shinfo(skb2);
+ if (share_ok &&
+ (shi2->frag_list ||
+ (!skb2->head_frag && skb_headlen(skb)) ||
+ skb->pp_recycle != skb2->pp_recycle ||
+ skb_zcopy(skb2) ||
+ (shi->nr_frags + shi2->nr_frags + 1 > MAX_SKB_FRAGS)))
+ share_ok = false;
+
+ /* Do accounting */
+ skb->data_len += skb2->len;
+ skb->len += skb2->len;
+ remaining -= skb2->len;
+
+ trace_iptfs_ingress_nth_add(skb2, share_ok);
+
+ if (share_ok) {
+ iptfs_consume_frags(skb, skb2);
+ } else {
+ /* Append to the frag_list */
+ *nextp = skb2;
+ nextp = &skb2->next;
+ if (skb_has_frag_list(skb2))
+ nextp = iptfs_rehome_fraglist(nextp,
+ skb2);
+ skb->truesize += skb2->truesize;
+ }
+ }
+
+ xfrm_output(NULL, skb);
+ }
+}
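
For illustration only (not part of the patch), a userspace model of the aggregation step above: after the first inner packet is encapsulated, `remaining` is the leftover payload budget and queued inner packets are appended greedily until one no longer fits. The 4-byte basic AGGFRAG header size used here is an assumption.

#include <stdio.h>

#define IPTFS_BASIC_HDR_LEN 4	/* assumed size of the basic (non-CC) AGGFRAG header */

/* Illustrative model of the append loop in iptfs_output_queued(); `mtu` is
 * the inner-data budget per outer packet (overhead already subtracted, as in
 * iptfs_get_cur_pmtu()), and `remaining` tracks the space left after the
 * first inner packet.
 */
int main(void)
{
	unsigned int mtu = 1400;	/* inner-data budget per outer packet */
	unsigned int outer_len = 400 + IPTFS_BASIC_HDR_LEN;	/* first inner pkt + header */
	unsigned int queued[] = { 300, 500, 600, 200 };	/* inner packets waiting on the queue */
	unsigned int remaining = mtu - (outer_len - IPTFS_BASIC_HDR_LEN);
	unsigned int i;

	for (i = 0; i < sizeof(queued) / sizeof(queued[0]); i++) {
		if (queued[i] > remaining) {
			printf("stop: %u byte packet does not fit (%u left)\n",
			       queued[i], remaining);
			break;
		}
		remaining -= queued[i];
		printf("appended %u bytes, %u bytes left\n", queued[i], remaining);
	}
	return 0;
}
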
+
+static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me)
+{
+ struct sk_buff_head list;
+ struct xfrm_iptfs_data *xtfs;
+ struct xfrm_state *x;
+ time64_t settime;
+
+ xtfs = container_of(me, typeof(*xtfs), iptfs_timer);
+ x = xtfs->x;
+
+ /* Process all the queued packets
+ *
+ * softirq execution order: timer > tasklet > hrtimer
+ *
+ * Network rx will have run before us giving one last chance to queue
+ * ingress packets for us to process and transmit.
+ */
+
+ spin_lock(&x->lock);
+ __skb_queue_head_init(&list);
+ skb_queue_splice_init(&xtfs->queue, &list);
+ xtfs->queue_size = 0;
+ settime = xtfs->iptfs_settime;
+ spin_unlock(&x->lock);
+
+ /* After the above unlock, packets can begin queuing again, and the
+ * timer can be set again, from another CPU either in softirq or user
+ * context (not from this one since we are running at softirq level
+ * already).
+ */
+
+ trace_iptfs_timer_expire(xtfs, (unsigned long long)(ktime_get_raw_fast_ns() - settime));
+
+ iptfs_output_queued(x, &list);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * iptfs_encap_add_ipv4() - add outer encaps
+ * @x: xfrm state
+ * @skb: the packet
+ *
+ * This was originally taken from xfrm4_tunnel_encap_add. The reason for the
+ * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
+ * the TOS/DSCP bits. It also sets the protocol to a different value and doesn't
+ * do anything with inner headers, as they aren't pointing into a normal IP
+ * singleton inner packet.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int iptfs_encap_add_ipv4(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct iphdr *top_iph;
+
+ skb_reset_inner_network_header(skb);
+ skb_reset_inner_transport_header(skb);
+
+ skb_set_network_header(skb, -(x->props.header_len - x->props.enc_hdr_len));
+ skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol);
+ skb->transport_header = skb->network_header + sizeof(*top_iph);
+
+ top_iph = ip_hdr(skb);
+ top_iph->ihl = 5;
+ top_iph->version = 4;
+ top_iph->protocol = IPPROTO_AGGFRAG;
+
+ /* As we have 0, fractional, 1 or N inner packets there's no obviously
+ * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
+ * 3.1.
+ */
+ top_iph->tos = 0;
+
+ top_iph->frag_off = htons(IP_DF);
+ top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
+ top_iph->saddr = x->props.saddr.a4;
+ top_iph->daddr = x->id.daddr.a4;
+ ip_select_ident(dev_net(dst->dev), skb, NULL);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * iptfs_encap_add_ipv6() - add outer encaps
+ * @x: xfrm state
+ * @skb: the packet
+ *
+ * This was originally taken from xfrm6_tunnel_encap_add. The reason for the
+ * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
+ * the flow label and TOS/DSCP bits. It also sets the protocol to a different
+ * value and doesn't do anything with inner headers as they aren't pointing into
+ * a normal IP singleton inner packet.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int iptfs_encap_add_ipv6(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct ipv6hdr *top_iph;
+ int dsfield;
+
+ skb_reset_inner_network_header(skb);
+ skb_reset_inner_transport_header(skb);
+
+ skb_set_network_header(skb, -x->props.header_len + x->props.enc_hdr_len);
+ skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr);
+ skb->transport_header = skb->network_header + sizeof(*top_iph);
+
+ top_iph = ipv6_hdr(skb);
+ top_iph->version = 6;
+ top_iph->priority = 0;
+ memset(top_iph->flow_lbl, 0, sizeof(top_iph->flow_lbl));
+ top_iph->nexthdr = IPPROTO_AGGFRAG;
+
+ /* As we have 0, fractional, 1 or N inner packets there's no obviously
+ * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
+ * 3.1.
+ */
+ dsfield = 0;
+ ipv6_change_dsfield(top_iph, 0, dsfield);
+
+ top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
+ top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+ top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
+
+ return 0;
+}
+#endif
+
+/**
+ * iptfs_prepare_output() - prepare the skb for output
+ * @x: xfrm state
+ * @skb: the packet
+ *
+ * Return: Error value, if 0 then skb values should be as follows:
+ * - transport_header should point at ESP header
+ * - network_header should point at Outer IP header
+ * - mac_header should point at protocol/nexthdr of the outer IP
+ */
+static int iptfs_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+ if (x->outer_mode.family == AF_INET)
+ return iptfs_encap_add_ipv4(x, skb);
+ if (x->outer_mode.family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ return iptfs_encap_add_ipv6(x, skb);
+#else
+ return -EAFNOSUPPORT;
+#endif
+ }
+ return -EOPNOTSUPP;
+}
+
+/* ========================== */
+/* State Management Functions */
+/* ========================== */
+
+/**
+ * __iptfs_get_inner_mtu() - return inner MTU with no fragmentation.
+ * @x: xfrm state.
+ * @outer_mtu: the outer mtu
+ *
+ * Return: Correct MTU taking into account the encap overhead.
+ */
+static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
+{
+ struct crypto_aead *aead;
+ u32 blksize;
+
+ aead = x->data;
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ return ((outer_mtu - x->props.header_len - crypto_aead_authsize(aead)) &
+ ~(blksize - 1)) - 2;
+}
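
A worked example of this calculation, as a standalone userspace program with assumed overhead values (illustrative only, not taken from the patch):

#include <stdio.h>

/* Worked example of the __iptfs_get_inner_mtu() formula with assumed values:
 * a 1500-byte outer MTU, 78 bytes of outer-IP/ESP/IPTFS overhead in
 * props.header_len, a 16-byte ICV and a 16-byte cipher block size (already a
 * multiple of 4, so the ALIGN() above is a no-op here).
 */
int main(void)
{
	unsigned int outer_mtu = 1500;
	unsigned int header_len = 78;	/* assumed x->props.header_len */
	unsigned int authsize = 16;	/* assumed ICV length */
	unsigned int blksize = 16;	/* assumed AEAD block size */
	unsigned int inner;

	inner = ((outer_mtu - header_len - authsize) & ~(blksize - 1)) - 2;
	printf("inner MTU: %u\n", inner);	/* (1406 & ~15) - 2 = 1390 */
	return 0;
}
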
+
+/**
+ * iptfs_get_inner_mtu() - return the inner MTU for an IPTFS xfrm.
+ * @x: xfrm state.
+ * @outer_mtu: Outer MTU for the encapsulated packet.
+ *
+ * Return: Correct MTU taking into account the encap overhead.
+ */
+static u32 iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+
+ /* Without dont-frag there is no fixed inner MTU limit */
+ if (!xtfs->cfg.dont_frag)
+ return x->outer_mode.family == AF_INET ? IP_MAX_MTU : IP6_MAX_MTU;
+ return __iptfs_get_inner_mtu(x, outer_mtu);
+}
+
+/**
+ * iptfs_user_init() - initialize the SA with IPTFS options from netlink.
+ * @net: the net data
+ * @x: xfrm state
+ * @attrs: netlink attributes
+ * @extack: extack return data
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int iptfs_user_init(struct net *net, struct xfrm_state *x,
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct xfrm_iptfs_config *xc;
+ u64 q;
+
+ xc = &xtfs->cfg;
+ xc->max_queue_size = IPTFS_DEFAULT_MAX_QUEUE_SIZE;
+ xc->reorder_win_size = IPTFS_DEFAULT_REORDER_WINDOW;
+ xtfs->drop_time_ns = IPTFS_DEFAULT_DROP_TIME_USECS * NSECS_IN_USEC;
+ xtfs->init_delay_ns = IPTFS_DEFAULT_INIT_DELAY_USECS * NSECS_IN_USEC;
+
+ if (attrs[XFRMA_IPTFS_DONT_FRAG])
+ xc->dont_frag = true;
+ if (attrs[XFRMA_IPTFS_REORDER_WINDOW])
+ xc->reorder_win_size =
+ nla_get_u16(attrs[XFRMA_IPTFS_REORDER_WINDOW]);
+ /* saved array is for saving 1..N seq nums from wantseq */
+ if (xc->reorder_win_size) {
+ xtfs->w_saved = kcalloc(xc->reorder_win_size,
+ sizeof(*xtfs->w_saved), GFP_KERNEL);
+ if (!xtfs->w_saved) {
+ NL_SET_ERR_MSG(extack, "Cannot alloc reorder window");
+ return -ENOMEM;
+ }
+ }
+ if (attrs[XFRMA_IPTFS_PKT_SIZE]) {
+ xc->pkt_size = nla_get_u32(attrs[XFRMA_IPTFS_PKT_SIZE]);
+ if (!xc->pkt_size) {
+ xtfs->payload_mtu = 0;
+ } else if (xc->pkt_size > x->props.header_len) {
+ xtfs->payload_mtu = xc->pkt_size - x->props.header_len;
+ } else {
+ NL_SET_ERR_MSG(extack,
+ "Packet size must be 0 or greater than IPTFS/ESP header length");
+ return -EINVAL;
+ }
+ }
+ if (attrs[XFRMA_IPTFS_MAX_QSIZE])
+ xc->max_queue_size = nla_get_u32(attrs[XFRMA_IPTFS_MAX_QSIZE]);
+ if (attrs[XFRMA_IPTFS_DROP_TIME])
+ xtfs->drop_time_ns =
+ (u64)nla_get_u32(attrs[XFRMA_IPTFS_DROP_TIME]) *
+ NSECS_IN_USEC;
+ if (attrs[XFRMA_IPTFS_INIT_DELAY])
+ xtfs->init_delay_ns =
+ (u64)nla_get_u32(attrs[XFRMA_IPTFS_INIT_DELAY]) * NSECS_IN_USEC;
+
+ q = (u64)xc->max_queue_size * 95;
+ do_div(q, 100);
+ xtfs->ecn_queue_size = (u32)q;
+
+ return 0;
+}
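
To make the derived values concrete, a small userspace recap (illustrative inputs only, not part of the patch) of the microsecond-to-nanosecond conversions and the 95% ECN threshold computed above:

#include <stdio.h>
#include <stdint.h>

#define NSECS_IN_USEC 1000ULL

/* Recap of the values derived in iptfs_user_init() with made-up attribute
 * inputs: times arrive in microseconds and are stored in nanoseconds, and the
 * ECN threshold is 95% of the maximum queue size.
 */
int main(void)
{
	uint32_t max_queue_size = 1024 * 1024;	/* XFRMA_IPTFS_MAX_QSIZE, bytes */
	uint32_t drop_time_us = 1000;		/* XFRMA_IPTFS_DROP_TIME */
	uint32_t init_delay_us = 0;		/* XFRMA_IPTFS_INIT_DELAY */
	uint64_t drop_time_ns = (uint64_t)drop_time_us * NSECS_IN_USEC;
	uint64_t init_delay_ns = (uint64_t)init_delay_us * NSECS_IN_USEC;
	uint32_t ecn_queue_size = (uint32_t)((uint64_t)max_queue_size * 95 / 100);

	printf("drop_time_ns=%llu init_delay_ns=%llu ecn_queue_size=%u\n",
	       (unsigned long long)drop_time_ns,
	       (unsigned long long)init_delay_ns, ecn_queue_size);
	return 0;
}
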
+
+static unsigned int iptfs_sa_len(const struct xfrm_state *x)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct xfrm_iptfs_config *xc = &xtfs->cfg;
+ unsigned int l = 0;
+
+ if (x->dir == XFRM_SA_DIR_IN) {
+ l += nla_total_size(sizeof(u32)); /* drop time usec */
+ l += nla_total_size(sizeof(xc->reorder_win_size));
+ } else {
+ if (xc->dont_frag)
+ l += nla_total_size(0); /* dont-frag flag */
+ l += nla_total_size(sizeof(u32)); /* init delay usec */
+ l += nla_total_size(sizeof(xc->max_queue_size));
+ l += nla_total_size(sizeof(xc->pkt_size));
+ }
+
+ return l;
+}
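
The attribute sizing above can be checked by hand with the standard netlink attribute layout (4-byte header, 4-byte alignment); a userspace recomputation (not part of the patch) for an output-direction SA with dont-frag set:

#include <stdio.h>
#include <stdint.h>

#define NLA_HDRLEN 4
#define NLA_ALIGN(len) (((len) + 3U) & ~3U)
#define nla_total_size(payload) NLA_ALIGN(NLA_HDRLEN + (payload))

/* Recomputes iptfs_sa_len() for the output direction: each attribute is a
 * 4-byte header plus its payload, padded to a 4-byte boundary.
 */
int main(void)
{
	unsigned int l = 0;

	l += nla_total_size(0);			/* XFRMA_IPTFS_DONT_FRAG flag */
	l += nla_total_size(sizeof(uint32_t));	/* init delay, usec */
	l += nla_total_size(sizeof(uint32_t));	/* max queue size */
	l += nla_total_size(sizeof(uint32_t));	/* packet size */
	printf("attribute space needed: %u bytes\n", l);	/* 4 + 3 * 8 = 28 */
	return 0;
}
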
+
+static int iptfs_copy_to_user(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct xfrm_iptfs_config *xc = &xtfs->cfg;
+ int ret = 0;
+ u64 q;
+
+ if (x->dir == XFRM_SA_DIR_IN) {
+ q = xtfs->drop_time_ns;
+ do_div(q, NSECS_IN_USEC);
+ ret = nla_put_u32(skb, XFRMA_IPTFS_DROP_TIME, q);
+ if (ret)
+ return ret;
+
+ ret = nla_put_u16(skb, XFRMA_IPTFS_REORDER_WINDOW,
+ xc->reorder_win_size);
+ } else {
+ if (xc->dont_frag) {
+ ret = nla_put_flag(skb, XFRMA_IPTFS_DONT_FRAG);
+ if (ret)
+ return ret;
+ }
+
+ q = xtfs->init_delay_ns;
+ do_div(q, NSECS_IN_USEC);
+ ret = nla_put_u32(skb, XFRMA_IPTFS_INIT_DELAY, q);
+ if (ret)
+ return ret;
+
+ ret = nla_put_u32(skb, XFRMA_IPTFS_MAX_QSIZE, xc->max_queue_size);
+ if (ret)
+ return ret;
+
+ ret = nla_put_u32(skb, XFRMA_IPTFS_PKT_SIZE, xc->pkt_size);
+ }
+
+ return ret;
+}
+
+static void __iptfs_init_state(struct xfrm_state *x,
+ struct xfrm_iptfs_data *xtfs)
+{
+ __skb_queue_head_init(&xtfs->queue);
+ hrtimer_init(&xtfs->iptfs_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
+ xtfs->iptfs_timer.function = iptfs_delay_timer;
+
+ spin_lock_init(&xtfs->drop_lock);
+ hrtimer_init(&xtfs->drop_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
+ xtfs->drop_timer.function = iptfs_drop_timer;
+
+ /* Modify type (esp) adjustment values */
+
+ if (x->props.family == AF_INET)
+ x->props.header_len += sizeof(struct iphdr) + sizeof(struct ip_iptfs_hdr);
+ else if (x->props.family == AF_INET6)
+ x->props.header_len += sizeof(struct ipv6hdr) + sizeof(struct ip_iptfs_hdr);
+ x->props.enc_hdr_len = sizeof(struct ip_iptfs_hdr);
+
+ /* Always keep a module reference when x->mode_data is set */
+ __module_get(x->mode_cbs->owner);
+
+ x->mode_data = xtfs;
+ xtfs->x = x;
+}
+
+static int iptfs_clone_state(struct xfrm_state *x, struct xfrm_state *orig)
+{
+ struct xfrm_iptfs_data *xtfs;
+
+ xtfs = kmemdup(orig->mode_data, sizeof(*xtfs), GFP_KERNEL);
+ if (!xtfs)
+ return -ENOMEM;
+
+ x->mode_data = xtfs;
+ xtfs->x = x;
+
+ xtfs->ra_newskb = NULL;
+ if (xtfs->cfg.reorder_win_size) {
+ xtfs->w_saved = kcalloc(xtfs->cfg.reorder_win_size,
+ sizeof(*xtfs->w_saved), GFP_KERNEL);
+ if (!xtfs->w_saved) {
+ kfree_sensitive(xtfs);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int iptfs_init_state(struct xfrm_state *x)
+{
+ struct xfrm_iptfs_data *xtfs;
+
+ if (x->mode_data) {
+ /* We have arrived here from xfrm_state_clone() */
+ xtfs = x->mode_data;
+ } else {
+ xtfs = kzalloc(sizeof(*xtfs), GFP_KERNEL);
+ if (!xtfs)
+ return -ENOMEM;
+ }
+
+ __iptfs_init_state(x, xtfs);
+
+ return 0;
+}
+
+static void iptfs_destroy_state(struct xfrm_state *x)
+{
+ struct xfrm_iptfs_data *xtfs = x->mode_data;
+ struct sk_buff_head list;
+ struct skb_wseq *s, *se;
+ struct sk_buff *skb;
+
+ if (!xtfs)
+ return;
+
+ spin_lock_bh(&xtfs->x->lock);
+ hrtimer_cancel(&xtfs->iptfs_timer);
+ __skb_queue_head_init(&list);
+ skb_queue_splice_init(&xtfs->queue, &list);
+ spin_unlock_bh(&xtfs->x->lock);
+
+ while ((skb = __skb_dequeue(&list)))
+ kfree_skb(skb);
+
+ spin_lock_bh(&xtfs->drop_lock);
+ hrtimer_cancel(&xtfs->drop_timer);
+ spin_unlock_bh(&xtfs->drop_lock);
+
+ if (xtfs->ra_newskb)
+ kfree_skb(xtfs->ra_newskb);
+
+ for (s = xtfs->w_saved, se = s + xtfs->w_savedlen; s < se; s++) {
+ if (s->skb)
+ kfree_skb(s->skb);
+ }
+
+ kfree_sensitive(xtfs->w_saved);
+ kfree_sensitive(xtfs);
+
+ module_put(x->mode_cbs->owner);
+}
+
+static const struct xfrm_mode_cbs iptfs_mode_cbs = {
+ .owner = THIS_MODULE,
+ .init_state = iptfs_init_state,
+ .clone_state = iptfs_clone_state,
+ .destroy_state = iptfs_destroy_state,
+ .user_init = iptfs_user_init,
+ .copy_to_user = iptfs_copy_to_user,
+ .sa_len = iptfs_sa_len,
+ .get_inner_mtu = iptfs_get_inner_mtu,
+ .input = iptfs_input,
+ .output = iptfs_output_collect,
+ .prepare_output = iptfs_prepare_output,
+};
+
+static int __init xfrm_iptfs_init(void)
+{
+ int err;
+
+ pr_info("xfrm_iptfs: IPsec IP-TFS tunnel mode module\n");
+
+ err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &iptfs_mode_cbs);
+ if (err < 0)
+ pr_info("%s: can't register IP-TFS\n", __func__);
+
+ return err;
+}
+
+static void __exit xfrm_iptfs_fini(void)
+{
+ xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
+}
+
+module_init(xfrm_iptfs_init);
+module_exit(xfrm_iptfs_fini);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IP-TFS support for xfrm ipsec tunnels");
diff --git a/net/xfrm/xfrm_nat_keepalive.c b/net/xfrm/xfrm_nat_keepalive.c
new file mode 100644
index 000000000000..82f0a301683f
--- /dev/null
+++ b/net/xfrm/xfrm_nat_keepalive.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * xfrm_nat_keepalive.c
+ *
+ * (c) 2024 Eyal Birger <eyal.birger@gmail.com>
+ */
+
+#include <net/inet_common.h>
+#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
+
+static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4);
+#if IS_ENABLED(CONFIG_IPV6)
+static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6);
+#endif
+
+struct nat_keepalive {
+ struct net *net;
+ u16 family;
+ xfrm_address_t saddr;
+ xfrm_address_t daddr;
+ __be16 encap_sport;
+ __be16 encap_dport;
+ __u32 smark;
+};
+
+static void nat_keepalive_init(struct nat_keepalive *ka, struct xfrm_state *x)
+{
+ ka->net = xs_net(x);
+ ka->family = x->props.family;
+ ka->saddr = x->props.saddr;
+ ka->daddr = x->id.daddr;
+ ka->encap_sport = x->encap->encap_sport;
+ ka->encap_dport = x->encap->encap_dport;
+ ka->smark = xfrm_smark_get(0, x);
+}
+
+static int nat_keepalive_send_ipv4(struct sk_buff *skb,
+ struct nat_keepalive *ka)
+{
+ struct net *net = ka->net;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct sock *sk;
+ __u8 tos = 0;
+ int err;
+
+ flowi4_init_output(&fl4, 0 /* oif */, skb->mark, tos,
+ RT_SCOPE_UNIVERSE, IPPROTO_UDP, 0,
+ ka->daddr.a4, ka->saddr.a4, ka->encap_dport,
+ ka->encap_sport, sock_net_uid(net, NULL));
+
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ skb_dst_set(skb, &rt->dst);
+
+ sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4);
+ sock_net_set(sk, net);
+ err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos);
+ sock_net_set(sk, &init_net);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int nat_keepalive_send_ipv6(struct sk_buff *skb,
+ struct nat_keepalive *ka,
+ struct udphdr *uh)
+{
+ struct net *net = ka->net;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ struct sock *sk;
+ __wsum csum;
+ int err;
+
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = csum_ipv6_magic(&ka->saddr.in6, &ka->daddr.in6,
+ skb->len, IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_mark = skb->mark;
+ fl6.saddr = ka->saddr.in6;
+ fl6.daddr = ka->daddr.in6;
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_sport = ka->encap_sport;
+ fl6.fl6_dport = ka->encap_dport;
+
+ sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6);
+ sock_net_set(sk, net);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ skb_dst_set(skb, dst);
+ err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0);
+ sock_net_set(sk, &init_net);
+ return err;
+}
+#endif
+
+static void nat_keepalive_send(struct nat_keepalive *ka)
+{
+ const int nat_ka_hdrs_len = max(sizeof(struct iphdr),
+ sizeof(struct ipv6hdr)) +
+ sizeof(struct udphdr);
+ const u8 nat_ka_payload = 0xFF;
+ int err = -EAFNOSUPPORT;
+ struct sk_buff *skb;
+ struct udphdr *uh;
+
+ skb = alloc_skb(nat_ka_hdrs_len + sizeof(nat_ka_payload), GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+
+ skb_reserve(skb, nat_ka_hdrs_len);
+
+ skb_put_u8(skb, nat_ka_payload);
+
+ uh = skb_push(skb, sizeof(*uh));
+ uh->source = ka->encap_sport;
+ uh->dest = ka->encap_dport;
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ skb->mark = ka->smark;
+
+ switch (ka->family) {
+ case AF_INET:
+ err = nat_keepalive_send_ipv4(skb, ka);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ err = nat_keepalive_send_ipv6(skb, ka, uh);
+ break;
+#endif
+ }
+ if (err)
+ kfree_skb(skb);
+}
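
On the wire this keepalive is simply a UDP datagram between the ESP-in-UDP encapsulation ports carrying the single octet 0xFF (RFC 3948, section 2.3). A userspace sketch of an equivalent packet, with made-up addresses and the default port 4500 (not part of the patch):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sends one NAT-T style keepalive byte over a plain UDP socket. */
int main(void)
{
	struct sockaddr_in src = { .sin_family = AF_INET, .sin_port = htons(4500) };
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(4500) };
	unsigned char payload = 0xFF;
	int fd;

	inet_pton(AF_INET, "0.0.0.0", &src.sin_addr);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* documentation address */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
		perror("socket/bind");
		return 1;
	}
	if (sendto(fd, &payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}
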
+
+struct nat_keepalive_work_ctx {
+ time64_t next_run;
+ time64_t now;
+};
+
+static int nat_keepalive_work_single(struct xfrm_state *x, int count, void *ptr)
+{
+ struct nat_keepalive_work_ctx *ctx = ptr;
+ bool send_keepalive = false;
+ struct nat_keepalive ka;
+ time64_t next_run;
+ u32 interval;
+ int delta;
+
+ interval = x->nat_keepalive_interval;
+ if (!interval)
+ return 0;
+
+ spin_lock(&x->lock);
+
+ delta = (int)(ctx->now - x->lastused);
+ if (delta < interval) {
+ x->nat_keepalive_expiration = ctx->now + interval - delta;
+ next_run = x->nat_keepalive_expiration;
+ } else if (x->nat_keepalive_expiration > ctx->now) {
+ next_run = x->nat_keepalive_expiration;
+ } else {
+ next_run = ctx->now + interval;
+ nat_keepalive_init(&ka, x);
+ send_keepalive = true;
+ }
+
+ spin_unlock(&x->lock);
+
+ if (send_keepalive)
+ nat_keepalive_send(&ka);
+
+ if (!ctx->next_run || next_run < ctx->next_run)
+ ctx->next_run = next_run;
+ return 0;
+}
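
Not part of the patch: a userspace model of the decision above. A keepalive is only emitted when the SA has been idle for at least the configured interval and no later expiration is already pending; otherwise only the next wakeup time is advanced.

#include <stdio.h>
#include <time.h>

/* Mirrors the branch structure of nat_keepalive_work_single(), in seconds. */
static long long decide(long long now, long long lastused, long long expiration,
			unsigned int interval, int *send)
{
	long long delta = now - lastused;

	*send = 0;
	if (delta < interval)
		return now + interval - delta;	/* traffic seen recently, just rearm */
	if (expiration > now)
		return expiration;		/* an earlier pass already scheduled us */
	*send = 1;				/* idle and due: emit a keepalive */
	return now + interval;
}

int main(void)
{
	long long now = time(NULL);
	long long next;
	int send;

	next = decide(now, now - 5, 0, 20, &send);
	printf("send=%d next_run in %llds\n", send, next - now);	/* send=0, 15s */
	next = decide(now, now - 30, 0, 20, &send);
	printf("send=%d next_run in %llds\n", send, next - now);	/* send=1, 20s */
	return 0;
}
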
+
+static void nat_keepalive_work(struct work_struct *work)
+{
+ struct nat_keepalive_work_ctx ctx;
+ struct xfrm_state_walk walk;
+ struct net *net;
+
+ ctx.next_run = 0;
+ ctx.now = ktime_get_real_seconds();
+
+ net = container_of(work, struct net, xfrm.nat_keepalive_work.work);
+ xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
+ xfrm_state_walk(net, &walk, nat_keepalive_work_single, &ctx);
+ xfrm_state_walk_done(&walk, net);
+ if (ctx.next_run)
+ schedule_delayed_work(&net->xfrm.nat_keepalive_work,
+ (ctx.next_run - ctx.now) * HZ);
+}
+
+static int nat_keepalive_sk_init(struct sock * __percpu *socks,
+ unsigned short family)
+{
+ struct sock *sk;
+ int err, i;
+
+ for_each_possible_cpu(i) {
+ err = inet_ctl_sock_create(&sk, family, SOCK_RAW, IPPROTO_UDP,
+ &init_net);
+ if (err < 0)
+ goto err;
+
+ *per_cpu_ptr(socks, i) = sk;
+ }
+
+ return 0;
+err:
+ for_each_possible_cpu(i)
+ inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+ return err;
+}
+
+static void nat_keepalive_sk_fini(struct sock * __percpu *socks)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+}
+
+void xfrm_nat_keepalive_state_updated(struct xfrm_state *x)
+{
+ struct net *net;
+
+ if (!x->nat_keepalive_interval)
+ return;
+
+ net = xs_net(x);
+ schedule_delayed_work(&net->xfrm.nat_keepalive_work, 0);
+}
+
+int __net_init xfrm_nat_keepalive_net_init(struct net *net)
+{
+ INIT_DELAYED_WORK(&net->xfrm.nat_keepalive_work, nat_keepalive_work);
+ return 0;
+}
+
+int xfrm_nat_keepalive_net_fini(struct net *net)
+{
+ cancel_delayed_work_sync(&net->xfrm.nat_keepalive_work);
+ return 0;
+}
+
+int xfrm_nat_keepalive_init(unsigned short family)
+{
+ int err = -EAFNOSUPPORT;
+
+ switch (family) {
+ case AF_INET:
+ err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv4, PF_INET);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv6, PF_INET6);
+ break;
+#endif
+ }
+
+ if (err)
+ pr_err("xfrm nat keepalive init: failed to init err:%d\n", err);
+ return err;
+}
+EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_init);
+
+void xfrm_nat_keepalive_fini(unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ nat_keepalive_sk_fini(&nat_keepalive_sk_ipv4);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ nat_keepalive_sk_fini(&nat_keepalive_sk_ipv6);
+ break;
+#endif
+ }
+}
+EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_fini);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index e5722c95b8bb..3cabc87978dd 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -472,6 +472,8 @@ static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
WARN_ON_ONCE(1);
break;
default:
+ if (x->mode_cbs && x->mode_cbs->prepare_output)
+ return x->mode_cbs->prepare_output(x, skb);
WARN_ON_ONCE(1);
break;
}
@@ -610,6 +612,40 @@ out:
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
+static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct net *net = xs_net(x);
+ int err;
+
+ dst = skb_dst_pop(skb);
+ if (!dst) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+ }
+ skb_dst_set(skb, dst);
+ nf_reset_ct(skb);
+
+ err = skb_dst(skb)->ops->local_out(net, sk, skb);
+ if (unlikely(err != 1)) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* In transport mode, the network destination is
+ * directly reachable, while in tunnel mode the
+ * inner packet's network may not be. With packet
+ * offload, the HW is responsible for hard header
+ * mangling, so xmit the skb directly to the
+ * netdevice.
+ */
+ skb->dev = x->xso.dev;
+ __skb_push(skb, skb->dev->hard_header_len);
+ return dev_queue_xmit(skb);
+}
+
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return xfrm_output_resume(sk, skb, 1);
@@ -675,6 +711,10 @@ static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
return;
}
+ if (x->outer_mode.encap == XFRM_MODE_IPTFS) {
+ xo->inner_ipproto = IPPROTO_AGGFRAG;
+ return;
+ }
/* non-Tunnel Mode */
if (!skb->encapsulation)
@@ -729,6 +769,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
return -EHOSTUNREACH;
}
+ /* Exclusive direct xmit for tunnel mode, as
+ * some filtering or matching rules may apply
+ * in transport mode.
+ */
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ return xfrm_dev_direct_output(sk, x, skb);
+
return xfrm_output_resume(sk, skb, 0);
}
@@ -752,7 +799,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
skb->encapsulation = 1;
if (skb_is_gso(skb)) {
- if (skb->inner_protocol)
+ if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL)
return xfrm_output_gso(net, sk, skb);
skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
@@ -796,7 +843,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
!skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
skb->protocol = htons(ETH_P_IP);
- if (skb->sk)
+ if (skb->sk && sk_fullsock(skb->sk))
xfrm_local_error(skb, mtu);
else
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -832,6 +879,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
struct dst_entry *dst = skb_dst(skb);
+ struct sock *sk = skb_to_full_sk(skb);
if (skb->ignore_df)
goto out;
@@ -846,9 +894,9 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
skb->dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
- if (xfrm6_local_dontfrag(skb->sk))
+ if (xfrm6_local_dontfrag(sk))
ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
- else if (skb->sk)
+ else if (sk)
xfrm_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index da6ecc6b3e15..6551e588fe52 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -29,6 +29,7 @@
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
+#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
@@ -44,6 +45,7 @@
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif
+#include <net/inet_dscp.h>
#include "xfrm_hash.h"
@@ -108,7 +110,11 @@ struct xfrm_pol_inexact_node {
* 4. saddr:any list from saddr tree
*
* This result set then needs to be searched for the policy with
- * the lowest priority. If two results have same prio, youngest one wins.
+ * the lowest priority. If two candidates have the same priority, the
+ * struct xfrm_policy pos member with the lower number is used.
+ *
+ * This replicates the previous single-list-search algorithm, which would
+ * return the first matching policy in the (ordered-by-priority) list.
*/
struct xfrm_pol_inexact_key {
@@ -195,8 +201,6 @@ xfrm_policy_inexact_lookup_rcu(struct net *net,
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
bool excl);
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy);
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
@@ -266,10 +270,8 @@ static const struct xfrm_if_cb *xfrm_if_get_cb(void)
return rcu_dereference(xfrm_if_cb);
}
-struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- int family, u32 mark)
+struct dst_entry *__xfrm_dst_lookup(int family,
+ const struct xfrm_dst_lookup_params *params)
{
const struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
@@ -278,7 +280,7 @@ struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
+ dst = afinfo->dst_lookup(params);
rcu_read_unlock();
@@ -287,11 +289,12 @@ struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
- int tos, int oif,
+ dscp_t dscp, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
int family, u32 mark)
{
+ struct xfrm_dst_lookup_params params;
struct net *net = xs_net(x);
xfrm_address_t *saddr = &x->props.saddr;
xfrm_address_t *daddr = &x->id.daddr;
@@ -306,7 +309,29 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
+ params.net = net;
+ params.saddr = saddr;
+ params.daddr = daddr;
+ params.dscp = dscp;
+ params.oif = oif;
+ params.mark = mark;
+ params.ipproto = x->id.proto;
+ if (x->encap) {
+ switch (x->encap->encap_type) {
+ case UDP_ENCAP_ESPINUDP:
+ params.ipproto = IPPROTO_UDP;
+ params.uli.ports.sport = x->encap->encap_sport;
+ params.uli.ports.dport = x->encap->encap_dport;
+ break;
+ case TCP_ENCAP_ESPINTCP:
+ params.ipproto = IPPROTO_TCP;
+ params.uli.ports.sport = x->encap->encap_sport;
+ params.uli.ports.dport = x->encap->encap_dport;
+ break;
+ }
+ }
+
+ dst = __xfrm_dst_lookup(family, &params);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
@@ -409,7 +434,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
if (policy) {
write_pnet(&policy->xp_net, net);
INIT_LIST_HEAD(&policy->walk.all);
- INIT_HLIST_NODE(&policy->bydst_inexact_list);
+ INIT_HLIST_HEAD(&policy->state_cache_list);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
@@ -451,6 +476,11 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
+ struct net *net = xp_net(policy);
+ struct xfrm_state *x;
+
+ xfrm_dev_policy_delete(policy);
+
write_lock_bh(&policy->lock);
policy->walk.dead = 1;
write_unlock_bh(&policy->lock);
@@ -464,6 +494,13 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
if (del_timer(&policy->timer))
xfrm_pol_put(policy);
+ /* XXX: Flush state cache */
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ hlist_for_each_entry_rcu(x, &policy->state_cache_list, state_cache) {
+ hlist_del_init_rcu(&x->state_cache);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
xfrm_pol_put(policy);
}
@@ -1225,26 +1262,31 @@ xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
return ERR_PTR(-EEXIST);
}
- chain = &net->xfrm.policy_inexact[dir];
- xfrm_policy_insert_inexact_list(chain, policy);
-
if (delpol)
__xfrm_policy_inexact_prune_bin(bin, false);
return delpol;
}
+static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
+{
+ int dir;
+
+ if (policy->walk.dead)
+ return true;
+
+ dir = xfrm_policy_id2dir(policy->index);
+ return dir >= XFRM_POLICY_MAX;
+}
+
static void xfrm_hash_rebuild(struct work_struct *work)
{
struct net *net = container_of(work, struct net,
xfrm.policy_hthresh.work);
- unsigned int hmask;
struct xfrm_policy *pol;
struct xfrm_policy *policy;
struct hlist_head *chain;
- struct hlist_head *odst;
struct hlist_node *newpos;
- int i;
int dir;
unsigned seq;
u8 lbits4, rbits4, lbits6, rbits6;
@@ -1271,13 +1313,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
- if (policy->walk.dead)
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX)
- continue;
-
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
if (policy->family == AF_INET) {
dbits = rbits4;
@@ -1308,23 +1347,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
goto out_unlock;
}
- /* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- struct hlist_node *n;
-
- hlist_for_each_entry_safe(policy, n,
- &net->xfrm.policy_inexact[dir],
- bydst_inexact_list) {
- hlist_del_rcu(&policy->bydst);
- hlist_del_init(&policy->bydst_inexact_list);
- }
-
- hmask = net->xfrm.policy_bydst[dir].hmask;
- odst = net->xfrm.policy_bydst[dir].table;
- for (i = hmask; i >= 0; i--) {
- hlist_for_each_entry_safe(policy, n, odst + i, bydst)
- hlist_del_rcu(&policy->bydst);
- }
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
@@ -1342,14 +1365,13 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
- if (policy->walk.dead)
- continue;
- dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX) {
- /* skip socket policies */
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
- }
+
+ hlist_del_rcu(&policy->bydst);
+
newpos = NULL;
+ dir = xfrm_policy_id2dir(policy->index);
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
@@ -1516,42 +1538,6 @@ static const struct rhashtable_params xfrm_pol_inexact_params = {
.automatic_shrinking = true,
};
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy)
-{
- struct xfrm_policy *pol, *delpol = NULL;
- struct hlist_node *newpos = NULL;
- int i = 0;
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if (pol->type == policy->type &&
- pol->if_id == policy->if_id &&
- !selector_cmp(&pol->selector, &policy->selector) &&
- xfrm_policy_mark_match(&policy->mark, pol) &&
- xfrm_sec_ctx_match(pol->security, policy->security) &&
- !WARN_ON(delpol)) {
- delpol = pol;
- if (policy->priority > pol->priority)
- continue;
- } else if (policy->priority >= pol->priority) {
- newpos = &pol->bydst_inexact_list;
- continue;
- }
- if (delpol)
- break;
- }
-
- if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
- hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
- else
- hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- pol->pos = i;
- i++;
- }
-}
-
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
struct xfrm_policy *policy,
bool excl)
@@ -1849,7 +1835,6 @@ again:
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
@@ -1890,7 +1875,6 @@ again:
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
@@ -2293,10 +2277,52 @@ out:
return pol;
}
+static u32 xfrm_gen_pos_slow(struct net *net)
+{
+ struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* oldest entry is last in list */
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ if (!xfrm_policy_is_dead_or_sk(policy))
+ policy->pos = ++i;
+ }
+
+ return i;
+}
+
+static u32 xfrm_gen_pos(struct net *net)
+{
+ const struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* most recently added policy is at the head of the list */
+ list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
+ if (xfrm_policy_is_dead_or_sk(policy))
+ continue;
+
+ if (policy->pos == UINT_MAX)
+ return xfrm_gen_pos_slow(net);
+
+ i = policy->pos + 1;
+ break;
+ }
+
+ return i;
+}
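
For illustration (not part of the patch), a userspace model of this position assignment: the newest live policy's pos plus one is used on the fast path, and only a wrap of that counter forces the slow renumbering from the oldest entry up.

#include <stdio.h>
#include <limits.h>

/* Models xfrm_gen_pos()/xfrm_gen_pos_slow() over an array of live-policy
 * positions, newest first as in net->xfrm.policy_all.
 */
static unsigned int gen_pos(unsigned int *pos, int n)
{
	unsigned int p = 0;
	int i;

	if (!n)
		return 0;
	if (pos[0] == UINT_MAX) {	/* slow path: renumber oldest..newest */
		for (i = n - 1; i >= 0; i--)
			pos[i] = ++p;
		return p;
	}
	return pos[0] + 1;
}

int main(void)
{
	unsigned int pos[] = { 7, 6, 2 };	/* newest first */

	printf("next pos: %u\n", gen_pos(pos, 3));		/* 8 */
	pos[0] = UINT_MAX;
	printf("next pos after wrap: %u\n", gen_pos(pos, 3));	/* 3 */
	return 0;
}
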
+
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
+ switch (dir) {
+ case XFRM_POLICY_IN:
+ case XFRM_POLICY_FWD:
+ case XFRM_POLICY_OUT:
+ pol->pos = xfrm_gen_pos(net);
+ break;
+ }
+
list_add(&pol->walk.all, &net->xfrm.policy_all);
net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
@@ -2313,7 +2339,6 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
hlist_del_rcu(&pol->bydst);
- hlist_del_init(&pol->bydst_inexact_list);
hlist_del(&pol->byidx);
}
@@ -2341,7 +2366,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
pol = __xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
- xfrm_dev_policy_delete(pol);
xfrm_policy_kill(pol);
return 0;
}
@@ -2440,15 +2464,15 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
}
static int
-xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
- xfrm_address_t *remote, unsigned short family, u32 mark)
+xfrm_get_saddr(unsigned short family, xfrm_address_t *saddr,
+ const struct xfrm_dst_lookup_params *params)
{
int err;
const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, oif, local, remote, mark);
+ err = afinfo->get_saddr(saddr, params);
rcu_read_unlock();
return err;
}
@@ -2473,13 +2497,19 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
if (tmpl->mode == XFRM_MODE_TUNNEL ||
+ tmpl->mode == XFRM_MODE_IPTFS ||
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, fl->flowi_oif,
- &tmp, remote,
- tmpl->encap_family, 0);
+ struct xfrm_dst_lookup_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.net = net;
+ params.oif = fl->flowi_oif;
+ params.daddr = remote;
+ error = xfrm_get_saddr(tmpl->encap_family, &tmp,
+ &params);
if (error)
goto fail;
local = &tmp;
@@ -2488,6 +2518,12 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
family, policy->if_id);
+ if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR);
+ xfrm_state_put(x);
+ error = -EINVAL;
+ goto fail;
+ }
if (x && x->km.state == XFRM_STATE_VALID) {
xfrm[nx++] = x;
@@ -2552,10 +2588,10 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
}
-static int xfrm_get_tos(const struct flowi *fl, int family)
+static dscp_t xfrm_get_dscp(const struct flowi *fl, int family)
{
if (family == AF_INET)
- return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
+ return inet_dsfield_to_dscp(fl->u.ip4.flowi4_tos);
return 0;
}
@@ -2597,8 +2633,7 @@ static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
if (dst->ops->family == AF_INET6) {
- struct rt6_info *rt = (struct rt6_info *)dst;
- path->path_cookie = rt6_get_cookie(rt);
+ path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
path->u.rt6.rt6i_nfheader_len = nfheader_len;
}
}
@@ -2644,13 +2679,13 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
int header_len = 0;
int nfheader_len = 0;
int trailer_len = 0;
- int tos;
int family = policy->selector.family;
xfrm_address_t saddr, daddr;
+ dscp_t dscp;
xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
- tos = xfrm_get_tos(fl, family);
+ dscp = xfrm_get_dscp(fl, family);
dst_hold(dst);
@@ -2698,8 +2733,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
family = xfrm[i]->props.family;
oif = fl->flowi_oif ? : fl->flowi_l3mdev;
- dst = xfrm_dst_lookup(xfrm[i], tos, oif,
- &saddr, &daddr, family, mark);
+ dst = xfrm_dst_lookup(xfrm[i], dscp, oif, &saddr,
+ &daddr, family, mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
@@ -2714,13 +2749,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->input = dst_discard;
- rcu_read_lock();
- afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
- if (likely(afinfo))
- dst1->output = afinfo->output;
- else
- dst1->output = dst_discard_out;
- rcu_read_unlock();
+ if (xfrm[i]->mode_cbs && xfrm[i]->mode_cbs->output) {
+ dst1->output = xfrm[i]->mode_cbs->output;
+ } else {
+ rcu_read_lock();
+ afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
+ if (likely(afinfo))
+ dst1->output = afinfo->output;
+ else
+ dst1->output = dst_discard_out;
+ rcu_read_unlock();
+ }
xdst_prev = xdst;
@@ -2925,7 +2964,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- dst_output(net, skb->sk, skb);
+ dst_output(net, skb_to_full_sk(skb), skb);
}
out:
@@ -3252,10 +3291,12 @@ no_transform:
dst_release(dst);
dst = dst_orig;
}
+
ok:
xfrm_pols_put(pols, drop_pols);
if (dst && dst->xfrm &&
- dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+ (dst->xfrm->props.mode == XFRM_MODE_TUNNEL ||
+ dst->xfrm->props.mode == XFRM_MODE_IPTFS))
dst->flags |= DST_XFRM_TUNNEL;
return dst;
@@ -3505,6 +3546,130 @@ static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int
return 0;
}
+static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
+{
+ const struct flowi4 *fl4 = &fl->u.ip4;
+
+ if (family == AF_INET &&
+ fl4->flowi4_proto == IPPROTO_ICMP &&
+ (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
+ fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6) {
+ const struct flowi6 *fl6 = &fl->u.ip6;
+
+ if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
+ (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
+ fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
+ fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
+ return true;
+ }
+#endif
+ return false;
+}
+
+static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
+ const struct flowi *fl, struct flowi *fl1)
+{
+ bool ret = true;
+ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+ int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
+ (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+
+ if (!newskb)
+ return true;
+
+ if (!pskb_pull(newskb, hl))
+ goto out;
+
+ skb_reset_network_header(newskb);
+
+ if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
+ goto out;
+
+ fl1->flowi_oif = fl->flowi_oif;
+ fl1->flowi_mark = fl->flowi_mark;
+ fl1->flowi_tos = fl->flowi_tos;
+ nf_nat_decode_session(newskb, fl1, family);
+ ret = false;
+
+out:
+ consume_skb(newskb);
+ return ret;
+}
+
+static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
+ const struct xfrm_selector *sel,
+ const struct flowi *fl)
+{
+ bool ret = false;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return ret;
+
+ ret = xfrm_selector_match(sel, &fl1, family);
+ }
+
+ return ret;
+}
+
+static inline struct
+xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
+ const struct flowi *fl, unsigned short family,
+ u32 if_id)
+{
+ struct xfrm_policy *pol = NULL;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+ struct net *net = dev_net(skb->dev);
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return pol;
+
+ pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
+ if (IS_ERR(pol))
+ pol = NULL;
+ }
+
+ return pol;
+}
+
+static inline struct
+dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
+ unsigned short family, struct dst_entry *dst)
+{
+ if (icmp_err_packet(fl, family)) {
+ struct net *net = dev_net(skb->dev);
+ struct dst_entry *dst2;
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return dst;
+
+ dst_hold(dst);
+
+ dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
+
+ if (IS_ERR(dst2))
+ return dst;
+
+ if (dst2->xfrm) {
+ dst_release(dst);
+ dst = dst2;
+ } else {
+ dst_release(dst2);
+ }
+ }
+
+ return dst;
+}
+
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
@@ -3551,9 +3716,17 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
+ int ret = 0;
+
if (!xfrm_selector_match(&x->sel, &fl, family)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
- return 0;
+ ret = 1;
+ if (x->props.flags & XFRM_STATE_ICMP &&
+ xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
+ ret = 0;
+ if (ret) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
+ return 0;
+ }
}
}
}
@@ -3576,13 +3749,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 0;
}
+ if (!pol && dir == XFRM_POLICY_FWD)
+ pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
+
if (!pol) {
+ const bool is_crypto_offload = sp &&
+ (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
+
if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
}
- if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
+ if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
@@ -3709,6 +3888,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
res = 0;
dst = NULL;
}
+
+ if (dst && !dst->xfrm)
+ dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
+
skb_dst_set(skb, dst);
return res;
}
@@ -3765,15 +3948,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
/* Impossible. Such dst must be popped before reaches point of failure. */
}
-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
- if (dst) {
- if (dst->obsolete) {
- dst_release(dst);
- dst = NULL;
- }
- }
- return dst;
+ if (dst->obsolete)
+ sk_dst_reset(sk);
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
@@ -4027,10 +4205,7 @@ static int __net_init xfrm_policy_init(struct net *net)
int dir, err;
if (net_eq(net, &init_net)) {
- xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
- sizeof(struct xfrm_dst),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL);
+ xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = rhashtable_init(&xfrm_policy_inexact_table,
&xfrm_pol_inexact_params);
BUG_ON(err);
@@ -4049,7 +4224,6 @@ static int __net_init xfrm_policy_init(struct net *net)
net->xfrm.policy_count[dir] = 0;
net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
- INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
htab = &net->xfrm.policy_bydst[dir];
htab->table = xfrm_hash_alloc(sz);
@@ -4103,8 +4277,6 @@ static void xfrm_policy_fini(struct net *net)
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy_hash *htab;
- WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
-
htab = &net->xfrm.policy_bydst[dir];
sz = (htab->hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(htab->table));
@@ -4147,8 +4319,14 @@ static int __net_init xfrm_net_init(struct net *net)
if (rv < 0)
goto out_sysctl;
+ rv = xfrm_nat_keepalive_net_init(net);
+ if (rv < 0)
+ goto out_nat_keepalive;
+
return 0;
+out_nat_keepalive:
+ xfrm_sysctl_fini(net);
out_sysctl:
xfrm_policy_fini(net);
out_policy:
@@ -4161,6 +4339,7 @@ out_statistics:
static void __net_exit xfrm_net_exit(struct net *net)
{
+ xfrm_nat_keepalive_net_fini(net);
xfrm_sysctl_fini(net);
xfrm_policy_fini(net);
xfrm_state_fini(net);
@@ -4222,6 +4401,7 @@ void __init xfrm_init(void)
#endif
register_xfrm_state_bpf();
+ xfrm_nat_keepalive_init(AF_INET);
}
#ifdef CONFIG_AUDITSYSCALL
@@ -4290,63 +4470,50 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
- const struct xfrm_selector *sel_tgt)
-{
- if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
- if (sel_tgt->family == sel_cmp->family &&
- xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
- sel_cmp->family) &&
- xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
- sel_cmp->family) &&
- sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
- sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return true;
- }
- } else {
- if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return true;
- }
- }
- return false;
-}
-
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type, struct net *net, u32 if_id)
{
- struct xfrm_policy *pol, *ret = NULL;
- struct hlist_head *chain;
- u32 priority = ~0U;
+ struct xfrm_policy *pol;
+ struct flowi fl;
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
- hlist_for_each_entry(pol, chain, bydst) {
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
- priority = ret->priority;
- break;
- }
- }
- chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if ((pol->priority >= priority) && ret)
- break;
+ memset(&fl, 0, sizeof(fl));
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
+ fl.flowi_proto = sel->proto;
+
+ switch (sel->family) {
+ case AF_INET:
+ fl.u.ip4.saddr = sel->saddr.a4;
+ fl.u.ip4.daddr = sel->daddr.a4;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
break;
- }
+ fl.u.flowi4_oif = sel->ifindex;
+ fl.u.ip4.fl4_sport = sel->sport;
+ fl.u.ip4.fl4_dport = sel->dport;
+ break;
+ case AF_INET6:
+ fl.u.ip6.saddr = sel->saddr.in6;
+ fl.u.ip6.daddr = sel->daddr.in6;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
+ break;
+ fl.u.flowi6_oif = sel->ifindex;
+ fl.u.ip6.fl4_sport = sel->sport;
+ fl.u.ip6.fl4_dport = sel->dport;
+ break;
+ default:
+ return ERR_PTR(-EAFNOSUPPORT);
}
- xfrm_pol_hold(ret);
+ rcu_read_lock();
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
+ if (IS_ERR_OR_NULL(pol))
+ goto out_unlock;
- return ret;
+ if (!xfrm_pol_hold_rcu(pol))
+ pol = NULL;
+out_unlock:
+ rcu_read_unlock();
+ return pol;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
@@ -4358,6 +4525,7 @@ static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tm
switch (t->mode) {
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
+ case XFRM_MODE_IPTFS:
if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
m->old_family) &&
xfrm_addr_equal(&t->saddr, &m->old_saddr,
@@ -4400,7 +4568,8 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
continue;
n++;
if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
- pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
+ pol->xfrm_vec[i].mode != XFRM_MODE_BEET &&
+ pol->xfrm_vec[i].mode != XFRM_MODE_IPTFS)
continue;
/* update endpoints */
memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
@@ -4483,9 +4652,9 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
/* Stage 1 - find policy */
pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
- if (!pol) {
+ if (IS_ERR_OR_NULL(pol)) {
NL_SET_ERR_MSG(extack, "Target policy not found");
- err = -ENOENT;
+ err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
goto out;
}
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fee9b5cf37a7..8e07dd614b0b 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -41,6 +41,10 @@ static const struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID),
SNMP_MIB_ITEM("XfrmAcquireError", LINUX_MIB_XFRMACQUIREERROR),
+ SNMP_MIB_ITEM("XfrmOutStateDirError", LINUX_MIB_XFRMOUTSTATEDIRERROR),
+ SNMP_MIB_ITEM("XfrmInStateDirError", LINUX_MIB_XFRMINSTATEDIRERROR),
+ SNMP_MIB_ITEM("XfrmInIptfsError", LINUX_MIB_XFRMINIPTFSERROR),
+ SNMP_MIB_ITEM("XfrmOutNoQueueSpace", LINUX_MIB_XFRMOUTNOQSPACE),
SNMP_MIB_SENTINEL
};
@@ -52,6 +56,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index ce56d659c55a..dbdf8a39dffe 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -714,10 +714,12 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
oseq += skb_shinfo(skb)->gso_segs;
}
- if (unlikely(xo->seq.low < replay_esn->oseq)) {
- XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
- xo->seq.hi = oseq_hi;
- replay_esn->oseq_hi = oseq_hi;
+ if (unlikely(oseq < replay_esn->oseq)) {
+ replay_esn->oseq_hi = ++oseq_hi;
+ if (xo->seq.low < replay_esn->oseq) {
+ XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
+ xo->seq.hi = oseq_hi;
+ }
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
@@ -729,6 +731,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
}
replay_esn->oseq = oseq;
+ xfrm_dev_state_advance_esn(x);
if (xfrm_aevent_is_on(net))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
@@ -778,7 +781,8 @@ int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack)
}
if (x->props.flags & XFRM_STATE_ESN) {
- if (replay_esn->replay_window == 0) {
+ if (replay_esn->replay_window == 0 &&
+ (!x->dir || x->dir == XFRM_SA_DIR_IN)) {
NL_SET_ERR_MSG(extack, "ESN replay window must be > 0");
return -EINVAL;
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bda5327bf34d..ad2202fa82f3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -34,6 +34,8 @@
#define xfrm_state_deref_prot(table, net) \
rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+#define xfrm_state_deref_check(table, net) \
+ rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
static void xfrm_state_gc_task(struct work_struct *work);
@@ -49,6 +51,7 @@ static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
+static HLIST_HEAD(xfrm_state_dev_gc_list);
static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
@@ -61,6 +64,8 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
u32 reqid,
unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}
@@ -69,6 +74,8 @@ static inline unsigned int xfrm_src_hash(struct net *net,
const xfrm_address_t *saddr,
unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
@@ -76,11 +83,15 @@ static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
__be32 spi, u8 proto, unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}
@@ -214,6 +225,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
+static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
@@ -465,6 +477,11 @@ static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET,
},
+ [XFRM_MODE_IPTFS] = {
+ .encap = XFRM_MODE_IPTFS,
+ .flags = XFRM_MODE_FLAG_TUNNEL,
+ .family = AF_INET,
+ },
};
static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
@@ -486,6 +503,11 @@ static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
.flags = XFRM_MODE_FLAG_TUNNEL,
.family = AF_INET6,
},
+ [XFRM_MODE_IPTFS] = {
+ .encap = XFRM_MODE_IPTFS,
+ .flags = XFRM_MODE_FLAG_TUNNEL,
+ .family = AF_INET6,
+ },
};
static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
@@ -513,6 +535,60 @@ static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
return NULL;
}
+static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
+static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);
+
+int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
+{
+ if (mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
+ spin_lock_bh(&xfrm_mode_cbs_map_lock);
+ rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
+ spin_unlock_bh(&xfrm_mode_cbs_map_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(xfrm_register_mode_cbs);
+
+void xfrm_unregister_mode_cbs(u8 mode)
+{
+ if (mode >= XFRM_MODE_MAX)
+ return;
+
+ spin_lock_bh(&xfrm_mode_cbs_map_lock);
+ RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
+ spin_unlock_bh(&xfrm_mode_cbs_map_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(xfrm_unregister_mode_cbs);
+
+static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
+{
+ const struct xfrm_mode_cbs *cbs;
+ bool try_load = true;
+
+ if (mode >= XFRM_MODE_MAX)
+ return NULL;
+
+retry:
+ rcu_read_lock();
+
+ cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
+ if (cbs && !try_module_get(cbs->owner))
+ cbs = NULL;
+
+ rcu_read_unlock();
+
+ if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
+ request_module("xfrm-iptfs");
+ try_load = false;
+ goto retry;
+ }
+
+ return cbs;
+}
+
void xfrm_state_free(struct xfrm_state *x)
{
kmem_cache_free(xfrm_state_cache, x);
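The callback registry added above is what request_module("xfrm-iptfs") resolves against: a mode implementation registers a struct xfrm_mode_cbs for its mode, and xfrm_get_mode_cbs() pins the owning module via try_module_get() before the state code uses it. A minimal registration sketch, using only the fields this diff references (owner, init_state, destroy_state) and a hypothetical module name — the real callbacks live in the IP-TFS module, not here:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical mode-module skeleton; everything not named in the diff
 * above is an assumption for illustration only. */
#include <linux/module.h>
#include <net/xfrm.h>

static int sample_iptfs_init_state(struct xfrm_state *x)
{
	/* allocate per-SA data and hang it off x->mode_data here */
	return 0;
}

static void sample_iptfs_destroy_state(struct xfrm_state *x)
{
	/* free whatever init_state allocated */
}

static const struct xfrm_mode_cbs sample_iptfs_mode_cbs = {
	.owner		= THIS_MODULE,
	.init_state	= sample_iptfs_init_state,
	.destroy_state	= sample_iptfs_destroy_state,
};

static int __init sample_iptfs_init(void)
{
	return xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &sample_iptfs_mode_cbs);
}

static void __exit sample_iptfs_exit(void)
{
	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
}

module_init(sample_iptfs_init);
module_exit(sample_iptfs_exit);
/* Must match the request_module("xfrm-iptfs") string used above. */
MODULE_ALIAS("xfrm-iptfs");
MODULE_LICENSE("GPL");

The unregister path pairs RCU_INIT_POINTER() with synchronize_rcu(), so readers that already dereferenced the callbacks finish before the module's table entry disappears.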
@@ -521,6 +597,8 @@ EXPORT_SYMBOL(xfrm_state_free);
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
+ if (x->mode_cbs && x->mode_cbs->destroy_state)
+ x->mode_cbs->destroy_state(x);
hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aead);
@@ -570,7 +648,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
@@ -663,6 +741,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
refcount_set(&x->refcnt, 1);
atomic_set(&x->tunnel_users, 0);
INIT_LIST_HEAD(&x->km.all);
+ INIT_HLIST_NODE(&x->state_cache);
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
@@ -677,12 +756,49 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
x->lft.hard_packet_limit = XFRM_INF;
x->replay_maxage = 0;
x->replay_maxdiff = 0;
+ x->pcpu_num = UINT_MAX;
spin_lock_init(&x->lock);
+ x->mode_data = NULL;
}
return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
+#ifdef CONFIG_XFRM_OFFLOAD
+void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
+
+ if (dev) {
+ dev->xfrmdev_ops->xdo_dev_state_delete(x);
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);
+
+void xfrm_dev_state_free(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
+
+ if (dev && dev->xfrmdev_ops) {
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ if (!hlist_unhashed(&x->dev_gclist))
+ hlist_del(&x->dev_gclist);
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
+ WRITE_ONCE(xso->dev, NULL);
+ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ netdev_put(dev, &xso->dev_tracker);
+ }
+}
+#endif
+
void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
@@ -706,15 +822,22 @@ int __xfrm_state_delete(struct xfrm_state *x)
if (x->km.state != XFRM_STATE_DEAD) {
x->km.state = XFRM_STATE_DEAD;
+
spin_lock(&net->xfrm.xfrm_state_lock);
list_del(&x->km.all);
hlist_del_rcu(&x->bydst);
hlist_del_rcu(&x->bysrc);
if (x->km.seq)
hlist_del_rcu(&x->byseq);
+ if (!hlist_unhashed(&x->state_cache))
+ hlist_del_rcu(&x->state_cache);
+ if (!hlist_unhashed(&x->state_cache_input))
+ hlist_del_rcu(&x->state_cache_input);
+
if (x->id.spi)
hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
+ xfrm_nat_keepalive_state_updated(x);
spin_unlock(&net->xfrm.xfrm_state_lock);
if (x->encap_sk)
@@ -848,6 +971,9 @@ EXPORT_SYMBOL(xfrm_state_flush);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
+ struct xfrm_state *x;
+ struct hlist_node *tmp;
+ struct xfrm_dev_offload *xso;
int i, err = 0, cnt = 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -857,8 +983,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct xfrm_state *x;
- struct xfrm_dev_offload *xso;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
@@ -868,6 +992,8 @@ restart:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
+ xfrm_dev_state_free(x);
+
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
xfrm_state_put(x);
@@ -884,6 +1010,24 @@ restart:
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+restart_gc:
+ hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
+ xso = &x->xso;
+
+ if (xso->dev == dev) {
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+ xfrm_dev_state_free(x);
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ goto restart_gc;
+ }
+
+ }
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+
+ xfrm_flush_gc();
+
return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);
@@ -974,16 +1118,38 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
x->props.family = tmpl->encap_family;
}
-static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
+struct xfrm_hash_state_ptrs {
+ const struct hlist_head *bydst;
+ const struct hlist_head *bysrc;
+ const struct hlist_head *byspi;
+ unsigned int hmask;
+};
+
+static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
+{
+ unsigned int sequence;
+
+ do {
+ sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+
+ ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
+ ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
+ ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
+ ptrs->hmask = net->xfrm.state_hmask;
+ } while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
+}
+
+static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family,
struct xfrm_dev_offload *xdo)
{
- unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
+ unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
+ hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
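xfrm_hash_ptrs_get() a few lines above snapshots the bydst/bysrc/byspi table pointers together with the hash mask under the state hash-generation seqcount, retrying if a resize raced with the read; the lookup helpers then work purely on that snapshot. A simplified userspace analogue of the read-retry idea (C11 atomics standing in for the kernel seqcount; single-threaded here, and it glosses over the memory-ordering details the kernel primitive handles):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Writers bump 'seq' to an odd value, update the fields, then bump it to
 * the next even value. Readers retry until they see the same even value
 * before and after copying the fields. */
struct snapshot {
	_Atomic unsigned int seq;
	size_t hmask;
	int *table;
};

static void write_snapshot(struct snapshot *s, size_t hmask, int *table)
{
	atomic_fetch_add(&s->seq, 1);      /* odd: update in progress */
	s->hmask = hmask;
	s->table = table;
	atomic_fetch_add(&s->seq, 1);      /* even: stable again */
}

static void read_snapshot(const struct snapshot *s, size_t *hmask, int **table)
{
	unsigned int start;

	do {
		do {
			start = atomic_load(&s->seq);
		} while (start & 1);       /* writer active, spin */
		*hmask = s->hmask;
		*table = s->table;
	} while (atomic_load(&s->seq) != start);
}

int main(void)
{
	static int old_tbl[4], new_tbl[8];
	struct snapshot s = { .hmask = 3, .table = old_tbl };
	size_t hmask;
	int *table;

	write_snapshot(&s, 7, new_tbl);    /* simulate a hash resize */
	read_snapshot(&s, &hmask, &table);
	printf("hmask=%zu table=%s\n", hmask, table == new_tbl ? "new" : "old");
	return 0;
}

The point of the snapshot is that hmask and the table pointers always belong to the same generation, so a concurrent rehash cannot send a lookup into the wrong bucket array.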
@@ -1017,15 +1183,16 @@ static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
return NULL;
}
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family)
{
- unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
+ unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
+ hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
@@ -1042,15 +1209,63 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
return NULL;
}
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ __be32 spi, u8 proto,
+ unsigned short family)
+{
+ struct xfrm_hash_state_ptrs state_ptrs;
+ struct hlist_head *state_cache_input;
+ struct xfrm_state *x = NULL;
+
+ state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
+ if (x->props.family != family ||
+ x->id.spi != spi ||
+ x->id.proto != proto ||
+ !xfrm_addr_equal(&x->id.daddr, daddr, family))
+ continue;
+
+ if ((mark & x->mark.m) != x->mark.v)
+ continue;
+ if (!xfrm_state_hold_rcu(x))
+ continue;
+ goto out;
+ }
+
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
+
+ if (x && x->km.state == XFRM_STATE_VALID) {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ if (hlist_unhashed(&x->state_cache_input)) {
+ hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
+ } else {
+ hlist_del_rcu(&x->state_cache_input);
+ hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+
+out:
+ rcu_read_unlock();
+ return x;
+}
+EXPORT_SYMBOL(xfrm_input_state_lookup);
+
+static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
- unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
+ unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
+ hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
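xfrm_input_state_lookup() above consults a per-CPU list of recently used inbound SAs before falling back to the SPI hash, and promotes a hash hit into this CPU's list under the state lock. A small, self-contained C toy of that cache-in-front-of-a-lookup idea (a one-entry cache and a linear table instead of the kernel's RCU list and hash; the data structures are stand-ins, not the kernel's):

#include <stdio.h>

/* Toy stand-ins: a tiny "state table" plus a per-CPU one-entry front
 * cache, mirroring the promote-on-hit behaviour above. */
struct state { unsigned int spi; int valid; };

#define NSTATES 4
#define NCPUS   2

static struct state states[NSTATES] = {
	{ .spi = 0x100, .valid = 1 },
	{ .spi = 0x200, .valid = 1 },
};
static struct state *pcpu_cache[NCPUS];

static struct state *lookup(unsigned int cpu, unsigned int spi)
{
	struct state *x = pcpu_cache[cpu];

	/* Fast path: this CPU resolved the SA recently. */
	if (x && x->valid && x->spi == spi)
		return x;

	/* Slow path: full table walk, then promote into the cache. */
	for (int i = 0; i < NSTATES; i++) {
		if (states[i].valid && states[i].spi == spi) {
			pcpu_cache[cpu] = &states[i];
			return &states[i];
		}
	}
	return NULL;
}

int main(void)
{
	printf("first:  %p\n", (void *)lookup(0, 0x200)); /* slow path, promotes */
	printf("second: %p\n", (void *)lookup(0, 0x200)); /* served from cache */
	printf("miss:   %p\n", (void *)lookup(1, 0x300)); /* NULL */
	return 0;
}

In the kernel version the cache entry is also unlinked in __xfrm_state_delete() (the new state_cache_input hlist handling earlier in this diff), so a dead SA can never be returned from the fast path.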
@@ -1070,14 +1285,17 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct net *net = xs_net(x);
u32 mark = x->mark.v & x->mark.m;
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
if (use_spi)
- return __xfrm_state_lookup(net, mark, &x->id.daddr,
+ return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
x->id.spi, x->id.proto, family);
else
- return __xfrm_state_lookup_byaddr(net, mark,
+ return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
&x->id.daddr,
&x->props.saddr,
x->id.proto, family);
@@ -1096,6 +1314,12 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
struct xfrm_state **best, int *acq_in_progress,
int *error)
{
+ /* We need the cpu id just as a lookup key,
+ * we don't require it to be stable.
+ */
+ unsigned int pcpu_id = get_cpu();
+ put_cpu();
+
/* Resolution logic:
* 1. There is a valid state with matching selector. Done.
* 2. Valid state with inappropriate selector. Skip.
@@ -1115,13 +1339,18 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
&fl->u.__fl_common))
return;
+ if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
+ return;
+
if (!*best ||
+ ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
(*best)->km.dying > x->km.dying ||
((*best)->km.dying == x->km.dying &&
(*best)->curlft.add_time < x->curlft.add_time))
*best = x;
} else if (x->km.state == XFRM_STATE_ACQ) {
- *acq_in_progress = 1;
+ if (!*best || x->pcpu_num == pcpu_id)
+ *acq_in_progress = 1;
} else if (x->km.state == XFRM_STATE_ERROR ||
x->km.state == XFRM_STATE_EXPIRED) {
if ((!x->sel.family ||
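The look_at changes above make the resolver skip SAs pinned to a different CPU (pcpu_num set and not equal to the running CPU) and let a CPU-local SA displace an unpinned "fallback" that was selected earlier. A compact, runnable illustration of just that preference order, with UINT_MAX standing for "not pinned" as in the diff and ignoring the km.dying/add_time tie-breakers of the real code:

#include <limits.h>
#include <stdio.h>

struct sa { unsigned int pcpu_num; const char *name; };

/* Pick the preferred SA for 'cpu': ignore SAs pinned elsewhere, and let
 * a CPU-local SA replace an unpinned one chosen earlier. */
static const struct sa *pick(const struct sa *cand, int n, unsigned int cpu)
{
	const struct sa *best = NULL;

	for (int i = 0; i < n; i++) {
		if (cand[i].pcpu_num != UINT_MAX && cand[i].pcpu_num != cpu)
			continue;	/* pinned to some other CPU */
		if (!best ||
		    (best->pcpu_num == UINT_MAX && cand[i].pcpu_num == cpu))
			best = &cand[i];
	}
	return best;
}

int main(void)
{
	const struct sa sas[] = {
		{ UINT_MAX, "fallback" },
		{ 3,        "cpu3"     },
		{ 1,        "cpu1"     },
	};

	printf("cpu1 -> %s\n", pick(sas, 3, 1)->name); /* cpu1 */
	printf("cpu2 -> %s\n", pick(sas, 3, 2)->name); /* fallback */
	return 0;
}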
@@ -1140,6 +1369,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family, u32 if_id)
{
static xfrm_address_t saddr_wildcard = { };
+ struct xfrm_hash_state_ptrs state_ptrs;
struct net *net = xp_net(pol);
unsigned int h, h_wildcard;
struct xfrm_state *x, *x0, *to_put;
@@ -1150,14 +1380,64 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short encap_family = tmpl->encap_family;
unsigned int sequence;
struct km_event c;
+ unsigned int pcpu_id;
+ bool cached = false;
+
+ /* We need the cpu id just as a lookup key,
+ * we don't require it to be stable.
+ */
+ pcpu_id = get_cpu();
+ put_cpu();
to_put = NULL;
sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
- h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
+ hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
+ if (x->props.family == encap_family &&
+ x->props.reqid == tmpl->reqid &&
+ (mark & x->mark.m) == x->mark.v &&
+ x->if_id == if_id &&
+ !(x->props.flags & XFRM_STATE_WILDRECV) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
+ tmpl->mode == x->props.mode &&
+ tmpl->id.proto == x->id.proto &&
+ (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ xfrm_state_look_at(pol, x, fl, encap_family,
+ &best, &acquire_in_progress, &error);
+ }
+
+ if (best)
+ goto cached;
+
+ hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
+ if (x->props.family == encap_family &&
+ x->props.reqid == tmpl->reqid &&
+ (mark & x->mark.m) == x->mark.v &&
+ x->if_id == if_id &&
+ !(x->props.flags & XFRM_STATE_WILDRECV) &&
+ xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
+ tmpl->mode == x->props.mode &&
+ tmpl->id.proto == x->id.proto &&
+ (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ xfrm_state_look_at(pol, x, fl, family,
+ &best, &acquire_in_progress, &error);
+ }
+
+cached:
+ cached = true;
+ if (best)
+ goto found;
+ else if (error)
+ best = NULL;
+ else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
+ WARN_ON(1);
+
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
+ hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
@@ -1190,8 +1470,9 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
if (best || acquire_in_progress)
goto found;
- h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
+ h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
+ encap_family, state_ptrs.hmask);
+ hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
@@ -1223,10 +1504,13 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
}
found:
- x = best;
+ if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
+ (best && (best->pcpu_num == pcpu_id)))
+ x = best;
+
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
- (x0 = __xfrm_state_lookup_all(net, mark, daddr,
+ (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
tmpl->id.spi, tmpl->id.proto,
encap_family,
&pol->xdo)) != NULL) {
@@ -1255,6 +1539,8 @@ found:
xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
x->if_id = if_id;
+ if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
+ x->pcpu_num = pcpu_id;
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
if (error) {
@@ -1273,8 +1559,7 @@ found:
xso->dev = xdo->dev;
xso->real_dev = xdo->real_dev;
xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
- netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
- GFP_ATOMIC);
+ netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
if (error) {
xso->dir = 0;
@@ -1292,7 +1577,9 @@ found:
if (km_query(x, tmpl, pol) == 0) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->km.state = XFRM_STATE_ACQ;
+ x->dir = XFRM_SA_DIR_OUT;
list_add(&x->km.all, &net->xfrm.state_all);
+ h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
XFRM_STATE_INSERT(bydst, &x->bydst,
net->xfrm.state_bydst + h,
x->xso.type);
@@ -1300,6 +1587,7 @@ found:
XFRM_STATE_INSERT(bysrc, &x->bysrc,
net->xfrm.state_bysrc + h,
x->xso.type);
+ INIT_HLIST_NODE(&x->state_cache);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
XFRM_STATE_INSERT(byspi, &x->byspi,
@@ -1333,6 +1621,11 @@ found:
x = NULL;
error = -ESRCH;
}
+
+ /* Use the already installed 'fallback' while the CPU-specific
+ * SA acquire is handled*/
+ if (best)
+ x = best;
}
out:
if (x) {
@@ -1343,6 +1636,15 @@ out:
} else {
*err = acquire_in_progress ? -EAGAIN : error;
}
+
+ if (x && x->km.state == XFRM_STATE_VALID && !cached &&
+ (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ if (hlist_unhashed(&x->state_cache))
+ hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+
rcu_read_unlock();
if (to_put)
xfrm_state_put(to_put);
@@ -1452,6 +1754,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ xfrm_nat_keepalive_state_updated(x);
}
/* net->xfrm.xfrm_state_lock is held */
@@ -1464,12 +1767,14 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
unsigned int h;
u32 mark = xnew->mark.v & xnew->mark.m;
u32 if_id = xnew->if_id;
+ u32 cpu_id = xnew->pcpu_num;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
x->if_id == if_id &&
+ x->pcpu_num == cpu_id &&
(mark & x->mark.m) == x->mark.v &&
xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
@@ -1492,7 +1797,7 @@ EXPORT_SYMBOL(xfrm_state_insert);
static struct xfrm_state *__find_acq_core(struct net *net,
const struct xfrm_mark *m,
unsigned short family, u8 mode,
- u32 reqid, u32 if_id, u8 proto,
+ u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
int create)
@@ -1509,6 +1814,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
x->id.spi != 0 ||
x->id.proto != proto ||
(mark & x->mark.m) != x->mark.v ||
+ x->pcpu_num != pcpu_num ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
!xfrm_addr_equal(&x->props.saddr, saddr, family))
continue;
@@ -1542,6 +1848,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
break;
}
+ x->pcpu_num = pcpu_num;
x->km.state = XFRM_STATE_ACQ;
x->id.proto = proto;
x->props.family = family;
@@ -1570,7 +1877,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
return x;
}
-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_add(struct xfrm_state *x)
{
@@ -1596,7 +1903,7 @@ int xfrm_state_add(struct xfrm_state *x)
}
if (use_spi && x->km.seq) {
- x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
+ x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
if (x1 && ((x1->id.proto != x->id.proto) ||
!xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
to_put = x1;
@@ -1606,7 +1913,7 @@ int xfrm_state_add(struct xfrm_state *x)
if (use_spi && !x1)
x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
- x->props.reqid, x->if_id, x->id.proto,
+ x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
&x->id.daddr, &x->props.saddr, 0);
__xfrm_state_bump_genids(x);
@@ -1731,6 +2038,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
+ x->pcpu_num = orig->pcpu_num;
x->if_id = orig->if_id;
x->tfcpad = orig->tfcpad;
x->replay_maxdiff = orig->replay_maxdiff;
@@ -1744,6 +2052,13 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->lastused = orig->lastused;
x->new_mapping = 0;
x->new_mapping_sport = 0;
+ x->dir = orig->dir;
+
+ x->mode_cbs = orig->mode_cbs;
+ if (x->mode_cbs && x->mode_cbs->clone_state) {
+ if (x->mode_cbs->clone_state(x, orig))
+ goto error;
+ }
return x;
@@ -1864,8 +2179,14 @@ int xfrm_state_update(struct xfrm_state *x)
}
if (x1->km.state == XFRM_STATE_ACQ) {
+ if (x->dir && x1->dir != x->dir)
+ goto out;
+
__xfrm_state_insert(x);
x = NULL;
+ } else {
+ if (x1->dir != x->dir)
+ goto out;
}
err = 0;
@@ -1935,7 +2256,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -1957,14 +2278,30 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
+void xfrm_state_update_stats(struct net *net)
+{
+ struct xfrm_state *x;
+ int i;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+ xfrm_dev_state_update_stats(x);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct xfrm_state *x;
rcu_read_lock();
- x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
rcu_read_unlock();
return x;
}
@@ -1975,10 +2312,14 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
+
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
@@ -1986,13 +2327,14 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
- u32 if_id, u8 proto, const xfrm_address_t *daddr,
+ u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
+ x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
+ proto, daddr, saddr, create);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
@@ -2051,6 +2393,7 @@ static int __xfrm6_state_sort_cmp(const void *p)
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
+ case XFRM_MODE_IPTFS:
return 4;
}
return 5;
@@ -2077,6 +2420,7 @@ static int __xfrm6_tmpl_sort_cmp(const void *p)
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
+ case XFRM_MODE_IPTFS:
return 3;
}
return 4;
@@ -2127,7 +2471,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
/* Silly enough, but I'm lazy to build resolution list */
-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
+static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
unsigned int h = xfrm_seq_hash(net, seq);
struct xfrm_state *x;
@@ -2135,6 +2479,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
if (x->km.seq == seq &&
(mark & x->mark.m) == x->mark.v &&
+ x->pcpu_num == pcpu_num &&
x->km.state == XFRM_STATE_ACQ) {
xfrm_state_hold(x);
return x;
@@ -2144,12 +2489,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
return NULL;
}
-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x = __xfrm_find_acq_byseq(net, mark, seq);
+ x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
@@ -2765,6 +3110,9 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
case XFRM_MODE_TUNNEL:
break;
default:
+ if (x->mode_cbs && x->mode_cbs->get_inner_mtu)
+ return x->mode_cbs->get_inner_mtu(x, mtu);
+
WARN_ON_ONCE(1);
break;
}
@@ -2850,6 +3198,27 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
goto error;
}
+ if (x->nat_keepalive_interval) {
+ if (x->dir != XFRM_SA_DIR_OUT) {
+ NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
+ NL_SET_ERR_MSG(extack,
+ "NAT keepalive is only supported for UDP encapsulation");
+ err = -EINVAL;
+ goto error;
+ }
+ }
+
+ x->mode_cbs = xfrm_get_mode_cbs(x->props.mode);
+ if (x->mode_cbs) {
+ if (x->mode_cbs->init_state)
+ err = x->mode_cbs->init_state(x);
+ module_put(x->mode_cbs->owner);
+ }
error:
return err;
}
@@ -2893,6 +3262,11 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_byseq = xfrm_hash_alloc(sz);
if (!net->xfrm.state_byseq)
goto out_byseq;
+
+ net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
+ if (!net->xfrm.state_cache_input)
+ goto out_state_cache_input;
+
net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
net->xfrm.state_num = 0;
@@ -2902,6 +3276,8 @@ int __net_init xfrm_state_init(struct net *net)
&net->xfrm.xfrm_state_lock);
return 0;
+out_state_cache_input:
+ xfrm_hash_free(net->xfrm.state_byseq, sz);
out_byseq:
xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
@@ -2931,6 +3307,7 @@ void xfrm_state_fini(struct net *net)
xfrm_hash_free(net->xfrm.state_bysrc, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bydst));
xfrm_hash_free(net->xfrm.state_bydst, sz);
+ free_percpu(net->xfrm.state_cache_input);
}
#ifdef CONFIG_AUDITSYSCALL
diff --git a/net/xfrm/xfrm_state_bpf.c b/net/xfrm/xfrm_state_bpf.c
index 9e20d4a377f7..2248eda741f8 100644
--- a/net/xfrm/xfrm_state_bpf.c
+++ b/net/xfrm/xfrm_state_bpf.c
@@ -117,10 +117,10 @@ __bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_state_kfunc_set)
+BTF_KFUNCS_START(xfrm_state_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
-BTF_SET8_END(xfrm_state_kfunc_set)
+BTF_KFUNCS_END(xfrm_state_kfunc_set)
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 7fdeafc838a7..ca003e8a0376 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -38,7 +38,6 @@ static struct ctl_table xfrm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
- {}
};
int __net_init xfrm_sysctl_init(struct net *net)
@@ -57,10 +56,8 @@ int __net_init xfrm_sysctl_init(struct net *net)
table[3].data = &net->xfrm.sysctl_acq_expires;
/* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns) {
- table[0].procname = NULL;
+ if (net->user_ns != &init_user_ns)
table_size = 0;
- }
net->xfrm.sysctl_hdr = register_net_sysctl_sz(net, "net/core", table,
table_size);
@@ -76,7 +73,7 @@ out_kmemdup:
void __net_exit xfrm_sysctl_fini(struct net *net)
{
- struct ctl_table *table;
+ const struct ctl_table *table;
table = net->xfrm.sysctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->xfrm.sysctl_hdr);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 912c1189ba41..08c6d6f0179f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -33,7 +33,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
struct netlink_ext_ack *extack)
@@ -130,7 +130,7 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_a
}
static inline int verify_replay(struct xfrm_usersa_info *p,
- struct nlattr **attrs,
+ struct nlattr **attrs, u8 sa_dir,
struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
@@ -168,6 +168,30 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
return -EINVAL;
}
+ if (sa_dir == XFRM_SA_DIR_OUT) {
+ if (rs->replay_window) {
+ NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
+ return -EINVAL;
+ }
+ if (rs->seq || rs->seq_hi) {
+ NL_SET_ERR_MSG(extack,
+ "Replay seq and seq_hi should be 0 for output SA");
+ return -EINVAL;
+ }
+ if (rs->bmp_len) {
+ NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA");
+ return -EINVAL;
+ }
+ }
+
+ if (sa_dir == XFRM_SA_DIR_IN) {
+ if (rs->oseq || rs->oseq_hi) {
+ NL_SET_ERR_MSG(extack,
+ "Replay oseq and oseq_hi should be 0 for input SA");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -176,6 +200,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
struct netlink_ext_ack *extack)
{
int err;
+ u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0);
+ u16 family = p->sel.family;
err = -EINVAL;
switch (p->family) {
@@ -196,7 +222,10 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
goto out;
}
- switch (p->sel.family) {
+ if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC))
+ family = p->family;
+
+ switch (family) {
case AF_UNSPEC:
break;
@@ -272,6 +301,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
goto out;
}
+ if ((attrs[XFRMA_IPTFS_DROP_TIME] ||
+ attrs[XFRMA_IPTFS_REORDER_WINDOW] ||
+ attrs[XFRMA_IPTFS_DONT_FRAG] ||
+ attrs[XFRMA_IPTFS_INIT_DELAY] ||
+ attrs[XFRMA_IPTFS_MAX_QSIZE] ||
+ attrs[XFRMA_IPTFS_PKT_SIZE]) &&
+ p->mode != XFRM_MODE_IPTFS) {
+ NL_SET_ERR_MSG(extack, "IP-TFS options can only be used in IP-TFS mode");
+ goto out;
+ }
break;
case IPPROTO_COMP:
@@ -334,7 +373,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
goto out;
if ((err = verify_sec_ctx_len(attrs, extack)))
goto out;
- if ((err = verify_replay(p, attrs, extack)))
+ if ((err = verify_replay(p, attrs, sa_dir, extack)))
goto out;
err = -EINVAL;
@@ -344,6 +383,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_BEET:
break;
+ case XFRM_MODE_IPTFS:
+ if (p->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG(extack, "IP-TFS mode only supported with ESP");
+ goto out;
+ }
+ if (sa_dir == 0) {
+ NL_SET_ERR_MSG(extack, "IP-TFS mode requires in or out direction attribute");
+ goto out;
+ }
+ break;
default:
NL_SET_ERR_MSG(extack, "Unsupported mode");
@@ -358,6 +407,119 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
goto out;
}
+
+ if (sa_dir == XFRM_SA_DIR_OUT) {
+ NL_SET_ERR_MSG(extack,
+ "MTIMER_THRESH attribute should not be set on output SA");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (sa_dir == XFRM_SA_DIR_OUT) {
+ if (p->flags & XFRM_STATE_DECAP_DSCP) {
+ NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (p->flags & XFRM_STATE_ICMP) {
+ NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (p->flags & XFRM_STATE_WILDRECV) {
+ NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (p->replay_window) {
+ NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_IPTFS_DROP_TIME]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS drop time should not be set for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_IPTFS_REORDER_WINDOW]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS reorder window should not be set for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_REPLAY_VAL]) {
+ struct xfrm_replay_state *replay;
+
+ replay = nla_data(attrs[XFRMA_REPLAY_VAL]);
+
+ if (replay->seq || replay->bitmap) {
+ NL_SET_ERR_MSG(extack,
+ "Replay seq and bitmap should be 0 for output SA");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ if (sa_dir == XFRM_SA_DIR_IN) {
+ if (p->flags & XFRM_STATE_NOPMTUDISC) {
+ NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_SA_EXTRA_FLAGS]) {
+ u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
+
+ if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) {
+ NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) {
+ NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ }
+
+ if (attrs[XFRMA_IPTFS_DONT_FRAG]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS don't fragment should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_IPTFS_INIT_DELAY]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS initial delay should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_IPTFS_MAX_QSIZE]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS max queue size should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[XFRMA_IPTFS_PKT_SIZE]) {
+ NL_SET_ERR_MSG(extack, "IP-TFS packet size should not be set for input SA");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (!sa_dir && attrs[XFRMA_SA_PCPU]) {
+ NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR");
+ err = -EINVAL;
+ goto out;
}
out:
@@ -667,10 +829,8 @@ static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
{
if (attrs[XFRMA_SET_MARK]) {
m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
- if (attrs[XFRMA_SET_MARK_MASK])
- m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
- else
- m->m = 0xffffffff;
+ m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK],
+ 0xffffffff);
} else {
m->v = m->m = 0;
}
@@ -734,6 +894,19 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_IF_ID])
x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ if (attrs[XFRMA_SA_DIR])
+ x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);
+
+ if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL])
+ x->nat_keepalive_interval =
+ nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);
+
+ if (attrs[XFRMA_SA_PCPU]) {
+ x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
+ if (x->pcpu_num >= num_possible_cpus())
+ goto error;
+ }
+
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
if (err)
goto error;
@@ -769,6 +942,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
+ if (x->mode_cbs && x->mode_cbs->user_init) {
+ err = x->mode_cbs->user_init(net, x, attrs, extack);
+ if (err)
+ goto error;
+ }
+
return x;
error:
@@ -902,7 +1081,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
@@ -982,7 +1161,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
if (!nla)
return -EMSGSIZE;
algo = nla_data(nla);
- strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
+ strscpy_pad(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
if (redact_secret && auth->alg_key_len)
memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
@@ -995,7 +1174,9 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
if (!nla)
return -EMSGSIZE;
ap = nla_data(nla);
- memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
+ strscpy_pad(ap->alg_name, auth->alg_name, sizeof(ap->alg_name));
+ ap->alg_key_len = auth->alg_key_len;
+ ap->alg_trunc_len = auth->alg_trunc_len;
if (redact_secret && auth->alg_key_len)
memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
else
@@ -1182,8 +1363,29 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (ret)
goto out;
}
- if (x->mapping_maxage)
+ if (x->mode_cbs && x->mode_cbs->copy_to_user)
+ ret = x->mode_cbs->copy_to_user(x, skb);
+ if (ret)
+ goto out;
+ if (x->mapping_maxage) {
ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
+ if (ret)
+ goto out;
+ }
+ if (x->pcpu_num != UINT_MAX) {
+ ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
+ if (ret)
+ goto out;
+ }
+ if (x->dir)
+ ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+
+ if (x->nat_keepalive_interval) {
+ ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL,
+ x->nat_keepalive_interval);
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
@@ -1579,6 +1781,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 mark;
struct xfrm_mark m;
u32 if_id = 0;
+ u32 pcpu_num = UINT_MAX;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
@@ -1595,8 +1798,16 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ if (attrs[XFRMA_SA_PCPU]) {
+ pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
+ if (pcpu_num >= num_possible_cpus()) {
+ err = -EINVAL;
+ goto out_noput;
+ }
+ }
+
if (p->info.seq) {
- x = xfrm_find_acq_byseq(net, mark, p->info.seq);
+ x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
@@ -1605,7 +1816,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
- if_id, p->info.id.proto, daddr,
+ if_id, pcpu_num, p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
@@ -1618,6 +1829,9 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
goto out;
+ if (attrs[XFRMA_SA_DIR])
+ x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);
+
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
@@ -1810,6 +2024,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
return -EINVAL;
}
break;
+ case XFRM_MODE_IPTFS:
+ break;
default:
if (ut[i].family != prev_family) {
NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
@@ -2348,7 +2564,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
NETLINK_CB(skb).portid);
}
} else {
- xfrm_dev_policy_delete(xp);
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0)
@@ -2402,7 +2617,9 @@ static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
+ nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
- + nla_total_size(4); /* XFRM_AE_ETHR */
+ + nla_total_size(4) /* XFRM_AE_ETHR */
+ + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */
+ + nla_total_size(4); /* XFRMA_SA_PCPU */
}
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
@@ -2458,6 +2675,17 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
err = xfrm_if_id_put(skb, x->if_id);
if (err)
goto out_cancel;
+ if (x->pcpu_num != UINT_MAX) {
+ err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
+ if (err)
+ goto out_cancel;
+ }
+
+ if (x->dir) {
+ err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+ if (err)
+ goto out_cancel;
+ }
nlmsg_end(skb, nlh);
return 0;
@@ -2722,6 +2950,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_mark_get(attrs, &mark);
+ if (attrs[XFRMA_SA_PCPU]) {
+ x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
+ err = -EINVAL;
+ if (x->pcpu_num >= num_possible_cpus())
+ goto free_state;
+ }
+
err = verify_newpolicy_info(&ua->policy, extack);
if (err)
goto free_state;
@@ -3018,6 +3253,7 @@ EXPORT_SYMBOL_GPL(xfrm_msg_min);
#undef XMSGSIZE
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_UNSPEC] = { .strict_start_type = XFRMA_SA_DIR },
[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
@@ -3049,6 +3285,15 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
+ [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
+ [XFRMA_SA_PCPU] = { .type = NLA_U32 },
+ [XFRMA_IPTFS_DROP_TIME] = { .type = NLA_U32 },
+ [XFRMA_IPTFS_REORDER_WINDOW] = { .type = NLA_U16 },
+ [XFRMA_IPTFS_DONT_FRAG] = { .type = NLA_FLAG },
+ [XFRMA_IPTFS_INIT_DELAY] = { .type = NLA_U32 },
+ [XFRMA_IPTFS_MAX_QSIZE] = { .type = NLA_U32 },
+ [XFRMA_IPTFS_PKT_SIZE] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
@@ -3097,6 +3342,38 @@ static const struct xfrm_link {
[XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default },
};
+static int xfrm_reject_unused_attr(int type, struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
+{
+ if (attrs[XFRMA_SA_DIR]) {
+ switch (type) {
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_UPDSA:
+ case XFRM_MSG_ALLOCSPI:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR");
+ return -EINVAL;
+ }
+ }
+
+ if (attrs[XFRMA_SA_PCPU]) {
+ switch (type) {
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_UPDSA:
+ case XFRM_MSG_ALLOCSPI:
+ case XFRM_MSG_ACQUIRE:
+
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid attribute SA_PCPU");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -3156,6 +3433,12 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto err;
+ if (!link->nla_pol || link->nla_pol == xfrma_policy) {
+ err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack);
+ if (err < 0)
+ goto err;
+ }
+
if (link->doit == NULL) {
err = -EINVAL;
goto err;
@@ -3189,8 +3472,10 @@ static void xfrm_netlink_rcv(struct sk_buff *skb)
static inline unsigned int xfrm_expire_msgsize(void)
{
- return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
- + nla_total_size(sizeof(struct xfrm_mark));
+ return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
+ nla_total_size(sizeof(struct xfrm_mark)) +
+ nla_total_size(sizeof_field(struct xfrm_state, dir)) +
+ nla_total_size(4); /* XFRMA_SA_PCPU */
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
@@ -3216,6 +3501,17 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
err = xfrm_if_id_put(skb, x->if_id);
if (err)
return err;
+ if (x->pcpu_num != UINT_MAX) {
+ err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
+ if (err)
+ return err;
+ }
+
+ if (x->dir) {
+ err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+ if (err)
+ return err;
+ }
nlmsg_end(skb, nlh);
return 0;
@@ -3317,6 +3613,8 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
}
if (x->if_id)
l += nla_total_size(sizeof(x->if_id));
+ if (x->pcpu_num)
+ l += nla_total_size(sizeof(x->pcpu_num));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
@@ -3324,6 +3622,15 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
if (x->mapping_maxage)
l += nla_total_size(sizeof(x->mapping_maxage));
+ if (x->dir)
+ l += nla_total_size(sizeof(x->dir));
+
+ if (x->nat_keepalive_interval)
+ l += nla_total_size(sizeof(x->nat_keepalive_interval));
+
+ if (x->mode_cbs && x->mode_cbs->sa_len)
+ l += x->mode_cbs->sa_len(x);
+
return l;
}
@@ -3417,6 +3724,7 @@ static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(xfrm_user_sec_ctx_size(x->security))
+ + nla_total_size(4) /* XFRMA_SA_PCPU */
+ userpolicy_type_attrsize();
}
@@ -3453,6 +3761,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
+ if (!err && x->pcpu_num != UINT_MAX)
+ err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
if (err) {
nlmsg_cancel(skb, nlh);
return err;