author    Kalle Valo <kvalo@kernel.org>  2023-01-17 13:36:25 +0200
committer Kalle Valo <kvalo@kernel.org>  2023-01-17 13:36:25 +0200
commit    d0e99511834b6828c960e978d9a8cb6e5731250d (patch)
tree      e7b062c1f9d28a55083477a1462286a7923a57fd /net
parent    de7d0ff301fccc75281d7d8eb98c4a47faacf32d (diff)
parent    80f8a66dede0a4b4e9e846765a97809c6fe49ce5 (diff)
Merge wireless into wireless-next
Due to the two cherry picked commits from wireless to wireless-next we have several conflicts in mt76. To avoid any bugs with conflicts merge wireless into wireless-next.

96f134dc1964 wifi: mt76: handle possible mt76_rx_token_consume failures
fe13dad8992b wifi: mt76: dma: do not increment queue head if mt76_dma_add_buf fails
Diffstat (limited to 'net')
-rw-r--r-- net/802/garp.c | 4
-rw-r--r-- net/802/mrp.c | 6
-rw-r--r-- net/9p/client.c | 35
-rw-r--r-- net/9p/trans_fd.c | 14
-rw-r--r-- net/9p/trans_rdma.c | 5
-rw-r--r-- net/9p/trans_virtio.c | 10
-rw-r--r-- net/9p/trans_xen.c | 8
-rw-r--r-- net/atm/atm_sysfs.c | 4
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 4
-rw-r--r-- net/batman-adv/bat_v_elp.c | 2
-rw-r--r-- net/batman-adv/bat_v_ogm.c | 4
-rw-r--r-- net/batman-adv/network-coding.c | 2
-rw-r--r-- net/bluetooth/6lowpan.c | 2
-rw-r--r-- net/bluetooth/a2mp.c | 2
-rw-r--r-- net/bluetooth/mgmt.c | 5
-rw-r--r-- net/bluetooth/smp.c | 2
-rw-r--r-- net/bpf/bpf_dummy_struct_ops.c | 3
-rw-r--r-- net/bridge/br_if.c | 2
-rw-r--r-- net/bridge/br_multicast.c | 8
-rw-r--r-- net/bridge/br_multicast_eht.c | 4
-rw-r--r-- net/caif/cfctrl.c | 6
-rw-r--r-- net/can/j1939/socket.c | 2
-rw-r--r-- net/can/j1939/transport.c | 2
-rw-r--r-- net/ceph/messenger.c | 1
-rw-r--r-- net/ceph/messenger_v1.c | 4
-rw-r--r-- net/ceph/messenger_v2.c | 14
-rw-r--r-- net/ceph/mon_client.c | 2
-rw-r--r-- net/ceph/osd_client.c | 2
-rw-r--r-- net/compat.c | 3
-rw-r--r-- net/core/dev_addr_lists_test.c | 4
-rw-r--r-- net/core/devlink.c | 5
-rw-r--r-- net/core/dst.c | 2
-rw-r--r-- net/core/filter.c | 7
-rw-r--r-- net/core/gen_estimator.c | 2
-rw-r--r-- net/core/gro.c | 5
-rw-r--r-- net/core/neighbour.c | 4
-rw-r--r-- net/core/net-sysfs.c | 20
-rw-r--r-- net/core/pktgen.c | 37
-rw-r--r-- net/core/skbuff.c | 3
-rw-r--r-- net/core/sock.c | 4
-rw-r--r-- net/core/stream.c | 8
-rw-r--r-- net/dns_resolver/dns_key.c | 2
-rw-r--r-- net/dsa/tag_8021q.c | 11
-rw-r--r-- net/ethtool/ioctl.c | 107
-rw-r--r-- net/ipv4/af_inet.c | 1
-rw-r--r-- net/ipv4/devinet.c | 19
-rw-r--r-- net/ipv4/icmp.c | 2
-rw-r--r-- net/ipv4/igmp.c | 6
-rw-r--r-- net/ipv4/inet_connection_sock.c | 42
-rw-r--r-- net/ipv4/inet_hashtables.c | 10
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 31
-rw-r--r-- net/ipv4/ipmr.c | 2
-rw-r--r-- net/ipv4/route.c | 4
-rw-r--r-- net/ipv4/syncookies.c | 7
-rw-r--r-- net/ipv4/tcp.c | 4
-rw-r--r-- net/ipv4/tcp_bbr.c | 2
-rw-r--r-- net/ipv4/tcp_input.c | 3
-rw-r--r-- net/ipv4/tcp_plb.c | 2
-rw-r--r-- net/ipv4/tcp_ulp.c | 4
-rw-r--r-- net/ipv4/udp_tunnel_core.c | 1
-rw-r--r-- net/ipv6/addrconf.c | 8
-rw-r--r-- net/ipv6/datagram.c | 15
-rw-r--r-- net/ipv6/ip6mr.c | 2
-rw-r--r-- net/ipv6/mcast.c | 10
-rw-r--r-- net/ipv6/output_core.c | 8
-rw-r--r-- net/ipv6/route.c | 2
-rw-r--r-- net/mac80211/agg-tx.c | 8
-rw-r--r-- net/mac80211/cfg.c | 7
-rw-r--r-- net/mac80211/debugfs_sta.c | 5
-rw-r--r-- net/mac80211/driver-ops.c | 3
-rw-r--r-- net/mac80211/driver-ops.h | 2
-rw-r--r-- net/mac80211/ht.c | 31
-rw-r--r-- net/mac80211/ieee80211_i.h | 2
-rw-r--r-- net/mac80211/iface.c | 5
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 2
-rw-r--r-- net/mac80211/rx.c | 225
-rw-r--r-- net/mac80211/tx.c | 34
-rw-r--r-- net/mac80211/util.c | 42
-rw-r--r-- net/mctp/device.c | 14
-rw-r--r-- net/mptcp/pm_userspace.c | 4
-rw-r--r-- net/mptcp/protocol.c | 20
-rw-r--r-- net/mptcp/protocol.h | 4
-rw-r--r-- net/mptcp/subflow.c | 80
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ip.c | 14
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipmark.c | 13
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipport.c | 13
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportip.c | 13
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportnet.c | 13
-rw-r--r-- net/netfilter/ipset/ip_set_hash_net.c | 17
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netiface.c | 15
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netnet.c | 23
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netport.c | 19
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netportnet.c | 40
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_est.c | 3
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_twos.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_proto.c | 7
-rw-r--r-- net/netfilter/nf_flow_table_offload.c | 6
-rw-r--r-- net/netfilter/nf_nat_helper.c | 2
-rw-r--r-- net/netfilter/nf_tables_api.c | 261
-rw-r--r-- net/netfilter/xt_IDLETIMER.c | 4
-rw-r--r-- net/netfilter/xt_LED.c | 2
-rw-r--r-- net/netlink/af_netlink.c | 2
-rw-r--r-- net/nfc/netlink.c | 52
-rw-r--r-- net/openvswitch/datapath.c | 33
-rw-r--r-- net/packet/af_packet.c | 4
-rw-r--r-- net/rfkill/core.c | 2
-rw-r--r-- net/rxrpc/Makefile | 1
-rw-r--r-- net/rxrpc/af_rxrpc.c | 27
-rw-r--r-- net/rxrpc/ar-internal.h | 214
-rw-r--r-- net/rxrpc/call_accept.c | 41
-rw-r--r-- net/rxrpc/call_event.c | 86
-rw-r--r-- net/rxrpc/call_object.c | 117
-rw-r--r-- net/rxrpc/call_state.c | 69
-rw-r--r-- net/rxrpc/conn_client.c | 711
-rw-r--r-- net/rxrpc/conn_event.c | 382
-rw-r--r-- net/rxrpc/conn_object.c | 67
-rw-r--r-- net/rxrpc/conn_service.c | 1
-rw-r--r-- net/rxrpc/input.c | 175
-rw-r--r-- net/rxrpc/insecure.c | 20
-rw-r--r-- net/rxrpc/io_thread.c | 208
-rw-r--r-- net/rxrpc/local_object.c | 40
-rw-r--r-- net/rxrpc/net_ns.c | 17
-rw-r--r-- net/rxrpc/output.c | 60
-rw-r--r-- net/rxrpc/peer_event.c | 10
-rw-r--r-- net/rxrpc/peer_object.c | 46
-rw-r--r-- net/rxrpc/proc.c | 17
-rw-r--r-- net/rxrpc/recvmsg.c | 270
-rw-r--r-- net/rxrpc/rxkad.c | 356
-rw-r--r-- net/rxrpc/rxperf.c | 19
-rw-r--r-- net/rxrpc/security.c | 59
-rw-r--r-- net/rxrpc/sendmsg.c | 197
-rw-r--r-- net/sched/act_gact.c | 2
-rw-r--r-- net/sched/act_sample.c | 2
-rw-r--r-- net/sched/cls_flow.c | 2
-rw-r--r-- net/sched/cls_tcindex.c | 12
-rw-r--r-- net/sched/ematch.c | 2
-rw-r--r-- net/sched/sch_atm.c | 5
-rw-r--r-- net/sched/sch_cbq.c | 4
-rw-r--r-- net/sched/sch_choke.c | 2
-rw-r--r-- net/sched/sch_htb.c | 8
-rw-r--r-- net/sched/sch_netem.c | 4
-rw-r--r-- net/sctp/socket.c | 2
-rw-r--r-- net/sctp/transport.c | 2
-rw-r--r-- net/smc/smc_clc.c | 6
-rw-r--r-- net/smc/smc_tx.c | 2
-rw-r--r-- net/socket.c | 12
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 64
-rw-r--r-- net/sunrpc/cache.c | 2
-rw-r--r-- net/sunrpc/clnt.c | 2
-rw-r--r-- net/sunrpc/socklib.c | 6
-rw-r--r-- net/sunrpc/svc.c | 6
-rw-r--r-- net/sunrpc/svcsock.c | 4
-rw-r--r-- net/sunrpc/sysfs.c | 8
-rw-r--r-- net/sunrpc/xdr.c | 22
-rw-r--r-- net/sunrpc/xprt.c | 2
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r-- net/sunrpc/xprtsock.c | 11
-rw-r--r-- net/tipc/discover.c | 2
-rw-r--r-- net/tipc/monitor.c | 2
-rw-r--r-- net/tipc/node.c | 12
-rw-r--r-- net/tipc/socket.c | 2
-rw-r--r-- net/tipc/topsrv.c | 2
-rw-r--r-- net/tls/tls_device.c | 4
-rw-r--r-- net/unix/af_unix.c | 12
-rw-r--r-- net/vmw_vsock/af_vsock.c | 3
-rw-r--r-- net/vmw_vsock/vmci_transport.c | 6
-rw-r--r-- net/wireless/sysfs.c | 2
-rw-r--r-- net/xfrm/espintcp.c | 3
-rw-r--r-- net/xfrm/xfrm_state.c | 2
176 files changed, 2585 insertions, 2515 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index fc9eb02a912f..ab24b21fbb49 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -407,7 +407,7 @@ static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
- delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
+ delay = get_random_u32_below(msecs_to_jiffies(garp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
@@ -618,7 +618,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
- del_timer_sync(&app->join_timer);
+ timer_shutdown_sync(&app->join_timer);
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
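
The prandom_u32_max() to get_random_u32_below() substitutions seen here recur across the whole merge and are a pure rename: both helpers return a uniformly distributed u32 in [0, ceil). A minimal kernel-style sketch of the jitter pattern used by garp_join_timer_arm(), with an illustrative function name:

#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/timer.h>

/* Re-arm a timer with a random delay in [0, join_time_ms) milliseconds,
 * mirroring garp_join_timer_arm() above.
 */
static void example_join_timer_arm(struct timer_list *t,
                                   unsigned int join_time_ms)
{
        unsigned long delay = get_random_u32_below(msecs_to_jiffies(join_time_ms));

        mod_timer(t, jiffies + delay);
}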
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 6c927d4b35f0..eafc21ecc287 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -592,7 +592,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
- delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
+ delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
@@ -911,8 +911,8 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
/* Delete timer and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
- del_timer_sync(&app->join_timer);
- del_timer_sync(&app->periodic_timer);
+ timer_shutdown_sync(&app->join_timer);
+ timer_shutdown_sync(&app->periodic_timer);
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
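
The del_timer_sync() to timer_shutdown_sync() conversions, by contrast, change behavior: once timer_shutdown_sync() returns, the timer is deactivated and can never be re-armed, so teardown paths such as this one no longer race with a stale mod_timer(). A hedged sketch of the pattern, with a hypothetical container type:

#include <linux/slab.h>
#include <linux/timer.h>

struct example_applicant {
        struct timer_list join_timer;
};

static void example_uninit(struct example_applicant *app)
{
        /* After this returns, any later mod_timer()/add_timer() on
         * join_timer is a no-op, so freeing the container is safe.
         */
        timer_shutdown_sync(&app->join_timer);
        kfree(app);
}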
diff --git a/net/9p/client.c b/net/9p/client.c
index aaa37b07e30a..622ec6a586ee 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -297,6 +297,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
p9pdu_reset(&req->rc);
req->t_err = 0;
req->status = REQ_STATUS_ALLOC;
+ /* refcount needs to be set to 0 before inserting into the idr
+ * so p9_tag_lookup does not accept a request that is not fully
+ * initialized. refcount_set to 2 below will mark request ready.
+ */
+ refcount_set(&req->refcount, 0);
init_waitqueue_head(&req->wq);
INIT_LIST_HEAD(&req->req_list);
@@ -438,7 +443,7 @@ void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
* the status change is visible to another thread
*/
smp_wmb();
- req->status = status;
+ WRITE_ONCE(req->status, status);
wake_up(&req->wq);
p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
@@ -514,10 +519,9 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
int ecode;
err = p9_parse_header(&req->rc, NULL, &type, NULL, 0);
- if (req->rc.size >= c->msize) {
- p9_debug(P9_DEBUG_ERROR,
- "requested packet size too big: %d\n",
- req->rc.size);
+ if (req->rc.size > req->rc.capacity && !req->rc.zc) {
+ pr_err("requested packet size too big: %d does not fit %zu (type=%d)\n",
+ req->rc.size, req->rc.capacity, req->rc.id);
return -EIO;
}
/* dump the response from server
@@ -600,7 +604,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
/* if we haven't received a response for oldreq,
* remove it from the list
*/
- if (oldreq->status == REQ_STATUS_SENT) {
+ if (READ_ONCE(oldreq->status) == REQ_STATUS_SENT) {
if (c->trans_mod->cancelled)
c->trans_mod->cancelled(c, oldreq);
}
@@ -680,6 +684,9 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if (IS_ERR(req))
return req;
+ req->tc.zc = false;
+ req->rc.zc = false;
+
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -697,7 +704,8 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
}
again:
/* Wait for the response */
- err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(req->wq,
+ READ_ONCE(req->status) >= REQ_STATUS_RCVD);
/* Make sure our req is coherent with regard to updates in other
* threads - echoes to wmb() in the callback
@@ -711,7 +719,7 @@ again:
goto again;
}
- if (req->status == REQ_STATUS_ERROR) {
+ if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
@@ -724,7 +732,7 @@ again:
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
- if (req->status == REQ_STATUS_RCVD)
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
@@ -778,6 +786,9 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
if (IS_ERR(req))
return req;
+ req->tc.zc = true;
+ req->rc.zc = true;
+
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -793,7 +804,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
if (err != -ERESTARTSYS)
goto recalc_sigpending;
}
- if (req->status == REQ_STATUS_ERROR) {
+ if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
@@ -806,7 +817,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
- if (req->status == REQ_STATUS_RCVD)
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
@@ -2043,7 +2054,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
struct kvec kv = {.iov_base = data, .iov_len = count};
struct iov_iter to;
- iov_iter_kvec(&to, READ, &kv, 1, count);
+ iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, offset, count);
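
The 9p changes above wrap every lockless access to req->status in READ_ONCE()/WRITE_ONCE(): the field is written by the transport callback and polled by the RPC waiter without a shared lock, and the annotations stop the compiler from tearing or caching those accesses while documenting the intentional data race for KCSAN. The pattern, reduced to a sketch with simplified types and hypothetical names:

#include <linux/compiler.h>
#include <linux/wait.h>

struct example_req {
        int status;                     /* REQ_STATUS_* state machine */
        wait_queue_head_t wq;
};

/* Writer (transport callback): publish the payload before the status
 * change, then wake the waiter.
 */
static void example_complete(struct example_req *req, int status)
{
        smp_wmb();                      /* payload before status */
        WRITE_ONCE(req->status, status);
        wake_up(&req->wq);
}

/* Reader (RPC waiter): lockless poll, hence READ_ONCE(). */
static int example_wait(struct example_req *req, int rcvd)
{
        return wait_event_killable(req->wq,
                                   READ_ONCE(req->status) >= rcvd);
}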
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 07db2f436d44..00b684616e8d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -20,7 +20,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
@@ -202,11 +201,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
spin_unlock(&m->req_lock);
@@ -467,7 +466,7 @@ static void p9_write_work(struct work_struct *work)
req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req_list);
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
list_move_tail(&req->req_list, &m->req_list);
@@ -676,7 +675,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
return m->err;
spin_lock(&m->req_lock);
- req->status = REQ_STATUS_UNSENT;
+ WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
@@ -703,7 +702,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
if (req->status == REQ_STATUS_UNSENT) {
list_del(&req->req_list);
- req->status = REQ_STATUS_FLSHD;
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
p9_req_put(client, req);
ret = 0;
}
@@ -732,7 +731,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
* remove it from the list.
*/
list_del(&req->req_list);
- req->status = REQ_STATUS_FLSHD;
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
spin_unlock(&m->req_lock);
p9_req_put(client, req);
@@ -868,6 +867,7 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
}
csocket->sk->sk_allocation = GFP_NOIO;
+ csocket->sk->sk_use_task_frag = false;
file = sock_alloc_file(csocket, 0, NULL);
if (IS_ERR(file)) {
pr_err("%s (%d): failed to map fd\n",
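
The sk_use_task_frag flag set above (and in ceph and af_inet below) tells sk_page_frag() not to borrow current->task_frag for this socket: kernel-created sockets like these can transmit from arbitrary task contexts, so they must fall back to the per-socket sk_frag instead. A minimal sketch, with a hypothetical helper name:

#include <net/sock.h>

static void example_mark_kernel_socket(struct sock *sk)
{
        /* This socket may be written from io workers or kernel
         * threads, so it must not use current->task_frag.
         */
        sk->sk_allocation = GFP_NOIO;
        sk->sk_use_task_frag = false;
}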
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 6ff706760676..83f9100d46bf 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -21,7 +21,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
@@ -507,7 +506,7 @@ dont_need_post_recv:
* because doing if after could erase the REQ_STATUS_RCVD
* status in case of a very fast reply.
*/
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
err = ib_post_send(rdma->qp, &wr, NULL);
if (err)
goto send_error;
@@ -517,7 +516,7 @@ dont_need_post_recv:
/* Handle errors that happened during or while preparing the send: */
send_error:
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
kfree(c);
p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e757f0601304..3c27ffb781e3 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -22,7 +22,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
@@ -263,7 +262,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry:
spin_lock_irqsave(&chan->lock, flags);
@@ -469,7 +468,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
inlen = n;
}
}
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry_pinned:
spin_lock_irqsave(&chan->lock, flags);
@@ -532,9 +531,10 @@ req_retry_pinned:
spin_unlock_irqrestore(&chan->lock, flags);
kicked = 1;
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
- err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(req->wq,
+ READ_ONCE(req->status) >= REQ_STATUS_RCVD);
// RERROR needs reply (== error string) in static data
- if (req->status == REQ_STATUS_RCVD &&
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD &&
unlikely(req->rc.sdata[4] == P9_RERROR))
handle_rerror(req, in_hdr_len, offs, in_pages);
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index aaa5fd364691..9630b1275557 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -157,7 +157,7 @@ again:
&masked_prod, masked_cons,
XEN_9PFS_RING_SIZE(ring));
- p9_req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
virt_wmb(); /* write ring before updating pointer */
prod += size;
ring->intf->out_prod = prod;
@@ -212,11 +212,13 @@ static void p9_xen_response(struct work_struct *work)
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
goto recv_error;
}
- memcpy(&req->rc, &h, sizeof(h));
+ req->rc.size = h.size;
+ req->rc.id = h.id;
+ req->rc.tag = h.tag;
req->rc.offset = 0;
masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 0fdbdfd19474..466353b3dde4 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -108,9 +108,9 @@ static struct device_attribute *atm_attrs[] = {
};
-static int atm_uevent(struct device *cdev, struct kobj_uevent_env *env)
+static int atm_uevent(const struct device *cdev, struct kobj_uevent_env *env)
{
- struct atm_dev *adev;
+ const struct atm_dev *adev;
if (!cdev)
return -ENODEV;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7f6a7c96ac92..114ee5da261f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -280,7 +280,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
unsigned int msecs;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
return jiffies + msecs_to_jiffies(msecs);
}
@@ -288,7 +288,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
/* when do we schedule a ogm packet to be sent */
static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
- return jiffies + msecs_to_jiffies(prandom_u32_max(BATADV_JITTER / 2));
+ return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index f1741fbfb617..f9a58fb5442e 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -51,7 +51,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
unsigned int msecs;
msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
msecs_to_jiffies(msecs));
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 033639df96d8..addfd8c4fe95 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -90,7 +90,7 @@ static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface)
unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;
/* msecs * [0.9, 1.1] */
- msecs += prandom_u32_max(msecs / 5) - (msecs / 10);
+ msecs += get_random_u32_below(msecs / 5) - (msecs / 10);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
msecs_to_jiffies(msecs / 1000));
}
@@ -109,7 +109,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
return;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
msecs_to_jiffies(msecs));
}
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 5f4aeeb60dc4..bf29fba4dde5 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1009,7 +1009,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
static u8 batadv_nc_random_weight_tq(u8 tq)
{
/* randomize the estimated packet loss (max TQ - estimated TQ) */
- u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
+ u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
/* convert to (randomized) estimated tq again */
return BATADV_TQ_MAX_VALUE - rand_tq;
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index c57d643afb10..4eb1b3ced0d2 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -441,7 +441,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
iv.iov_len = skb->len;
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len);
err = l2cap_chan_send(chan, &msg, skb->len);
if (err > 0) {
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 1fcc482397c3..e7adb8a98cf9 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -56,7 +56,7 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *dat
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
l2cap_chan_send(chan, &msg, total_len);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 0dd30a3beb77..d2ea8e19aa1b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7373,9 +7373,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
/* To avoid client trying to guess when to poll again for information we
* calculate conn info age as random value between min/max set in hdev.
*/
- conn_info_age = hdev->conn_info_min_age +
- prandom_u32_max(hdev->conn_info_max_age -
- hdev->conn_info_min_age);
+ conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
+ hdev->conn_info_max_age - 1);
/* Query controller to refresh cached values if they are too old or were
* never read.
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 11f853d0500f..70663229b3cc 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -605,7 +605,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len);
l2cap_chan_send(chan, &msg, 1 + len);
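
The READ/WRITE to ITER_DEST/ITER_SOURCE substitutions across the 9p, bluetooth, ceph, and compat hunks are another tree-wide rename: iov_iter directions now get dedicated constants instead of reusing the block-layer READ/WRITE values. ITER_DEST means data is copied into the iterator's buffers (receive paths); ITER_SOURCE means data is copied out of them (send paths). A small sketch of both directions:

#include <linux/uio.h>

static void example_iter_directions(void *buf, size_t len)
{
        struct kvec iv = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        /* Sending from buf: the iterator is a data source. */
        iov_iter_kvec(&iter, ITER_SOURCE, &iv, 1, len);

        /* Receiving into buf: the iterator is a data destination. */
        iov_iter_kvec(&iter, ITER_DEST, &iv, 1, len);
}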
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 2d434c1f4617..1ac4467928a9 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -124,8 +124,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
if (err < 0)
goto out;
- set_memory_ro((long)image, 1);
- set_memory_x((long)image, 1);
+ set_memory_rox((long)image, 1);
prog_ret = dummy_ops_call_op(image, args);
err = dummy_ops_copy_args(args);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 228fd5b20f10..ad13b48e3e08 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -262,7 +262,7 @@ static void release_nbp(struct kobject *kobj)
kfree(p);
}
-static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
struct net_bridge_port *p = kobj_to_brport(kobj);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 48170bd3785e..dea1ee1bd095 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -606,7 +606,7 @@ static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
WARN_ON(!hlist_unhashed(&mp->mdb_node));
WARN_ON(mp->ports);
- del_timer_sync(&mp->timer);
+ timer_shutdown_sync(&mp->timer);
kfree_rcu(mp, rcu);
}
@@ -647,7 +647,7 @@ static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
src = container_of(gc, struct net_bridge_group_src, mcast_gc);
WARN_ON(!hlist_unhashed(&src->node));
- del_timer_sync(&src->timer);
+ timer_shutdown_sync(&src->timer);
kfree_rcu(src, rcu);
}
@@ -676,8 +676,8 @@ static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
WARN_ON(!hlist_unhashed(&pg->mglist));
WARN_ON(!hlist_empty(&pg->src_list));
- del_timer_sync(&pg->rexmit_timer);
- del_timer_sync(&pg->timer);
+ timer_shutdown_sync(&pg->rexmit_timer);
+ timer_shutdown_sync(&pg->timer);
kfree_rcu(pg, rcu);
}
diff --git a/net/bridge/br_multicast_eht.c b/net/bridge/br_multicast_eht.c
index f91c071d1608..c126aa4e7551 100644
--- a/net/bridge/br_multicast_eht.c
+++ b/net/bridge/br_multicast_eht.c
@@ -142,7 +142,7 @@ static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
- del_timer_sync(&set_h->timer);
+ timer_shutdown_sync(&set_h->timer);
kfree(set_h);
}
@@ -154,7 +154,7 @@ static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
- del_timer_sync(&eht_set->timer);
+ timer_shutdown_sync(&eht_set->timer);
kfree(eht_set);
}
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index cc405d8c7c30..8480684f2762 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
default:
pr_warn("Request setup of bad link type = %d\n",
param->linktype);
+ cfpkt_destroy(pkt);
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
+ if (!req) {
+ cfpkt_destroy(pkt);
return -ENOMEM;
+ }
+
req->client_layer = user_layer;
req->cmd = CFCTRL_CMD_LINK_SETUP;
req->param = *param;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index b670ba03a675..7e90f9e61d9b 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -189,7 +189,7 @@ activate_next:
int time_ms = 0;
if (err)
- time_ms = 10 + prandom_u32_max(16);
+ time_ms = 10 + get_random_u32_below(16);
j1939_tp_schedule_txtimer(first, time_ms);
}
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index f26f4cfa9e63..5c722b55fe23 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1168,7 +1168,7 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
session->tx_retry++;
j1939_tp_schedule_txtimer(session,
- 10 + prandom_u32_max(16));
+ 10 + get_random_u32_below(16));
} else {
netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
__func__, session);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dfa237fbd5a3..1d06e114ba3f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -446,6 +446,7 @@ int ceph_tcp_connect(struct ceph_connection *con)
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
+ sock->sk->sk_use_task_frag = false;
#ifdef CONFIG_LOCKDEP
lockdep_set_class(&sock->sk->sk_lock, &socket_class);
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 3ddbde87e4d6..d1787d7d33ef 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -30,7 +30,7 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
if (!buf)
msg.msg_flags |= MSG_TRUNC;
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
@@ -49,7 +49,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
int r;
BUG_ON(page_offset + length > PAGE_SIZE);
- iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
+ iov_iter_bvec(&msg.msg_iter, ITER_DEST, &bvec, 1, length);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index cc8ff81a50b7..3009028c4fa2 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -168,7 +168,7 @@ static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
bv.bv_offset, bv.bv_len,
CEPH_MSG_FLAGS);
} else {
- iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
ret = sock_sendmsg(sock, &msg);
}
if (ret <= 0) {
@@ -225,7 +225,7 @@ static void reset_in_kvecs(struct ceph_connection *con)
WARN_ON(iov_iter_count(&con->v2.in_iter));
con->v2.in_kvec_cnt = 0;
- iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
+ iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
}
static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
@@ -233,7 +233,7 @@ static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
WARN_ON(iov_iter_count(&con->v2.in_iter));
con->v2.in_bvec = *bv;
- iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
+ iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
}
static void set_in_skip(struct ceph_connection *con, int len)
@@ -241,7 +241,7 @@ static void set_in_skip(struct ceph_connection *con, int len)
WARN_ON(iov_iter_count(&con->v2.in_iter));
dout("%s con %p len %d\n", __func__, con, len);
- iov_iter_discard(&con->v2.in_iter, READ, len);
+ iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
}
static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
@@ -265,7 +265,7 @@ static void reset_out_kvecs(struct ceph_connection *con)
con->v2.out_kvec_cnt = 0;
- iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
+ iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
con->v2.out_iter_sendpage = false;
}
@@ -277,7 +277,7 @@ static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
con->v2.out_bvec = *bv;
con->v2.out_iter_sendpage = zerocopy;
- iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
+ iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
con->v2.out_bvec.bv_len);
}
@@ -290,7 +290,7 @@ static void set_out_bvec_zero(struct ceph_connection *con)
con->v2.out_bvec.bv_offset = 0;
con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
con->v2.out_iter_sendpage = true;
- iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
+ iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
con->v2.out_bvec.bv_len);
}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index db60217f911b..faabad6603db 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -222,7 +222,7 @@ static void pick_new_mon(struct ceph_mon_client *monc)
max--;
}
- n = prandom_u32_max(max);
+ n = get_random_u32_below(max);
if (o >= 0 && n >= o)
n++;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 4e4f1e4bc265..11c04e7d928e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1479,7 +1479,7 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
static int pick_random_replica(const struct ceph_osds *acting)
{
- int i = prandom_u32_max(acting->size);
+ int i = get_random_u32_below(acting->size);
dout("%s picked osd%d, primary osd%d\n", __func__,
acting->osds[i], acting->primary);
diff --git a/net/compat.c b/net/compat.c
index 385f04a6be2f..161b7bea1f62 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -95,7 +95,8 @@ int get_compat_msghdr(struct msghdr *kmsg,
if (err)
return err;
- err = import_iovec(save_addr ? READ : WRITE, compat_ptr(msg.msg_iov), msg.msg_iovlen,
+ err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
+ compat_ptr(msg.msg_iov), msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
return err < 0 ? err : 0;
}
diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c
index 049cfbc58aa9..90e7e3811ae7 100644
--- a/net/core/dev_addr_lists_test.c
+++ b/net/core/dev_addr_lists_test.c
@@ -71,11 +71,11 @@ static void dev_addr_test_basic(struct kunit *test)
memset(addr, 2, sizeof(addr));
eth_hw_addr_set(netdev, addr);
- KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+ KUNIT_EXPECT_MEMEQ(test, netdev->dev_addr, addr, sizeof(addr));
memset(addr, 3, sizeof(addr));
dev_addr_set(netdev, addr);
- KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+ KUNIT_EXPECT_MEMEQ(test, netdev->dev_addr, addr, sizeof(addr));
}
static void dev_addr_test_sync_one(struct kunit *test)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 6004bd0ccee4..032d6d0a5ce6 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1648,10 +1648,13 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
continue;
}
+ devl_lock(devlink);
err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ devl_unlock(devlink);
devlink_put(devlink);
+
if (err)
goto out;
idx++;
@@ -11925,8 +11928,10 @@ void devl_region_destroy(struct devlink_region *region)
devl_assert_locked(devlink);
/* Free all snapshots of region */
+ mutex_lock(&region->snapshot_lock);
list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
list_del(&region->list);
mutex_destroy(&region->snapshot_lock);
diff --git a/net/core/dst.c b/net/core/dst.c
index bb14a0392388..6d2dd03dafa8 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -174,7 +174,7 @@ void dst_release(struct dst_entry *dst)
net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
__func__, dst, newrefcnt);
if (!newrefcnt)
- call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
}
}
EXPORT_SYMBOL(dst_release);
diff --git a/net/core/filter.c b/net/core/filter.c
index 929358677183..43cc1fe58a2c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3180,15 +3180,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
+ void *old_data;
+
/* skb_ensure_writable() is not needed here, as we're
* already working on an uncloned skb.
*/
if (unlikely(!pskb_may_pull(skb, off + len)))
return -ENOMEM;
- skb_postpull_rcsum(skb, skb->data + off, len);
- memmove(skb->data + len, skb->data, off);
+ old_data = skb->data;
__skb_pull(skb, len);
+ skb_postpull_rcsum(skb, old_data + off, len);
+ memmove(skb->data, old_data, off);
return 0;
}
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 4fcbdd71c59f..fae9c4694186 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -208,7 +208,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
if (est) {
- del_timer_sync(&est->timer);
+ timer_shutdown_sync(&est->timer);
kfree_rcu(est, rcu);
}
}
diff --git a/net/core/gro.c b/net/core/gro.c
index fd8c6a7e8d3e..506f83d715f8 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -505,8 +505,9 @@ found_ptype:
NAPI_GRO_CB(skb)->count = 1;
if (unlikely(skb_is_gso(skb))) {
NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
- /* Only support TCP at the moment. */
- if (!skb_is_gso_tcp(skb))
+ /* Only support TCP and non DODGY users. */
+ if (!skb_is_gso_tcp(skb) ||
+ (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
NAPI_GRO_CB(skb)->flush = 1;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 952a54763358..f00a79fc301b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -111,7 +111,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return base ? prandom_u32_max(base) + (base >> 1) : 0;
+ return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
@@ -1666,7 +1666,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
unsigned long sched_next = jiffies +
- prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
+ get_random_u32_below(NEIGH_VAR(p, PROXY_DELAY));
if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 679b84cc8794..ca55dd747d6c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1020,7 +1020,7 @@ static void rx_queue_release(struct kobject *kobj)
netdev_put(queue->dev, &queue->dev_tracker);
}
-static const void *rx_queue_namespace(struct kobject *kobj)
+static const void *rx_queue_namespace(const struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
struct device *dev = &queue->dev->dev;
@@ -1032,7 +1032,7 @@ static const void *rx_queue_namespace(struct kobject *kobj)
return ns;
}
-static void rx_queue_get_ownership(struct kobject *kobj,
+static void rx_queue_get_ownership(const struct kobject *kobj,
kuid_t *uid, kgid_t *gid)
{
const struct net *net = rx_queue_namespace(kobj);
@@ -1623,7 +1623,7 @@ static void netdev_queue_release(struct kobject *kobj)
netdev_put(queue->dev, &queue->dev_tracker);
}
-static const void *netdev_queue_namespace(struct kobject *kobj)
+static const void *netdev_queue_namespace(const struct kobject *kobj)
{
struct netdev_queue *queue = to_netdev_queue(kobj);
struct device *dev = &queue->dev->dev;
@@ -1635,7 +1635,7 @@ static const void *netdev_queue_namespace(struct kobject *kobj)
return ns;
}
-static void netdev_queue_get_ownership(struct kobject *kobj,
+static void netdev_queue_get_ownership(const struct kobject *kobj,
kuid_t *uid, kgid_t *gid)
{
const struct net *net = netdev_queue_namespace(kobj);
@@ -1873,9 +1873,9 @@ const struct kobj_ns_type_operations net_ns_type_operations = {
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
-static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
+static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
int retval;
/* pass interface to uevent. */
@@ -1910,16 +1910,16 @@ static void netdev_release(struct device *d)
netdev_freemem(dev);
}
-static const void *net_namespace(struct device *d)
+static const void *net_namespace(const struct device *d)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
return dev_net(dev);
}
-static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
+static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
const struct net *net = dev_net(dev);
net_ns_get_ownership(net, uid, gid);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c3763056c554..760238196db1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2324,7 +2324,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
pkt_dev->curfl = 0; /*reset */
}
} else {
- flow = prandom_u32_max(pkt_dev->cflows);
+ flow = get_random_u32_below(pkt_dev->cflows);
pkt_dev->curfl = flow;
if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2380,9 +2380,8 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
- t = prandom_u32_max(pkt_dev->queue_map_max -
- pkt_dev->queue_map_min + 1) +
- pkt_dev->queue_map_min;
+ t = get_random_u32_inclusive(pkt_dev->queue_map_min,
+ pkt_dev->queue_map_max);
} else {
t = pkt_dev->cur_queue_map + 1;
if (t > pkt_dev->queue_map_max)
@@ -2411,7 +2410,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACSRC_RND)
- mc = prandom_u32_max(pkt_dev->src_mac_count);
+ mc = get_random_u32_below(pkt_dev->src_mac_count);
else {
mc = pkt_dev->cur_src_mac_offset++;
if (pkt_dev->cur_src_mac_offset >=
@@ -2437,7 +2436,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACDST_RND)
- mc = prandom_u32_max(pkt_dev->dst_mac_count);
+ mc = get_random_u32_below(pkt_dev->dst_mac_count);
else {
mc = pkt_dev->cur_dst_mac_offset++;
@@ -2469,18 +2468,17 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
}
if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
- pkt_dev->vlan_id = prandom_u32_max(4096);
+ pkt_dev->vlan_id = get_random_u32_below(4096);
}
if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
- pkt_dev->svlan_id = prandom_u32_max(4096);
+ pkt_dev->svlan_id = get_random_u32_below(4096);
}
if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
if (pkt_dev->flags & F_UDPSRC_RND)
- pkt_dev->cur_udp_src = prandom_u32_max(
- pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
- pkt_dev->udp_src_min;
+ pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
+ pkt_dev->udp_src_max - 1);
else {
pkt_dev->cur_udp_src++;
@@ -2491,9 +2489,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
if (pkt_dev->flags & F_UDPDST_RND) {
- pkt_dev->cur_udp_dst = prandom_u32_max(
- pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
- pkt_dev->udp_dst_min;
+ pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
+ pkt_dev->udp_dst_max - 1);
} else {
pkt_dev->cur_udp_dst++;
if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2508,7 +2505,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (imn < imx) {
__u32 t;
if (pkt_dev->flags & F_IPSRC_RND)
- t = prandom_u32_max(imx - imn) + imn;
+ t = get_random_u32_inclusive(imn, imx - 1);
else {
t = ntohl(pkt_dev->cur_saddr);
t++;
@@ -2530,8 +2527,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->flags & F_IPDST_RND) {
do {
- t = prandom_u32_max(imx - imn) +
- imn;
+ t = get_random_u32_inclusive(imn, imx - 1);
s = htonl(t);
} while (ipv4_is_loopback(s) ||
ipv4_is_multicast(s) ||
@@ -2578,9 +2574,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
__u32 t;
if (pkt_dev->flags & F_TXSIZE_RND) {
- t = prandom_u32_max(pkt_dev->max_pkt_size -
- pkt_dev->min_pkt_size) +
- pkt_dev->min_pkt_size;
+ t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
+ pkt_dev->max_pkt_size - 1);
} else {
t = pkt_dev->cur_pkt_size + 1;
if (t > pkt_dev->max_pkt_size)
@@ -2589,7 +2584,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
pkt_dev->cur_pkt_size = t;
} else if (pkt_dev->n_imix_entries > 0) {
struct imix_pkt *entry;
- __u32 t = prandom_u32_max(IMIX_PRECISION);
+ __u32 t = get_random_u32_below(IMIX_PRECISION);
__u8 entry_index = pkt_dev->imix_distribution[t];
entry = &pkt_dev->imix_entries[entry_index];
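
The pktgen hunks also show the new inclusive-range helper: prandom_u32_max(hi - lo) + lo yielded a value in [lo, hi - 1], which is exactly get_random_u32_inclusive(lo, hi - 1); that is why a trailing "- 1" appears on several converted lines. As a sketch (illustrative function; the callers above guarantee lo < hi):

#include <linux/random.h>

static u32 example_pick_in_range(u32 lo, u32 hi)
{
        /* Old style (helper since removed):
         *         prandom_u32_max(hi - lo) + lo;  // uniform in [lo, hi - 1]
         * New style, with both bounds inclusive:
         */
        return get_random_u32_inclusive(lo, hi - 1);
}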
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3cbba7099c0f..4a0eb5593275 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2482,6 +2482,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
insp = list;
} else {
/* Eaten partially. */
+ if (skb_is_gso(skb) && !list->head_frag &&
+ skb_headlen(list))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ab841e0aed..f954d5893e79 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1800,7 +1800,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
break;
case SO_PEERSEC:
- return security_socket_getpeersec_stream(sock, optval.user, optlen.user, len);
+ return security_socket_getpeersec_stream(sock,
+ optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
@@ -3389,6 +3390,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
sk->sk_state = TCP_CLOSE;
+ sk->sk_use_task_frag = true;
sk_set_socket(sk, sock);
sock_set_flag(sk, SOCK_ZAPPED);
diff --git a/net/core/stream.c b/net/core/stream.c
index 75fded8495f5..cd06750dd329 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -123,7 +123,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
DEFINE_WAIT_FUNC(wait, woken_wake_function);
if (sk_stream_memory_free(sk))
- current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
+ current_timeo = vm_wait = get_random_u32_below(HZ / 5) + 2;
add_wait_queue(sk_sleep(sk), &wait);
@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
/* First the read buffer. */
__skb_queue_purge(&sk->sk_receive_queue);
+ /* Next, the error queue.
+ * We need to use queue lock, because other threads might
+ * add packets to the queue without socket lock being held.
+ */
+ skb_queue_purge(&sk->sk_error_queue);
+
/* Next, the write queue. */
WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 3aced951d5ab..01e54b46ae0b 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -337,7 +337,7 @@ static int __init init_dns_resolver(void)
* this is used to prevent malicious redirections from being installed
* with add_key().
*/
- cred = prepare_kernel_cred(NULL);
+ cred = prepare_kernel_cred(&init_task);
if (!cred)
return -ENOMEM;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index b1263917fcb2..5ee9ef00954e 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -415,6 +415,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
{
struct dsa_8021q_context *ctx;
+ int err;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -427,7 +428,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
ds->tag_8021q_ctx = ctx;
- return dsa_tag_8021q_setup(ds);
+ err = dsa_tag_8021q_setup(ds);
+ if (err)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ kfree(ctx);
+ return err;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index c2f1a542e6fa..646b3e490c71 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2078,58 +2078,91 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
return ret;
}
-static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+static int ethtool_vzalloc_stats_array(int n_stats, u64 **data)
{
+ if (n_stats < 0)
+ return n_stats;
+ if (n_stats > S32_MAX / sizeof(u64))
+ return -ENOMEM;
+ if (WARN_ON_ONCE(!n_stats))
+ return -EOPNOTSUPP;
+
+ *data = vzalloc(array_size(n_stats, sizeof(u64)));
+ if (!*data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ethtool_get_phy_stats_phydev(struct phy_device *phydev,
+ struct ethtool_stats *stats,
+ u64 **data)
+{
const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
+ int n_stats, ret;
+
+ if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats)
+ return -EOPNOTSUPP;
+
+ n_stats = phy_ops->get_sset_count(phydev);
+
+ ret = ethtool_vzalloc_stats_array(n_stats, data);
+ if (ret)
+ return ret;
+
+ stats->n_stats = n_stats;
+ return phy_ops->get_stats(phydev, stats, *data);
+}
+
+static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 **data)
+{
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct phy_device *phydev = dev->phydev;
- struct ethtool_stats stats;
- u64 *data;
- int ret, n_stats;
+ int n_stats, ret;
- if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
+ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
return -EOPNOTSUPP;
- if (phydev && !ops->get_ethtool_phy_stats &&
- phy_ops && phy_ops->get_sset_count)
- n_stats = phy_ops->get_sset_count(phydev);
- else
- n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
- if (n_stats < 0)
- return n_stats;
- if (n_stats > S32_MAX / sizeof(u64))
- return -ENOMEM;
- WARN_ON_ONCE(!n_stats);
+ n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+
+ ret = ethtool_vzalloc_stats_array(n_stats, data);
+ if (ret)
+ return ret;
+
+ stats->n_stats = n_stats;
+ ops->get_ethtool_phy_stats(dev, stats, *data);
+
+ return 0;
+}
+
+static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+{
+ struct phy_device *phydev = dev->phydev;
+ struct ethtool_stats stats;
+ u64 *data = NULL;
+ int ret = -EOPNOTSUPP;
if (copy_from_user(&stats, useraddr, sizeof(stats)))
return -EFAULT;
- stats.n_stats = n_stats;
+ if (phydev)
+ ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data);
- if (n_stats) {
- data = vzalloc(array_size(n_stats, sizeof(u64)));
- if (!data)
- return -ENOMEM;
+ if (ret == -EOPNOTSUPP)
+ ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data);
- if (phydev && !ops->get_ethtool_phy_stats &&
- phy_ops && phy_ops->get_stats) {
- ret = phy_ops->get_stats(phydev, &stats, data);
- if (ret < 0)
- goto out;
- } else {
- ops->get_ethtool_phy_stats(dev, &stats, data);
- }
- } else {
- data = NULL;
- }
+ if (ret)
+ goto out;
- ret = -EFAULT;
- if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ if (copy_to_user(useraddr, &stats, sizeof(stats))) {
+ ret = -EFAULT;
goto out;
+ }
+
useraddr += sizeof(stats);
- if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64))))
- goto out;
- ret = 0;
+ if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64))))
+ ret = -EFAULT;
out:
vfree(data);
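
The net effect of the ioctl.c rework above is a two-stage dispatch: try the PHY-device statistics path first and fall back to the driver's ethtool_ops when it reports -EOPNOTSUPP, with allocation and bounds checking centralized in ethtool_vzalloc_stats_array(). Condensed into a sketch of the flow (a simplification of the code above, not additional kernel code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_phy_stats_dispatch(struct net_device *dev,
                                      struct ethtool_stats *stats,
                                      u64 **data)
{
        int ret = -EOPNOTSUPP;

        /* Prefer generic PHY statistics when a phydev is attached... */
        if (dev->phydev)
                ret = ethtool_get_phy_stats_phydev(dev->phydev, stats, data);

        /* ...then fall back to the driver's own implementation. */
        if (ret == -EOPNOTSUPP)
                ret = ethtool_get_phy_stats_ethtool(dev, stats, data);

        return ret;
}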
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ab4a06be489b..6c0ec2789943 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1665,6 +1665,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
if (rc == 0) {
*sk = sock->sk;
(*sk)->sk_allocation = GFP_ATOMIC;
+ (*sk)->sk_use_task_frag = false;
/*
* Unhash it so that IP input processing does not even see it,
* we do not wish this socket to see incoming packets.
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e8b9a9202fec..b0acf6e19aed 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -234,13 +234,20 @@ static void inet_free_ifa(struct in_ifaddr *ifa)
call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}
+static void in_dev_free_rcu(struct rcu_head *head)
+{
+ struct in_device *idev = container_of(head, struct in_device, rcu_head);
+
+ kfree(rcu_dereference_protected(idev->mc_hash, 1));
+ kfree(idev);
+}
+
void in_dev_finish_destroy(struct in_device *idev)
{
struct net_device *dev = idev->dev;
WARN_ON(idev->ifa_list);
WARN_ON(idev->mc_list);
- kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
@@ -248,7 +255,7 @@ void in_dev_finish_destroy(struct in_device *idev)
if (!idev->dead)
pr_err("Freeing alive in_device %p\n", idev);
else
- kfree(idev);
+ call_rcu(&idev->rcu_head, in_dev_free_rcu);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
@@ -298,12 +305,6 @@ out_kfree:
goto out;
}
-static void in_dev_rcu_put(struct rcu_head *head)
-{
- struct in_device *idev = container_of(head, struct in_device, rcu_head);
- in_dev_put(idev);
-}
-
static void inetdev_destroy(struct in_device *in_dev)
{
struct net_device *dev;
@@ -328,7 +329,7 @@ static void inetdev_destroy(struct in_device *in_dev)
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
arp_ifdown(dev);
- call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
+ in_dev_put(in_dev);
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d5d745c3e345..46aa2d65e40a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -263,7 +263,7 @@ bool icmp_global_allow(void)
/* We want to use a credit of one in average, but need to randomize
* it for security reasons.
*/
- credit = max_t(int, credit - prandom_u32_max(3), 0);
+ credit = max_t(int, credit - get_random_u32_below(3), 0);
rc = true;
}
WRITE_ONCE(icmp_global.credit, credit);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 81be3e0f0e70..c920aa9a62a9 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -213,7 +213,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
- int tv = prandom_u32_max(max_delay);
+ int tv = get_random_u32_below(max_delay);
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
@@ -222,7 +222,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
- int tv = prandom_u32_max(in_dev->mr_maxdelay);
+ int tv = get_random_u32_below(in_dev->mr_maxdelay);
unsigned long exp = jiffies + tv + 2;
if (in_dev->mr_gq_running &&
@@ -236,7 +236,7 @@ static void igmp_gq_start_timer(struct in_device *in_dev)
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
- int tv = prandom_u32_max(delay);
+ int tv = get_random_u32_below(delay);
if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
in_dev_hold(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4a34bc7cb15e..d1f837579398 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -173,22 +173,40 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
return false;
}
+static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
+ kuid_t sk_uid, bool relax,
+ bool reuseport_cb_ok, bool reuseport_ok)
+{
+ if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
+ return false;
+
+ return inet_bind_conflict(sk, sk2, sk_uid, relax,
+ reuseport_cb_ok, reuseport_ok);
+}
+
static bool inet_bhash2_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2,
kuid_t sk_uid,
bool relax, bool reuseport_cb_ok,
bool reuseport_ok)
{
+ struct inet_timewait_sock *tw2;
struct sock *sk2;
sk_for_each_bound_bhash2(sk2, &tb2->owners) {
- if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
- continue;
+ if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+ reuseport_cb_ok, reuseport_ok))
+ return true;
+ }
- if (inet_bind_conflict(sk, sk2, sk_uid, relax,
- reuseport_cb_ok, reuseport_ok))
+ twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
+ sk2 = (struct sock *)tw2;
+
+ if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+ reuseport_cb_ok, reuseport_ok))
return true;
}
+
return false;
}
@@ -314,7 +332,7 @@ other_half_scan:
if (likely(remaining > 1))
remaining &= ~1U;
- offset = prandom_u32_max(remaining);
+ offset = get_random_u32_below(remaining);
/* __inet_hash_connect() favors ports having @low parity
* We do the opposite to not pollute connect() users.
*/
@@ -1182,12 +1200,26 @@ void inet_csk_prepare_forced_close(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
+static int inet_ulp_can_listen(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
+ return -EINVAL;
+
+ return 0;
+}
+
int inet_csk_listen_start(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
int err;
+ err = inet_ulp_can_listen(sk);
+ if (unlikely(err))
+ return err;
+
reqsk_queue_alloc(&icsk->icsk_accept_queue);
sk->sk_ack_backlog = 0;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3cec471a2cd2..24a38b56fab9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -116,6 +116,7 @@ static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
#endif
tb->rcv_saddr = sk->sk_rcv_saddr;
INIT_HLIST_HEAD(&tb->owners);
+ INIT_HLIST_HEAD(&tb->deathrow);
hlist_add_head(&tb->node, &head->chain);
}
@@ -137,7 +138,7 @@ struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
- if (hlist_empty(&tb->owners)) {
+ if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
__hlist_del(&tb->node);
kmem_cache_free(cachep, tb);
}
@@ -1097,21 +1098,22 @@ ok:
* on low contention the randomness is maximal and on high contention
* it may be inexistent.
*/
- i = max_t(int, i, prandom_u32_max(8) * 2);
+ i = max_t(int, i, get_random_u32_below(8) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, tb2, port);
- spin_unlock(&head2->lock);
-
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
}
if (tw)
inet_twsk_bind_unhash(tw, hinfo);
+
+ spin_unlock(&head2->lock);
spin_unlock(&head->lock);
+
if (tw)
inet_twsk_deschedule_put(tw);
local_bh_enable();
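
Two related things happen in this pair of files: bind2 buckets gain a deathrow list for timewait sockets, and the conflict check is factored into __inet_bhash2_conflict() so the same predicate runs over both the live owners list and the deathrow entries; a bucket is freed only once both lists are empty. The head2->lock unlock in __inet_hash_connect() also moves below inet_twsk_bind_unhash(), which now touches the bind2 bucket and therefore must still hold that lock. A rough, runnable userspace model of the two-list scan with a shared predicate (all names and the simplified conflict rule are hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { int port; bool reuse; struct node *next; };

    /* Shared predicate, applied to live and timewait entries alike */
    static bool conflicts(const struct node *a, const struct node *b)
    {
            return a->port == b->port && !(a->reuse && b->reuse);
    }

    static bool scan(const struct node *sk, const struct node *head)
    {
            for (const struct node *n = head; n; n = n->next)
                    if (conflicts(sk, n))
                            return true;
            return false;
    }

    int main(void)
    {
            struct node tw    = { .port = 80,   .reuse = false };
            struct node owner = { .port = 8080, .reuse = true  };
            struct node sk    = { .port = 80,   .reuse = true  };

            /* A port conflicts if it clashes in either list */
            printf("conflict: %d\n", scan(&sk, &owner) || scan(&sk, &tw));
            return 0;
    }
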
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 66fc940f9521..1d77d992e6e7 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -29,6 +29,7 @@
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo)
{
+ struct inet_bind2_bucket *tb2 = tw->tw_tb2;
struct inet_bind_bucket *tb = tw->tw_tb;
if (!tb)
@@ -37,6 +38,11 @@ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
__hlist_del(&tw->tw_bind_node);
tw->tw_tb = NULL;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+
+ __hlist_del(&tw->tw_bind2_node);
+ tw->tw_tb2 = NULL;
+ inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
+
__sock_put((struct sock *)tw);
}
@@ -45,7 +51,7 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
- struct inet_bind_hashbucket *bhead;
+ struct inet_bind_hashbucket *bhead, *bhead2;
spin_lock(lock);
sk_nulls_del_node_init_rcu((struct sock *)tw);
@@ -54,9 +60,13 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
/* Disassociate with bind bucket. */
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
hashinfo->bhash_size)];
+ bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
+ twsk_net(tw), tw->tw_num);
spin_lock(&bhead->lock);
+ spin_lock(&bhead2->lock);
inet_twsk_bind_unhash(tw, hashinfo);
+ spin_unlock(&bhead2->lock);
spin_unlock(&bhead->lock);
refcount_dec(&tw->tw_dr->tw_refcount);
@@ -93,6 +103,12 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
hlist_add_head(&tw->tw_bind_node, list);
}
+static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
+ struct hlist_head *list)
+{
+ hlist_add_head(&tw->tw_bind2_node, list);
+}
+
/*
* Enter the time wait state. This is called with locally disabled BH.
* Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -105,17 +121,28 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
- struct inet_bind_hashbucket *bhead;
+ struct inet_bind_hashbucket *bhead, *bhead2;
+
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet->num != 0 MUST be bound in
binding cache, even if it is closed.
*/
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
hashinfo->bhash_size)];
+ bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
+
spin_lock(&bhead->lock);
+ spin_lock(&bhead2->lock);
+
tw->tw_tb = icsk->icsk_bind_hash;
WARN_ON(!icsk->icsk_bind_hash);
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
+
+ tw->tw_tb2 = icsk->icsk_bind2_hash;
+ WARN_ON(!icsk->icsk_bind2_hash);
+ inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);
+
+ spin_unlock(&bhead2->lock);
spin_unlock(&bhead->lock);
spin_lock(lock);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b58df3c1bf7d..eec1f6df80d8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -412,7 +412,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
- del_timer_sync(&mrt->ipmr_expire_timer);
+ timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
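
timer_shutdown_sync() is stronger than the del_timer_sync() it replaces: besides waiting for a running handler to finish, it marks the timer as shut down so any later re-arm is silently ignored, which is the right tool immediately before the structure containing the timer is freed. A kernel-style sketch of the intended teardown ordering (illustrative only; in the real code the containing structure is freed by the caller):

    /* After this returns, the handler is not running and the timer can
     * no longer be re-armed, so freeing its container cannot race with
     * a late mod_timer() from another path. */
    timer_shutdown_sync(&mrt->ipmr_expire_timer);
    /* ... flush tables, destroy rhltable ... */
    kfree(mrt);
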
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd1fa9f70f1a..de6e3515ab4f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -471,7 +471,7 @@ static u32 ip_idents_reserve(u32 hash, int segs)
old = READ_ONCE(*p_tstamp);
if (old != now && cmpxchg(p_tstamp, old, now) == old)
- delta = prandom_u32_max(now - old);
+ delta = get_random_u32_below(now - old);
/* If UBSAN reports an error there, please make sure your compiler
* supports -fno-strict-overflow before reporting it that was a bug
@@ -689,7 +689,7 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
} else {
/* Randomize max depth to avoid some side channels attacks. */
int max_depth = FNHE_RECLAIM_DEPTH +
- prandom_u32_max(FNHE_RECLAIM_DEPTH);
+ get_random_u32_below(FNHE_RECLAIM_DEPTH);
while (depth > max_depth) {
fnhe_remove_oldest(hash);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 942d2dfa1115..26fb97d1d4d9 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -288,12 +288,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct tcp_request_sock *treq;
struct request_sock *req;
-#ifdef CONFIG_MPTCP
if (sk_is_mptcp(sk))
- ops = &mptcp_subflow_request_sock_ops;
-#endif
+ req = mptcp_subflow_reqsk_alloc(ops, sk, false);
+ else
+ req = inet_reqsk_alloc(ops, sk, false);
- req = inet_reqsk_alloc(ops, sk, false);
if (!req)
return NULL;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 001947136b0a..c567d5e8053e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2000,7 +2000,7 @@ static int receive_fallback_to_copy(struct sock *sk,
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(READ, (void __user *)copy_address,
+ err = import_single_range(ITER_DEST, (void __user *)copy_address,
inq, &iov, &msg.msg_iter);
if (err)
return err;
@@ -2034,7 +2034,7 @@ static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(READ, (void __user *)copy_address,
+ err = import_single_range(ITER_DEST, (void __user *)copy_address,
copylen, &iov, &msg.msg_iter);
if (err)
return err;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 54eec33c6e1c..d2c470524e58 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -618,7 +618,7 @@ static void bbr_reset_probe_bw_mode(struct sock *sk)
struct bbr *bbr = inet_csk_ca(sk);
bbr->mode = BBR_PROBE_BW;
- bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
+ bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1efacbe948da..cc072d2cfcd8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3646,7 +3646,8 @@ static void tcp_send_challenge_ack(struct sock *sk)
u32 half = (ack_limit + 1) >> 1;
WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
- WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
+ WRITE_ONCE(net->ipv4.tcp_challenge_count,
+ get_random_u32_inclusive(half, ack_limit + half - 1));
}
count = READ_ONCE(net->ipv4.tcp_challenge_count);
if (count > 0) {
diff --git a/net/ipv4/tcp_plb.c b/net/ipv4/tcp_plb.c
index bb1a08fda113..4bcf7eff95e3 100644
--- a/net/ipv4/tcp_plb.c
+++ b/net/ipv4/tcp_plb.c
@@ -97,7 +97,7 @@ void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb)
return;
pause = READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
- pause += prandom_u32_max(pause);
+ pause += get_random_u32_below(pause);
plb->pause_until = tcp_jiffies32 + pause;
/* Reset PLB state upon RTO, since an RTO causes a sk_rethink_txhash() call
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 9ae50b1bd844..05b6077b9f2c 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -139,6 +139,10 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
if (sk->sk_socket)
clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ err = -EINVAL;
+ if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
+ goto out_err;
+
err = ulp_ops->init(sk);
if (err)
goto out_err;
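
This check is the setsockopt-side half of the invariant added by the inet_csk_listen_start() hunk earlier in this merge: a ULP whose ops lack a clone() hook may not coexist with TCP_LISTEN, because sockets accepted from a listener inherit the parent's ULP context and would otherwise alias it. Side by side, the two guards look like this (sketch only; both paths fail with -EINVAL):

    /* attach path: refuse a clone-less ULP on an already-listening socket */
    if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
            return -EINVAL;

    /* listen path: refuse listen() when a clone-less ULP is attached */
    if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
            return -EINVAL;
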
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 8242c8947340..5f8104cf082d 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -176,6 +176,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
void udp_tunnel_sock_release(struct socket *sock)
{
rcu_assign_sk_user_data(sock->sk, NULL);
+ synchronize_rcu();
kernel_sock_shutdown(sock, SHUT_RDWR);
sock_release(sock);
}
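
Clearing sk_user_data on its own is not sufficient: a reader that fetched the pointer under rcu_read_lock() just before the store could still be dereferencing it while the socket is torn down. The added synchronize_rcu() is the standard unpublish-then-wait step of the RCU retire pattern, sketched here in kernel style (illustrative fragment; obj, old and release() are hypothetical names):

    /* 1. unpublish: new readers see NULL */
    rcu_assign_pointer(obj->ptr, NULL);
    /* 2. wait out every pre-existing RCU read-side critical section */
    synchronize_rcu();
    /* 3. no reader can still hold the old value; safe to tear down */
    release(old);
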
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c338dfb05d2c..f7a84a4acffc 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -104,7 +104,7 @@ static inline u32 cstamp_delta(unsigned long cstamp)
static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
- u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
+ u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
do_div(tmp, 1000000);
return (s32)tmp;
}
@@ -112,11 +112,11 @@ static inline s32 rfc3315_s14_backoff_init(s32 irt)
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
- u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
+ u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
do_div(tmp, 1000000);
if ((s32)tmp > mrt) {
/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
- tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
+ tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
do_div(tmp, 1000000);
}
return (s32)tmp;
@@ -3967,7 +3967,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
+ rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
nonce = 0;
if (idev->cnf.enhanced_dad ||
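
The three RFC 3315 section 14 conversions above are arithmetically exact: prandom_u32_max(200001) produced 0..200000, so 900000 plus that is precisely get_random_u32_inclusive(900000, 1100000), and the do_div() by 10^6 scales the timeout by a uniform factor in [0.9, 1.1] (or [1.9, 2.1] for the doubling case). A runnable userspace model of the integer math, with rand() standing in for the kernel RNG:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Models rfc3315_s14_backoff_init(): scale a timeout by a uniform
     * factor in [0.9, 1.1] using only integer arithmetic. */
    static int32_t jitter(int32_t irt)
    {
            /* stand-in for get_random_u32_inclusive(900000, 1100000) */
            uint64_t factor = 900000 + (uint32_t)(rand() % 200001);

            return (int32_t)((factor * (uint64_t)irt) / 1000000);
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    printf("irt=1000 -> %d\n", jitter(1000)); /* 900..1100 */
            return 0;
    }
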
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7c7155b48f17..e624497fa992 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -42,24 +42,29 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
+ int oif = sk->sk_bound_dev_if;
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = sk->sk_protocol;
fl6->daddr = sk->sk_v6_daddr;
fl6->saddr = np->saddr;
- fl6->flowi6_oif = sk->sk_bound_dev_if;
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_dport = inet->inet_dport;
fl6->fl6_sport = inet->inet_sport;
fl6->flowlabel = np->flow_label;
fl6->flowi6_uid = sk->sk_uid;
- if (!fl6->flowi6_oif)
- fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+ if (!oif)
+ oif = np->sticky_pktinfo.ipi6_ifindex;
- if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
- fl6->flowi6_oif = np->mcast_oif;
+ if (!oif) {
+ if (ipv6_addr_is_multicast(&fl6->daddr))
+ oif = np->mcast_oif;
+ else
+ oif = np->ucast_oif;
+ }
+ fl6->flowi6_oif = oif;
security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 23e766597f36..51cf37abd142 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -392,7 +392,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr_table *mrt)
{
- del_timer_sync(&mrt->ipmr_expire_timer);
+ timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7860383295d8..1c02160cf7a4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1050,7 +1050,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
/* called with mc_lock */
static void mld_gq_start_work(struct inet6_dev *idev)
{
- unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
+ unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
idev->mc_gq_running = 1;
if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
@@ -1068,7 +1068,7 @@ static void mld_gq_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32_max(delay);
+ unsigned long tv = get_random_u32_below(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
in6_dev_hold(idev);
@@ -1085,7 +1085,7 @@ static void mld_ifc_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32_max(delay);
+ unsigned long tv = get_random_u32_below(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
in6_dev_hold(idev);
@@ -1130,7 +1130,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
}
if (delay >= resptime)
- delay = prandom_u32_max(resptime);
+ delay = get_random_u32_below(resptime);
if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
refcount_inc(&ma->mca_refcnt);
@@ -2574,7 +2574,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
+ delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
if (cancel_delayed_work(&ma->mca_work)) {
refcount_dec(&ma->mca_refcnt);
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 2685c3f15e9d..b5205311f372 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -15,13 +15,7 @@ static u32 __ipv6_select_ident(struct net *net,
const struct in6_addr *dst,
const struct in6_addr *src)
{
- u32 id;
-
- do {
- id = get_random_u32();
- } while (!id);
-
- return id;
+ return get_random_u32_above(0);
}
/* This function exists only for tap drivers that must support broken
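
The helper has exactly the contract of the loop it replaces: get_random_u32_above(0) returns a u32 uniformly distributed over 1..U32_MAX, i.e. a guaranteed nonzero fragmentation ID. A userspace model of the old rejection-style formulation (the kernel helper achieves the same distribution without a retry loop; this only illustrates the equivalence):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Models get_random_u32_above(floor): uniform over floor+1..U32_MAX,
     * here by rejection-sampling a crude 32-bit value built from rand(). */
    static uint32_t model_u32_above(uint32_t floor)
    {
            uint32_t v;

            do {
                    v = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
            } while (v <= floor);
            return v;
    }

    int main(void)
    {
            printf("nonzero id: %u\n", model_u32_above(0));
            return 0;
    }
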
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2f355f0ec32a..e74e0361fd92 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1713,7 +1713,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
net->ipv6.rt6_stats->fib_rt_cache++;
/* Randomize max depth to avoid some side channels attacks. */
- max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
+ max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
while (bucket->depth > max_depth)
rt6_exception_remove_oldest(bucket);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 9c40f8d3bce8..f9514bacbd4a 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_sub_if_data *sdata;
struct ieee80211_ampdu_params params = {
.sta = &sta->sta,
.action = IEEE80211_AMPDU_TX_START,
@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
- ieee80211_agg_stop_txq(sta, tid);
-
/*
* Make sure no packets are being processed. This ensures that
* we have a valid starting sequence number and that in-flight
@@ -521,6 +519,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
synchronize_net();
+ sdata = sta->sdata;
params.ssn = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, &params);
tid_tx->ssn = params.ssn;
@@ -534,6 +533,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
} else if (ret) {
+ if (!sdata)
+ return;
+
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
sta->sta.addr, tid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c885076fae89..7f549862c83e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -147,6 +147,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
link_conf->bssid_index = 0;
link_conf->nontransmitted = false;
link_conf->ema_ap = false;
+ link_conf->bssid_indicator = 0;
if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
return -EINVAL;
@@ -1511,6 +1512,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
kfree(link_conf->ftmr_params);
link_conf->ftmr_params = NULL;
+ sdata->vif.mbssid_tx_vif = NULL;
+ link_conf->bssid_index = 0;
+ link_conf->nontransmitted = false;
+ link_conf->ema_ap = false;
+ link_conf->bssid_indicator = 0;
+
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 7a3d7893e19d..f1914bf39f0e 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
continue;
txqi = to_txq_info(sta->sta.txq[i]);
p += scnprintf(p, bufsz + buf - p,
- "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
+ "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
txqi->txq.tid,
txqi->txq.ac,
txqi->tin.backlog_bytes,
@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
txqi->flags,
test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
- test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
+ test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
+ test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
}
rcu_read_unlock();
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index d737db4e07e2..cfb09e4aed4d 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -392,6 +392,9 @@ int drv_ampdu_action(struct ieee80211_local *local,
might_sleep();
+ if (!sdata)
+ return -EIO;
+
sdata = get_bss_sdata(sdata);
if (!check_sdata_in_driver(sdata))
return -EIO;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 809bad53e15b..5d13a3dfd366 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1199,7 +1199,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
/* In reconfig don't transmit now, but mark for waking later */
if (local->in_reconfig) {
- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
+ set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
return;
}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 83bc41346ae7..5315ab750280 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -391,6 +391,37 @@ void ieee80211_ba_session_work(struct work_struct *work)
tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
if (!blocked && tid_tx) {
+ struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
+ struct ieee80211_sub_if_data *sdata =
+ vif_to_sdata(txqi->txq.vif);
+ struct fq *fq = &sdata->local->fq;
+
+ spin_lock_bh(&fq->lock);
+
+ /* Allow only frags to be dequeued */
+ set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+
+ if (!skb_queue_empty(&txqi->frags)) {
+ /* Fragmented Tx is ongoing, wait for it to
+ * finish. Reschedule worker to retry later.
+ */
+
+ spin_unlock_bh(&fq->lock);
+ spin_unlock_bh(&sta->lock);
+
+ /* Give the task working on the txq a chance
+ * to send out the queued frags
+ */
+ synchronize_net();
+
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+
+ ieee80211_queue_work(&sdata->local->hw, work);
+ return;
+ }
+
+ spin_unlock_bh(&fq->lock);
+
/*
* Assign it over to the normal tid_tx array
* where it "goes live".
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 63ff0d2524b6..d16606e84e22 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -838,7 +838,7 @@ enum txq_info_flags {
IEEE80211_TXQ_STOP,
IEEE80211_TXQ_AMPDU,
IEEE80211_TXQ_NO_AMSDU,
- IEEE80211_TXQ_STOP_NETIF_TX,
+ IEEE80211_TXQ_DIRTY,
};
/**
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d49a5906a943..23ed13f15067 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -364,7 +364,9 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
/* No support for VLAN with MLO yet */
if (iftype == NL80211_IFTYPE_AP_VLAN &&
- nsdata->wdev.use_4addr)
+ sdata->wdev.use_4addr &&
+ nsdata->vif.type == NL80211_IFTYPE_AP &&
+ nsdata->vif.valid_links)
return -EOPNOTSUPP;
/*
@@ -2195,7 +2197,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = cfg80211_register_netdevice(ndev);
if (ret) {
- ieee80211_if_free(ndev);
free_netdev(ndev);
return ret;
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 69d5e1ec6ede..3b81e6df3f34 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -512,7 +512,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
mesh_gate_del(tbl, mpath);
spin_unlock_bh(&mpath->state_lock);
- del_timer_sync(&mpath->timer);
+ timer_shutdown_sync(&mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
mesh_path_flush_pending(mpath);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7e3ab6e1b28f..c6562a6d2503 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4049,6 +4049,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
#undef CALL_RXH
}
+static bool
+ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+{
+ if (!sta->mlo)
+ return false;
+
+ return !!(sta->valid_links & BIT(link_id));
+}
+
+static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
+ u8 link_id)
+{
+ rx->link_id = link_id;
+ rx->link = rcu_dereference(rx->sdata->link[link_id]);
+
+ if (!rx->sta)
+ return rx->link;
+
+ if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
+ return false;
+
+ rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
+
+ return rx->link && rx->link_sta;
+}
+
+static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+ struct ieee80211_sta *pubsta,
+ int link_id)
+{
+ struct sta_info *sta;
+
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ rx->link_id = link_id;
+ rx->sta = sta;
+
+ if (sta) {
+ rx->local = sta->sdata->local;
+ if (!rx->sdata)
+ rx->sdata = sta->sdata;
+ rx->link_sta = &sta->deflink;
+ }
+
+ if (link_id < 0)
+ rx->link = &rx->sdata->deflink;
+ else if (!ieee80211_rx_data_set_link(rx, link_id))
+ return false;
+
+ return true;
+}
+
/*
* This function makes calls into the RX path, therefore
* it has to be invoked under RCU read lock.
@@ -4057,16 +4109,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
struct sk_buff_head frames;
struct ieee80211_rx_data rx = {
- .sta = sta,
- .sdata = sta->sdata,
- .local = sta->local,
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .link_id = -1,
};
struct tid_ampdu_rx *tid_agg_rx;
- u8 link_id;
+ int link_id = -1;
+
+ /* FIXME: statistics won't be right with this */
+ if (sta->sta.valid_links)
+ link_id = ffs(sta->sta.valid_links) - 1;
+
+ if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
+ return;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
if (!tid_agg_rx)
@@ -4086,10 +4141,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
};
drv_event_callback(rx.local, rx.sdata, &event);
}
- /* FIXME: statistics won't be right with this */
- link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
- rx.link = rcu_dereference(sta->sdata->link[link_id]);
- rx.link_sta = rcu_dereference(sta->link[link_id]);
ieee80211_rx_handlers(&rx, &frames);
}
@@ -4105,7 +4156,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .link_id = -1,
};
int i, diff;
@@ -4116,10 +4166,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
sta = container_of(pubsta, struct sta_info, sta);
- rx.sta = sta;
- rx.sdata = sta->sdata;
- rx.link = &rx.sdata->deflink;
- rx.local = sta->local;
+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
+ return;
rcu_read_lock();
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -4506,15 +4554,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->sta_mtx);
}
-static bool
-ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
-{
- if (!sta->mlo)
- return false;
-
- return !!(sta->valid_links & BIT(link_id));
-}
-
static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
struct ieee80211_fast_rx *fast_rx,
int orig_len)
@@ -4625,7 +4664,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
struct sk_buff *skb = rx->skb;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct sta_info *sta = rx->sta;
int orig_len = skb->len;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int snap_offs = hdrlen;
@@ -4637,7 +4675,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
} addrs __aligned(2);
- struct link_sta_info *link_sta;
struct ieee80211_sta_rx_stats *stats;
/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
@@ -4740,18 +4777,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
drop:
dev_kfree_skb(skb);
- if (rx->link_id >= 0) {
- link_sta = rcu_dereference(sta->link[rx->link_id]);
- if (!link_sta)
- return true;
- } else {
- link_sta = &sta->deflink;
- }
-
if (fast_rx->uses_rss)
- stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
+ stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
else
- stats = &link_sta->rx_stats;
+ stats = &rx->link_sta->rx_stats;
stats->dropped++;
return true;
@@ -4769,8 +4798,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_hdr *hdr = (void *)skb->data;
- struct link_sta_info *link_sta = NULL;
- struct ieee80211_link_data *link;
+ struct link_sta_info *link_sta = rx->link_sta;
+ struct ieee80211_link_data *link = rx->link;
rx->skb = skb;
@@ -4792,35 +4821,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
if (!ieee80211_accept_frame(rx))
return false;
- if (rx->link_id >= 0) {
- link = rcu_dereference(rx->sdata->link[rx->link_id]);
-
- /* we might race link removal */
- if (!link)
- return true;
- rx->link = link;
-
- if (rx->sta) {
- rx->link_sta =
- rcu_dereference(rx->sta->link[rx->link_id]);
- if (!rx->link_sta)
- return true;
- }
- } else {
- if (rx->sta)
- rx->link_sta = &rx->sta->deflink;
-
- rx->link = &sdata->deflink;
- }
-
- if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
- rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
- link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
-
- if (WARN_ON_ONCE(!link_sta))
- return true;
- }
-
if (!consume) {
struct skb_shared_hwtstamps *shwt;
@@ -4838,9 +4838,12 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
*/
shwt = skb_hwtstamps(rx->skb);
shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+
+ /* Update the hdr pointer to the new skb for translation below */
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
}
- if (unlikely(link_sta)) {
+ if (unlikely(rx->sta && rx->sta->sta.mlo)) {
/* translate to MLD addresses */
if (ether_addr_equal(link->conf->addr, hdr->addr1))
ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
@@ -4870,6 +4873,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_fast_rx *fast_rx;
struct ieee80211_rx_data rx;
+ int link_id = -1;
memset(&rx, 0, sizeof(rx));
rx.skb = skb;
@@ -4886,12 +4890,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
if (!pubsta)
goto drop;
- rx.sta = container_of(pubsta, struct sta_info, sta);
- rx.sdata = rx.sta->sdata;
-
- if (status->link_valid &&
- !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
- goto drop;
+ if (status->link_valid)
+ link_id = status->link_id;
/*
* TODO: Should the frame be dropped if the right link_id is not
@@ -4900,19 +4900,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
* link_id is used only for stats purpose and updating the stats on
* the deflink is fine?
*/
- if (status->link_valid)
- rx.link_id = status->link_id;
-
- if (rx.link_id >= 0) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(rx.sdata->link[rx.link_id]);
- if (!link)
- goto drop;
- rx.link = link;
- } else {
- rx.link = &rx.sdata->deflink;
- }
+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+ goto drop;
fast_rx = rcu_dereference(rx.sta->fast_rx);
if (!fast_rx)
@@ -4930,6 +4919,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
{
struct link_sta_info *link_sta;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct sta_info *sta;
+ int link_id = -1;
/*
* Look up link station first, in case there's a
@@ -4939,24 +4930,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
*/
link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
if (link_sta) {
- rx->sta = link_sta->sta;
- rx->link_id = link_sta->link_id;
+ sta = link_sta->sta;
+ link_id = link_sta->link_id;
} else {
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
- if (rx->sta) {
- if (status->link_valid &&
- !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
- status->link_id))
- return false;
-
- rx->link_id = status->link_valid ? status->link_id : -1;
- } else {
- rx->link_id = -1;
- }
+ sta = sta_info_get_bss(rx->sdata, hdr->addr2);
+ if (status->link_valid)
+ link_id = status->link_id;
}
+ if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
+ return false;
+
return ieee80211_prepare_and_rx_handle(rx, skb, consume);
}
@@ -5015,19 +5001,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc)) {
struct sta_info *sta, *prev_sta;
- u8 link_id = status->link_id;
+ int link_id = -1;
- if (pubsta) {
- rx.sta = container_of(pubsta, struct sta_info, sta);
- rx.sdata = rx.sta->sdata;
+ if (status->link_valid)
+ link_id = status->link_id;
- if (status->link_valid &&
- !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
+ if (pubsta) {
+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
goto out;
- if (status->link_valid)
- rx.link_id = status->link_id;
-
/*
* In MLO connection, fetch the link_id using addr2
* when the driver does not pass link_id in status.
@@ -5045,7 +5027,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (!link_sta)
goto out;
- rx.link_id = link_sta->link_id;
+ ieee80211_rx_data_set_link(&rx, link_sta->link_id);
}
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
@@ -5061,30 +5043,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
continue;
}
- if ((status->link_valid &&
- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
- link_id)) ||
- (!status->link_valid && prev_sta->sta.mlo))
+ rx.sdata = prev_sta->sdata;
+ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+ link_id))
+ goto out;
+
+ if (!status->link_valid && prev_sta->sta.mlo)
continue;
- rx.link_id = status->link_valid ? link_id : -1;
- rx.sta = prev_sta;
- rx.sdata = prev_sta->sdata;
ieee80211_prepare_and_rx_handle(&rx, skb, false);
prev_sta = sta;
}
if (prev_sta) {
- if ((status->link_valid &&
- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
- link_id)) ||
- (!status->link_valid && prev_sta->sta.mlo))
+ rx.sdata = prev_sta->sdata;
+ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+ link_id))
goto out;
- rx.link_id = status->link_valid ? link_id : -1;
- rx.sta = prev_sta;
- rx.sdata = prev_sta->sdata;
+ if (!status->link_valid && prev_sta->sta.mlo)
+ goto out;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
return;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2171cd1ca807..defe97a31724 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1129,7 +1129,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
struct sk_buff *purge_skb = NULL;
if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
- info->flags |= IEEE80211_TX_CTL_AMPDU;
reset_agg_timer = true;
} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/*
@@ -1161,7 +1160,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
if (!tid_tx) {
/* do nothing, let packet pass through */
} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
- info->flags |= IEEE80211_TX_CTL_AMPDU;
reset_agg_timer = true;
} else {
queued = true;
@@ -3677,8 +3675,7 @@ static void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
info->band = fast_tx->band;
info->control.vif = &sdata->vif;
info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
- IEEE80211_TX_CTL_DONTFRAG |
- (ampdu ? IEEE80211_TX_CTL_AMPDU : 0);
+ IEEE80211_TX_CTL_DONTFRAG;
info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT |
u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
IEEE80211_TX_CTRL_MLO_LINK);
@@ -3783,6 +3780,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
struct ieee80211_vif *vif = txq->vif;
+ int q = vif->hw_queue[txq->ac];
+ bool q_stopped;
WARN_ON_ONCE(softirq_count() == 0);
@@ -3790,17 +3789,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
return NULL;
begin:
- spin_lock_bh(&fq->lock);
-
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
- test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
- goto out;
+ spin_lock(&local->queue_stop_reason_lock);
+ q_stopped = local->queue_stop_reasons[q];
+ spin_unlock(&local->queue_stop_reason_lock);
- if (vif->txqs_stopped[txq->ac]) {
- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
- goto out;
+ if (unlikely(q_stopped)) {
+ /* mark for waking later */
+ set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
+ return NULL;
}
+ spin_lock_bh(&fq->lock);
+
/* Make sure fragments stay together. */
skb = __skb_dequeue(&txqi->frags);
if (unlikely(skb)) {
@@ -3810,6 +3810,9 @@ begin:
IEEE80211_SKB_CB(skb)->control.flags &=
~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
} else {
+ if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
+ goto out;
+
skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
}
@@ -3860,9 +3863,8 @@ begin:
}
if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
- info->flags |= IEEE80211_TX_CTL_AMPDU;
- else
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= (IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_CTL_DONTFRAG);
if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
@@ -4596,8 +4598,6 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
- if (tid_tx)
- info->flags |= IEEE80211_TX_CTL_AMPDU;
info->hw_queue = sdata->vif.hw_queue[queue];
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index bc8c285355a1..d84215bd5f8c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -292,22 +292,12 @@ static void wake_tx_push_queue(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_txq *queue)
{
- int q = sdata->vif.hw_queue[queue->ac];
struct ieee80211_tx_control control = {
.sta = queue->sta,
};
struct sk_buff *skb;
- unsigned long flags;
- bool q_stopped;
while (1) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- q_stopped = local->queue_stop_reasons[q];
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
- if (q_stopped)
- break;
-
skb = ieee80211_tx_dequeue(&local->hw, queue);
if (!skb)
break;
@@ -347,8 +337,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
local_bh_disable();
spin_lock(&fq->lock);
- sdata->vif.txqs_stopped[ac] = false;
-
if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
goto out;
@@ -370,7 +358,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
if (ac != txq->ac)
continue;
- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
+ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
&txqi->flags))
continue;
@@ -385,7 +373,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
txqi = to_txq_info(vif->txq);
- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
+ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
(ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
goto out;
@@ -517,8 +505,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
bool refcounted)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
- int n_acs = IEEE80211_NUM_ACS;
trace_stop_queue(local, queue, reason);
@@ -530,29 +516,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
else
local->q_stop_reasons[queue][reason]++;
- if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
- return;
-
- if (local->hw.queues < IEEE80211_NUM_ACS)
- n_acs = 1;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int ac;
-
- if (!sdata->dev)
- continue;
-
- for (ac = 0; ac < n_acs; ac++) {
- if (sdata->vif.hw_queue[ac] == queue ||
- sdata->vif.cab_queue == queue) {
- spin_lock(&local->fq.lock);
- sdata->vif.txqs_stopped[ac] = true;
- spin_unlock(&local->fq.lock);
- }
- }
- }
- rcu_read_unlock();
+ set_bit(reason, &local->queue_stop_reasons[queue]);
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
diff --git a/net/mctp/device.c b/net/mctp/device.c
index 99a3bda8852f..acb97b257428 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -429,12 +429,6 @@ static void mctp_unregister(struct net_device *dev)
struct mctp_dev *mdev;
mdev = mctp_dev_get_rtnl(dev);
- if (mdev && !mctp_known(dev)) {
- // Sanity check, should match what was set in mctp_register
- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
- __func__, dev->type);
- return;
- }
if (!mdev)
return;
@@ -451,14 +445,8 @@ static int mctp_register(struct net_device *dev)
struct mctp_dev *mdev;
/* Already registered? */
- mdev = rtnl_dereference(dev->mctp_ptr);
-
- if (mdev) {
- if (!mctp_known(dev))
- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
- __func__, dev->type);
+ if (rtnl_dereference(dev->mctp_ptr))
return 0;
- }
/* only register specific types */
if (!mctp_known(dev))
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index 5cb65f0928f4..65dcc55a8ad8 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -156,6 +156,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
GENL_SET_ERR_MSG(info, "invalid addr id or flags");
+ err = -EINVAL;
goto announce_err;
}
@@ -282,6 +283,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
if (addr_l.id == 0) {
NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
+ err = -EINVAL;
goto create_err;
}
@@ -395,11 +397,13 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
if (addr_l.family != addr_r.family) {
GENL_SET_ERR_MSG(info, "address families do not match");
+ err = -EINVAL;
goto destroy_err;
}
if (!addr_l.port || !addr_r.port) {
GENL_SET_ERR_MSG(info, "missing local or remote port");
+ err = -EINVAL;
goto destroy_err;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f6f93957275b..b7ad030dfe89 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1662,6 +1662,8 @@ static void mptcp_set_nospace(struct sock *sk)
set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
}
+static int mptcp_disconnect(struct sock *sk, int flags);
+
static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
size_t len, int *copied_syn)
{
@@ -1672,9 +1674,9 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
lock_sock(ssk);
msg->msg_flags |= MSG_DONTWAIT;
msk->connect_flags = O_NONBLOCK;
- msk->is_sendmsg = 1;
+ msk->fastopening = 1;
ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
- msk->is_sendmsg = 0;
+ msk->fastopening = 0;
msg->msg_flags = saved_flags;
release_sock(ssk);
@@ -1688,6 +1690,8 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
*/
if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
*copied_syn = 0;
+ } else if (ret && ret != -EINPROGRESS) {
+ mptcp_disconnect(sk, 0);
}
return ret;
@@ -2353,7 +2357,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
/* otherwise tcp will dispose of the ssk and subflow ctx */
if (ssk->sk_state == TCP_LISTEN) {
tcp_set_state(ssk, TCP_CLOSE);
- mptcp_subflow_queue_clean(ssk);
+ mptcp_subflow_queue_clean(sk, ssk);
inet_csk_listen_stop(ssk);
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
}
@@ -2989,6 +2993,14 @@ static int mptcp_disconnect(struct sock *sk, int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
+ /* We are on the fastopen error path. We can't call straight into the
+ * subflows cleanup code due to lock nesting (we are already under
+ * msk->firstsocket lock). Do nothing and leave the cleanup to the
+ * caller.
+ */
+ if (msk->fastopening)
+ return 0;
+
inet_sk_state_store(sk, TCP_CLOSE);
mptcp_stop_timer(sk);
@@ -3532,7 +3544,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* if reaching here via the fastopen/sendmsg path, the caller already
* acquired the subflow socket lock, too.
*/
- if (msk->is_sendmsg)
+ if (msk->fastopening)
err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
else
err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 955fb3d88eb3..a0d1658ce59e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -295,7 +295,7 @@ struct mptcp_sock {
u8 recvmsg_inq:1,
cork:1,
nodelay:1,
- is_sendmsg:1;
+ fastopening:1;
int connect_flags;
struct work_struct work;
struct sk_buff *ooo_last_skb;
@@ -628,7 +628,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow);
void __mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
bool __mptcp_close(struct sock *sk, long timeout);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 29904303f5c2..bd387d4b5a38 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -45,7 +45,6 @@ static void subflow_req_destructor(struct request_sock *req)
sock_put((struct sock *)subflow_req->msk);
mptcp_token_destroy_request(req);
- tcp_request_sock_ops.destructor(req);
}
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
@@ -590,7 +589,7 @@ static int subflow_v6_rebuild_header(struct sock *sk)
}
#endif
-struct request_sock_ops mptcp_subflow_request_sock_ops;
+static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -603,7 +602,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop;
- return tcp_conn_request(&mptcp_subflow_request_sock_ops,
+ return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
&subflow_request_sock_ipv4_ops,
sk, skb);
drop:
@@ -611,7 +610,14 @@ drop:
return 0;
}
+static void subflow_v4_req_destructor(struct request_sock *req)
+{
+ subflow_req_destructor(req);
+ tcp_request_sock_ops.destructor(req);
+}
+
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
@@ -634,15 +640,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
return 0;
}
- return tcp_conn_request(&mptcp_subflow_request_sock_ops,
+ return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
&subflow_request_sock_ipv6_ops, sk, skb);
drop:
tcp_listendrop(sk);
return 0; /* don't send reset */
}
+
+static void subflow_v6_req_destructor(struct request_sock *req)
+{
+ subflow_req_destructor(req);
+ tcp6_request_sock_ops.destructor(req);
+}
+#endif
+
+struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener)
+{
+ if (ops->family == AF_INET)
+ ops = &mptcp_subflow_v4_request_sock_ops;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (ops->family == AF_INET6)
+ ops = &mptcp_subflow_v6_request_sock_ops;
#endif
+ return inet_reqsk_alloc(ops, sk_listener, attach_listener);
+}
+EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
+
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
const struct mptcp_options_received *mp_opt)
@@ -1764,7 +1791,7 @@ static void subflow_state_change(struct sock *sk)
}
}
-void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
struct mptcp_sock *msk, *next, *head = NULL;
@@ -1813,8 +1840,23 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
do_cancel_work = __mptcp_close(sk, 0);
release_sock(sk);
- if (do_cancel_work)
+ if (do_cancel_work) {
+ /* lockdep will report a false positive ABBA deadlock
+ * between cancel_work_sync and the listener socket.
+ * The involved locks belong to different sockets WRT
+ * the existing AB chain.
+ * Using a per socket key is problematic as key
+ * deregistration requires process context and must be
+ * performed at socket disposal time, in atomic
+ * context.
+ * Just tell lockdep to consider the listener socket
+ * released here.
+ */
+ mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
mptcp_cancel_work(sk);
+ mutex_acquire(&listener_sk->sk_lock.dep_map,
+ SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+ }
sock_put(sk);
}
@@ -1963,7 +2005,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
- subflow_ops->slab_name = "request_sock_subflow";
subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
subflow_ops->obj_size, 0,
@@ -1973,16 +2014,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
if (!subflow_ops->slab)
return -ENOMEM;
- subflow_ops->destructor = subflow_req_destructor;
-
return 0;
}
void __init mptcp_subflow_init(void)
{
- mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
- if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
- panic("MPTCP: failed to init subflow request sock ops\n");
+ mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
+ mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
+ mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
+
+ if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
+ panic("MPTCP: failed to init subflow v4 request sock ops\n");
subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
@@ -1998,6 +2040,20 @@ void __init mptcp_subflow_init(void)
tcp_prot_override.release_cb = tcp_release_cb_override;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ /* In struct mptcp_subflow_request_sock, we assume the TCP request sock
+	 * structures for v4 and v6 have the same size. It should not change in
+	 * the future, but better to make sure we are warned if it is no longer
+	 * the case.
+ */
+ BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
+
+ mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
+ mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
+ mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
+
+ if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
+ panic("MPTCP: failed to init subflow v6 request sock ops\n");
+
subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
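
The mutex_release()/mutex_acquire() pair in the mptcp_subflow_queue_clean() hunk above deserves a note: it is a lockdep-only annotation, not a real unlock. It tells the validator to consider the listener lock dropped across mptcp_cancel_work(), suppressing the false-positive ABBA report described in the comment; that is sound only because the cancelled work never takes the listener's lock. The general shape of the trick (kernel-style sketch, not standalone-compilable):

    /* lockdep-only: pretend the lock is released around an operation
     * that is known never to acquire it, then re-acquire the dep_map.
     * No lock is actually dropped. */
    mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
    cancel_work_sync(&work);        /* must never lock sk */
    mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
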
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e7ba5b6dd2b7..46ebee9400da 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1698,9 +1698,10 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
ip_set_unlock(set);
retried = true;
- } while (ret == -EAGAIN &&
- set->variant->resize &&
- (ret = set->variant->resize(set, retried)) == 0);
+ } while (ret == -ERANGE ||
+ (ret == -EAGAIN &&
+ set->variant->resize &&
+ (ret = set->variant->resize(set, retried)) == 0));
if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
return 0;
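
The extended loop condition turns -ERANGE into a resumption signal rather than a hard error: each hash type below now caps the work done per invocation, records where it stopped in h->next, and returns -ERANGE, and call_ad() simply re-runs the request until it completes (with -EAGAIN still triggering a resize first). A runnable userspace model of that resume protocol (BATCH and all names hypothetical):

    #include <stdio.h>

    #define BATCH 3

    static int next_pos;    /* models h->next: the saved resume cursor */

    /* Process at most BATCH items per call; remember where we stopped
     * and return -1 (standing in for -ERANGE) to request a re-run. */
    static int add_range(int from, int to)
    {
            int done = 0;

            for (int ip = next_pos > from ? next_pos : from; ip <= to; ip++) {
                    if (done++ == BATCH) {
                            next_pos = ip;  /* record resume point */
                            return -1;
                    }
                    printf("add %d\n", ip);
            }
            return 0;
    }

    int main(void)
    {
            int ret;

            do {
                    ret = add_range(10, 17); /* re-invoke until done */
            } while (ret == -1);
            return 0;
    }
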
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index e30513cefd90..c9f4e3859663 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -100,11 +100,11 @@ static int
hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_ip4 *h = set->data;
+ struct hash_ip4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = { 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip = 0, ip_to = 0, hosts;
+ u32 ip = 0, ip_to = 0, hosts, i = 0;
int ret = 0;
if (tb[IPSET_ATTR_LINENO])
@@ -149,14 +149,14 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
- /* 64bit division is not allowed on 32bit */
- if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
- return -ERANGE;
-
if (retried)
ip = ntohl(h->next.ip);
- for (; ip <= ip_to;) {
+ for (; ip <= ip_to; i++) {
e.ip = htonl(ip);
+ if (i > IPSET_MAX_RANGE) {
+ hash_ip4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
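
All of the hash-type conversions that follow share this shape: the overflow-prone up-front range estimate is gone, a per-iteration counter i is checked instead, and, crucially, the element e is filled in before the check so that hash_*_data_next() records the first element not yet added as the resume cursor. Generic form of the converted loops (kernel-style sketch; hash_type_data_next stands in for each type's helper):

    for (; ip <= ip_to; ip++, i++) {
            e.ip = htonl(ip);
            if (i > IPSET_MAX_RANGE) {
                    hash_type_data_next(&h->next, &e); /* save resume cursor */
                    return -ERANGE;                    /* call_ad() retries */
            }
            ret = adtfn(set, &e, &ext, &ext, flags);
            /* ... existing error handling ... */
    }
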
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 153de3457423..a22ec1a6f6ec 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -97,11 +97,11 @@ static int
hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_ipmark4 *h = set->data;
+ struct hash_ipmark4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipmark4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip, ip_to = 0;
+ u32 ip, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -148,13 +148,14 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
ip_set_mask_from_to(ip, ip_to, cidr);
}
- if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
-
if (retried)
ip = ntohl(h->next.ip);
- for (; ip <= ip_to; ip++) {
+ for (; ip <= ip_to; ip++, i++) {
e.ip = htonl(ip);
+ if (i > IPSET_MAX_RANGE) {
+ hash_ipmark4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 2ffbd0b78a8c..e977b5a9c48d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -112,11 +112,11 @@ static int
hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_ipport4 *h = set->data;
+ struct hash_ipport4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
bool with_ports = false;
int ret;
@@ -184,17 +184,18 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
- if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
-
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
- for (; p <= port_to; p++) {
+ for (; p <= port_to; p++, i++) {
e.ip = htonl(ip);
e.port = htons(p);
+ if (i > IPSET_MAX_RANGE) {
+ hash_ipport4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 334fb1ad0e86..39a01934b153 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -108,11 +108,11 @@ static int
hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_ipportip4 *h = set->data;
+ struct hash_ipportip4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
bool with_ports = false;
int ret;
@@ -180,17 +180,18 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
- if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
-
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
- for (; p <= port_to; p++) {
+ for (; p <= port_to; p++, i++) {
e.ip = htonl(ip);
e.port = htons(p);
+ if (i > IPSET_MAX_RANGE) {
+ hash_ipportip4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 7df94f437f60..5c6de605a9fb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -160,12 +160,12 @@ static int
hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_ipportnet4 *h = set->data;
+ struct hash_ipportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, p = 0, port, port_to;
- u32 ip2_from = 0, ip2_to = 0, ip2;
+ u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
bool with_ports = false;
u8 cidr;
int ret;
@@ -253,9 +253,6 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
- if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
-
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
@@ -282,9 +279,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
for (; p <= port_to; p++) {
e.port = htons(p);
do {
+ i++;
e.ip2 = htonl(ip2);
ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
e.cidr = cidr - 1;
+ if (i > IPSET_MAX_RANGE) {
+ hash_ipportnet4_data_next(&h->next,
+ &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 1422739d9aa2..ce0a9ce5a91f 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -136,11 +136,11 @@ static int
hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_net4 *h = set->data;
+ struct hash_net4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = { .cidr = HOST_MASK };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip = 0, ip_to = 0, ipn, n = 0;
+ u32 ip = 0, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -188,19 +188,16 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
}
- ipn = ip;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
- n++;
- } while (ipn++ < ip_to);
-
- if (n > IPSET_MAX_RANGE)
- return -ERANGE;
if (retried)
ip = ntohl(h->next.ip);
do {
+ i++;
e.ip = htonl(ip);
+ if (i > IPSET_MAX_RANGE) {
+ hash_net4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 9810f5bf63f5..031073286236 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip = 0, ip_to = 0, ipn, n = 0;
+ u32 ip = 0, ip_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -256,19 +256,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr);
}
- ipn = ip;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
- n++;
- } while (ipn++ < ip_to);
-
- if (n > IPSET_MAX_RANGE)
- return -ERANGE;
if (retried)
ip = ntohl(h->next.ip);
do {
+ i++;
e.ip = htonl(ip);
+ if (i > IPSET_MAX_RANGE) {
+ hash_netiface4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index cdfb78c6e0d3..8fbe649c9dd3 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -166,13 +166,12 @@ static int
hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_netnet4 *h = set->data;
+ struct hash_netnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0;
- u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
- u64 n = 0, m = 0;
+ u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -248,19 +247,6 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
- ipn = ip;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
- n++;
- } while (ipn++ < ip_to);
- ipn = ip2_from;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
- m++;
- } while (ipn++ < ip2_to);
-
- if (n*m > IPSET_MAX_RANGE)
- return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip[0]);
@@ -273,7 +259,12 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
e.ip[0] = htonl(ip);
ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
do {
+ i++;
e.ip[1] = htonl(ip2);
+ if (i > IPSET_MAX_RANGE) {
+ hash_netnet4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 09cf72eb37f8..d1a0628df4ef 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -154,12 +154,11 @@ static int
hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_netport4 *h = set->data;
+ struct hash_netport4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
- u64 n = 0;
+ u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0;
bool with_ports = false;
u8 cidr;
int ret;
@@ -236,14 +235,6 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
}
- ipn = ip;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
- n++;
- } while (ipn++ < ip_to);
-
- if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip);
@@ -255,8 +246,12 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
e.ip = htonl(ip);
ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr = cidr - 1;
- for (; p <= port_to; p++) {
+ for (; p <= port_to; p++, i++) {
e.port = htons(p);
+ if (i > IPSET_MAX_RANGE) {
+ hash_netport4_data_next(&h->next, &e);
+ return -ERANGE;
+ }
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 19bcdb3141f6..005a7ce87217 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -173,17 +173,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
+static u32
+hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr)
+{
+ if (from == 0 && to == UINT_MAX) {
+ *cidr = 0;
+ return to;
+ }
+ return ip_set_range_to_cidr(from, to, cidr);
+}
+
static int
hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_netportnet4 *h = set->data;
+ struct hash_netportnet4 *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, p = 0, port, port_to;
- u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
- u64 n = 0, m = 0;
+ u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
bool with_ports = false;
int ret;
@@ -285,19 +294,6 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
- ipn = ip;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
- n++;
- } while (ipn++ < ip_to);
- ipn = ip2_from;
- do {
- ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
- m++;
- } while (ipn++ < ip2_to);
-
- if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
- return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip[0]);
@@ -310,13 +306,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
do {
e.ip[0] = htonl(ip);
- ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+ ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]);
for (; p <= port_to; p++) {
e.port = htons(p);
do {
+ i++;
e.ip[1] = htonl(ip2);
- ip2 = ip_set_range_to_cidr(ip2, ip2_to,
- &e.cidr[1]);
+ if (i > IPSET_MAX_RANGE) {
+ hash_netportnet4_data_next(&h->next,
+ &e);
+ return -ERANGE;
+ }
+ ip2 = hash_netportnet4_range_to_cidr(ip2,
+ ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
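Note: beyond the counter conversion, this type also gains hash_netportnet4_range_to_cidr(). The generic ip_set_range_to_cidr() cannot describe the full IPv4 space as a single block (its widest result is a /1 prefix), and judging by this fix that shortcoming kept the nested walk from completing cleanly when a 0-prefix second subnet was supplied; the wrapper emits cidr 0 and returns the range end directly, so the whole space becomes one element. A runnable model of the contrast (range_to_cidr() below is an approximation of the kernel helper, not its exact code):

        #include <stdint.h>
        #include <stdio.h>

        /* Approximation: largest aligned block starting at "from" that
         * stays within "to"; returns the block's last address.  Note it
         * can never produce a 0-prefix.
         */
        static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
        {
                uint8_t c = 32;
                uint64_t span = 1;

                while (c > 1 && (from & ((span << 1) - 1)) == 0 &&
                       (uint64_t)from + (span << 1) - 1 <= to) {
                        span <<= 1;
                        c--;
                }
                *cidr = c;
                return from + (uint32_t)(span - 1);
        }

        static uint32_t range_to_cidr0(uint32_t from, uint32_t to, uint8_t *cidr)
        {
                if (from == 0 && to == UINT32_MAX) {    /* whole address space */
                        *cidr = 0;
                        return to;                      /* one 0-prefix element */
                }
                return range_to_cidr(from, to, cidr);
        }

        int main(void)
        {
                uint8_t cidr;
                uint32_t last = range_to_cidr(0, UINT32_MAX, &cidr);

                printf("generic: /%u up to %#x\n", (unsigned)cidr, (unsigned)last);
                last = range_to_cidr0(0, UINT32_MAX, &cidr);
                printf("wrapper: /%u up to %#x\n", (unsigned)cidr, (unsigned)last);
                return 0;
        }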
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a67f7966574..e162636525cf 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -427,7 +427,7 @@ list_set_destroy(struct ip_set *set)
struct set_elem *e, *n;
if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
+ timer_shutdown_sync(&map->gc);
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c9f598505642..2a5ed71c82c3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2841,6 +2841,11 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
}
out_unlock:
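Note: the new default branch makes the command switch in do_ip_vs_set_ctl() total. The commands are screened before this point, so reaching the default case indicates a logic bug, and without it `ret` could be left uninitialized on an unexpected command (presumably the motivation, given the WARN_ON_ONCE). The shape of the pattern, with hypothetical handler names:

        static int set_ctl_dispatch(int cmd)
        {
                int ret;

                switch (cmd) {
                case CMD_ADDDEST:
                        ret = do_add();         /* hypothetical handlers */
                        break;
                case CMD_DELDEST:
                        ret = do_del();
                        break;
                default:
                        WARN_ON_ONCE(1);        /* screened earlier: a bug */
                        ret = -EINVAL;
                        break;
                }
                return ret;
        }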
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index df56073bb282..ce2a1549b304 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -640,9 +640,10 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
int i, loops, ntest;
s32 min_est = 0;
ktime_t t1, t2;
- s64 diff, val;
int max = 8;
int ret = 1;
+ s64 diff;
+ u64 val;
INIT_HLIST_HEAD(&chain);
mutex_lock(&__ip_vs_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 7ac7473e3804..1b87214d385e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -384,7 +384,7 @@ static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
struct ip_vs_lblc_table *tbl = svc->sched_data;
/* remove periodic timer */
- del_timer_sync(&tbl->periodic_timer);
+ timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblc_flush(svc);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 77c323c36a88..ad8f5fea6d3a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -547,7 +547,7 @@ static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
struct ip_vs_lblcr_table *tbl = svc->sched_data;
/* remove periodic timer */
- del_timer_sync(&tbl->periodic_timer);
+ timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblcr_flush(svc);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index a56fd0b5a430..4963fec815da 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1617,7 +1617,7 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
EnterFunction(7);
/* Receive a packet */
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen);
len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index f2579fc9c75b..3308e4cc740a 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -71,8 +71,8 @@ static struct ip_vs_dest *ip_vs_twos_schedule(struct ip_vs_service *svc,
* from 0 to total_weight
*/
total_weight += 1;
- rweight1 = prandom_u32_max(total_weight);
- rweight2 = prandom_u32_max(total_weight);
+ rweight1 = get_random_u32_below(total_weight);
+ rweight2 = get_random_u32_below(total_weight);
/* Pick two weighted servers */
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
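Note: this is part of the tree-wide replacement of prandom_u32_max() with get_random_u32_below(), which also appears below in nf_conntrack_core, nf_nat_helper, af_netlink and af_packet. The contract is unchanged, a uniformly distributed value in [0, ceil), but the new helper draws from the cryptographically seeded get_random_u32() rather than the prandom generator, and reportedly layers rejection sampling on top to remove the bias of the plain mapping. The division-free mapping underneath looks like this (a sketch of the idea, not the kernel's exact implementation):

        #include <stdint.h>

        /* Map a uniform 32-bit word into [0, ceil) without a division.
         * This multiply-high form is slightly biased; the kernel helper
         * adds a rejection step to correct that.
         */
        static uint32_t below(uint32_t rnd, uint32_t ceil)
        {
                return (uint32_t)(((uint64_t)rnd * ceil) >> 32);
        }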
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5c3cf0834af0..496c4920505b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -902,7 +902,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
- max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+ max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
@@ -1223,7 +1223,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
- max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+ max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 99323fb12d0f..ccef340be575 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -141,6 +141,7 @@ unsigned int nf_confirm(void *priv,
struct nf_conn *ct;
bool seqadj_needed;
__be16 frag_off;
+ int start;
u8 pnum;
ct = nf_ct_get(skb, &ctinfo);
@@ -163,9 +164,11 @@ unsigned int nf_confirm(void *priv,
break;
case NFPROTO_IPV6:
pnum = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off);
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+ start = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off);
+ if (start < 0 || (frag_off & htons(~0x7)) != 0)
return nf_conntrack_confirm(skb);
+
+ protoff = start;
break;
default:
return nf_conntrack_confirm(skb);
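Note: the IPv6 branch of nf_confirm() assigned the result of ipv6_skip_exthdr(), an int that can be negative, straight into the unsigned `protoff`, so the `protoff < 0` error check was dead code. Routing the return value through the signed `start` first restores the check. A compilable illustration of the dead comparison:

        #include <stdio.h>

        static int skip_exthdr(int ok)  /* returns an offset, or -1 on error */
        {
                return ok ? 40 : -1;
        }

        int main(void)
        {
                unsigned int protoff;
                int start;

                protoff = skip_exthdr(0);       /* -1 wraps to 0xffffffff */
                if (protoff < 0)                /* never true: unsigned */
                        puts("unsigned check - unreachable");

                start = skip_exthdr(0);         /* the fixed pattern */
                if (start < 0)
                        puts("signed check - error caught");
                else
                        protoff = start;
                return 0;
        }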
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 0fdcdb2c9ae4..4d9b99abe37d 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -383,12 +383,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
const __be32 *addr, const __be32 *mask)
{
struct flow_action_entry *entry;
- int i, j;
+ int i;
- for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
entry = flow_action_entry_next(flow_rule);
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
- offset + i, &addr[j], mask);
+ offset + i * sizeof(u32), &addr[i], mask);
}
}
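Note: the old loop stepped `i` by sizeof(u32) against a bound of sizeof(struct in6_addr) / sizeof(u32), i.e. 4, so the body ran exactly once and only the first 4 of the address's 16 bytes were ever mangled. The fix counts 32-bit words and scales to a byte offset at the point of use. A quick demonstration:

        #include <stdint.h>
        #include <stdio.h>

        struct in6 { uint32_t s6_addr32[4]; };  /* stands in for in6_addr */

        int main(void)
        {
                unsigned int i;
                const unsigned int words = sizeof(struct in6) / sizeof(uint32_t);

                /* old loop: i stepped by 4 against a bound of 4 -> one pass */
                for (i = 0; i < words; i += sizeof(uint32_t))
                        printf("old: byte offset %u\n", i);

                /* fixed: count words, convert to bytes where it is used */
                for (i = 0; i < words; i++)
                        printf("new: byte offset %zu\n", i * sizeof(uint32_t));
                return 0;
        }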
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index a95a25196943..bf591e6af005 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -223,7 +223,7 @@ u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port)
if (res != -EBUSY || (--attempts_left < 0))
break;
- port = min + prandom_u32_max(range);
+ port = min + get_random_u32_below(range);
}
return 0;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 832b881f7c17..8c09e4d12ac1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -465,8 +465,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
return 0;
}
-static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
- struct nft_set *set)
+static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+ struct nft_set *set,
+ const struct nft_set_desc *desc)
{
struct nft_trans *trans;
@@ -474,17 +475,28 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
if (trans == NULL)
return -ENOMEM;
- if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
+ if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
nft_trans_set_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
nft_activate_next(ctx->net, set);
}
nft_trans_set(trans) = set;
+ if (desc) {
+ nft_trans_set_update(trans) = true;
+ nft_trans_set_gc_int(trans) = desc->gc_int;
+ nft_trans_set_timeout(trans) = desc->timeout;
+ }
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+ struct nft_set *set)
+{
+ return __nft_trans_set_add(ctx, msg_type, set, NULL);
+}
+
static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
{
int err;
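Note: __nft_trans_set_add() now optionally carries a nft_set_desc. When userspace re-creates an existing set with new timeout/gc parameters, the request is no longer answered with plain -EEXIST; instead the new values ride in the NEWSET transaction (via the nft_trans_set_update/timeout/gc_int accessors) and, as the commit-path hunk further down shows, are only published to the live set at commit time, while an abort simply discards the transaction. A kernel-style sketch of the commit side, with field names assumed from the diff:

        struct set_update {             /* what the transaction carries */
                bool    update;         /* nft_trans_set_update(trans) */
                u64     timeout;        /* nft_trans_set_timeout(trans) */
                u32     gc_int;         /* nft_trans_set_gc_int(trans) */
        };

        static void newset_commit(struct nft_set *set, const struct set_update *u)
        {
                if (u->update) {        /* parameter update of a live set */
                        WRITE_ONCE(set->timeout, u->timeout);
                        WRITE_ONCE(set->gc_int, u->gc_int);
                        return;
                }
                /* otherwise a genuinely new set is activated, as before */
        }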
@@ -3780,8 +3792,7 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
static const struct nft_set_ops *
nft_select_set_ops(const struct nft_ctx *ctx,
const struct nlattr * const nla[],
- const struct nft_set_desc *desc,
- enum nft_set_policies policy)
+ const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
@@ -3810,7 +3821,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
if (!ops->estimate(desc, flags, &est))
continue;
- switch (policy) {
+ switch (desc->policy) {
case NFT_SET_POL_PERFORMANCE:
if (est.lookup < best.lookup)
break;
@@ -4045,8 +4056,10 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
const struct nft_set *set, u16 event, u16 flags)
{
- struct nlmsghdr *nlh;
+ u64 timeout = READ_ONCE(set->timeout);
+ u32 gc_int = READ_ONCE(set->gc_int);
u32 portid = ctx->portid;
+ struct nlmsghdr *nlh;
struct nlattr *nest;
u32 seq = ctx->seq;
int i;
@@ -4082,13 +4095,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype)))
goto nla_put_failure;
- if (set->timeout &&
+ if (timeout &&
nla_put_be64(skb, NFTA_SET_TIMEOUT,
- nf_jiffies64_to_msecs(set->timeout),
+ nf_jiffies64_to_msecs(timeout),
NFTA_SET_PAD))
goto nla_put_failure;
- if (set->gc_int &&
- nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
+ if (gc_int &&
+ nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(gc_int)))
goto nla_put_failure;
if (set->policy != NFT_SET_POL_PERFORMANCE) {
@@ -4389,15 +4402,94 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
return err;
}
+static int nft_set_expr_alloc(struct nft_ctx *ctx, struct nft_set *set,
+ const struct nlattr * const *nla,
+ struct nft_expr **exprs, int *num_exprs,
+ u32 flags)
+{
+ struct nft_expr *expr;
+ int err, i;
+
+ if (nla[NFTA_SET_EXPR]) {
+ expr = nft_set_elem_expr_alloc(ctx, set, nla[NFTA_SET_EXPR]);
+ if (IS_ERR(expr)) {
+ err = PTR_ERR(expr);
+ goto err_set_expr_alloc;
+ }
+ exprs[0] = expr;
+ (*num_exprs)++;
+ } else if (nla[NFTA_SET_EXPRESSIONS]) {
+ struct nlattr *tmp;
+ int left;
+
+ if (!(flags & NFT_SET_EXPR)) {
+ err = -EINVAL;
+ goto err_set_expr_alloc;
+ }
+ i = 0;
+ nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+ if (i == NFT_SET_EXPR_MAX) {
+ err = -E2BIG;
+ goto err_set_expr_alloc;
+ }
+ if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ err = -EINVAL;
+ goto err_set_expr_alloc;
+ }
+ expr = nft_set_elem_expr_alloc(ctx, set, tmp);
+ if (IS_ERR(expr)) {
+ err = PTR_ERR(expr);
+ goto err_set_expr_alloc;
+ }
+ exprs[i++] = expr;
+ (*num_exprs)++;
+ }
+ }
+
+ return 0;
+
+err_set_expr_alloc:
+ for (i = 0; i < *num_exprs; i++)
+ nft_expr_destroy(ctx, exprs[i]);
+
+ return err;
+}
+
+static bool nft_set_is_same(const struct nft_set *set,
+ const struct nft_set_desc *desc,
+ struct nft_expr *exprs[], u32 num_exprs, u32 flags)
+{
+ int i;
+
+ if (set->ktype != desc->ktype ||
+ set->dtype != desc->dtype ||
+ set->flags != flags ||
+ set->klen != desc->klen ||
+ set->dlen != desc->dlen ||
+ set->field_count != desc->field_count ||
+ set->num_exprs != num_exprs)
+ return false;
+
+ for (i = 0; i < desc->field_count; i++) {
+ if (set->field_len[i] != desc->field_len[i])
+ return false;
+ }
+
+ for (i = 0; i < num_exprs; i++) {
+ if (set->exprs[i]->ops != exprs[i]->ops)
+ return false;
+ }
+
+ return true;
+}
+
static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- u32 ktype, dtype, flags, policy, gc_int, objtype;
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
const struct nft_set_ops *ops;
- struct nft_expr *expr = NULL;
struct net *net = info->net;
struct nft_set_desc desc;
struct nft_table *table;
@@ -4405,10 +4497,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
struct nft_set *set;
struct nft_ctx ctx;
size_t alloc_size;
- u64 timeout;
+ int num_exprs = 0;
char *name;
int err, i;
u16 udlen;
+ u32 flags;
u64 size;
if (nla[NFTA_SET_TABLE] == NULL ||
@@ -4419,10 +4512,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
memset(&desc, 0, sizeof(desc));
- ktype = NFT_DATA_VALUE;
+ desc.ktype = NFT_DATA_VALUE;
if (nla[NFTA_SET_KEY_TYPE] != NULL) {
- ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
- if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+ desc.ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+ if ((desc.ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
return -EINVAL;
}
@@ -4447,17 +4540,17 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
return -EOPNOTSUPP;
}
- dtype = 0;
+ desc.dtype = 0;
if (nla[NFTA_SET_DATA_TYPE] != NULL) {
if (!(flags & NFT_SET_MAP))
return -EINVAL;
- dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
- if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
- dtype != NFT_DATA_VERDICT)
+ desc.dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+ if ((desc.dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+ desc.dtype != NFT_DATA_VERDICT)
return -EINVAL;
- if (dtype != NFT_DATA_VERDICT) {
+ if (desc.dtype != NFT_DATA_VERDICT) {
if (nla[NFTA_SET_DATA_LEN] == NULL)
return -EINVAL;
desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
@@ -4472,34 +4565,34 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(flags & NFT_SET_OBJECT))
return -EINVAL;
- objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
- if (objtype == NFT_OBJECT_UNSPEC ||
- objtype > NFT_OBJECT_MAX)
+ desc.objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
+ if (desc.objtype == NFT_OBJECT_UNSPEC ||
+ desc.objtype > NFT_OBJECT_MAX)
return -EOPNOTSUPP;
} else if (flags & NFT_SET_OBJECT)
return -EINVAL;
else
- objtype = NFT_OBJECT_UNSPEC;
+ desc.objtype = NFT_OBJECT_UNSPEC;
- timeout = 0;
+ desc.timeout = 0;
if (nla[NFTA_SET_TIMEOUT] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
- err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &timeout);
+ err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
if (err)
return err;
}
- gc_int = 0;
+ desc.gc_int = 0;
if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
- gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+ desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
}
- policy = NFT_SET_POL_PERFORMANCE;
+ desc.policy = NFT_SET_POL_PERFORMANCE;
if (nla[NFTA_SET_POLICY] != NULL)
- policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+ desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
if (nla[NFTA_SET_DESC] != NULL) {
err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
@@ -4531,6 +4624,8 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
return PTR_ERR(set);
}
} else {
+ struct nft_expr *exprs[NFT_SET_EXPR_MAX] = {};
+
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
return -EEXIST;
@@ -4538,13 +4633,29 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- return 0;
+ err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags);
+ if (err < 0)
+ return err;
+
+ err = 0;
+ if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
+ err = -EEXIST;
+ }
+
+ for (i = 0; i < num_exprs; i++)
+ nft_expr_destroy(&ctx, exprs[i]);
+
+ if (err < 0)
+ return err;
+
+ return __nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set, &desc);
}
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc, policy);
+ ops = nft_select_set_ops(&ctx, nla, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
@@ -4584,18 +4695,18 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
set->table = table;
write_pnet(&set->net, net);
set->ops = ops;
- set->ktype = ktype;
+ set->ktype = desc.ktype;
set->klen = desc.klen;
- set->dtype = dtype;
- set->objtype = objtype;
+ set->dtype = desc.dtype;
+ set->objtype = desc.objtype;
set->dlen = desc.dlen;
set->flags = flags;
set->size = desc.size;
- set->policy = policy;
+ set->policy = desc.policy;
set->udlen = udlen;
set->udata = udata;
- set->timeout = timeout;
- set->gc_int = gc_int;
+ set->timeout = desc.timeout;
+ set->gc_int = desc.gc_int;
set->field_count = desc.field_count;
for (i = 0; i < desc.field_count; i++)
@@ -4605,43 +4716,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (err < 0)
goto err_set_init;
- if (nla[NFTA_SET_EXPR]) {
- expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
- if (IS_ERR(expr)) {
- err = PTR_ERR(expr);
- goto err_set_expr_alloc;
- }
- set->exprs[0] = expr;
- set->num_exprs++;
- } else if (nla[NFTA_SET_EXPRESSIONS]) {
- struct nft_expr *expr;
- struct nlattr *tmp;
- int left;
-
- if (!(flags & NFT_SET_EXPR)) {
- err = -EINVAL;
- goto err_set_expr_alloc;
- }
- i = 0;
- nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
- if (i == NFT_SET_EXPR_MAX) {
- err = -E2BIG;
- goto err_set_expr_alloc;
- }
- if (nla_type(tmp) != NFTA_LIST_ELEM) {
- err = -EINVAL;
- goto err_set_expr_alloc;
- }
- expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
- if (IS_ERR(expr)) {
- err = PTR_ERR(expr);
- goto err_set_expr_alloc;
- }
- set->exprs[i++] = expr;
- set->num_exprs++;
- }
- }
+ err = nft_set_expr_alloc(&ctx, set, nla, set->exprs, &num_exprs, flags);
+ if (err < 0)
+ goto err_set_destroy;
+ set->num_exprs = num_exprs;
set->handle = nf_tables_alloc_handle(table);
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
@@ -4655,7 +4734,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
err_set_expr_alloc:
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(&ctx, set->exprs[i]);
-
+err_set_destroy:
ops->destroy(set);
err_set_init:
kfree(set->name);
@@ -6008,7 +6087,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
} else if (set->flags & NFT_SET_TIMEOUT &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
- timeout = set->timeout;
+ timeout = READ_ONCE(set->timeout);
}
expiration = 0;
@@ -6109,7 +6188,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (err < 0)
goto err_parse_key_end;
- if (timeout != set->timeout) {
+ if (timeout != READ_ONCE(set->timeout)) {
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
if (err < 0)
goto err_parse_key_end;
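Note: because timeout and gc_int can now change on a live set at commit time, every reader that may run concurrently, the netlink dump in nf_tables_fill_set() and the element-add path here, snapshots them with READ_ONCE(), pairing with the committer's WRITE_ONCE() so a torn value is never observed and each decision is made against one consistent snapshot. The shape of the pairing, as a kernel-style sketch:

        static u64 set_timeout_snapshot(const struct nft_set *set)
        {
                return READ_ONCE(set->timeout);         /* reader side */
        }

        static void set_timeout_publish(struct nft_set *set, u64 timeout)
        {
                WRITE_ONCE(set->timeout, timeout);      /* commit side */
        }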
@@ -9031,14 +9110,20 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_clear(net, nft_trans_set(trans));
- /* This avoids hitting -EBUSY when deleting the table
- * from the transaction.
- */
- if (nft_set_is_anonymous(nft_trans_set(trans)) &&
- !list_empty(&nft_trans_set(trans)->bindings))
- trans->ctx.table->use--;
+ if (nft_trans_set_update(trans)) {
+ struct nft_set *set = nft_trans_set(trans);
+ WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans));
+ WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans));
+ } else {
+ nft_clear(net, nft_trans_set(trans));
+ /* This avoids hitting -EBUSY when deleting the table
+ * from the transaction.
+ */
+ if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+ !list_empty(&nft_trans_set(trans)->bindings))
+ trans->ctx.table->use--;
+ }
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
@@ -9260,6 +9345,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
+ if (nft_trans_set_update(trans)) {
+ nft_trans_destroy(trans);
+ break;
+ }
trans->ctx.table->use--;
if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 0f8bb0bf558f..8d36303f3935 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -413,7 +413,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
pr_debug("deleting timer %s\n", info->label);
list_del(&info->timer->entry);
- del_timer_sync(&info->timer->timer);
+ timer_shutdown_sync(&info->timer->timer);
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
kfree(info->timer->attr.attr.name);
@@ -441,7 +441,7 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
alarm_cancel(&info->timer->alarm);
} else {
- del_timer_sync(&info->timer->timer);
+ timer_shutdown_sync(&info->timer->timer);
}
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 0371c387b0d1..66b0f941d8fb 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -166,7 +166,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
list_del(&ledinternal->list);
- del_timer_sync(&ledinternal->timer);
+ timer_shutdown_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d73091f6bb0f..bca2a470ccad 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -846,7 +846,7 @@ retry:
/* Bind collision, search negative portid values. */
if (rover == -4096)
/* rover will be in range [S32_MIN, -4097] */
- rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+ rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
else if (rover >= -4096)
rover = -4097;
portid = rover--;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 9d91087b9399..1fc339084d89 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1497,6 +1497,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
u32 dev_idx, se_idx;
u8 *apdu;
size_t apdu_len;
+ int rc;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_SE_INDEX] ||
@@ -1510,25 +1511,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- if (!dev->ops || !dev->ops->se_io)
- return -ENOTSUPP;
+ if (!dev->ops || !dev->ops->se_io) {
+ rc = -EOPNOTSUPP;
+ goto put_dev;
+ }
apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
- if (apdu_len == 0)
- return -EINVAL;
+ if (apdu_len == 0) {
+ rc = -EINVAL;
+ goto put_dev;
+ }
apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
- if (!apdu)
- return -EINVAL;
+ if (!apdu) {
+ rc = -EINVAL;
+ goto put_dev;
+ }
ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto put_dev;
+ }
ctx->dev_idx = dev_idx;
ctx->se_idx = se_idx;
- return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+ rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+
+put_dev:
+ nfc_put_device(dev);
+ return rc;
}
static int nfc_genl_vendor_cmd(struct sk_buff *skb,
@@ -1551,14 +1564,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
dev = nfc_get_device(dev_idx);
- if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
+ if (!dev)
return -ENODEV;
+ if (!dev->vendor_cmds || !dev->n_vendor_cmds) {
+ err = -ENODEV;
+ goto put_dev;
+ }
+
if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
- if (data_len == 0)
- return -EINVAL;
+ if (data_len == 0) {
+ err = -EINVAL;
+ goto put_dev;
+ }
} else {
data = NULL;
data_len = 0;
@@ -1573,10 +1593,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
dev->cur_cmd_info = info;
err = cmd->doit(dev, data, data_len);
dev->cur_cmd_info = NULL;
- return err;
+ goto put_dev;
}
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+
+put_dev:
+ nfc_put_device(dev);
+ return err;
}
/* message building helper */
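Note: both nfc_genl_se_io() and nfc_genl_vendor_cmd() looked a device up with nfc_get_device(), which takes a reference, and then returned directly from several error paths, leaking that reference. The rework funnels every post-lookup exit through a put_dev label; the -ENOTSUPP return also becomes -EOPNOTSUPP, since ENOTSUPP is kernel-internal and not a proper uapi errno. The single-exit pattern, as a kernel-style sketch:

        static int se_io_shape(u32 dev_idx)
        {
                struct nfc_dev *dev;
                int rc;

                dev = nfc_get_device(dev_idx);  /* takes a reference */
                if (!dev)
                        return -ENODEV;

                if (!dev->ops || !dev->ops->se_io) {
                        rc = -EOPNOTSUPP;
                        goto put_dev;
                }

                rc = 0;                         /* ... the actual work ... */
        put_dev:
                nfc_put_device(dev);            /* balanced on every path */
                return rc;
        }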
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 932bcf766d63..a71795355aec 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -973,6 +973,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
+ struct sw_flow_key *key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
@@ -1000,24 +1001,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &new_flow->key, false, &mask);
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ error = -ENOMEM;
+ goto err_kfree_key;
+ }
+
+ ovs_match_init(&match, key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
+ ovs_flow_mask_key(&new_flow->key, key, true, &mask);
+
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
- &new_flow->key, log);
+ key, log);
if (error)
goto err_kfree_flow;
- /* unmasked key is needed to match when ufid is not used. */
- if (ovs_identifier_is_key(&new_flow->id))
- match.key = new_flow->id.unmasked_key;
-
- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
-
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
@@ -1044,7 +1047,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
+ flow = ovs_flow_tbl_lookup(&dp->table, key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -1114,6 +1117,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
+
+ kfree(key);
return 0;
err_unlock_ovs:
@@ -1123,6 +1128,8 @@ err_kfree_acts:
ovs_nla_free_flow_actions(acts);
err_kfree_flow:
ovs_flow_free(new_flow, false);
+err_kfree_key:
+ kfree(key);
error:
return error;
}
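Note: ovs_flow_cmd_new() previously built the unmasked key in new_flow->key and masked it in place, repointing match.key at the unmasked copy kept in the flow ID. It now allocates a separate sw_flow_key on the heap (the struct is too large to keep on the stack), stores only the masked key in the flow, uses the unmasked key for UFID extraction and the table lookup, and frees it on both the success and every error path. A kernel-style sketch of the reworked ownership (build_unmasked_key() is a hypothetical stand-in):

        static int flow_cmd_new_shape(void)
        {
                struct sw_flow_key *key;
                int error;

                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                error = build_unmasked_key(key);
                if (error)
                        goto out;

                /* mask into new_flow->key; look up the table by "key" */
        out:
                kfree(key);             /* scratch key is always released */
                return error;
        }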
@@ -1854,7 +1861,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
if (!vport->upcall_stats) {
err = -ENOMEM;
- goto err_destroy_portids;
+ goto err_destroy_vport;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
@@ -1869,6 +1876,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
+err_destroy_vport:
+ ovs_dp_detach_port(vport);
err_destroy_portids:
kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
@@ -2316,7 +2325,7 @@ restart:
vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
if (!vport->upcall_stats) {
err = -ENOMEM;
- goto exit_unlock_free;
+ goto exit_unlock_free_vport;
}
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
@@ -2336,6 +2345,8 @@ restart:
ovs_notify(&dp_vport_genl_family, reply, info);
return 0;
+exit_unlock_free_vport:
+ ovs_dp_detach_port(vport);
exit_unlock_free:
ovs_unlock();
kfree_skb(reply);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 41c4ccc3a5d6..b5ab98ca2511 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1350,7 +1350,7 @@ static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
if (READ_ONCE(history[i]) == rxhash)
count++;
- victim = prandom_u32_max(ROLLOVER_HLEN);
+ victim = get_random_u32_below(ROLLOVER_HLEN);
/* Avoid dirtying the cache line if possible */
if (READ_ONCE(history[victim]) != rxhash)
@@ -1386,7 +1386,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return prandom_u32_max(num);
+ return get_random_u32_below(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index dac4fdc7488a..b390ff245d5e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -832,7 +832,7 @@ static void rfkill_release(struct device *dev)
kfree(rfkill);
}
-static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index e76d3459d78e..ac5caf5a48e1 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -10,6 +10,7 @@ rxrpc-y := \
call_accept.o \
call_event.o \
call_object.o \
+ call_state.o \
conn_client.o \
conn_event.o \
conn_object.o \
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7ea576f6ba4b..ebbd4a1c3f86 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -155,10 +155,10 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
if (service_id) {
write_lock(&local->services_lock);
- if (rcu_access_pointer(local->service))
+ if (local->service)
goto service_in_use;
rx->local = local;
- rcu_assign_pointer(local->service, rx);
+ local->service = rx;
write_unlock(&local->services_lock);
rx->sk.sk_state = RXRPC_SERVER_BOUND;
@@ -328,7 +328,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
mutex_unlock(&call->user_mutex);
}
- rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
_leave(" = %p", call);
return call;
}
@@ -374,13 +373,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
* @sock: The socket the call is on
* @call: The call to check
*
- * Allow a kernel service to find out whether a call is still alive -
- * ie. whether it has completed.
+ * Allow a kernel service to find out whether a call is still alive - whether
+ * it has completed successfully and all received data has been consumed.
*/
bool rxrpc_kernel_check_life(const struct socket *sock,
const struct rxrpc_call *call)
{
- return call->state != RXRPC_CALL_COMPLETE;
+ if (!rxrpc_call_is_complete(call))
+ return true;
+ if (call->completion != RXRPC_CALL_SUCCEEDED)
+ return false;
+ return !skb_queue_empty(&call->recvmsg_queue);
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);
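Note: the semantics of rxrpc_kernel_check_life() change slightly here. A call is now reported alive not merely until it completes, but, on success, until the caller has drained recvmsg_queue; a failed call is dead immediately. A kernel user polling it therefore keeps servicing the call until both conditions hold, along these lines (consume_some_data() is a hypothetical recvmsg step):

        static void service_call(struct socket *sock, struct rxrpc_call *call)
        {
                while (rxrpc_kernel_check_life(sock, call))
                        consume_some_data(call);
        }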
@@ -872,9 +875,9 @@ static int rxrpc_release_sock(struct sock *sk)
sk->sk_state = RXRPC_CLOSE;
- if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
+ if (rx->local && rx->local->service == rx) {
write_lock(&rx->local->services_lock);
- rcu_assign_pointer(rx->local->service, NULL);
+ rx->local->service = NULL;
write_unlock(&rx->local->services_lock);
}
@@ -957,16 +960,9 @@ static const struct net_proto_family rxrpc_family_ops = {
static int __init af_rxrpc_init(void)
{
int ret = -1;
- unsigned int tmp;
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
- get_random_bytes(&tmp, sizeof(tmp));
- tmp &= 0x3fffffff;
- if (tmp == 0)
- tmp = 1;
- idr_set_cursor(&rxrpc_client_conn_ids, tmp);
-
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
@@ -1062,7 +1058,6 @@ static void __exit af_rxrpc_exit(void)
* are released.
*/
rcu_barrier();
- rxrpc_destroy_client_conn_ids();
destroy_workqueue(rxrpc_workqueue);
rxrpc_exit_security();
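Note: the random seeding of the client connection-ID cursor disappears from af_rxrpc_init(), along with rxrpc_destroy_client_conn_ids(), because, as the ar-internal.h hunks below show, the IDR moves from a global into struct rxrpc_local (conn_ids), making connection IDs per-endpoint. Presumably the seeding moves with it into the local-endpoint setup; a sketch of what that initialisation plausibly looks like, reusing the removed logic (the placement is an assumption):

        static void local_init_conn_ids(struct rxrpc_local *local)
        {
                u32 tmp;

                idr_init(&local->conn_ids);
                get_random_bytes(&tmp, sizeof(tmp));
                tmp &= 0x3fffffff;      /* client conn IDs are 30 bits wide */
                if (tmp == 0)
                        tmp = 1;
                idr_set_cursor(&local->conn_ids, tmp);
        }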
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index e7dccab7b741..433060cade03 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -38,6 +38,7 @@ struct rxrpc_txbuf;
enum rxrpc_skb_mark {
RXRPC_SKB_MARK_PACKET, /* Received packet */
RXRPC_SKB_MARK_ERROR, /* Error notification */
+ RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
};
@@ -75,13 +76,7 @@ struct rxrpc_net {
bool live;
- bool kill_all_client_conns;
atomic_t nr_client_conns;
- spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
- struct mutex client_conn_discard_lock; /* Prevent multiple discarders */
- struct list_head idle_client_conns;
- struct work_struct client_conn_reaper;
- struct timer_list client_conn_reap_timer;
struct hlist_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
@@ -202,6 +197,7 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
u16 offset; /* Offset of data */
u16 len; /* Length of data */
u8 flags;
@@ -262,13 +258,11 @@ struct rxrpc_security {
/* respond to a challenge */
int (*respond_to_challenge)(struct rxrpc_connection *,
- struct sk_buff *,
- u32 *);
+ struct sk_buff *);
/* verify a response */
int (*verify_response)(struct rxrpc_connection *,
- struct sk_buff *,
- u32 *);
+ struct sk_buff *);
/* clear connection security */
void (*clear)(struct rxrpc_connection *);
@@ -283,21 +277,34 @@ struct rxrpc_local {
struct rcu_head rcu;
atomic_t active_users; /* Number of users of the local endpoint */
refcount_t ref; /* Number of references to the structure */
- struct rxrpc_net *rxnet; /* The network ns in which this resides */
+ struct net *net; /* The network namespace */
+ struct rxrpc_net *rxnet; /* Our bits in the network namespace */
struct hlist_node link;
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
- struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
+ struct completion io_thread_ready; /* Indication that the I/O thread started */
+ struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
struct sk_buff_head rx_queue; /* Received packets */
+ struct list_head conn_attend_q; /* Conns requiring immediate attention */
struct list_head call_attend_q; /* Calls requiring immediate attention */
+
struct rb_root client_bundles; /* Client connection bundles by socket params */
spinlock_t client_bundles_lock; /* Lock for client_bundles */
+ bool kill_all_client_conns;
+ struct list_head idle_client_conns;
+ struct timer_list client_conn_reap_timer;
+ unsigned long client_conn_flags;
+#define RXRPC_CLIENT_CONN_REAP_TIMER 0 /* The client conn reap timer expired */
+
spinlock_t lock; /* access lock */
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
bool dead;
bool service_closed; /* Service socket closed */
+ struct idr conn_ids; /* List of connection IDs */
+ struct list_head new_client_calls; /* Newly created client calls need connection */
+ spinlock_t client_call_lock; /* Lock for ->new_client_calls */
struct sockaddr_rxrpc srx; /* local address */
};
@@ -355,7 +362,6 @@ struct rxrpc_conn_proto {
struct rxrpc_conn_parameters {
struct rxrpc_local *local; /* Representation of local endpoint */
- struct rxrpc_peer *peer; /* Remote endpoint */
struct key *key; /* Security details */
bool exclusive; /* T if conn is exclusive */
bool upgrade; /* T if service ID can be upgraded */
@@ -364,10 +370,21 @@ struct rxrpc_conn_parameters {
};
/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
* Bits in the connection flags.
*/
enum rxrpc_conn_flag {
- RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
@@ -387,6 +404,7 @@ enum rxrpc_conn_flag {
*/
enum rxrpc_conn_event {
RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
+ RXRPC_CONN_EV_ABORT_CALLS, /* Abort attached calls */
};
/*
@@ -394,13 +412,13 @@ enum rxrpc_conn_event {
*/
enum rxrpc_conn_proto_state {
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
+ RXRPC_CONN_CLIENT_UNSECURED, /* Client connection needs security init */
RXRPC_CONN_CLIENT, /* Client connection */
RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
RXRPC_CONN_SERVICE, /* Service secured connection */
- RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
- RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
+ RXRPC_CONN_ABORTED, /* Conn aborted */
RXRPC_CONN__NR_STATES
};
@@ -411,17 +429,16 @@ struct rxrpc_bundle {
struct rxrpc_local *local; /* Representation of local endpoint */
struct rxrpc_peer *peer; /* Remote endpoint */
struct key *key; /* Security details */
+ const struct rxrpc_security *security; /* applied security module */
refcount_t ref;
atomic_t active; /* Number of active users */
unsigned int debug_id;
u32 security_level; /* Security level selected */
u16 service_id; /* Service ID for this connection */
bool try_upgrade; /* True if the bundle is attempting upgrade */
- bool alloc_conn; /* True if someone's getting a conn */
bool exclusive; /* T if conn is exclusive */
bool upgrade; /* T if service ID can be upgraded */
- short alloc_error; /* Error from last conn allocation */
- spinlock_t channel_lock;
+ unsigned short alloc_error; /* Error from last conn allocation */
struct rb_node local_node; /* Node in local->client_conns */
struct list_head waiting_calls; /* Calls waiting for channels */
unsigned long avail_chans; /* Mask of available channels */
@@ -439,6 +456,7 @@ struct rxrpc_connection {
struct rxrpc_peer *peer; /* Remote endpoint */
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
struct key *key; /* Security details */
+ struct list_head attend_link; /* Link in local->conn_attend_q */
refcount_t ref;
atomic_t active; /* Active count for service conns */
@@ -448,7 +466,7 @@ struct rxrpc_connection {
unsigned char act_chans; /* Mask of active channels */
struct rxrpc_channel {
unsigned long final_ack_at; /* Time at which to issue final ACK */
- struct rxrpc_call __rcu *call; /* Active call */
+ struct rxrpc_call *call; /* Active call */
unsigned int call_debug_id; /* call->debug_id */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
@@ -469,6 +487,7 @@ struct rxrpc_connection {
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+ struct mutex security_lock; /* Lock for security management */
const struct rxrpc_security *security; /* applied security module */
union {
struct {
@@ -482,7 +501,8 @@ struct rxrpc_connection {
unsigned long idle_timestamp; /* Time at which last became idle */
spinlock_t state_lock; /* state-change lock */
enum rxrpc_conn_proto_state state; /* current state of connection */
- u32 abort_code; /* Abort code of connection abort */
+ enum rxrpc_call_completion completion; /* Completion condition */
+ s32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
@@ -526,7 +546,8 @@ enum rxrpc_call_flag {
RXRPC_CALL_KERNEL, /* The call was made by the kernel */
RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */
RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */
- RXRPC_CALL_RX_IS_IDLE, /* Reception is idle - send an ACK */
+ RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */
+ RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */
};
/*
@@ -557,18 +578,6 @@ enum rxrpc_call_state {
};
/*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED, /* - Normal termination */
- RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
- RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
- RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
- RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- NR__RXRPC_CALL_COMPLETIONS
-};
-
-/*
* Call Tx congestion management modes.
*/
enum rxrpc_congest_mode {
@@ -586,6 +595,7 @@ enum rxrpc_congest_mode {
struct rxrpc_call {
struct rcu_head rcu;
struct rxrpc_connection *conn; /* connection carrying call */
+ struct rxrpc_bundle *bundle; /* Connection bundle to use */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_local *local; /* Representation of local endpoint */
struct rxrpc_sock __rcu *socket; /* socket responsible */
@@ -608,7 +618,7 @@ struct rxrpc_call {
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
struct list_head link; /* link in master call list */
- struct list_head chan_wait_link; /* Link in conn->bundle->waiting_calls */
+ struct list_head wait_link; /* Link in local->new_client_calls */
struct hlist_node error_link; /* link in error distribution list */
struct list_head accept_link; /* Link in rx->acceptq */
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
@@ -622,10 +632,13 @@ struct rxrpc_call {
unsigned long flags;
unsigned long events;
spinlock_t notify_lock; /* Kernel notification lock */
- rwlock_t state_lock; /* lock for state transition */
- u32 abort_code; /* Local/remote abort code */
+ unsigned int send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */
+ s32 send_abort; /* Abort code to be sent */
+ short send_abort_err; /* Error to be associated with the abort */
+ rxrpc_seq_t send_abort_seq; /* DATA packet that incurred the abort (or 0) */
+ s32 abort_code; /* Local/remote abort code */
int error; /* Local error incurred */
- enum rxrpc_call_state state; /* current state of call */
+ enum rxrpc_call_state _state; /* Current state of call (needs barrier) */
enum rxrpc_call_completion completion; /* Call completion condition */
refcount_t ref;
u8 security_ix; /* Security type */
@@ -811,9 +824,11 @@ extern struct workqueue_struct *rxrpc_workqueue;
*/
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
-bool rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
- struct rxrpc_connection *, struct sockaddr_rxrpc *,
- struct sk_buff *);
+bool rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn,
+ struct sockaddr_rxrpc *peer_srx,
+ struct sk_buff *skb);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
@@ -833,7 +848,7 @@ void rxrpc_reduce_call_timer(struct rxrpc_call *call,
unsigned long now,
enum rxrpc_timer_trace why);
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
/*
* call_object.c
@@ -850,6 +865,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct sockaddr_rxrpc *,
struct rxrpc_call_params *, gfp_t,
unsigned int);
+void rxrpc_start_call_timer(struct rxrpc_call *call);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
@@ -872,32 +888,88 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
}
/*
+ * call_state.c
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+ enum rxrpc_call_completion compl,
+ u32 abort_code,
+ int error);
+bool rxrpc_call_completed(struct rxrpc_call *call);
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+ u32 abort_code, int error, enum rxrpc_abort_reason why);
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+ int error);
+
+static inline void rxrpc_set_call_state(struct rxrpc_call *call,
+ enum rxrpc_call_state state)
+{
+ /* Order write of completion info before write of ->state. */
+ smp_store_release(&call->_state, state);
+ wake_up(&call->waitq);
+}
+
+static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
+{
+ return call->_state; /* Only inside I/O thread */
+}
+
+static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+ return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
+{
+ /* Order read ->state before read of completion info. */
+ return smp_load_acquire(&call->_state);
+}
+
+static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+ return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
+{
+ return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
+}
+
+/*
* conn_client.c
*/
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
-extern struct idr rxrpc_client_conn_ids;
-void rxrpc_destroy_client_conn_ids(void);
+void rxrpc_purge_client_connections(struct rxrpc_local *local);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
-int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
- struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
- gfp_t);
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
+void rxrpc_connect_client_calls(struct rxrpc_local *local);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
-void rxrpc_discard_expired_client_conns(struct work_struct *);
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);
/*
* conn_event.c
*/
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
+ unsigned int channel);
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+ s32 abort_code, int err, enum rxrpc_abort_reason why);
void rxrpc_process_connection(struct work_struct *);
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);
+
+static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
+{
+ /* Order reading the abort info after the state check. */
+ return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
+}
/*
* conn_object.c
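Note: the accessors added above encode a release/acquire protocol. rxrpc_set_call_state() publishes the state with smp_store_release() after the completion fields (abort code, error) have been written, and rxrpc_call_state() reads it back with smp_load_acquire(), so any thread that observes RXRPC_CALL_COMPLETE is guaranteed to also see the matching completion info; the unordered __rxrpc_call_state() variants are reserved for the I/O thread, the only writer. Condensed shape of the pairing, as a kernel-style sketch:

        /* Writer (I/O thread): fill in completion info, then publish. */
        static void complete_call(struct rxrpc_call *call, s32 abort_code, int error)
        {
                call->abort_code = abort_code;
                call->error = error;
                smp_store_release(&call->_state, RXRPC_CALL_COMPLETE);
        }

        /* Reader (any thread): acquire-load the state before the info. */
        static bool call_failed(const struct rxrpc_call *call)
        {
                if (smp_load_acquire(&call->_state) != RXRPC_CALL_COMPLETE)
                        return false;
                return call->completion != RXRPC_CALL_SUCCEEDED; /* now safe */
        }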
@@ -905,6 +977,7 @@ int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
struct sockaddr_rxrpc *,
@@ -960,12 +1033,19 @@ void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);
*/
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
void rxrpc_error_report(struct sock *);
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+ s32 abort_code, int err);
int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
wake_up_process(local->io_thread);
}
+static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+ return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
+}
+
/*
* insecure.c
*/
@@ -1047,6 +1127,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
@@ -1062,17 +1143,15 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
*/
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
- struct sockaddr_rxrpc *, gfp_t);
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+ struct sockaddr_rxrpc *srx, gfp_t gfp);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
enum rxrpc_peer_trace);
-void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
- struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
-void rxrpc_put_peer_locked(struct rxrpc_peer *, enum rxrpc_peer_trace);
/*
* proc.c
@@ -1086,33 +1165,22 @@ extern const struct seq_operations rxrpc_local_seq_ops;
* recvmsg.c
*/
void rxrpc_notify_socket(struct rxrpc_call *);
-bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool __rxrpc_call_completed(struct rxrpc_call *);
-bool rxrpc_call_completed(struct rxrpc_call *);
-bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
-bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
/*
* Abort a call due to a protocol error.
*/
-static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
- struct sk_buff *skb,
- const char *eproto_why,
- const char *why,
- u32 abort_code)
+static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
+ struct sk_buff *skb,
+ s32 abort_code,
+ enum rxrpc_abort_reason why)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
- return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
+ rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
+ return -EPROTO;
}
-#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
- __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
- (abort_why), (abort_code))
-
/*
* rtt.c
*/
@@ -1144,6 +1212,8 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
/*
* sendmsg.c
*/
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+ enum rxrpc_abort_reason why);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
/*
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index d1850863507f..3e8689fdc437 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -99,7 +99,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
- call->state = RXRPC_CALL_SERVER_PREALLOC;
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
@@ -280,7 +280,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
- rxrpc_new_incoming_peer(rx, local, peer);
+ rxrpc_new_incoming_peer(local, peer);
}
/* Now allocate and set up the connection */
@@ -339,18 +339,17 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
_enter("");
- /* Don't set up a call for anything other than the first DATA packet. */
- if (sp->hdr.seq != 1 ||
- sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
- return true; /* Just discard */
+ /* Don't set up a call for anything other than a DATA packet. */
+ if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
+ return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
- rcu_read_lock();
+ read_lock(&local->services_lock);
/* Weed out packets to services we're not offering. Packets that would
* begin a call are explicitly rejected and the rest are just
* discarded.
*/
- rx = rcu_dereference(local->service);
+ rx = local->service;
if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
sp->hdr.serviceId != rx->second_service)
) {
@@ -363,16 +362,14 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
if (!conn) {
sec = rxrpc_get_incoming_security(rx, skb);
if (!sec)
- goto reject;
+ goto unsupported_security;
}
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
- trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
- sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
- skb->priority = RX_INVALID_OPERATION;
+ rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
+ RX_INVALID_OPERATION, -ESHUTDOWN);
goto no_call;
}
@@ -402,7 +399,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
spin_unlock(&conn->state_lock);
spin_unlock(&rx->incoming_lock);
- rcu_read_unlock();
+ read_unlock(&local->services_lock);
if (hlist_unhashed(&call->error_link)) {
spin_lock(&call->peer->lock);
@@ -416,18 +413,20 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
return true;
unsupported_service:
- trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EOPNOTSUPP);
- skb->priority = RX_INVALID_OPERATION;
- goto reject;
+ read_unlock(&local->services_lock);
+ return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+ RX_INVALID_OPERATION, -EOPNOTSUPP);
+unsupported_security:
+ read_unlock(&local->services_lock);
+ return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+ RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
spin_unlock(&rx->incoming_lock);
-reject:
- rcu_read_unlock();
+ read_unlock(&local->services_lock);
_leave(" = f [%u]", skb->mark);
return false;
discard:
- rcu_read_unlock();
+ read_unlock(&local->services_lock);
return true;
}
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index b2cf448fb02c..1abdef15debc 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -251,6 +251,41 @@ out:
_leave("");
}
+/*
+ * Start transmitting the reply to a service. This cancels the need to ACK the
+ * request if we haven't yet done so.
+ */
+static void rxrpc_begin_service_reply(struct rxrpc_call *call)
+{
+ unsigned long now = jiffies;
+
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
+ WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
+ if (call->ackr_reason == RXRPC_ACK_DELAY)
+ call->ackr_reason = 0;
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+}
+
+/*
+ * Close the transmission phase. After this point there is no more data to be
+ * transmitted in the call.
+ */
+static void rxrpc_close_tx_phase(struct rxrpc_call *call)
+{
+ _debug("________awaiting reply/ACK__________");
+
+ switch (__rxrpc_call_state(call)) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+ break;
+ case RXRPC_CALL_SERVER_SEND_REPLY:
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
+ break;
+ default:
+ break;
+ }
+}
+
static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
{
unsigned int winsize = min_t(unsigned int, call->tx_winsize,
@@ -270,9 +305,11 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
{
struct rxrpc_txbuf *txb;
- if (rxrpc_is_client_call(call) &&
- !test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+ if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ if (list_empty(&call->tx_sendmsg))
+ return;
rxrpc_expose_client_call(call);
+ }
while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
struct rxrpc_txbuf, call_link))) {
@@ -283,6 +320,9 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
call->tx_top = txb->seq;
list_add_tail(&txb->call_link, &call->tx_buffer);
+ if (txb->wire.flags & RXRPC_LAST_PACKET)
+ rxrpc_close_tx_phase(call);
+
rxrpc_transmit_one(call, txb);
if (!rxrpc_tx_window_has_space(call))
@@ -292,16 +332,15 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
static void rxrpc_transmit_some_data(struct rxrpc_call *call)
{
- switch (call->state) {
+ switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_SERVER_ACK_REQUEST:
if (list_empty(&call->tx_sendmsg))
return;
+ rxrpc_begin_service_reply(call);
fallthrough;
case RXRPC_CALL_SERVER_SEND_REPLY:
- case RXRPC_CALL_SERVER_AWAIT_ACK:
case RXRPC_CALL_CLIENT_SEND_REQUEST:
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (!rxrpc_tx_window_has_space(call))
return;
if (list_empty(&call->tx_sendmsg)) {
@@ -331,21 +370,31 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
/*
* Handle retransmission and deferred ACK/abort generation.
*/
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
unsigned long now, next, t;
rxrpc_serial_t ackr_serial;
bool resend = false, expired = false;
+ s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
//printk("\n--------------------\n");
_enter("{%d,%s,%lx}",
- call->debug_id, rxrpc_call_states[call->state], call->events);
+ call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
+ call->events);
- if (call->state == RXRPC_CALL_COMPLETE)
+ if (__rxrpc_call_is_complete(call))
goto out;
+ /* Handle abort request locklessly, vs rxrpc_propose_abort(). */
+ abort_code = smp_load_acquire(&call->send_abort);
+ if (abort_code) {
+ rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
+ call->send_abort_why);
+ goto out;
+ }
+
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
@@ -358,7 +407,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
}
t = READ_ONCE(call->expect_req_by);
- if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+ if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
expired = true;
@@ -429,11 +478,12 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
(int)call->conn->hi_serial - (int)call->rx_serial > 0) {
trace_rxrpc_call_reset(call);
- rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_reset);
} else {
- rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
+ rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+ rxrpc_abort_call_timeout);
}
- rxrpc_send_abort_packet(call);
goto out;
}
@@ -441,7 +491,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
- if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY)
+ if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
rxrpc_resend(call, NULL);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -453,7 +503,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_propose_ack_input_data);
/* Make sure the timer is restarted */
- if (call->state != RXRPC_CALL_COMPLETE) {
+ if (!__rxrpc_call_is_complete(call)) {
next = call->expect_rx_by;
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
@@ -474,9 +524,15 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
}
out:
- if (call->state == RXRPC_CALL_COMPLETE)
+ if (__rxrpc_call_is_complete(call)) {
del_timer_sync(&call->timer);
+ if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ rxrpc_disconnect_call(call);
+ if (call->security)
+ call->security->free_call_crypto(call);
+ }
if (call->acks_hard_ack != call->tx_bottom)
rxrpc_shrink_call_tx_buffer(call);
_leave("");
+ return true;
}
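
The timer-restart step in the function above computes the earliest of several per-call deadlines (expect_rx_by, the ACK and resend timers, and friends) via the set() macro and re-arms the single call timer for that time. A small freestanding sketch of that reduction, including the wrap-safe jiffies-style comparison; names are illustrative:

	#include <stdbool.h>

	static bool time_before(unsigned long a, unsigned long b)
	{
		return (long)(a - b) < 0;	/* wrap-safe, as in the kernel */
	}

	/* Reduce several pending deadlines to the earliest one; the
	 * caller re-arms its one timer for the returned time.
	 */
	static unsigned long earliest_deadline(const unsigned long *deadlines,
					       int n)
	{
		unsigned long next = deadlines[0];

		for (int i = 1; i < n; i++)
			if (time_before(deadlines[i], next))
				next = deadlines[i];
		return next;
	}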
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index be5eb8cdf549..3ded5a24627c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -50,7 +50,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
struct rxrpc_local *local = call->local;
bool busy;
- if (call->state < RXRPC_CALL_COMPLETE) {
+ if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
spin_lock_bh(&local->lock);
busy = !list_empty(&call->attend_link);
trace_rxrpc_poke_call(call, busy, what);
@@ -69,7 +69,7 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
- if (call->state < RXRPC_CALL_COMPLETE) {
+ if (!__rxrpc_call_is_complete(call)) {
trace_rxrpc_timer_expired(call, jiffies);
rxrpc_poke_call(call, rxrpc_call_poke_timer);
}
@@ -150,7 +150,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
INIT_WORK(&call->destroyer, rxrpc_destroy_call);
INIT_LIST_HEAD(&call->link);
- INIT_LIST_HEAD(&call->chan_wait_link);
+ INIT_LIST_HEAD(&call->wait_link);
INIT_LIST_HEAD(&call->accept_link);
INIT_LIST_HEAD(&call->recvmsg_link);
INIT_LIST_HEAD(&call->sock_link);
@@ -162,7 +162,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->notify_lock);
spin_lock_init(&call->tx_lock);
- rwlock_init(&call->state_lock);
refcount_set(&call->ref, 1);
call->debug_id = debug_id;
call->tx_total_len = -1;
@@ -211,12 +210,12 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
now = ktime_get_real();
call->acks_latest_ts = now;
call->cong_tstamp = now;
- call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
call->dest_srx = *srx;
call->interruptibility = p->interruptibility;
call->tx_total_len = p->tx_total_len;
call->key = key_get(cp->key);
call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
+ call->security_level = cp->security_level;
if (p->kernel)
__set_bit(RXRPC_CALL_KERNEL, &call->flags);
if (cp->upgrade)
@@ -226,11 +225,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
- __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
+ rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
rxrpc_put_call(call, rxrpc_call_put_discard_error);
return ERR_PTR(ret);
}
+ rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);
+
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
p->user_call_ID, rxrpc_call_new_client);
@@ -241,7 +242,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
/*
* Initiate the call ack/resend/expiry timer.
*/
-static void rxrpc_start_call_timer(struct rxrpc_call *call)
+void rxrpc_start_call_timer(struct rxrpc_call *call)
{
unsigned long now = jiffies;
unsigned long j = now + MAX_JIFFY_OFFSET;
@@ -286,6 +287,39 @@ static void rxrpc_put_call_slot(struct rxrpc_call *call)
}
/*
+ * Start the process of connecting a call. We obtain a peer and a connection
+ * bundle, but the actual association of a call with a connection is offloaded
+ * to the I/O thread to simplify locking.
+ */
+static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
+{
+ struct rxrpc_local *local = call->local;
+ int ret = 0;
+
+ _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+
+ call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
+ if (!call->peer)
+ goto error;
+
+ ret = rxrpc_look_up_bundle(call, gfp);
+ if (ret < 0)
+ goto error;
+
+ trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
+ rxrpc_get_call(call, rxrpc_call_get_io_thread);
+ spin_lock(&local->client_call_lock);
+ list_add_tail(&call->wait_link, &local->new_client_calls);
+ spin_unlock(&local->client_call_lock);
+ rxrpc_wake_up_io_thread(local);
+ return 0;
+
+error:
+ __set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+ return ret;
+}
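
rxrpc_connect_call() now only queues the call: it takes client_call_lock, appends the call to local->new_client_calls and wakes the I/O thread, which drains the list in rxrpc_connect_client_calls() (see the conn_client.c hunk below). A minimal pthread sketch of that handoff, with illustrative names and a plain singly-linked queue standing in for the kernel list:

	#include <pthread.h>
	#include <stdlib.h>

	struct work_item {
		struct work_item *next;
		int payload;
	};

	static struct work_item *queue_head, **queue_tail = &queue_head;
	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t queue_wake = PTHREAD_COND_INITIALIZER;

	/* Producer: the equivalent of rxrpc_connect_call()'s tail. */
	static void queue_new_call(struct work_item *w)
	{
		w->next = NULL;
		pthread_mutex_lock(&queue_lock);
		*queue_tail = w;			/* list_add_tail() */
		queue_tail = &w->next;
		pthread_mutex_unlock(&queue_lock);
		pthread_cond_signal(&queue_wake);  /* rxrpc_wake_up_io_thread() */
	}

	/* Consumer: the I/O thread drains the list one item at a time. */
	static void *io_thread(void *unused)
	{
		(void)unused;
		for (;;) {
			pthread_mutex_lock(&queue_lock);
			while (!queue_head)
				pthread_cond_wait(&queue_wake, &queue_lock);
			struct work_item *w = queue_head;
			queue_head = w->next;
			if (!queue_head)
				queue_tail = &queue_head;
			pthread_mutex_unlock(&queue_lock);
			/* ...associate the call with a bundle/channel... */
			free(w);
		}
		return NULL;
	}

Doing the actual call/connection association on one thread is what lets the rest of this series drop channel_lock and the rcu_assign_pointer() dance on chan->call.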
+
+/*
* Set up a call for the given parameters.
* - Called with the socket lock held, which it must release.
* - If it returns a call, the call's lock will need releasing by the caller.
@@ -364,14 +398,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
+ ret = rxrpc_connect_call(call, gfp);
if (ret < 0)
goto error_attached_to_socket;
- rxrpc_see_call(call, rxrpc_call_see_connected);
-
- rxrpc_start_call_timer(call);
-
_leave(" = %p [new]", call);
return call;
@@ -383,27 +413,23 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
- __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, -EEXIST);
+ rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
rxrpc_call_see_userid_exists);
- rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_userid_exists);
_leave(" = -EEXIST");
return ERR_PTR(-EEXIST);
/* We got an error, but the call is attached to the socket and is in
- * need of release. However, we might now race with recvmsg() when
- * completing the call queues it. Return 0 from sys_sendmsg() and
+ * need of release. However, we might now race with recvmsg() when its
+ * completion notifies the socket. Return 0 from sys_sendmsg() and

* leave the error to recvmsg() to deal with.
*/
error_attached_to_socket:
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
rxrpc_call_see_connect_failed);
- set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
- __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, ret);
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
_leave(" = c=%08x [err]", call->debug_id);
return call;
}
@@ -426,32 +452,32 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
call->call_id = sp->hdr.callNumber;
call->dest_srx.srx_service = sp->hdr.serviceId;
call->cid = sp->hdr.cid;
- call->state = RXRPC_CALL_SERVER_SECURING;
call->cong_tstamp = skb->tstamp;
+ __set_bit(RXRPC_CALL_EXPOSED, &call->flags);
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+
spin_lock(&conn->state_lock);
switch (conn->state) {
case RXRPC_CONN_SERVICE_UNSECURED:
case RXRPC_CONN_SERVICE_CHALLENGING:
- call->state = RXRPC_CALL_SERVER_SECURING;
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
break;
case RXRPC_CONN_SERVICE:
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
break;
- case RXRPC_CONN_REMOTELY_ABORTED:
- __rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- conn->abort_code, conn->error);
- break;
- case RXRPC_CONN_LOCALLY_ABORTED:
- __rxrpc_abort_call("CON", call, 1,
- conn->abort_code, conn->error);
+ case RXRPC_CONN_ABORTED:
+ rxrpc_set_call_completion(call, conn->completion,
+ conn->abort_code, conn->error);
break;
default:
BUG();
}
+ rxrpc_get_call(call, rxrpc_call_get_io_thread);
+
/* Set the channel for this call. We don't get channel_lock as we're
* only defending against the data_ready handler (which we're called
* from) and the RESPONSE packet parser (which is only really
@@ -461,7 +487,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
chan = sp->hdr.cid & RXRPC_CHANNELMASK;
conn->channels[chan].call_counter = call->call_id;
conn->channels[chan].call_id = call->call_id;
- rcu_assign_pointer(conn->channels[chan].call, call);
+ conn->channels[chan].call = call;
spin_unlock(&conn->state_lock);
spin_lock(&conn->peer->lock);
@@ -521,20 +547,17 @@ static void rxrpc_cleanup_ring(struct rxrpc_call *call)
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
- bool put = false;
+ bool put = false, putu = false;
_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
call->flags, rxrpc_call_see_release);
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
rxrpc_put_call_slot(call);
- del_timer_sync(&call->timer);
/* Make sure we don't get any more notifications */
write_lock(&rx->recvmsg_lock);
@@ -559,7 +582,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
rb_erase(&call->sock_node, &rx->calls);
memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
- rxrpc_put_call(call, rxrpc_call_put_userid_exists);
+ putu = true;
}
list_del(&call->sock_link);
@@ -567,10 +590,9 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- rxrpc_disconnect_call(call);
- if (call->security)
- call->security->free_call_crypto(call);
+ if (putu)
+ rxrpc_put_call(call, rxrpc_call_put_userid);
+
_leave("");
}
@@ -587,7 +609,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
call = list_entry(rx->to_be_accepted.next,
struct rxrpc_call, accept_link);
list_del(&call->accept_link);
- rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
+ rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_sock_release_tba);
rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
}
@@ -595,8 +618,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
call = list_entry(rx->sock_calls.next,
struct rxrpc_call, sock_link);
rxrpc_get_call(call, rxrpc_call_get_release_sock);
- rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
- rxrpc_send_abort_packet(call);
+ rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_sock_release);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put_release_sock);
}
@@ -619,7 +642,7 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
dead = __refcount_dec_and_test(&call->ref, &r);
trace_rxrpc_call(debug_id, r - 1, 0, why);
if (dead) {
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+ ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
if (!list_empty(&call->link)) {
spin_lock(&rxnet->call_lock);
@@ -668,6 +691,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
+ rxrpc_deactivate_bundle(call->bundle);
+ rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
rxrpc_put_local(call->local, rxrpc_local_put_call);
call_rcu(&call->rcu, rxrpc_rcu_free_call);
@@ -680,7 +705,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
{
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+ ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
del_timer(&call->timer);
@@ -718,7 +743,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
call, refcount_read(&call->ref),
- rxrpc_call_states[call->state],
+ rxrpc_call_states[__rxrpc_call_state(call)],
call->flags, call->events);
spin_unlock(&rxnet->call_lock);
diff --git a/net/rxrpc/call_state.c b/net/rxrpc/call_state.c
new file mode 100644
index 000000000000..6afb54373ebb
--- /dev/null
+++ b/net/rxrpc/call_state.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Call state changing functions.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "ar-internal.h"
+
+/*
+ * Transition a call to the complete state.
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+ enum rxrpc_call_completion compl,
+ u32 abort_code,
+ int error)
+{
+ if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)
+ return false;
+
+ call->abort_code = abort_code;
+ call->error = error;
+ call->completion = compl;
+ /* Allow reader of completion state to operate locklessly */
+ rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);
+ trace_rxrpc_call_complete(call);
+ wake_up(&call->waitq);
+ rxrpc_notify_socket(call);
+ return true;
+}
+
+/*
+ * Record that a call successfully completed.
+ */
+bool rxrpc_call_completed(struct rxrpc_call *call)
+{
+ return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
+}
+
+/*
+ * Record that a call is locally aborted.
+ */
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+ u32 abort_code, int error, enum rxrpc_abort_reason why)
+{
+ trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
+ abort_code, error);
+ if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
+ abort_code, error))
+ return false;
+ if (test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+ rxrpc_send_abort_packet(call);
+ return true;
+}
+
+/*
+ * Record that a call errored out before even getting off the ground, thereby
+ * setting the state to allow it to be destroyed.
+ */
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+ int error)
+{
+ call->abort_code = RX_CALL_DEAD;
+ call->error = error;
+ call->completion = compl;
+ call->_state = RXRPC_CALL_COMPLETE;
+ trace_rxrpc_call_complete(call);
+ WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags));
+}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index a08e33c9e54b..981ca5b98bcb 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -34,104 +34,59 @@ __read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
-/*
- * We use machine-unique IDs for our client connections.
- */
-DEFINE_IDR(rxrpc_client_conn_ids);
-static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
-
-/*
- * Get a connection ID and epoch for a client connection from the global pool.
- * The connection struct pointer is then recorded in the idr radix tree. The
- * epoch doesn't change until the client is rebooted (or, at least, unless the
- * module is unloaded).
- */
-static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
- gfp_t gfp)
+static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
- struct rxrpc_net *rxnet = conn->rxnet;
- int id;
-
- _enter("");
-
- idr_preload(gfp);
- spin_lock(&rxrpc_conn_id_lock);
-
- id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
- 1, 0x40000000, GFP_NOWAIT);
- if (id < 0)
- goto error;
-
- spin_unlock(&rxrpc_conn_id_lock);
- idr_preload_end();
-
- conn->proto.epoch = rxnet->epoch;
- conn->proto.cid = id << RXRPC_CIDSHIFT;
- set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
- _leave(" [CID %x]", conn->proto.cid);
- return 0;
-
-error:
- spin_unlock(&rxrpc_conn_id_lock);
- idr_preload_end();
- _leave(" = %d", id);
- return id;
+ atomic_inc(&bundle->active);
}
/*
- * Release a connection ID for a client connection from the global pool.
+ * Release a connection ID for a client connection.
*/
-static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
+static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
+ struct rxrpc_connection *conn)
{
- if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
- spin_lock(&rxrpc_conn_id_lock);
- idr_remove(&rxrpc_client_conn_ids,
- conn->proto.cid >> RXRPC_CIDSHIFT);
- spin_unlock(&rxrpc_conn_id_lock);
- }
+ idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}
/*
* Destroy the client connection ID tree.
*/
-void rxrpc_destroy_client_conn_ids(void)
+static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
struct rxrpc_connection *conn;
int id;
- if (!idr_is_empty(&rxrpc_client_conn_ids)) {
- idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
+ if (!idr_is_empty(&local->conn_ids)) {
+ idr_for_each_entry(&local->conn_ids, conn, id) {
pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
conn, refcount_read(&conn->ref));
}
BUG();
}
- idr_destroy(&rxrpc_client_conn_ids);
+ idr_destroy(&local->conn_ids);
}
/*
* Allocate a connection bundle.
*/
-static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
+static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
gfp_t gfp)
{
struct rxrpc_bundle *bundle;
bundle = kzalloc(sizeof(*bundle), gfp);
if (bundle) {
- bundle->local = cp->local;
- bundle->peer = rxrpc_get_peer(cp->peer, rxrpc_peer_get_bundle);
- bundle->key = cp->key;
- bundle->exclusive = cp->exclusive;
- bundle->upgrade = cp->upgrade;
- bundle->service_id = cp->service_id;
- bundle->security_level = cp->security_level;
+ bundle->local = call->local;
+ bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
+ bundle->key = key_get(call->key);
+ bundle->security = call->security;
+ bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
+ bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
+ bundle->service_id = call->dest_srx.srx_service;
+ bundle->security_level = call->security_level;
refcount_set(&bundle->ref, 1);
atomic_set(&bundle->active, 1);
- spin_lock_init(&bundle->channel_lock);
INIT_LIST_HEAD(&bundle->waiting_calls);
trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
}
@@ -152,84 +107,87 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
+ key_put(bundle->key);
kfree(bundle);
}
void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
- unsigned int id = bundle->debug_id;
+ unsigned int id;
bool dead;
int r;
- dead = __refcount_dec_and_test(&bundle->ref, &r);
- trace_rxrpc_bundle(id, r - 1, why);
- if (dead)
- rxrpc_free_bundle(bundle);
+ if (bundle) {
+ id = bundle->debug_id;
+ dead = __refcount_dec_and_test(&bundle->ref, &r);
+ trace_rxrpc_bundle(id, r - 1, why);
+ if (dead)
+ rxrpc_free_bundle(bundle);
+ }
+}
+
+/*
+ * Get rid of outstanding client connection preallocations when a local
+ * endpoint is destroyed.
+ */
+void rxrpc_purge_client_connections(struct rxrpc_local *local)
+{
+ rxrpc_destroy_client_conn_ids(local);
}
/*
* Allocate a client connection.
*/
static struct rxrpc_connection *
-rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
+rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
struct rxrpc_connection *conn;
- struct rxrpc_net *rxnet = bundle->local->rxnet;
- int ret;
+ struct rxrpc_local *local = bundle->local;
+ struct rxrpc_net *rxnet = local->rxnet;
+ int id;
_enter("");
- conn = rxrpc_alloc_connection(rxnet, gfp);
- if (!conn) {
- _leave(" = -ENOMEM");
+ conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
+ if (!conn)
return ERR_PTR(-ENOMEM);
+
+ id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (id < 0) {
+ kfree(conn);
+ return ERR_PTR(id);
}
refcount_set(&conn->ref, 1);
- conn->bundle = bundle;
- conn->local = bundle->local;
- conn->peer = bundle->peer;
- conn->key = bundle->key;
+ conn->proto.cid = id << RXRPC_CIDSHIFT;
+ conn->proto.epoch = local->rxnet->epoch;
+ conn->out_clientflag = RXRPC_CLIENT_INITIATED;
+ conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
+ conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
+ conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
+ conn->key = key_get(bundle->key);
+ conn->security = bundle->security;
conn->exclusive = bundle->exclusive;
conn->upgrade = bundle->upgrade;
conn->orig_service_id = bundle->service_id;
conn->security_level = bundle->security_level;
- conn->out_clientflag = RXRPC_CLIENT_INITIATED;
- conn->state = RXRPC_CONN_CLIENT;
+ conn->state = RXRPC_CONN_CLIENT_UNSECURED;
conn->service_id = conn->orig_service_id;
- ret = rxrpc_get_client_connection_id(conn, gfp);
- if (ret < 0)
- goto error_0;
-
- ret = rxrpc_init_client_conn_security(conn);
- if (ret < 0)
- goto error_1;
+ if (conn->security == &rxrpc_no_security)
+ conn->state = RXRPC_CONN_CLIENT;
atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);
- rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
- rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn);
- rxrpc_get_local(conn->local, rxrpc_local_get_client_conn);
- key_get(conn->key);
-
- trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
- rxrpc_conn_new_client);
+ rxrpc_see_connection(conn, rxrpc_conn_new_client);
atomic_inc(&rxnet->nr_client_conns);
trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
- _leave(" = %p", conn);
return conn;
-
-error_1:
- rxrpc_put_client_connection_id(conn);
-error_0:
- kfree(conn);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
}
/*
@@ -247,7 +205,8 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
goto dont_reuse;
- if (conn->state != RXRPC_CONN_CLIENT ||
+ if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
+ conn->state != RXRPC_CONN_CLIENT) ||
conn->proto.epoch != rxnet->epoch)
goto mark_dont_reuse;
@@ -257,7 +216,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
* times the maximum number of client conns away from the current
* allocation point to try and keep the IDs concentrated.
*/
- id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
+ id_cursor = idr_get_cursor(&conn->local->conn_ids);
id = conn->proto.cid >> RXRPC_CIDSHIFT;
distance = id - id_cursor;
if (distance < 0)
@@ -278,20 +237,23 @@ dont_reuse:
* Look up the conn bundle that matches the connection parameters, adding it if
* it doesn't yet exist.
*/
-static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
- gfp_t gfp)
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
static atomic_t rxrpc_bundle_id;
struct rxrpc_bundle *bundle, *candidate;
- struct rxrpc_local *local = cp->local;
+ struct rxrpc_local *local = call->local;
struct rb_node *p, **pp, *parent;
long diff;
+ bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
_enter("{%px,%x,%u,%u}",
- cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);
+ call->peer, key_serial(call->key), call->security_level,
+ upgrade);
- if (cp->exclusive)
- return rxrpc_alloc_bundle(cp, gfp);
+ if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
+ call->bundle = rxrpc_alloc_bundle(call, gfp);
+ return call->bundle ? 0 : -ENOMEM;
+ }
/* First, see if the bundle is already there. */
_debug("search 1");
@@ -300,11 +262,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
while (p) {
bundle = rb_entry(p, struct rxrpc_bundle, local_node);
-#define cmp(X) ((long)bundle->X - (long)cp->X)
- diff = (cmp(peer) ?:
- cmp(key) ?:
- cmp(security_level) ?:
- cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+ diff = (cmp(bundle->peer, call->peer) ?:
+ cmp(bundle->key, call->key) ?:
+ cmp(bundle->security_level, call->security_level) ?:
+ cmp(bundle->upgrade, upgrade));
#undef cmp
if (diff < 0)
p = p->rb_left;
@@ -317,9 +279,9 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
_debug("not found");
/* It wasn't. We need to add one. */
- candidate = rxrpc_alloc_bundle(cp, gfp);
+ candidate = rxrpc_alloc_bundle(call, gfp);
if (!candidate)
- return NULL;
+ return -ENOMEM;
_debug("search 2");
spin_lock(&local->client_bundles_lock);
@@ -329,11 +291,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
parent = *pp;
bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
-#define cmp(X) ((long)bundle->X - (long)cp->X)
- diff = (cmp(peer) ?:
- cmp(key) ?:
- cmp(security_level) ?:
- cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+ diff = (cmp(bundle->peer, call->peer) ?:
+ cmp(bundle->key, call->key) ?:
+ cmp(bundle->security_level, call->security_level) ?:
+ cmp(bundle->upgrade, upgrade));
#undef cmp
if (diff < 0)
pp = &(*pp)->rb_left;
@@ -347,178 +309,89 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
rb_link_node(&candidate->local_node, parent, pp);
rb_insert_color(&candidate->local_node, &local->client_bundles);
- rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
+ call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
spin_unlock(&local->client_bundles_lock);
- _leave(" = %u [new]", candidate->debug_id);
- return candidate;
+ _leave(" = B=%u [new]", call->bundle->debug_id);
+ return 0;
found_bundle_free:
rxrpc_free_bundle(candidate);
found_bundle:
- rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
- atomic_inc(&bundle->active);
+ call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
+ rxrpc_activate_bundle(bundle);
spin_unlock(&local->client_bundles_lock);
- _leave(" = %u [found]", bundle->debug_id);
- return bundle;
-}
-
-/*
- * Create or find a client bundle to use for a call.
- *
- * If we return with a connection, the call will be on its waiting list. It's
- * left to the caller to assign a channel and wake up the call.
- */
-static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
-{
- struct rxrpc_bundle *bundle;
-
- _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
- cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
- if (!cp->peer)
- goto error;
-
- call->tx_last_sent = ktime_get_real();
- call->cong_ssthresh = cp->peer->cong_ssthresh;
- if (call->cong_cwnd >= call->cong_ssthresh)
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
- else
- call->cong_mode = RXRPC_CALL_SLOW_START;
- if (cp->upgrade)
- __set_bit(RXRPC_CALL_UPGRADE, &call->flags);
-
- /* Find the client connection bundle. */
- bundle = rxrpc_look_up_bundle(cp, gfp);
- if (!bundle)
- goto error;
-
- /* Get this call queued. Someone else may activate it whilst we're
- * lining up a new connection, but that's fine.
- */
- spin_lock(&bundle->channel_lock);
- list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
- spin_unlock(&bundle->channel_lock);
-
- _leave(" = [B=%x]", bundle->debug_id);
- return bundle;
-
-error:
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ _leave(" = B=%u [found]", call->bundle->debug_id);
+ return 0;
}
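
Both rbtree walks above key the bundle on (peer, key, security_level, upgrade) by chaining three-way comparisons, falling through to the next field only on equality (spelled with the GNU a ?: b extension in the kernel). A standalone sketch of the composite comparator; the pointer casts give a stable address order, which is all the tree needs, and the types are illustrative:

	struct bundle_key {
		const void *peer;
		const void *key;
		unsigned int security_level;
		unsigned int upgrade;
	};

	static long cmp3(long a, long b)
	{
		return a - b;		/* <0, 0 or >0 three-way result */
	}

	static long bundle_key_cmp(const struct bundle_key *a,
				   const struct bundle_key *b)
	{
		long diff;

		diff = cmp3((long)a->peer, (long)b->peer);
		if (diff)
			return diff;
		diff = cmp3((long)a->key, (long)b->key);
		if (diff)
			return diff;
		diff = cmp3(a->security_level, b->security_level);
		if (diff)
			return diff;
		return cmp3(a->upgrade, b->upgrade);
	}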
/*
* Allocate a new connection and add it into a bundle.
*/
-static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
- __releases(bundle->channel_lock)
+static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
+ unsigned int slot)
{
- struct rxrpc_connection *candidate = NULL, *old = NULL;
- bool conflict;
- int i;
-
- _enter("");
-
- conflict = bundle->alloc_conn;
- if (!conflict)
- bundle->alloc_conn = true;
- spin_unlock(&bundle->channel_lock);
- if (conflict) {
- _leave(" [conf]");
- return;
- }
-
- candidate = rxrpc_alloc_client_connection(bundle, gfp);
-
- spin_lock(&bundle->channel_lock);
- bundle->alloc_conn = false;
-
- if (IS_ERR(candidate)) {
- bundle->alloc_error = PTR_ERR(candidate);
- spin_unlock(&bundle->channel_lock);
- _leave(" [err %ld]", PTR_ERR(candidate));
- return;
- }
-
- bundle->alloc_error = 0;
-
- for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
- unsigned int shift = i * RXRPC_MAXCALLS;
- int j;
-
- old = bundle->conns[i];
- if (!rxrpc_may_reuse_conn(old)) {
- if (old)
- trace_rxrpc_client(old, -1, rxrpc_client_replace);
- candidate->bundle_shift = shift;
- atomic_inc(&bundle->active);
- bundle->conns[i] = candidate;
- for (j = 0; j < RXRPC_MAXCALLS; j++)
- set_bit(shift + j, &bundle->avail_chans);
- candidate = NULL;
- break;
- }
+ struct rxrpc_connection *conn, *old;
+ unsigned int shift = slot * RXRPC_MAXCALLS;
+ unsigned int i;
- old = NULL;
+ old = bundle->conns[slot];
+ if (old) {
+ bundle->conns[slot] = NULL;
+ trace_rxrpc_client(old, -1, rxrpc_client_replace);
+ rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
}
- spin_unlock(&bundle->channel_lock);
-
- if (candidate) {
- _debug("discard C=%x", candidate->debug_id);
- trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
- rxrpc_put_connection(candidate, rxrpc_conn_put_discard);
+ conn = rxrpc_alloc_client_connection(bundle);
+ if (IS_ERR(conn)) {
+ bundle->alloc_error = PTR_ERR(conn);
+ return false;
}
- rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
- _leave("");
+ rxrpc_activate_bundle(bundle);
+ conn->bundle_shift = shift;
+ bundle->conns[slot] = conn;
+ for (i = 0; i < RXRPC_MAXCALLS; i++)
+ set_bit(shift + i, &bundle->avail_chans);
+ return true;
}
/*
* Add a connection to a bundle if there are no usable connections or we have
* connections waiting for extra capacity.
*/
-static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
+static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
- struct rxrpc_call *call;
- int i, usable;
+ int slot = -1, i, usable;
_enter("");
- spin_lock(&bundle->channel_lock);
+ bundle->alloc_error = 0;
/* See if there are any usable connections. */
usable = 0;
- for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
+ for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
if (rxrpc_may_reuse_conn(bundle->conns[i]))
usable++;
-
- if (!usable && !list_empty(&bundle->waiting_calls)) {
- call = list_first_entry(&bundle->waiting_calls,
- struct rxrpc_call, chan_wait_link);
- if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
- bundle->try_upgrade = true;
+ else if (slot == -1)
+ slot = i;
}
+ if (!usable && bundle->upgrade)
+ bundle->try_upgrade = true;
+
if (!usable)
goto alloc_conn;
if (!bundle->avail_chans &&
!bundle->try_upgrade &&
- !list_empty(&bundle->waiting_calls) &&
usable < ARRAY_SIZE(bundle->conns))
goto alloc_conn;
- spin_unlock(&bundle->channel_lock);
_leave("");
- return;
+ return usable;
alloc_conn:
- return rxrpc_add_conn_to_bundle(bundle, gfp);
+ return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}
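
Each connection slot contributes RXRPC_MAXCALLS consecutive bits to bundle->avail_chans, so slot i covers bits [i * RXRPC_MAXCALLS, (i + 1) * RXRPC_MAXCALLS). A small sketch of that bitmap scheme, using __builtin_ffsl() where the kernel uses set_bit()/clear_bit() and its own find-first-set helpers; a sketch only, not the kernel API:

	#define MAXCALLS 4	/* channels per connection */

	/* Mark all channels of connection slot @slot available. */
	static void open_slot(unsigned long *avail_chans, unsigned int slot)
	{
		for (unsigned int i = 0; i < MAXCALLS; i++)
			*avail_chans |= 1UL << (slot * MAXCALLS + i);
	}

	/* Claim the lowest available channel; returns -1 if none. */
	static int claim_channel(unsigned long *avail_chans)
	{
		int bit = __builtin_ffsl((long)*avail_chans); /* 1-based */

		if (!bit)
			return -1;
		*avail_chans &= ~(1UL << (bit - 1));
		return bit - 1;		/* slot = channel / MAXCALLS */
	}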
/*
@@ -532,11 +405,13 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
struct rxrpc_channel *chan = &conn->channels[channel];
struct rxrpc_bundle *bundle = conn->bundle;
struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
- struct rxrpc_call, chan_wait_link);
+ struct rxrpc_call, wait_link);
u32 call_id = chan->call_counter + 1;
_enter("C=%x,%u", conn->debug_id, channel);
+ list_del_init(&call->wait_link);
+
trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
/* Cancel the final ACK on the previous call if it hasn't been sent yet
@@ -546,70 +421,50 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
rxrpc_see_call(call, rxrpc_call_see_activate_client);
- list_del_init(&call->chan_wait_link);
- call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_activate_call);
call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
call->cid = conn->proto.cid | channel;
call->call_id = call_id;
- call->security = conn->security;
- call->security_ix = conn->security_ix;
call->dest_srx.srx_service = conn->service_id;
-
- trace_rxrpc_connect_call(call);
-
- write_lock(&call->state_lock);
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
- write_unlock(&call->state_lock);
-
- /* Paired with the read barrier in rxrpc_connect_call(). This orders
- * cid and epoch in the connection wrt to call_id without the need to
- * take the channel_lock.
- *
- * We provisionally assign a callNumber at this point, but we don't
- * confirm it until the call is about to be exposed.
- *
- * TODO: Pair with a barrier in the data_ready handler when that looks
- * at the call ID through a connection channel.
- */
- smp_wmb();
+ call->cong_ssthresh = call->peer->cong_ssthresh;
+ if (call->cong_cwnd >= call->cong_ssthresh)
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ else
+ call->cong_mode = RXRPC_CALL_SLOW_START;
chan->call_id = call_id;
chan->call_debug_id = call->debug_id;
- rcu_assign_pointer(chan->call, call);
+ chan->call = call;
+
+ rxrpc_see_call(call, rxrpc_call_see_connected);
+ trace_rxrpc_connect_call(call);
+ call->tx_last_sent = ktime_get_real();
+ rxrpc_start_call_timer(call);
+ rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
wake_up(&call->waitq);
}
/*
* Remove a connection from the idle list if it's on it.
*/
-static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
+static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_net *rxnet = bundle->local->rxnet;
- bool drop_ref;
-
if (!list_empty(&conn->cache_link)) {
- drop_ref = false;
- spin_lock(&rxnet->client_conn_cache_lock);
- if (!list_empty(&conn->cache_link)) {
- list_del_init(&conn->cache_link);
- drop_ref = true;
- }
- spin_unlock(&rxnet->client_conn_cache_lock);
- if (drop_ref)
- rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
+ list_del_init(&conn->cache_link);
+ rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
}
}
/*
- * Assign channels and callNumbers to waiting calls with channel_lock
- * held by caller.
+ * Assign channels and callNumbers to waiting calls.
*/
-static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
+static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
struct rxrpc_connection *conn;
unsigned long avail, mask;
unsigned int channel, slot;
+ trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
+
if (bundle->try_upgrade)
mask = 1;
else
@@ -629,7 +484,7 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
if (bundle->try_upgrade)
set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
- rxrpc_unidle_conn(bundle, conn);
+ rxrpc_unidle_conn(conn);
channel &= (RXRPC_MAXCALLS - 1);
conn->act_chans |= 1 << channel;
@@ -638,132 +493,24 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
}
/*
- * Assign channels and callNumbers to waiting calls.
- */
-static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
-{
- _enter("B=%x", bundle->debug_id);
-
- trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
-
- if (!bundle->avail_chans)
- return;
-
- spin_lock(&bundle->channel_lock);
- rxrpc_activate_channels_locked(bundle);
- spin_unlock(&bundle->channel_lock);
- _leave("");
-}
-
-/*
- * Wait for a callNumber and a channel to be granted to a call.
- */
-static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
- struct rxrpc_call *call, gfp_t gfp)
-{
- DECLARE_WAITQUEUE(myself, current);
- int ret = 0;
-
- _enter("%d", call->debug_id);
-
- if (!gfpflags_allow_blocking(gfp)) {
- rxrpc_maybe_add_conn(bundle, gfp);
- rxrpc_activate_channels(bundle);
- ret = bundle->alloc_error ?: -EAGAIN;
- goto out;
- }
-
- add_wait_queue_exclusive(&call->waitq, &myself);
- for (;;) {
- rxrpc_maybe_add_conn(bundle, gfp);
- rxrpc_activate_channels(bundle);
- ret = bundle->alloc_error;
- if (ret < 0)
- break;
-
- switch (call->interruptibility) {
- case RXRPC_INTERRUPTIBLE:
- case RXRPC_PREINTERRUPTIBLE:
- set_current_state(TASK_INTERRUPTIBLE);
- break;
- case RXRPC_UNINTERRUPTIBLE:
- default:
- set_current_state(TASK_UNINTERRUPTIBLE);
- break;
- }
- if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
- break;
- if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
- call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
- signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- }
- remove_wait_queue(&call->waitq, &myself);
- __set_current_state(TASK_RUNNING);
-
-out:
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * find a connection for a call
- * - called in process context with IRQs enabled
+ * Connect waiting channels (called from the I/O thread).
*/
-int rxrpc_connect_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
- struct rxrpc_bundle *bundle;
- struct rxrpc_net *rxnet = cp->local->rxnet;
- int ret = 0;
-
- _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
- rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
+ struct rxrpc_call *call;
- bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
- if (IS_ERR(bundle)) {
- ret = PTR_ERR(bundle);
- goto out;
- }
+ while ((call = list_first_entry_or_null(&local->new_client_calls,
+ struct rxrpc_call, wait_link))
+ ) {
+ struct rxrpc_bundle *bundle = call->bundle;
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
- ret = rxrpc_wait_for_channel(bundle, call, gfp);
- if (ret < 0)
- goto wait_failed;
- }
+ spin_lock(&local->client_call_lock);
+ list_move_tail(&call->wait_link, &bundle->waiting_calls);
+ spin_unlock(&local->client_call_lock);
-granted_channel:
- /* Paired with the write barrier in rxrpc_activate_one_channel(). */
- smp_rmb();
-
-out_put_bundle:
- rxrpc_deactivate_bundle(bundle);
- rxrpc_put_bundle(bundle, rxrpc_bundle_get_client_call);
-out:
- _leave(" = %d", ret);
- return ret;
-
-wait_failed:
- spin_lock(&bundle->channel_lock);
- list_del_init(&call->chan_wait_link);
- spin_unlock(&bundle->channel_lock);
-
- if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
- ret = 0;
- goto granted_channel;
+ if (rxrpc_bundle_has_space(bundle))
+ rxrpc_activate_channels(bundle);
}
-
- trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
- rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
- rxrpc_disconnect_client_call(bundle, call);
- goto out_put_bundle;
}
/*
@@ -796,14 +543,14 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
/*
* Set the reap timer.
*/
-static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
- if (!rxnet->kill_all_client_conns) {
+ if (!local->kill_all_client_conns) {
unsigned long now = jiffies;
unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
- if (rxnet->live)
- timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+ if (local->rxnet->live)
+ timer_reduce(&local->client_conn_reap_timer, reap_at);
}
}
@@ -814,16 +561,13 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
{
struct rxrpc_connection *conn;
struct rxrpc_channel *chan = NULL;
- struct rxrpc_net *rxnet = bundle->local->rxnet;
+ struct rxrpc_local *local = bundle->local;
unsigned int channel;
bool may_reuse;
u32 cid;
_enter("c=%x", call->debug_id);
- spin_lock(&bundle->channel_lock);
- set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-
/* Calls that have never actually been assigned a channel can simply be
* discarded.
*/
@@ -832,8 +576,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
_debug("call is waiting");
ASSERTCMP(call->call_id, ==, 0);
ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
- list_del_init(&call->chan_wait_link);
- goto out;
+ list_del_init(&call->wait_link);
+ return;
}
cid = call->cid;
@@ -841,10 +585,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
chan = &conn->channels[channel];
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
- if (rcu_access_pointer(chan->call) != call) {
- spin_unlock(&bundle->channel_lock);
- BUG();
- }
+ if (WARN_ON(chan->call != call))
+ return;
may_reuse = rxrpc_may_reuse_conn(conn);
@@ -865,16 +607,15 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
bundle->try_upgrade = false;
if (may_reuse)
- rxrpc_activate_channels_locked(bundle);
+ rxrpc_activate_channels(bundle);
}
-
}
/* See if we can pass the channel directly to another call. */
if (may_reuse && !list_empty(&bundle->waiting_calls)) {
trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
rxrpc_activate_one_channel(conn, channel);
- goto out;
+ return;
}
/* Schedule the final ACK to be transmitted in a short while so that it
@@ -892,7 +633,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
}
/* Deactivate the channel. */
- rcu_assign_pointer(chan->call, NULL);
+ chan->call = NULL;
set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
conn->act_chans &= ~(1 << channel);
@@ -905,17 +646,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
conn->idle_timestamp = jiffies;
rxrpc_get_connection(conn, rxrpc_conn_get_idle);
- spin_lock(&rxnet->client_conn_cache_lock);
- list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
- spin_unlock(&rxnet->client_conn_cache_lock);
+ list_move_tail(&conn->cache_link, &local->idle_client_conns);
- rxrpc_set_client_reap_timer(rxnet);
+ rxrpc_set_client_reap_timer(local);
}
-
-out:
- spin_unlock(&bundle->channel_lock);
- _leave("");
- return;
}
/*
@@ -925,7 +659,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
struct rxrpc_bundle *bundle = conn->bundle;
unsigned int bindex;
- bool need_drop = false;
int i;
_enter("C=%x", conn->debug_id);
@@ -933,18 +666,13 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
rxrpc_process_delayed_final_acks(conn, true);
- spin_lock(&bundle->channel_lock);
bindex = conn->bundle_shift / RXRPC_MAXCALLS;
if (bundle->conns[bindex] == conn) {
_debug("clear slot %u", bindex);
bundle->conns[bindex] = NULL;
for (i = 0; i < RXRPC_MAXCALLS; i++)
clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
- need_drop = true;
- }
- spin_unlock(&bundle->channel_lock);
-
- if (need_drop) {
+ rxrpc_put_client_connection_id(bundle->local, conn);
rxrpc_deactivate_bundle(bundle);
rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
}
@@ -953,11 +681,15 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
/*
* Drop the active count on a bundle.
*/
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
- struct rxrpc_local *local = bundle->local;
+ struct rxrpc_local *local;
bool need_put = false;
+ if (!bundle)
+ return;
+
+ local = bundle->local;
if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
if (!bundle->exclusive) {
_debug("erase bundle");
@@ -984,7 +716,7 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
atomic_dec(&rxnet->nr_client_conns);
- rxrpc_put_client_connection_id(conn);
+ rxrpc_put_client_connection_id(local, conn);
}
/*
@@ -994,42 +726,26 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
* This may be called from conn setup or from a work item so cannot be
* considered non-reentrant.
*/
-void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
struct rxrpc_connection *conn;
- struct rxrpc_net *rxnet =
- container_of(work, struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
_enter("");
- if (list_empty(&rxnet->idle_client_conns)) {
- _leave(" [empty]");
- return;
- }
-
- /* Don't double up on the discarding */
- if (!mutex_trylock(&rxnet->client_conn_discard_lock)) {
- _leave(" [already]");
- return;
- }
-
/* We keep an estimate of what the number of conns ought to be after
* we've discarded some so that we don't overdo the discarding.
*/
- nr_conns = atomic_read(&rxnet->nr_client_conns);
+ nr_conns = atomic_read(&local->rxnet->nr_client_conns);
next:
- spin_lock(&rxnet->client_conn_cache_lock);
-
- if (list_empty(&rxnet->idle_client_conns))
- goto out;
-
- conn = list_entry(rxnet->idle_client_conns.next,
- struct rxrpc_connection, cache_link);
+ conn = list_first_entry_or_null(&local->idle_client_conns,
+ struct rxrpc_connection, cache_link);
+ if (!conn)
+ return;
- if (!rxnet->kill_all_client_conns) {
+ if (!local->kill_all_client_conns) {
/* If the number of connections is over the reap limit, we
* expedite discard by reducing the expiry timeout. We must,
* however, have at least a short grace period to be able to do
@@ -1052,8 +768,6 @@ next:
trace_rxrpc_client(conn, -1, rxrpc_client_discard);
list_del_init(&conn->cache_link);
- spin_unlock(&rxnet->client_conn_cache_lock);
-
rxrpc_unbundle_conn(conn);
/* Drop the ->cache_link ref */
rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
@@ -1070,31 +784,8 @@ not_yet_expired:
* then things get messier.
*/
_debug("not yet");
- if (!rxnet->kill_all_client_conns)
- timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);
-
-out:
- spin_unlock(&rxnet->client_conn_cache_lock);
- mutex_unlock(&rxnet->client_conn_discard_lock);
- _leave("");
-}
-
-/*
- * Preemptively destroy all the client connection records rather than waiting
- * for them to time out
- */
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
-{
- _enter("");
-
- spin_lock(&rxnet->client_conn_cache_lock);
- rxnet->kill_all_client_conns = true;
- spin_unlock(&rxnet->client_conn_cache_lock);
-
- del_timer_sync(&rxnet->client_conn_reap_timer);
-
- if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
- _debug("destroy: queue failed");
+ if (!local->kill_all_client_conns)
+ timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
_leave("");
}
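
The reaper walks local->idle_client_conns oldest-first, so it can stop at the first connection that has not yet reached idle_timestamp + expiry (with the expiry shortened, down to a short grace period, when over the connection limit) and re-arm the reap timer for that deadline. A freestanding sketch of the walk under those assumptions; the list and timer handling are illustrative:

	struct idle_conn {
		struct idle_conn *next;		/* kept in idle order */
		unsigned long idle_since;
	};

	static int over_limit;	/* set when too many conns exist */

	static unsigned long expires_at(const struct idle_conn *c,
					unsigned long normal,
					unsigned long grace)
	{
		return c->idle_since + (over_limit ? grace : normal);
	}

	/* Returns the deadline to re-arm the reap timer for, or 0 if
	 * the list was emptied.
	 */
	static unsigned long reap_idle(struct idle_conn **list,
				       unsigned long now,
				       unsigned long normal,
				       unsigned long grace)
	{
		struct idle_conn *c;

		while ((c = *list)) {
			unsigned long at = expires_at(c, normal, grace);

			if ((long)(at - now) > 0)
				return at;	/* first unexpired entry */
			*list = c->next;
			/* ...unbundle and drop the connection here... */
		}
		return 0;
	}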
@@ -1104,29 +795,19 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
*/
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
- struct rxrpc_connection *conn, *tmp;
- struct rxrpc_net *rxnet = local->rxnet;
- LIST_HEAD(graveyard);
+ struct rxrpc_connection *conn;
_enter("");
- spin_lock(&rxnet->client_conn_cache_lock);
-
- list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
- cache_link) {
- if (conn->local == local) {
- atomic_dec(&conn->active);
- trace_rxrpc_client(conn, -1, rxrpc_client_discard);
- list_move(&conn->cache_link, &graveyard);
- }
- }
+ local->kill_all_client_conns = true;
- spin_unlock(&rxnet->client_conn_cache_lock);
+ del_timer_sync(&local->client_conn_reap_timer);
- while (!list_empty(&graveyard)) {
- conn = list_entry(graveyard.next,
- struct rxrpc_connection, cache_link);
+ while ((conn = list_first_entry_or_null(&local->idle_client_conns,
+ struct rxrpc_connection, cache_link))) {
list_del_init(&conn->cache_link);
+ atomic_dec(&conn->active);
+ trace_rxrpc_client(conn, -1, rxrpc_client_discard);
rxrpc_unbundle_conn(conn);
rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 480364bcbf85..44414e724415 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -17,11 +17,65 @@
#include "ar-internal.h"
/*
+ * Set the completion state on an aborted connection.
+ */
+static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
+ s32 abort_code, int err,
+ enum rxrpc_call_completion compl)
+{
+ bool aborted = false;
+
+ if (conn->state != RXRPC_CONN_ABORTED) {
+ spin_lock(&conn->state_lock);
+ if (conn->state != RXRPC_CONN_ABORTED) {
+ conn->abort_code = abort_code;
+ conn->error = err;
+ conn->completion = compl;
+ /* Order the abort info before the state change. */
+ smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
+ set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+ set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
+ aborted = true;
+ }
+ spin_unlock(&conn->state_lock);
+ }
+
+ return aborted;
+}
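
The smp_store_release() above publishes abort_code, error and completion before the state change becomes visible, so a lockless reader that observes RXRPC_CONN_ABORTED may safely read those fields. The same publication pattern in portable C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { RUNNING, ABORTED };

    struct conn {
            int abort_code;             /* written before the state flips... */
            int error;
            _Atomic int state;          /* ...and published by this store */
    };

    void set_conn_aborted(struct conn *c, int code, int err)
    {
            c->abort_code = code;
            c->error = err;
            /* Order the abort info before the state change. */
            atomic_store_explicit(&c->state, ABORTED, memory_order_release);
    }

    int get_abort_code(struct conn *c)
    {
            /* Pairs with the release store above: if we observe ABORTED,
             * the abort_code and error writes are guaranteed visible. */
            if (atomic_load_explicit(&c->state, memory_order_acquire) != ABORTED)
                    return 0;
            return c->abort_code;
    }

    int main(void)
    {
            struct conn c = { .state = RUNNING };

            set_conn_aborted(&c, 1, -103);      /* arbitrary code, -ECONNABORTED */
            printf("abort %d\n", get_abort_code(&c));
            return 0;
    }
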
+
+/*
+ * Abort a connection in response to a received packet, recording the abort code and poking the connection so the ABORT gets transmitted; returns -EPROTO for the caller's convenience.
+ */
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+ s32 abort_code, int err, enum rxrpc_abort_reason why)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
+ RXRPC_CALL_LOCALLY_ABORTED)) {
+ trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
+ sp->hdr.seq, abort_code, err);
+ rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
+ }
+ return -EPROTO;
+}
+
+/*
+ * Mark a connection as being remotely aborted.
+ */
+static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+ RXRPC_CALL_REMOTELY_ABORTED);
+}
+
+/*
* Retransmit terminal ACK or ABORT of the previous call.
*/
-static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- unsigned int channel)
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ struct sk_buff *skb,
+ unsigned int channel)
{
struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
@@ -46,9 +100,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
/* If the last call got moved on whilst we were waiting to run, just
* ignore this packet.
*/
- call_id = READ_ONCE(chan->last_call);
- /* Sync with __rxrpc_disconnect_call() */
- smp_rmb();
+ call_id = chan->last_call;
if (skb && call_id != sp->hdr.callNumber)
return;
@@ -65,9 +117,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[2].iov_base = &ack_info;
iov[2].iov_len = sizeof(ack_info);
+ serial = atomic_inc_return(&conn->serial);
+
pkt.whdr.epoch = htonl(conn->proto.epoch);
pkt.whdr.cid = htonl(conn->proto.cid | channel);
pkt.whdr.callNumber = htonl(call_id);
+ pkt.whdr.serial = htonl(serial);
pkt.whdr.seq = 0;
pkt.whdr.type = chan->last_type;
pkt.whdr.flags = conn->out_clientflag;
@@ -104,31 +159,15 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[0].iov_len += sizeof(pkt.ack);
len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
ioc = 3;
- break;
-
- default:
- return;
- }
-
- /* Resync with __rxrpc_disconnect_call() and check that the last call
- * didn't get advanced whilst we were filling out the packets.
- */
- smp_rmb();
- if (READ_ONCE(chan->last_call) != call_id)
- return;
-
- serial = atomic_inc_return(&conn->serial);
- pkt.whdr.serial = htonl(serial);
- switch (chan->last_type) {
- case RXRPC_PACKET_TYPE_ABORT:
- break;
- case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
ntohl(pkt.ack.firstPacket),
ntohl(pkt.ack.serial),
pkt.ack.reason, 0);
break;
+
+ default:
+ return;
}
ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
@@ -146,131 +185,34 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
/*
* pass a connection-level abort onto all calls on that connection
*/
-static void rxrpc_abort_calls(struct rxrpc_connection *conn,
- enum rxrpc_call_completion compl,
- rxrpc_serial_t serial)
+static void rxrpc_abort_calls(struct rxrpc_connection *conn)
{
struct rxrpc_call *call;
int i;
_enter("{%d},%x", conn->debug_id, conn->abort_code);
- spin_lock(&conn->bundle->channel_lock);
-
for (i = 0; i < RXRPC_MAXCALLS; i++) {
- call = rcu_dereference_protected(
- conn->channels[i].call,
- lockdep_is_held(&conn->bundle->channel_lock));
- if (call) {
- if (compl == RXRPC_CALL_LOCALLY_ABORTED)
- trace_rxrpc_abort(call->debug_id,
- "CON", call->cid,
- call->call_id, 0,
+ call = conn->channels[i].call;
+ if (call)
+ rxrpc_set_call_completion(call,
+ conn->completion,
conn->abort_code,
conn->error);
- else
- trace_rxrpc_rx_abort(call, serial,
- conn->abort_code);
- rxrpc_set_call_completion(call, compl,
- conn->abort_code,
- conn->error);
- }
}
- spin_unlock(&conn->bundle->channel_lock);
_leave("");
}
/*
- * generate a connection-level abort
- */
-static int rxrpc_abort_connection(struct rxrpc_connection *conn,
- int error, u32 abort_code)
-{
- struct rxrpc_wire_header whdr;
- struct msghdr msg;
- struct kvec iov[2];
- __be32 word;
- size_t len;
- u32 serial;
- int ret;
-
- _enter("%d,,%u,%u", conn->debug_id, error, abort_code);
-
- /* generate a connection-level abort */
- spin_lock(&conn->state_lock);
- if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
- spin_unlock(&conn->state_lock);
- _leave(" = 0 [already dead]");
- return 0;
- }
-
- conn->error = error;
- conn->abort_code = abort_code;
- conn->state = RXRPC_CONN_LOCALLY_ABORTED;
- set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- spin_unlock(&conn->state_lock);
-
- msg.msg_name = &conn->peer->srx.transport;
- msg.msg_namelen = conn->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- whdr.epoch = htonl(conn->proto.epoch);
- whdr.cid = htonl(conn->proto.cid);
- whdr.callNumber = 0;
- whdr.seq = 0;
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
- whdr.flags = conn->out_clientflag;
- whdr.userStatus = 0;
- whdr.securityIndex = conn->security_ix;
- whdr._rsvd = 0;
- whdr.serviceId = htons(conn->service_id);
-
- word = htonl(conn->abort_code);
-
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
- iov[1].iov_base = &word;
- iov[1].iov_len = sizeof(word);
-
- len = iov[0].iov_len + iov[1].iov_len;
-
- serial = atomic_inc_return(&conn->serial);
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
- whdr.serial = htonl(serial);
-
- ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
- if (ret < 0) {
- trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
- rxrpc_tx_point_conn_abort);
- _debug("sendmsg failed: %d", ret);
- return -EAGAIN;
- }
-
- trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
-
- conn->peer->last_tx_at = ktime_get_seconds();
-
- _leave(" = 0");
- return 0;
-}
-
-/*
* mark a call as being on a now-secured channel
* - must be called with BH's disabled.
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
- _enter("%p", call);
- if (call) {
- write_lock(&call->state_lock);
- if (call->state == RXRPC_CALL_SERVER_SECURING) {
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- rxrpc_notify_socket(call);
- }
- write_unlock(&call->state_lock);
+ if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ rxrpc_notify_socket(call);
}
}
@@ -278,44 +220,22 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
* connection-level Rx packet processor
*/
static int rxrpc_process_event(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- int loop, ret;
+ int ret;
- if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
- _leave(" = -ECONNABORTED [%u]", conn->state);
+ if (conn->state == RXRPC_CONN_ABORTED)
return -ECONNABORTED;
- }
_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
switch (sp->hdr.type) {
- case RXRPC_PACKET_TYPE_DATA:
- case RXRPC_PACKET_TYPE_ACK:
- rxrpc_conn_retransmit_call(conn, skb,
- sp->hdr.cid & RXRPC_CHANNELMASK);
- return 0;
-
- case RXRPC_PACKET_TYPE_BUSY:
- /* Just ignore BUSY packets for now. */
- return 0;
-
- case RXRPC_PACKET_TYPE_ABORT:
- conn->error = -ECONNABORTED;
- conn->abort_code = skb->priority;
- conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
- return -ECONNABORTED;
-
case RXRPC_PACKET_TYPE_CHALLENGE:
- return conn->security->respond_to_challenge(conn, skb,
- _abort_code);
+ return conn->security->respond_to_challenge(conn, skb);
case RXRPC_PACKET_TYPE_RESPONSE:
- ret = conn->security->verify_response(conn, skb, _abort_code);
+ ret = conn->security->verify_response(conn, skb);
if (ret < 0)
return ret;
@@ -324,27 +244,25 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (ret < 0)
return ret;
- spin_lock(&conn->bundle->channel_lock);
spin_lock(&conn->state_lock);
-
- if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
+ if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
conn->state = RXRPC_CONN_SERVICE;
- spin_unlock(&conn->state_lock);
- for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
- rxrpc_call_is_secure(
- rcu_dereference_protected(
- conn->channels[loop].call,
- lockdep_is_held(&conn->bundle->channel_lock)));
- } else {
- spin_unlock(&conn->state_lock);
- }
+ spin_unlock(&conn->state_lock);
- spin_unlock(&conn->bundle->channel_lock);
+ if (conn->state == RXRPC_CONN_SERVICE) {
+ /* Offload call state flipping to the I/O thread. As
+ * we've already received the packet, put it on the
+ * front of the queue.
+ */
+ skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
+ rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
+ skb_queue_head(&conn->local->rx_queue, skb);
+ rxrpc_wake_up_io_thread(conn->local);
+ }
return 0;
default:
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
- tracepoint_string("bad_conn_pkt"));
+ WARN_ON_ONCE(1);
return -EPROTO;
}
}
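
The secured-connection handoff above relies on re-queueing the packet at the head of the I/O thread's queue with a distinguishing mark, so the deferred state flip is handled before anything queued later. A toy illustration of that head-requeue (the queue type and mark values are made up):

    #include <stdio.h>

    struct msg {
            struct msg *next;
            int mark;                   /* e.g. packet vs conn-secured */
    };

    struct queue { struct msg *head, *tail; };

    void push_tail(struct queue *q, struct msg *m)
    {
            m->next = NULL;
            if (q->tail)
                    q->tail->next = m;
            else
                    q->head = m;
            q->tail = m;
    }

    /* Requeue at the head: the message keeps its place in line. */
    void push_head(struct queue *q, struct msg *m)
    {
            m->next = q->head;
            q->head = m;
            if (!q->tail)
                    q->tail = m;
    }

    int main(void)
    {
            struct queue q = { 0 };
            struct msg a = { .mark = 1 }, b = { .mark = 1 }, s = { .mark = 2 };

            push_tail(&q, &a);
            push_tail(&q, &b);
            push_head(&q, &s);          /* secured-conn event jumps the queue */
            for (struct msg *m = q.head; m; m = m->next)
                    printf("mark %d\n", m->mark);
            return 0;
    }
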
@@ -354,26 +272,9 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
*/
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
- u32 abort_code;
- int ret;
-
- _enter("{%d}", conn->debug_id);
-
- ASSERT(conn->security_ix != 0);
-
- if (conn->security->issue_challenge(conn) < 0) {
- abort_code = RX_CALL_DEAD;
- ret = -ENOMEM;
- goto abort;
- }
-
- _leave("");
- return;
-
-abort:
- _debug("abort %d, %d", ret, abort_code);
- rxrpc_abort_connection(conn, ret, abort_code);
- _leave(" [aborted]");
+ if (conn->security->issue_challenge(conn) < 0)
+ rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
+ rxrpc_abort_nomem);
}
/*
@@ -395,9 +296,7 @@ again:
if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
continue;
- smp_rmb(); /* vs rxrpc_disconnect_client_call */
- ack_at = READ_ONCE(chan->final_ack_at);
-
+ ack_at = chan->final_ack_at;
if (time_before(j, ack_at) && !force) {
if (time_before(ack_at, next_j)) {
next_j = ack_at;
@@ -424,47 +323,27 @@ again:
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
struct sk_buff *skb;
- u32 abort_code = RX_PROTOCOL_ERROR;
int ret;
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
- /* Process delayed ACKs whose time has come. */
- if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
- rxrpc_process_delayed_final_acks(conn, false);
-
/* Go through the conn-level event packets; each one holds a ref on this
 * connection which we release once we've finished with the packet */
while ((skb = skb_dequeue(&conn->rx_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
- ret = rxrpc_process_event(conn, skb, &abort_code);
+ ret = rxrpc_process_event(conn, skb);
switch (ret) {
- case -EPROTO:
- case -EKEYEXPIRED:
- case -EKEYREJECTED:
- goto protocol_error;
case -ENOMEM:
case -EAGAIN:
- goto requeue_and_leave;
- case -ECONNABORTED:
+ skb_queue_head(&conn->rx_queue, skb);
+ rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
+ break;
default:
rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
break;
}
}
-
- return;
-
-requeue_and_leave:
- skb_queue_head(&conn->rx_queue, skb);
- return;
-
-protocol_error:
- if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
- goto requeue_and_leave;
- rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
- return;
}
void rxrpc_process_connection(struct work_struct *work)
@@ -498,44 +377,59 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
/*
* Input a connection-level packet.
*/
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
- _leave(" = -ECONNABORTED [%u]", conn->state);
- return -ECONNABORTED;
- }
-
- _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
-
switch (sp->hdr.type) {
- case RXRPC_PACKET_TYPE_DATA:
- case RXRPC_PACKET_TYPE_ACK:
- rxrpc_conn_retransmit_call(conn, skb,
- sp->hdr.cid & RXRPC_CHANNELMASK);
- return 0;
-
case RXRPC_PACKET_TYPE_BUSY:
/* Just ignore BUSY packets for now. */
- return 0;
+ return true;
case RXRPC_PACKET_TYPE_ABORT:
- conn->error = -ECONNABORTED;
- conn->abort_code = skb->priority;
- conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
- return -ECONNABORTED;
+ if (rxrpc_is_conn_aborted(conn))
+ return true;
+ rxrpc_input_conn_abort(conn, skb);
+ rxrpc_abort_calls(conn);
+ return true;
case RXRPC_PACKET_TYPE_CHALLENGE:
case RXRPC_PACKET_TYPE_RESPONSE:
+ if (rxrpc_is_conn_aborted(conn)) {
+ if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
+ rxrpc_send_conn_abort(conn);
+ return true;
+ }
rxrpc_post_packet_to_conn(conn, skb);
- return 0;
+ return true;
default:
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
- tracepoint_string("bad_conn_pkt"));
- return -EPROTO;
+ WARN_ON_ONCE(1);
+ return true;
}
}
+
+/*
+ * Input a connection event.
+ */
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
+{
+ unsigned int loop;
+
+ if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
+ rxrpc_abort_calls(conn);
+
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+ if (conn->state != RXRPC_CONN_SERVICE)
+ break;
+
+ for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+ rxrpc_call_is_secure(conn->channels[loop].call);
+ break;
+ }
+
+ /* Process delayed ACKs whose time has come. */
+ if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+ rxrpc_process_delayed_final_acks(conn, false);
+}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 3c8f83dacb2b..ac85d4644a3c 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -23,12 +23,30 @@ static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
unsigned long reap_at);
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
+{
+ struct rxrpc_local *local = conn->local;
+ bool busy;
+
+ if (WARN_ON_ONCE(!local))
+ return;
+
+ spin_lock_bh(&local->lock);
+ busy = !list_empty(&conn->attend_link);
+ if (!busy) {
+ rxrpc_get_connection(conn, why);
+ list_add_tail(&conn->attend_link, &local->conn_attend_q);
+ }
+ spin_unlock_bh(&local->lock);
+ rxrpc_wake_up_io_thread(local);
+}
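
Note that rxrpc_poke_conn() is idempotent: the list_empty() check under local->lock means a connection already on the attend queue is neither queued nor referenced twice, and the I/O thread drops exactly one ref per dequeue. A portable sketch of the same idempotent-enqueue idea, assuming a pthread mutex in place of the bh spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct work {
            struct work *next;
            bool queued;
            int refs;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct work *attend_q;

    /* Queue for attention at most once, taking one ref for the queue. */
    void poke(struct work *w)
    {
            pthread_mutex_lock(&lock);
            if (!w->queued) {
                    w->queued = true;
                    w->refs++;          /* ref held by the queue */
                    w->next = attend_q;
                    attend_q = w;
            }
            pthread_mutex_unlock(&lock);
            /* ...wake the worker thread here... */
    }

    int main(void)
    {
            struct work w = { .refs = 1 };

            poke(&w);
            poke(&w);                   /* no-op: already queued */
            printf("refs=%d queued=%d\n", w.refs, w.queued);
            return 0;
    }
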
+
static void rxrpc_connection_timer(struct timer_list *timer)
{
struct rxrpc_connection *conn =
container_of(timer, struct rxrpc_connection, timer);
- rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
+ rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}
/*
@@ -49,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
+ mutex_init(&conn->security_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
@@ -82,10 +101,10 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
- /* Look up client connections by connection ID alone as their IDs are
- * unique for this machine.
+ /* Look up client connections by connection ID alone as their
+ * IDs are unique for this machine.
*/
- conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
+ conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
if (!conn || refcount_read(&conn->ref) == 0) {
_debug("no conn");
goto not_found;
@@ -139,7 +158,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
_enter("%d,%x", conn->debug_id, call->cid);
- if (rcu_access_pointer(chan->call) == call) {
+ if (chan->call == call) {
/* Save the result of the call so that we can repeat it if necessary
* through the channel, whilst disposing of the actual call record.
*/
@@ -159,12 +178,9 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
break;
}
- /* Sync with rxrpc_conn_retransmit(). */
- smp_wmb();
chan->last_call = chan->call_id;
chan->call_id = chan->call_counter;
-
- rcu_assign_pointer(chan->call, NULL);
+ chan->call = NULL;
}
_leave("");
@@ -178,6 +194,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
+ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+ rxrpc_see_call(call, rxrpc_call_see_disconnected);
+
call->peer->cong_ssthresh = call->cong_ssthresh;
if (!hlist_unhashed(&call->error_link)) {
@@ -186,18 +205,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
spin_unlock(&call->peer->lock);
}
- if (rxrpc_is_client_call(call))
- return rxrpc_disconnect_client_call(conn->bundle, call);
-
- spin_lock(&conn->bundle->channel_lock);
- __rxrpc_disconnect_call(conn, call);
- spin_unlock(&conn->bundle->channel_lock);
+ if (rxrpc_is_client_call(call)) {
+ rxrpc_disconnect_client_call(call->bundle, call);
+ } else {
+ __rxrpc_disconnect_call(conn, call);
+ conn->idle_timestamp = jiffies;
+ if (atomic_dec_and_test(&conn->active))
+ rxrpc_set_service_reap_timer(conn->rxnet,
+ jiffies + rxrpc_connection_expiry);
+ }
- set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
- conn->idle_timestamp = jiffies;
- if (atomic_dec_and_test(&conn->active))
- rxrpc_set_service_reap_timer(conn->rxnet,
- jiffies + rxrpc_connection_expiry);
+ rxrpc_put_call(call, rxrpc_call_put_io_thread);
}
/*
@@ -293,10 +311,10 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
container_of(work, struct rxrpc_connection, destructor);
struct rxrpc_net *rxnet = conn->rxnet;
- ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
- !rcu_access_pointer(conn->channels[1].call) &&
- !rcu_access_pointer(conn->channels[2].call) &&
- !rcu_access_pointer(conn->channels[3].call));
+ ASSERT(!conn->channels[0].call &&
+ !conn->channels[1].call &&
+ !conn->channels[2].call &&
+ !conn->channels[3].call);
ASSERT(list_empty(&conn->cache_link));
del_timer_sync(&conn->timer);
@@ -447,7 +465,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
_enter("");
atomic_dec(&rxnet->nr_conns);
- rxrpc_destroy_all_client_connections(rxnet);
del_timer_sync(&rxnet->service_conn_reap_timer);
rxrpc_queue_work(&rxnet->service_conn_reaper);
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 2a55a88b2a5b..f30323de82bd 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -11,7 +11,6 @@
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
.ref = REFCOUNT_INIT(1),
.debug_id = UINT_MAX,
- .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};
/*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d0e20e946e48..367927a99881 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -9,11 +9,10 @@
#include "ar-internal.h"
-static void rxrpc_proto_abort(const char *why,
- struct rxrpc_call *call, rxrpc_seq_t seq)
+static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
+ enum rxrpc_abort_reason why)
{
- if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG))
- rxrpc_send_abort_packet(call);
+ rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
}
/*
@@ -185,7 +184,7 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
if (call->cong_mode != RXRPC_CALL_SLOW_START &&
call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
return;
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
+ if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
return;
rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
@@ -250,47 +249,34 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
* This occurs when we get an ACKALL packet, the first DATA packet of a reply,
* or a final ACK packet.
*/
-static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
- const char *abort_why)
+static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+ enum rxrpc_abort_reason abort_why)
{
- unsigned int state;
-
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
- write_lock(&call->state_lock);
-
- state = call->state;
- switch (state) {
+ switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- if (reply_begun)
- call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
- else
- call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ if (reply_begun) {
+ rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
+ break;
+ }
+
+ rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
break;
case RXRPC_CALL_SERVER_AWAIT_ACK:
- __rxrpc_call_completed(call);
- state = call->state;
+ rxrpc_call_completed(call);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
break;
default:
- goto bad_state;
+ kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
+ rxrpc_proto_abort(call, call->tx_top, abort_why);
+ break;
}
-
- write_unlock(&call->state_lock);
- if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
- trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
- else
- trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
- _leave(" = ok");
- return true;
-
-bad_state:
- write_unlock(&call->state_lock);
- kdebug("end_tx %s", rxrpc_call_states[call->state]);
- rxrpc_proto_abort(abort_why, call, call->tx_top);
- return false;
}
/*
@@ -305,18 +291,48 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
if (call->ackr_reason) {
now = jiffies;
timo = now + MAX_JIFFY_OFFSET;
- WRITE_ONCE(call->resend_at, timo);
+
WRITE_ONCE(call->delay_ack_at, timo);
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
if (!rxrpc_rotate_tx_window(call, top, &summary)) {
- rxrpc_proto_abort("TXL", call, top);
+ rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
return false;
}
}
- return rxrpc_end_tx_phase(call, true, "ETD");
+
+ rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
+ return true;
+}
+
+/*
+ * End the packet reception phase.
+ */
+static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
+{
+ rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
+
+ _enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);
+
+ trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
+
+ switch (__rxrpc_call_state(call)) {
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
+ rxrpc_call_completed(call);
+ break;
+
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
+ call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+ rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
+ break;
+
+ default:
+ break;
+ }
}
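
rxrpc_end_rx_phase() above encodes just two interesting transitions: a client call is complete once the whole reply is in, while a server call moves on to acknowledging the request. Reduced to a sketch of the transition table (state names abbreviated):

    #include <stdio.h>

    enum call_state {
            CLIENT_RECV_REPLY,
            SERVER_RECV_REQUEST,
            SERVER_ACK_REQUEST,
            COMPLETE,
    };

    /* End of the receive phase: what happens to the call state. */
    enum call_state end_rx_phase(enum call_state s)
    {
            switch (s) {
            case CLIENT_RECV_REPLY:
                    return COMPLETE;
            case SERVER_RECV_REQUEST:
                    return SERVER_ACK_REQUEST;
            default:
                    return s;           /* no transition */
            }
    }

    int main(void)
    {
            printf("%d\n", end_rx_phase(SERVER_RECV_REQUEST) == SERVER_ACK_REQUEST);
            return 0;
    }
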
static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
@@ -337,8 +353,9 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
__skb_queue_tail(&call->recvmsg_queue, skb);
rxrpc_input_update_ack_window(call, window, wtop);
-
trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
+ if (last)
+ rxrpc_end_rx_phase(call, sp->hdr.serial);
}
/*
@@ -366,17 +383,14 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
if (last) {
if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq + 1 != wtop) {
- rxrpc_proto_abort("LSN", call, seq);
- return;
- }
+ seq + 1 != wtop)
+ return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
} else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
after_eq(seq, wtop)) {
pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
call->debug_id, seq, window, wtop, wlimit);
- rxrpc_proto_abort("LSA", call, seq);
- return;
+ return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
}
}
@@ -550,7 +564,6 @@ protocol_error:
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- enum rxrpc_call_state state;
rxrpc_serial_t serial = sp->hdr.serial;
rxrpc_seq_t seq0 = sp->hdr.seq;
@@ -558,11 +571,20 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
atomic64_read(&call->ackr_window), call->rx_highest_seq,
skb->len, seq0);
- state = READ_ONCE(call->state);
- if (state >= RXRPC_CALL_COMPLETE)
+ if (__rxrpc_call_is_complete(call))
return;
- if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ switch (__rxrpc_call_state(call)) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+ /* Received data implicitly ACKs all of the request
+ * packets we sent when we're acting as a client.
+ */
+ if (!rxrpc_receiving_reply(call))
+ goto out_notify;
+ break;
+
+ case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
@@ -573,18 +595,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_reduce_call_timer(call, expect_req_by, now,
rxrpc_timer_set_for_idle);
}
+ break;
}
- /* Received data implicitly ACKs all of the request packets we sent
- * when we're acting as a client.
- */
- if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
- state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
- !rxrpc_receiving_reply(call))
- goto out_notify;
+ default:
+ break;
+ }
if (!rxrpc_input_split_jumbo(call, skb)) {
- rxrpc_proto_abort("VLD", call, sp->hdr.seq);
+ rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
goto out_notify;
}
skb = NULL;
@@ -765,7 +784,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
offset = sizeof(struct rxrpc_wire_header);
if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
- return rxrpc_proto_abort("XAK", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
offset += sizeof(ack);
ack_serial = sp->hdr.serial;
@@ -845,7 +864,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
ioffset = offset + nr_acks + 3;
if (skb->len >= ioffset + sizeof(info) &&
skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
- return rxrpc_proto_abort("XAI", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
if (nr_acks > 0)
skb_condense(skb);
@@ -868,10 +887,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_input_ackinfo(call, skb, &info);
if (first_soft_ack == 0)
- return rxrpc_proto_abort("AK0", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
/* Ignore ACKs unless we are or have just been transmitting. */
- switch (READ_ONCE(call->state)) {
+ switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -883,20 +902,20 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
if (before(hard_ack, call->acks_hard_ack) ||
after(hard_ack, call->tx_top))
- return rxrpc_proto_abort("AKW", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
if (nr_acks > call->tx_top - hard_ack)
- return rxrpc_proto_abort("AKN", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
if (after(hard_ack, call->acks_hard_ack)) {
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
- rxrpc_end_tx_phase(call, false, "ETA");
+ rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
return;
}
}
if (nr_acks > 0) {
if (offset > (int)skb->len - nr_acks)
- return rxrpc_proto_abort("XSA", call, 0);
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
nr_acks, &summary);
}
@@ -918,7 +937,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
struct rxrpc_ack_summary summary = { 0 };
if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
- rxrpc_end_tx_phase(call, false, "ETL");
+ rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
}
/*
@@ -963,27 +982,23 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
- rxrpc_input_data(call, skb);
- break;
+ return rxrpc_input_data(call, skb);
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_input_ack(call, skb);
- break;
+ return rxrpc_input_ack(call, skb);
case RXRPC_PACKET_TYPE_BUSY:
/* Just ignore BUSY packets from the server; the retry and
* lifespan timers will take care of business. BUSY packets
* from the client don't make sense.
*/
- break;
+ return;
case RXRPC_PACKET_TYPE_ABORT:
- rxrpc_input_abort(call, skb);
- break;
+ return rxrpc_input_abort(call, skb);
case RXRPC_PACKET_TYPE_ACKALL:
- rxrpc_input_ackall(call, skb);
- break;
+ return rxrpc_input_ackall(call, skb);
default:
break;
@@ -998,24 +1013,18 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
*/
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
{
- struct rxrpc_connection *conn = call->conn;
-
- switch (READ_ONCE(call->state)) {
+ switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
fallthrough;
case RXRPC_CALL_COMPLETE:
break;
default:
- if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN))
- rxrpc_send_abort_packet(call);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
+ rxrpc_eproto_improper_term);
trace_rxrpc_improper_term(call);
break;
}
rxrpc_input_call_event(call, skb);
-
- spin_lock(&conn->bundle->channel_lock);
- __rxrpc_disconnect_call(conn, call);
- spin_unlock(&conn->bundle->channel_lock);
}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 0eb8471bfc53..34353b6e584b 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -43,25 +43,17 @@ static void none_free_call_crypto(struct rxrpc_call *call)
}
static int none_respond_to_challenge(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
- tracepoint_string("chall_none"));
- return -EPROTO;
+ return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+ rxrpc_eproto_rxnull_challenge);
}
static int none_verify_response(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
- tracepoint_string("resp_none"));
- return -EPROTO;
+ return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+ rxrpc_eproto_rxnull_response);
}
static void none_clear(struct rxrpc_connection *conn)
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index d83ae3193032..9e9dfb2fc559 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -67,9 +67,31 @@ void rxrpc_error_report(struct sock *sk)
}
/*
+ * Directly produce an abort from a packet.
+ */
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+ s32 abort_code, int err)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ abort_code, err);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = abort_code;
+ return false;
+}
+
+static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+ return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
+}
+
+#define just_discard true
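
The just_discard alias documents the new boolean convention in this file: handlers return true when a packet has been fully dealt with, even if that just means dropping it, and false when the caller should reject it with an abort (see the io-thread hunk below, where !rxrpc_input_packet() leads to rxrpc_reject_packet()). A toy rendering of the convention:

    #include <stdbool.h>
    #include <stdio.h>

    #define just_discard true           /* handled: nothing more to do */

    /* Returns true if consumed/discarded, false if the caller must reject. */
    bool input_packet(int type)
    {
            switch (type) {
            case 9:
            case 10:
            case 11:
                    return just_discard; /* ignorable packet types */
            default:
                    return false;        /* unknown: reject with abort */
            }
    }

    int main(void)
    {
            if (!input_packet(42))
                    printf("reject packet\n");
            return 0;
    }
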
+
+/*
* Process event packets targeted at a local endpoint.
*/
-static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
+static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
char v;
@@ -81,22 +103,21 @@ static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
if (v == 0)
rxrpc_send_version_request(local, &sp->hdr, skb);
}
+
+ return true;
}
/*
* Extract the wire header from a packet and translate the byte order.
*/
-static noinline
-int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
+static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
+ struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
/* dig out the RxRPC connection details */
- if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
- tracepoint_string("bad_hdr"));
- return -EBADMSG;
- }
+ if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);
memset(sp, 0, sizeof(*sp));
sp->hdr.epoch = ntohl(whdr.epoch);
@@ -110,7 +131,7 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
- return 0;
+ return true;
}
/*
@@ -130,28 +151,28 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
/*
* Process packets received on the local endpoint
*/
-static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
+static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
{
struct rxrpc_connection *conn;
struct sockaddr_rxrpc peer_srx;
struct rxrpc_skb_priv *sp;
struct rxrpc_peer *peer = NULL;
struct sk_buff *skb = *_skb;
- int ret = 0;
+ bool ret = false;
skb_pull(skb, sizeof(struct udphdr));
sp = rxrpc_skb(skb);
/* dig out the RxRPC connection details */
- if (rxrpc_extract_header(sp, skb) < 0)
- goto bad_message;
+ if (!rxrpc_extract_header(sp, skb))
+ return just_discard;
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
- return 0;
+ return just_discard;
}
}
@@ -160,28 +181,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
if (rxrpc_to_client(sp))
- return 0;
- rxrpc_input_version(local, skb);
- return 0;
+ return just_discard;
+ return rxrpc_input_version(local, skb);
case RXRPC_PACKET_TYPE_BUSY:
if (rxrpc_to_server(sp))
- return 0;
+ return just_discard;
fallthrough;
case RXRPC_PACKET_TYPE_ACK:
case RXRPC_PACKET_TYPE_ACKALL:
if (sp->hdr.callNumber == 0)
- goto bad_message;
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
break;
case RXRPC_PACKET_TYPE_ABORT:
if (!rxrpc_extract_abort(skb))
- return 0; /* Just discard if malformed */
+ return just_discard; /* Just discard if malformed */
break;
case RXRPC_PACKET_TYPE_DATA:
- if (sp->hdr.callNumber == 0 ||
- sp->hdr.seq == 0)
- goto bad_message;
+ if (sp->hdr.callNumber == 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
+ if (sp->hdr.seq == 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
/* Unshare the packet so that it can be modified for in-place
* decryption.
@@ -191,7 +212,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
if (!skb) {
rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
*_skb = NULL;
- return 0;
+ return just_discard;
}
if (skb != *_skb) {
@@ -205,28 +226,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
case RXRPC_PACKET_TYPE_CHALLENGE:
if (rxrpc_to_server(sp))
- return 0;
+ return just_discard;
break;
case RXRPC_PACKET_TYPE_RESPONSE:
if (rxrpc_to_client(sp))
- return 0;
+ return just_discard;
break;
/* Packet types 9-11 should just be ignored. */
case RXRPC_PACKET_TYPE_PARAMS:
case RXRPC_PACKET_TYPE_10:
case RXRPC_PACKET_TYPE_11:
- return 0;
+ return just_discard;
default:
- goto bad_message;
+ return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
}
if (sp->hdr.serviceId == 0)
- goto bad_message;
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);
if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
- return true; /* Unsupported address type - discard. */
+ return just_discard; /* Unsupported address type. */
if (peer_srx.transport.family != local->srx.transport.family &&
(peer_srx.transport.family == AF_INET &&
@@ -234,7 +255,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
peer_srx.transport.family,
local->srx.transport.family);
- return true; /* Wrong address type - discard. */
+ return just_discard; /* Wrong address type. */
}
if (rxrpc_to_client(sp)) {
@@ -242,12 +263,8 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
rcu_read_unlock();
- if (!conn) {
- trace_rxrpc_abort(0, "NCC", sp->hdr.cid,
- sp->hdr.callNumber, sp->hdr.seq,
- RXKADINCONSISTENCY, EBADMSG);
- goto protocol_error;
- }
+ if (!conn)
+ return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
@@ -280,18 +297,6 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
rxrpc_put_peer(peer, rxrpc_peer_put_input);
- if (ret < 0)
- goto reject_packet;
- return 0;
-
-bad_message:
- trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
- skb->priority = RX_PROTOCOL_ERROR;
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
- rxrpc_reject_packet(local, skb);
return ret;
}
@@ -306,21 +311,23 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
struct rxrpc_channel *chan;
struct rxrpc_call *call = NULL;
unsigned int channel;
+ bool ret;
if (sp->hdr.securityIndex != conn->security_ix)
- goto wrong_security;
+ return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
+ RXKADINCONSISTENCY, -EBADMSG);
if (sp->hdr.serviceId != conn->service_id) {
int old_id;
if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
- goto reupgrade;
+ return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);
+
old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
sp->hdr.serviceId);
-
if (old_id != conn->orig_service_id &&
old_id != sp->hdr.serviceId)
- goto reupgrade;
+ return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
}
if (after(sp->hdr.serial, conn->hi_serial))
@@ -336,19 +343,19 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
- return 0;
+ return just_discard;
if (sp->hdr.callNumber == chan->last_call) {
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
- return 0;
+ return just_discard;
/* For the previous service call, if completed successfully, we
* discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
- return 0;
+ return just_discard;
/* But otherwise we need to retransmit the final packet from
* data cached in the connection record.
@@ -358,19 +365,17 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
sp->hdr.seq,
sp->hdr.serial,
sp->hdr.flags);
- rxrpc_input_conn_packet(conn, skb);
- return 0;
+ rxrpc_conn_retransmit_call(conn, skb, channel);
+ return just_discard;
}
- rcu_read_lock();
- call = rxrpc_try_get_call(rcu_dereference(chan->call),
- rxrpc_call_get_input);
- rcu_read_unlock();
+ call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);
if (sp->hdr.callNumber > chan->call_id) {
if (rxrpc_to_client(sp)) {
rxrpc_put_call(call, rxrpc_call_put_input);
- goto reject_packet;
+ return rxrpc_protocol_error(skb,
+ rxrpc_eproto_unexpected_implicit_end);
}
if (call) {
@@ -382,38 +387,14 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
if (!call) {
if (rxrpc_to_client(sp))
- goto bad_message;
- if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
- peer_srx, skb))
- return 0;
- goto reject_packet;
+ return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
+ return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
+ peer_srx, skb);
}
- rxrpc_input_call_event(call, skb);
+ ret = rxrpc_input_call_event(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
- return 0;
-
-wrong_security:
- trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RXKADINCONSISTENCY, EBADMSG);
- skb->priority = RXKADINCONSISTENCY;
- goto post_abort;
-
-reupgrade:
- trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_PROTOCOL_ERROR, EBADMSG);
- goto protocol_error;
-
-bad_message:
- trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
- skb->priority = RX_PROTOCOL_ERROR;
-post_abort:
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
- rxrpc_reject_packet(conn->local, skb);
- return 0;
+ return ret;
}
/*
@@ -421,10 +402,14 @@ reject_packet:
*/
int rxrpc_io_thread(void *data)
{
+ struct rxrpc_connection *conn;
struct sk_buff_head rx_queue;
struct rxrpc_local *local = data;
struct rxrpc_call *call;
struct sk_buff *skb;
+ bool should_stop;
+
+ complete(&local->io_thread_ready);
skb_queue_head_init(&rx_queue);
@@ -433,6 +418,24 @@ int rxrpc_io_thread(void *data)
for (;;) {
rxrpc_inc_stat(local->rxnet, stat_io_loop);
+ /* Deal with connections that want immediate attention. */
+ conn = list_first_entry_or_null(&local->conn_attend_q,
+ struct rxrpc_connection,
+ attend_link);
+ if (conn) {
+ spin_lock_bh(&local->lock);
+ list_del_init(&conn->attend_link);
+ spin_unlock_bh(&local->lock);
+
+ rxrpc_input_conn_event(conn, NULL);
+ rxrpc_put_connection(conn, rxrpc_conn_put_poke);
+ continue;
+ }
+
+ if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+ &local->client_conn_flags))
+ rxrpc_discard_expired_client_conns(local);
+
/* Deal with calls that want immediate attention. */
if ((call = list_first_entry_or_null(&local->call_attend_q,
struct rxrpc_call,
@@ -447,12 +450,17 @@ int rxrpc_io_thread(void *data)
continue;
}
+ if (!list_empty(&local->new_client_calls))
+ rxrpc_connect_client_calls(local);
+
/* Process received packets and errors. */
if ((skb = __skb_dequeue(&rx_queue))) {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
switch (skb->mark) {
case RXRPC_SKB_MARK_PACKET:
skb->priority = 0;
- rxrpc_input_packet(local, &skb);
+ if (!rxrpc_input_packet(local, &skb))
+ rxrpc_reject_packet(local, skb);
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_free_skb(skb, rxrpc_skb_put_input);
break;
@@ -460,6 +468,11 @@ int rxrpc_io_thread(void *data)
rxrpc_input_error(local, skb);
rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
break;
+ case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+ rxrpc_input_conn_event(sp->conn, skb);
+ rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke);
+ rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
+ break;
default:
WARN_ON_ONCE(1);
rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
@@ -476,13 +489,18 @@ int rxrpc_io_thread(void *data)
}
set_current_state(TASK_INTERRUPTIBLE);
+ should_stop = kthread_should_stop();
if (!skb_queue_empty(&local->rx_queue) ||
- !list_empty(&local->call_attend_q)) {
+ !list_empty(&local->call_attend_q) ||
+ !list_empty(&local->conn_attend_q) ||
+ !list_empty(&local->new_client_calls) ||
+ test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+ &local->client_conn_flags)) {
__set_current_state(TASK_RUNNING);
continue;
}
- if (kthread_should_stop())
+ if (should_stop)
break;
schedule();
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 44222923c0d1..b8eaca5d9f22 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -82,30 +82,59 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
}
}
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+ struct rxrpc_local *local =
+ container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+
+ if (local->kill_all_client_conns &&
+ test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+ rxrpc_wake_up_io_thread(local);
+}
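
The timer callback only sets a flag bit and wakes the I/O thread, which later consumes the bit with test_and_clear_bit() (see the io_thread.c hunk above), so multiple firings before the thread runs coalesce into one reap pass. The flag handshake in C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    #define REAP_TIMER (1u << 0)

    static _Atomic unsigned int flags;

    /* Timer side: mark work pending; wake the loop only on 0 -> 1. */
    void timer_fired(void)
    {
            if (!(atomic_fetch_or(&flags, REAP_TIMER) & REAP_TIMER))
                    puts("wake io thread");
    }

    /* Loop side: consume the flag exactly once per pass. */
    int should_reap(void)
    {
            return atomic_fetch_and(&flags, ~REAP_TIMER) & REAP_TIMER;
    }

    int main(void)
    {
            timer_fired();
            timer_fired();              /* coalesced: no second wake */
            printf("reap=%d reap=%d\n", should_reap(), should_reap());
            return 0;
    }
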
+
/*
* Allocate a new local endpoint.
*/
-static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
+ u32 tmp;
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
refcount_set(&local->ref, 1);
atomic_set(&local->active_users, 1);
- local->rxnet = rxnet;
+ local->net = net;
+ local->rxnet = rxrpc_net(net);
INIT_HLIST_NODE(&local->link);
init_rwsem(&local->defrag_sem);
+ init_completion(&local->io_thread_ready);
skb_queue_head_init(&local->rx_queue);
+ INIT_LIST_HEAD(&local->conn_attend_q);
INIT_LIST_HEAD(&local->call_attend_q);
+
local->client_bundles = RB_ROOT;
spin_lock_init(&local->client_bundles_lock);
+ local->kill_all_client_conns = false;
+ INIT_LIST_HEAD(&local->idle_client_conns);
+ timer_setup(&local->client_conn_reap_timer,
+ rxrpc_client_conn_reap_timeout, 0);
+
spin_lock_init(&local->lock);
rwlock_init(&local->services_lock);
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
+ idr_init(&local->conn_ids);
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp &= 0x3fffffff;
+ if (tmp == 0)
+ tmp = 1;
+ idr_set_cursor(&local->conn_ids, tmp);
+ INIT_LIST_HEAD(&local->new_client_calls);
+ spin_lock_init(&local->client_call_lock);
+
trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
}
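
Client connection IDs now come from a per-endpoint IDR whose cursor starts at a random non-zero value masked to 30 bits, presumably so that the ID shifted up by RXRPC_CIDSHIFT to make room for the two-bit channel number still fits a 32-bit cid, and so that ID 0 is never used. A sketch of that cursor choice (the constant name is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CIDSHIFT 2  /* low cid bits carry the channel (0-3) */

    /* Pick a random, non-zero starting point for conn ID allocation;
     * 30 bits so (id << CIDSHIFT) | channel still fits a 32-bit cid. */
    uint32_t initial_conn_id_cursor(uint32_t rnd)
    {
            rnd &= 0x3fffffff;
            return rnd ? rnd : 1;       /* ID 0 is never handed out */
    }

    int main(void)
    {
            uint32_t id = initial_conn_id_cursor((uint32_t)rand());

            printf("first cid, channel 1: %#x\n", (id << CIDSHIFT) | 1);
            return 0;
    }
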
@@ -189,6 +218,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
goto error_sock;
}
+ wait_for_completion(&local->io_thread_ready);
local->io_thread = io_thread;
_leave(" = 0");
return 0;
@@ -246,7 +276,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
goto found;
}
- local = rxrpc_alloc_local(rxnet, srx);
+ local = rxrpc_alloc_local(net, srx);
if (!local)
goto nomem;
@@ -357,10 +387,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
*/
void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
- unsigned int debug_id = local->debug_id;
+ unsigned int debug_id;
int r, u;
if (local) {
+ debug_id = local->debug_id;
r = refcount_read(&local->ref);
u = atomic_dec_return(&local->active_users);
trace_rxrpc_local(debug_id, why, r, u);
@@ -404,6 +435,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
* local endpoint.
*/
rxrpc_purge_queue(&local->rx_queue);
+ rxrpc_purge_client_connections(local);
}
/*
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5905530e2f33..a0319c040c25 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -10,15 +10,6 @@
unsigned int rxrpc_net_id;
-static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
-{
- struct rxrpc_net *rxnet =
- container_of(timer, struct rxrpc_net, client_conn_reap_timer);
-
- if (rxnet->live)
- rxrpc_queue_work(&rxnet->client_conn_reaper);
-}
-
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
{
struct rxrpc_net *rxnet =
@@ -63,14 +54,6 @@ static __net_init int rxrpc_init_net(struct net *net)
rxrpc_service_conn_reap_timeout, 0);
atomic_set(&rxnet->nr_client_conns, 0);
- rxnet->kill_all_client_conns = false;
- spin_lock_init(&rxnet->client_conn_cache_lock);
- mutex_init(&rxnet->client_conn_discard_lock);
- INIT_LIST_HEAD(&rxnet->idle_client_conns);
- INIT_WORK(&rxnet->client_conn_reaper,
- rxrpc_discard_expired_client_conns);
- timer_setup(&rxnet->client_conn_reap_timer,
- rxrpc_client_conn_reap_timeout, 0);
INIT_HLIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 3d8c9f830ee0..a9746be29634 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -261,7 +261,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_tx_point_call_ack);
rxrpc_tx_backoff(call, ret);
- if (call->state < RXRPC_CALL_COMPLETE) {
+ if (!__rxrpc_call_is_complete(call)) {
if (ret < 0)
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
rxrpc_set_keepalive(call);
@@ -545,6 +545,62 @@ send_fragmentable:
}
/*
+ * Transmit a connection-level abort.
+ */
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
+{
+ struct rxrpc_wire_header whdr;
+ struct msghdr msg;
+ struct kvec iov[2];
+ __be32 word;
+ size_t len;
+ u32 serial;
+ int ret;
+
+ msg.msg_name = &conn->peer->srx.transport;
+ msg.msg_namelen = conn->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ whdr.epoch = htonl(conn->proto.epoch);
+ whdr.cid = htonl(conn->proto.cid);
+ whdr.callNumber = 0;
+ whdr.seq = 0;
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
+ whdr.flags = conn->out_clientflag;
+ whdr.userStatus = 0;
+ whdr.securityIndex = conn->security_ix;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(conn->service_id);
+
+ word = htonl(conn->abort_code);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = &word;
+ iov[1].iov_len = sizeof(word);
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+ serial = atomic_inc_return(&conn->serial);
+ whdr.serial = htonl(serial);
+
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
+ ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+ if (ret < 0) {
+ trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+ rxrpc_tx_point_conn_abort);
+ _debug("sendmsg failed: %d", ret);
+ return;
+ }
+
+ trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
+
+ conn->peer->last_tx_at = ktime_get_seconds();
+}
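
rxrpc_send_conn_abort() serialises the fixed rx wire header plus one big-endian abort code word as two kvecs. The resulting 32-byte wire image, reproduced as a standalone packing sketch (the struct and constant names here are illustrative):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PACKET_TYPE_ABORT 4

    struct wire_header {                /* rx wire header, all big-endian */
            uint32_t epoch;
            uint32_t cid;
            uint32_t call_number;       /* 0: connection-level packet */
            uint32_t seq;
            uint32_t serial;
            uint8_t  type;
            uint8_t  flags;
            uint8_t  user_status;
            uint8_t  security_index;
            uint16_t _rsvd;
            uint16_t service_id;
    } __attribute__((packed));

    /* Pack header + abort code into buf; returns bytes written. */
    size_t pack_conn_abort(uint8_t *buf, uint32_t epoch, uint32_t cid,
                           uint32_t serial, uint32_t abort_code)
    {
            struct wire_header whdr = {
                    .epoch  = htonl(epoch),
                    .cid    = htonl(cid),
                    .serial = htonl(serial),
                    .type   = PACKET_TYPE_ABORT,
            };
            uint32_t code = htonl(abort_code);

            memcpy(buf, &whdr, sizeof(whdr));
            memcpy(buf + sizeof(whdr), &code, sizeof(code));
            return sizeof(whdr) + sizeof(code);
    }

    int main(void)
    {
            uint8_t buf[32];

            printf("%zu bytes\n", pack_conn_abort(buf, 1, 4, 1, 0));
            return 0;
    }
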
+
+/*
* Reject a packet through the local endpoint.
*/
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
@@ -667,7 +723,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
static inline void rxrpc_instant_resend(struct rxrpc_call *call,
struct rxrpc_txbuf *txb)
{
- if (call->state < RXRPC_CALL_COMPLETE)
+ if (!__rxrpc_call_is_complete(call))
kdebug("resend");
}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 6685bf917aa6..552ba84a255c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -235,6 +235,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
struct rxrpc_peer *peer;
const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
time64_t keepalive_at;
+ bool use;
int slot;
spin_lock(&rxnet->peer_hash_lock);
@@ -247,9 +248,10 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
continue;
- if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
- spin_unlock(&rxnet->peer_hash_lock);
+ use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
+ spin_unlock(&rxnet->peer_hash_lock);
+ if (use) {
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
@@ -270,9 +272,11 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_lock(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
+ spin_unlock(&rxnet->peer_hash_lock);
rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
}
- rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
+ rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
+ spin_lock(&rxnet->peer_hash_lock);
}
spin_unlock(&rxnet->peer_hash_lock);
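
The keepalive dispatch rework drops peer_hash_lock around the per-peer work and the final put (which is why rxrpc_put_peer_locked() can be deleted below) and retakes it to continue the walk. A compressed pthread sketch of that pop-unlock-work-relock pattern:

    #include <pthread.h>
    #include <stdio.h>

    struct peer { struct peer *next; int id; };

    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pop-and-process: never do possibly-blocking work under the lock. */
    void dispatch(struct peer **collector)
    {
            pthread_mutex_lock(&hash_lock);
            while (*collector) {
                    struct peer *p = *collector;
                    *collector = p->next;

                    pthread_mutex_unlock(&hash_lock);
                    printf("keepalive for peer %d\n", p->id); /* work + put */
                    pthread_mutex_lock(&hash_lock);
            }
            pthread_mutex_unlock(&hash_lock);
    }

    int main(void)
    {
            struct peer b = { NULL, 2 }, a = { &b, 1 }, *list = &a;

            dispatch(&list);
            return 0;
    }
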
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 608946dcc505..8d7a715a0bb1 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -147,10 +147,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
-static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
struct rxrpc_peer *peer)
{
- struct net *net = sock_net(&rx->sk);
+ struct net *net = local->net;
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
@@ -226,7 +226,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
rxrpc_peer_init_rtt(peer);
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
- trace_rxrpc_peer(peer->debug_id, why, 1);
+ trace_rxrpc_peer(peer->debug_id, 1, why);
}
_leave(" = %p", peer);
@@ -236,11 +236,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
/*
* Initialise peer record.
*/
-static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
unsigned long hash_key)
{
peer->hash_key = hash_key;
- rxrpc_assess_MTU_size(rx, peer);
+ rxrpc_assess_MTU_size(local, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();
@@ -272,8 +272,7 @@ static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
/*
* Set up a new peer.
*/
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
- struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
@@ -285,7 +284,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
- rxrpc_init_peer(rx, peer, hash_key);
+ rxrpc_init_peer(local, peer, hash_key);
}
_leave(" = %p", peer);
@@ -304,14 +303,13 @@ static void rxrpc_free_peer(struct rxrpc_peer *peer)
* since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers.
*/
-void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
- struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
- rxrpc_init_peer(rx, peer, hash_key);
+ rxrpc_init_peer(local, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -322,8 +320,7 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
/*
* obtain a remote transport endpoint for the specified address
*/
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
- struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
@@ -343,7 +340,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
- candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
+ candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;
@@ -382,7 +379,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace
int r;
__refcount_inc(&peer->ref, &r);
- trace_rxrpc_peer(peer->debug_id, why, r + 1);
+ trace_rxrpc_peer(peer->debug_id, r + 1, why);
return peer;
}
@@ -439,25 +436,6 @@ void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
}
/*
- * Drop a ref on a peer record where the caller already holds the
- * peer_hash_lock.
- */
-void rxrpc_put_peer_locked(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
-{
- unsigned int debug_id = peer->debug_id;
- bool dead;
- int r;
-
- dead = __refcount_dec_and_test(&peer->ref, &r);
- trace_rxrpc_peer(debug_id, r - 1, why);
- if (dead) {
- hash_del_rcu(&peer->hash_link);
- list_del_init(&peer->keepalive_link);
- rxrpc_free_peer(peer);
- }
-}
-
-/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 3a59591ec061..750158a085cd 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -12,13 +12,13 @@
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
[RXRPC_CONN_UNUSED] = "Unused ",
+ [RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ",
[RXRPC_CONN_CLIENT] = "Client ",
[RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc",
[RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ",
[RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ",
[RXRPC_CONN_SERVICE] = "SvSecure",
- [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort",
- [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort",
+ [RXRPC_CONN_ABORTED] = "Aborted ",
};
/*
@@ -51,6 +51,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_local *local;
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ enum rxrpc_call_state state;
unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
@@ -75,7 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
- if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
+ state = rxrpc_call_state(call);
+ if (state != RXRPC_CALL_SERVER_PREALLOC) {
timeout = READ_ONCE(call->expect_rx_by);
timeout -= jiffies;
}
@@ -92,7 +94,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->call_id,
rxrpc_is_service_call(call) ? "Svc" : "Clt",
refcount_read(&call->ref),
- rxrpc_call_states[call->state],
+ rxrpc_call_states[state],
call->abort_code,
call->debug_id,
acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
@@ -143,6 +145,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ const char *state;
char lbuff[50], rbuff[50];
if (v == &rxnet->conn_proc_list) {
@@ -163,9 +166,11 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
}
sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
-
sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
print:
+ state = rxrpc_is_conn_aborted(conn) ?
+ rxrpc_call_completions[conn->completion] :
+ rxrpc_conn_states[conn->state];
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %s %3u %3d"
" %s %08x %08x %08x %08x %08x %08x %08x\n",
@@ -176,7 +181,7 @@ print:
rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
refcount_read(&conn->ref),
atomic_read(&conn->active),
- rxrpc_conn_states[conn->state],
+ state,
key_serial(conn->key),
atomic_read(&conn->serial),
conn->hi_serial,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 36b25d003cf0..dd54ceee7bcc 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -59,85 +59,6 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
}
/*
- * Transition a call to the complete state.
- */
-bool __rxrpc_set_call_completion(struct rxrpc_call *call,
- enum rxrpc_call_completion compl,
- u32 abort_code,
- int error)
-{
- if (call->state < RXRPC_CALL_COMPLETE) {
- call->abort_code = abort_code;
- call->error = error;
- call->completion = compl;
- call->state = RXRPC_CALL_COMPLETE;
- trace_rxrpc_call_complete(call);
- wake_up(&call->waitq);
- rxrpc_notify_socket(call);
- return true;
- }
- return false;
-}
-
-bool rxrpc_set_call_completion(struct rxrpc_call *call,
- enum rxrpc_call_completion compl,
- u32 abort_code,
- int error)
-{
- bool ret = false;
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- write_lock(&call->state_lock);
- ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
- write_unlock(&call->state_lock);
- }
- return ret;
-}
-
-/*
- * Record that a call successfully completed.
- */
-bool __rxrpc_call_completed(struct rxrpc_call *call)
-{
- return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
-}
-
-bool rxrpc_call_completed(struct rxrpc_call *call)
-{
- bool ret = false;
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- write_lock(&call->state_lock);
- ret = __rxrpc_call_completed(call);
- write_unlock(&call->state_lock);
- }
- return ret;
-}
-
-/*
- * Record that a call is locally aborted.
- */
-bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
- rxrpc_seq_t seq, u32 abort_code, int error)
-{
- trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
- abort_code, error);
- return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
- abort_code, error);
-}
-
-bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
- rxrpc_seq_t seq, u32 abort_code, int error)
-{
- bool ret;
-
- write_lock(&call->state_lock);
- ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
- write_unlock(&call->state_lock);
- return ret;
-}
-
-/*
* Pass a call terminating message to userspace.
*/
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
@@ -168,7 +89,7 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
break;
default:
- pr_err("Invalid terminal call state %u\n", call->state);
+ pr_err("Invalid terminal call state %u\n", call->completion);
BUG();
break;
}
@@ -180,41 +101,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
}
/*
- * End the packet reception phase.
- */
-static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
-{
- rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
-
- _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
-
- trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
-
- if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
- rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
-
- write_lock(&call->state_lock);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- __rxrpc_call_completed(call);
- write_unlock(&call->state_lock);
- break;
-
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
- write_unlock(&call->state_lock);
- rxrpc_propose_delay_ACK(call, serial,
- rxrpc_propose_ack_processing_op);
- break;
- default:
- write_unlock(&call->state_lock);
- break;
- }
-}
-
-/*
* Discard a packet we've used up and advance the Rx window by one.
*/
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
@@ -244,10 +130,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
serial, call->rx_consumed);
- if (last) {
- rxrpc_end_rx_phase(call, serial);
- return;
- }
+
+ if (last)
+ set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);
/* Check to see if there's an ACK that needs sending. */
acked = atomic_add_return(call->rx_consumed - old_consumed,
@@ -272,7 +157,8 @@ static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
/*
* Deliver messages to a call. This keeps processing packets until the buffer
* is filled and we find either more DATA (returns 0) or the end of the DATA
- * (returns 1). If more packets are required, it returns -EAGAIN.
+ * (returns 1). If more packets are required, it returns -EAGAIN and if the
+ * call has failed it returns -EIO.
*/
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
struct msghdr *msg, struct iov_iter *iter,
@@ -288,7 +174,13 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;
- if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
+ if (rxrpc_call_has_failed(call)) {
+ seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
+ ret = -EIO;
+ goto done;
+ }
+
+ if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
ret = 1;
goto done;
@@ -312,14 +204,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
if (rx_pkt_offset == 0) {
ret2 = rxrpc_verify_data(call, skb);
- rx_pkt_offset = sp->offset;
- rx_pkt_len = sp->len;
trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
- rx_pkt_offset, rx_pkt_len, ret2);
+ sp->offset, sp->len, ret2);
if (ret2 < 0) {
+ kdebug("verify = %d", ret2);
ret = ret2;
goto out;
}
+ rx_pkt_offset = sp->offset;
+ rx_pkt_len = sp->len;
} else {
trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
rx_pkt_offset, rx_pkt_len, 0);
@@ -388,13 +281,14 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct list_head *l;
+ unsigned int call_debug_id = 0;
size_t copied = 0;
long timeo;
int ret;
DEFINE_WAIT(wait);
- trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0);
+ trace_rxrpc_recvmsg(0, rxrpc_recvmsg_enter, 0);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
@@ -431,7 +325,7 @@ try_again:
if (list_empty(&rx->recvmsg_q)) {
if (signal_pending(current))
goto wait_interrupted;
- trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait, 0);
+ trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0);
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
@@ -450,7 +344,8 @@ try_again:
rxrpc_get_call(call, rxrpc_call_get_recvmsg);
write_unlock(&rx->recvmsg_lock);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0);
+ call_debug_id = call->debug_id;
+ trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
/* We're going to drop the socket lock, so we need to lock the call
* against interference by sendmsg.
@@ -492,36 +387,36 @@ try_again:
msg->msg_namelen = len;
}
- switch (READ_ONCE(call->state)) {
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
- flags, &copied);
- if (ret == -EAGAIN)
- ret = 0;
-
- if (!skb_queue_empty(&call->recvmsg_queue))
- rxrpc_notify_socket(call);
- break;
- default:
+ ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
+ flags, &copied);
+ if (ret == -EAGAIN)
ret = 0;
- break;
- }
-
+ if (ret == -EIO)
+ goto call_failed;
if (ret < 0)
goto error_unlock_call;
- if (call->state == RXRPC_CALL_COMPLETE) {
- ret = rxrpc_recvmsg_term(call, msg);
- if (ret < 0)
- goto error_unlock_call;
- if (!(flags & MSG_PEEK))
- rxrpc_release_call(rx, call);
- msg->msg_flags |= MSG_EOR;
- ret = 1;
- }
+ if (rxrpc_call_is_complete(call) &&
+ skb_queue_empty(&call->recvmsg_queue))
+ goto call_complete;
+ if (rxrpc_call_has_failed(call))
+ goto call_failed;
+ rxrpc_notify_socket(call);
+ goto not_yet_complete;
+
+call_failed:
+ rxrpc_purge_queue(&call->recvmsg_queue);
+call_complete:
+ ret = rxrpc_recvmsg_term(call, msg);
+ if (ret < 0)
+ goto error_unlock_call;
+ if (!(flags & MSG_PEEK))
+ rxrpc_release_call(rx, call);
+ msg->msg_flags |= MSG_EOR;
+ ret = 1;
+
+not_yet_complete:
if (ret == 0)
msg->msg_flags |= MSG_MORE;
else
@@ -531,7 +426,7 @@ try_again:
error_unlock_call:
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
+ trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
return ret;
error_requeue_call:
@@ -539,14 +434,14 @@ error_requeue_call:
write_lock(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
write_unlock(&rx->recvmsg_lock);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0);
+ trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put_recvmsg);
}
error_no_call:
release_sock(&rx->sk);
error_trace:
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
+ trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
return ret;
wait_interrupted:
@@ -584,49 +479,34 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
size_t offset = 0;
int ret;
- _enter("{%d,%s},%zu,%d",
- call->debug_id, rxrpc_call_states[call->state],
- *_len, want_more);
-
- ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
+ _enter("{%d},%zu,%d", call->debug_id, *_len, want_more);
mutex_lock(&call->user_mutex);
- switch (READ_ONCE(call->state)) {
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
- *_len, 0, &offset);
- *_len -= offset;
- if (ret < 0)
- goto out;
-
- /* We can only reach here with a partially full buffer if we
- * have reached the end of the data. We must otherwise have a
- * full buffer or have been given -EAGAIN.
- */
- if (ret == 1) {
- if (iov_iter_count(iter) > 0)
- goto short_data;
- if (!want_more)
- goto read_phase_complete;
- ret = 0;
- goto out;
- }
-
- if (!want_more)
- goto excess_data;
+ ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
+ *_len -= offset;
+ if (ret == -EIO)
+ goto call_failed;
+ if (ret < 0)
goto out;
- case RXRPC_CALL_COMPLETE:
- goto call_complete;
-
- default:
- ret = -EINPROGRESS;
+ /* We can only reach here with a partially full buffer if we have
+ * reached the end of the data. We must otherwise have a full buffer
+ * or have been given -EAGAIN.
+ */
+ if (ret == 1) {
+ if (iov_iter_count(iter) > 0)
+ goto short_data;
+ if (!want_more)
+ goto read_phase_complete;
+ ret = 0;
goto out;
}
+ if (!want_more)
+ goto excess_data;
+ goto out;
+
read_phase_complete:
ret = 1;
out:
@@ -637,14 +517,18 @@ out:
return ret;
short_data:
- trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
+ trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
+ call->cid, call->call_id, call->rx_consumed,
+ 0, -EBADMSG);
ret = -EBADMSG;
goto out;
excess_data:
- trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
+ trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
+ call->cid, call->call_id, call->rx_consumed,
+ 0, -EMSGSIZE);
ret = -EMSGSIZE;
goto out;
-call_complete:
+call_failed:
*_abort = call->abort_code;
ret = call->error;
if (call->completion == RXRPC_CALL_SUCCEEDED) {
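The rewritten comment above rxrpc_recvmsg_data() now pins down four outcomes: 0 (more DATA expected), 1 (end of DATA), -EAGAIN (more packets needed) and -EIO (call failed). A condensed sketch of the dispatch a caller performs, modelled on the rxrpc_recvmsg() hunks above; the helper name is illustrative:

static int handle_recvmsg_data_result(struct rxrpc_call *call, int ret)
{
	if (ret == -EAGAIN)
		return 0;	/* not an error: wait for more packets */
	if (ret == -EIO) {	/* call failed: discard pending data */
		rxrpc_purge_queue(&call->recvmsg_queue);
		return -EIO;	/* caller emits the terminal message */
	}
	return ret;		/* 0 = more DATA, 1 = end of DATA, or error */
}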
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index d1233720e05f..1bf571a66e02 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -411,18 +411,15 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
- bool aborted;
u32 data_size, buf;
u16 check;
int ret;
_enter("");
- if (sp->len < 8) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
- RXKADSEALEDINCON);
- goto protocol_error;
- }
+ if (sp->len < 8)
+ return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_1_short_header);
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
* directly into the target buffer.
@@ -442,11 +439,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
skcipher_request_zero(req);
/* Extract the decrypted packet length */
- if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
- RXKADDATALEN);
- goto protocol_error;
- }
+ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+ return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+ rxkad_abort_1_short_encdata);
sp->offset += sizeof(sechdr);
sp->len -= sizeof(sechdr);
@@ -456,26 +451,16 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
check = buf >> 16;
check ^= seq ^ call->call_id;
check &= 0xffff;
- if (check != 0) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
- RXKADSEALEDINCON);
- goto protocol_error;
- }
-
- if (data_size > sp->len) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
- RXKADDATALEN);
- goto protocol_error;
- }
+ if (check != 0)
+ return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_1_short_check);
+ if (data_size > sp->len)
+ return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+ rxkad_abort_1_short_data);
sp->len = data_size;
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-
-protocol_error:
- if (aborted)
- rxrpc_send_abort_packet(call);
- return -EPROTO;
}
/*
@@ -490,18 +475,15 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
- bool aborted;
u32 data_size, buf;
u16 check;
int nsg, ret;
_enter(",{%d}", sp->len);
- if (sp->len < 8) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
- RXKADSEALEDINCON);
- goto protocol_error;
- }
+ if (sp->len < 8)
+ return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_2_short_header);
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
* directly into the target buffer.
@@ -513,7 +495,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
} else {
sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
if (!sg)
- goto nomem;
+ return -ENOMEM;
}
sg_init_table(sg, nsg);
@@ -537,11 +519,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
kfree(sg);
/* Extract the decrypted packet length */
- if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
- RXKADDATALEN);
- goto protocol_error;
- }
+ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+ return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+ rxkad_abort_2_short_len);
sp->offset += sizeof(sechdr);
sp->len -= sizeof(sechdr);
@@ -551,30 +531,17 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
check = buf >> 16;
check ^= seq ^ call->call_id;
check &= 0xffff;
- if (check != 0) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
- RXKADSEALEDINCON);
- goto protocol_error;
- }
+ if (check != 0)
+ return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_2_short_check);
- if (data_size > sp->len) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
- RXKADDATALEN);
- goto protocol_error;
- }
+ if (data_size > sp->len)
+ return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+ rxkad_abort_2_short_data);
sp->len = data_size;
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-
-protocol_error:
- if (aborted)
- rxrpc_send_abort_packet(call);
- return -EPROTO;
-
-nomem:
- _leave(" = -ENOMEM");
- return -ENOMEM;
}
/*
@@ -590,7 +557,6 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
__be32 buf[2];
} crypto __aligned(8);
rxrpc_seq_t seq = sp->hdr.seq;
- bool aborted;
int ret;
u16 cksum;
u32 x, y;
@@ -627,9 +593,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
cksum = 1; /* zero checksums are not permitted */
if (cksum != sp->hdr.cksum) {
- aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
- RXKADSEALEDINCON);
- goto protocol_error;
+ ret = rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_bad_checksum);
+ goto out;
}
switch (call->conn->security_level) {
@@ -647,13 +613,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
break;
}
+out:
skcipher_request_free(req);
return ret;
-
-protocol_error:
- if (aborted)
- rxrpc_send_abort_packet(call);
- return -EPROTO;
}
/*
@@ -821,34 +783,30 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
* respond to a challenge packet
*/
static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb)
{
const struct rxrpc_key_token *token;
struct rxkad_challenge challenge;
struct rxkad_response *resp;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- const char *eproto;
- u32 version, nonce, min_level, abort_code;
- int ret;
+ u32 version, nonce, min_level;
+ int ret = -EPROTO;
_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
- eproto = tracepoint_string("chall_no_key");
- abort_code = RX_PROTOCOL_ERROR;
if (!conn->key)
- goto protocol_error;
+ return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+ rxkad_abort_chall_no_key);
- abort_code = RXKADEXPIRED;
ret = key_validate(conn->key);
if (ret < 0)
- goto other_error;
+ return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+ rxkad_abort_chall_key_expired);
- eproto = tracepoint_string("chall_short");
- abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&challenge, sizeof(challenge)) < 0)
- goto protocol_error;
+ return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+ rxkad_abort_chall_short);
version = ntohl(challenge.version);
nonce = ntohl(challenge.nonce);
@@ -856,15 +814,13 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level);
- eproto = tracepoint_string("chall_ver");
- abort_code = RXKADINCONSISTENCY;
if (version != RXKAD_VERSION)
- goto protocol_error;
+ return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+ rxkad_abort_chall_version);
- abort_code = RXKADLEVELFAIL;
- ret = -EACCES;
if (conn->security_level < min_level)
- goto other_error;
+ return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES,
+ rxkad_abort_chall_level);
token = conn->key->payload.data[0];
@@ -893,13 +849,6 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
kfree(resp);
return ret;
-
-protocol_error:
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
- ret = -EPROTO;
-other_error:
- *_abort_code = abort_code;
- return ret;
}
/*
@@ -910,20 +859,15 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
struct sk_buff *skb,
void *ticket, size_t ticket_len,
struct rxrpc_crypt *_session_key,
- time64_t *_expiry,
- u32 *_abort_code)
+ time64_t *_expiry)
{
struct skcipher_request *req;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt iv, key;
struct scatterlist sg[1];
struct in_addr addr;
unsigned int life;
- const char *eproto;
time64_t issue, now;
bool little_endian;
- int ret;
- u32 abort_code;
u8 *p, *q, *name, *end;
_enter("{%d},{%x}", conn->debug_id, key_serial(server_key));
@@ -935,10 +879,9 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
memcpy(&iv, &server_key->payload.data[2], sizeof(iv));
- ret = -ENOMEM;
req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS);
if (!req)
- goto temporary_error;
+ return -ENOMEM;
sg_init_one(&sg[0], ticket, ticket_len);
skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -949,18 +892,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
p = ticket;
end = p + ticket_len;
-#define Z(field) \
- ({ \
- u8 *__str = p; \
- eproto = tracepoint_string("rxkad_bad_"#field); \
- q = memchr(p, 0, end - p); \
- if (!q || q - p > (field##_SZ)) \
- goto bad_ticket; \
- for (; p < q; p++) \
- if (!isprint(*p)) \
- goto bad_ticket; \
- p++; \
- __str; \
+#define Z(field, fieldl) \
+ ({ \
+ u8 *__str = p; \
+ q = memchr(p, 0, end - p); \
+ if (!q || q - p > field##_SZ) \
+ return rxrpc_abort_conn( \
+ conn, skb, RXKADBADTICKET, -EPROTO, \
+ rxkad_abort_resp_tkt_##fieldl); \
+ for (; p < q; p++) \
+ if (!isprint(*p)) \
+ return rxrpc_abort_conn( \
+ conn, skb, RXKADBADTICKET, -EPROTO, \
+ rxkad_abort_resp_tkt_##fieldl); \
+ p++; \
+ __str; \
})
/* extract the ticket flags */
@@ -969,20 +915,20 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
p++;
/* extract the authentication name */
- name = Z(ANAME);
+ name = Z(ANAME, aname);
_debug("KIV ANAME: %s", name);
/* extract the principal's instance */
- name = Z(INST);
+ name = Z(INST, inst);
_debug("KIV INST : %s", name);
/* extract the principal's authentication domain */
- name = Z(REALM);
+ name = Z(REALM, realm);
_debug("KIV REALM: %s", name);
- eproto = tracepoint_string("rxkad_bad_len");
if (end - p < 4 + 8 + 4 + 2)
- goto bad_ticket;
+ return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO,
+ rxkad_abort_resp_tkt_short);
/* get the IPv4 address of the entity that requested the ticket */
memcpy(&addr, p, sizeof(addr));
@@ -1014,38 +960,23 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
_debug("KIV ISSUE: %llx [%llx]", issue, now);
/* check the ticket is in date */
- if (issue > now) {
- abort_code = RXKADNOAUTH;
- ret = -EKEYREJECTED;
- goto other_error;
- }
-
- if (issue < now - life) {
- abort_code = RXKADEXPIRED;
- ret = -EKEYEXPIRED;
- goto other_error;
- }
+ if (issue > now)
+ return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, -EKEYREJECTED,
+ rxkad_abort_resp_tkt_future);
+ if (issue < now - life)
+ return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, -EKEYEXPIRED,
+ rxkad_abort_resp_tkt_expired);
*_expiry = issue + life;
/* get the service name */
- name = Z(SNAME);
+ name = Z(SNAME, sname);
_debug("KIV SNAME: %s", name);
/* get the service instance name */
- name = Z(INST);
+ name = Z(INST, sinst);
_debug("KIV SINST: %s", name);
return 0;
-
-bad_ticket:
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
- abort_code = RXKADBADTICKET;
- ret = -EPROTO;
-other_error:
- *_abort_code = abort_code;
- return ret;
-temporary_error:
- return ret;
}
/*
@@ -1086,17 +1017,15 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
* verify a response
*/
static int rxkad_verify_response(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb)
{
struct rxkad_response *response;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt session_key;
struct key *server_key;
- const char *eproto;
time64_t expiry;
void *ticket;
- u32 abort_code, version, kvno, ticket_len, level;
+ u32 version, kvno, ticket_len, level;
__be32 csum;
int ret, i;
@@ -1104,22 +1033,18 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
server_key = rxrpc_look_up_server_security(conn, skb, 0, 0);
if (IS_ERR(server_key)) {
- switch (PTR_ERR(server_key)) {
+ ret = PTR_ERR(server_key);
+ switch (ret) {
case -ENOKEY:
- abort_code = RXKADUNKNOWNKEY;
- break;
+ return rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, ret,
+ rxkad_abort_resp_nokey);
case -EKEYEXPIRED:
- abort_code = RXKADEXPIRED;
- break;
+ return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+ rxkad_abort_resp_key_expired);
default:
- abort_code = RXKADNOAUTH;
- break;
+ return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, ret,
+ rxkad_abort_resp_key_rejected);
}
- trace_rxrpc_abort(0, "SVK",
- sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- abort_code, PTR_ERR(server_key));
- *_abort_code = abort_code;
- return -EPROTO;
}
ret = -ENOMEM;
@@ -1127,11 +1052,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
if (!response)
goto temporary_error;
- eproto = tracepoint_string("rxkad_rsp_short");
- abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
- response, sizeof(*response)) < 0)
+ response, sizeof(*response)) < 0) {
+ rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+ rxkad_abort_resp_short);
goto protocol_error;
+ }
version = ntohl(response->version);
ticket_len = ntohl(response->ticket_len);
@@ -1139,20 +1065,23 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len);
- eproto = tracepoint_string("rxkad_rsp_ver");
- abort_code = RXKADINCONSISTENCY;
- if (version != RXKAD_VERSION)
+ if (version != RXKAD_VERSION) {
+ rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+ rxkad_abort_resp_version);
goto protocol_error;
+ }
- eproto = tracepoint_string("rxkad_rsp_tktlen");
- abort_code = RXKADTICKETLEN;
- if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
+ if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) {
+ rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
+ rxkad_abort_resp_tkt_len);
goto protocol_error;
+ }
- eproto = tracepoint_string("rxkad_rsp_unkkey");
- abort_code = RXKADUNKNOWNKEY;
- if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
+ if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) {
+ rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
+ rxkad_abort_resp_unknown_tkt);
goto protocol_error;
+ }
/* extract the kerberos ticket and decrypt and decode it */
ret = -ENOMEM;
@@ -1160,15 +1089,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
if (!ticket)
goto temporary_error_free_resp;
- eproto = tracepoint_string("rxkad_tkt_short");
- abort_code = RXKADPACKETSHORT;
- ret = skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
- ticket, ticket_len);
- if (ret < 0)
- goto temporary_error_free_ticket;
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
+ ticket, ticket_len) < 0) {
+ rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+ rxkad_abort_resp_short_tkt);
+ goto protocol_error;
+ }
ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
- &session_key, &expiry, _abort_code);
+ &session_key, &expiry);
if (ret < 0)
goto temporary_error_free_ticket;
@@ -1176,56 +1105,61 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
* response */
rxkad_decrypt_response(conn, response, &session_key);
- eproto = tracepoint_string("rxkad_rsp_param");
- abort_code = RXKADSEALEDINCON;
- if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
- goto protocol_error_free;
- if (ntohl(response->encrypted.cid) != conn->proto.cid)
- goto protocol_error_free;
- if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
+ if (ntohl(response->encrypted.epoch) != conn->proto.epoch ||
+ ntohl(response->encrypted.cid) != conn->proto.cid ||
+ ntohl(response->encrypted.securityIndex) != conn->security_ix) {
+ rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_param);
goto protocol_error_free;
+ }
+
csum = response->encrypted.checksum;
response->encrypted.checksum = 0;
rxkad_calc_response_checksum(response);
- eproto = tracepoint_string("rxkad_rsp_csum");
- if (response->encrypted.checksum != csum)
+ if (response->encrypted.checksum != csum) {
+ rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_checksum);
goto protocol_error_free;
+ }
- spin_lock(&conn->bundle->channel_lock);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
- struct rxrpc_call *call;
u32 call_id = ntohl(response->encrypted.call_id[i]);
+ u32 counter = READ_ONCE(conn->channels[i].call_counter);
+
+ if (call_id > INT_MAX) {
+ rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_callid);
+ goto protocol_error_free;
+ }
- eproto = tracepoint_string("rxkad_rsp_callid");
- if (call_id > INT_MAX)
- goto protocol_error_unlock;
-
- eproto = tracepoint_string("rxkad_rsp_callctr");
- if (call_id < conn->channels[i].call_counter)
- goto protocol_error_unlock;
-
- eproto = tracepoint_string("rxkad_rsp_callst");
- if (call_id > conn->channels[i].call_counter) {
- call = rcu_dereference_protected(
- conn->channels[i].call,
- lockdep_is_held(&conn->bundle->channel_lock));
- if (call && call->state < RXRPC_CALL_COMPLETE)
- goto protocol_error_unlock;
+ if (call_id < counter) {
+ rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_call_ctr);
+ goto protocol_error_free;
+ }
+
+ if (call_id > counter) {
+ if (conn->channels[i].call) {
+ rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_call_state);
+ goto protocol_error_free;
+ }
conn->channels[i].call_counter = call_id;
}
}
- spin_unlock(&conn->bundle->channel_lock);
- eproto = tracepoint_string("rxkad_rsp_seq");
- abort_code = RXKADOUTOFSEQUENCE;
- if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1)
+ if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) {
+ rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
+ rxkad_abort_resp_ooseq);
goto protocol_error_free;
+ }
- eproto = tracepoint_string("rxkad_rsp_level");
- abort_code = RXKADLEVELFAIL;
level = ntohl(response->encrypted.level);
- if (level > RXRPC_SECURITY_ENCRYPT)
+ if (level > RXRPC_SECURITY_ENCRYPT) {
+ rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
+ rxkad_abort_resp_level);
goto protocol_error_free;
+ }
conn->security_level = level;
/* create a key to hold the security data and expiration time - after
@@ -1240,15 +1174,11 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
_leave(" = 0");
return 0;
-protocol_error_unlock:
- spin_unlock(&conn->bundle->channel_lock);
protocol_error_free:
kfree(ticket);
protocol_error:
kfree(response);
- trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
key_put(server_key);
- *_abort_code = abort_code;
return -EPROTO;
temporary_error_free_ticket:
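Every conversion in rxkad.c above follows one shape: the old code stashed a tracepoint string and abort code and jumped to a shared protocol_error label; the new code aborts directly through a helper that records an enumerated reason and returns a negative errno, so the shared label disappears. A sketch of the new shape, reusing identifiers from the hunks above (example_header_check itself is illustrative, not a real rxkad function):

static int example_header_check(struct rxrpc_call *call, struct sk_buff *skb,
				unsigned int len)
{
	if (len < 8)
		return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
					  rxkad_abort_1_short_header);
	return 0;	/* nothing to unwind; the I/O thread sends the abort */
}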
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 66f5eea291ff..16dcabb71ebe 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
MODULE_DESCRIPTION("rxperf test server (afs)");
MODULE_AUTHOR("Red Hat, Inc.");
@@ -275,7 +277,7 @@ static void rxperf_deliver_to_call(struct work_struct *work)
struct rxperf_call *call = container_of(work, struct rxperf_call, work);
enum rxperf_call_state state;
u32 abort_code, remote_abort = 0;
- int ret;
+ int ret = 0;
if (call->state == RXPERF_CALL_COMPLETE)
return;
@@ -307,12 +309,14 @@ static void rxperf_deliver_to_call(struct work_struct *work)
case -EOPNOTSUPP:
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
- abort_code, ret, "GOP");
+ abort_code, ret,
+ rxperf_abort_op_not_supported);
goto call_complete;
case -ENOTSUPP:
abort_code = RX_USER_ABORT;
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
- abort_code, ret, "GUA");
+ abort_code, ret,
+ rxperf_abort_op_not_supported);
goto call_complete;
case -EIO:
pr_err("Call %u in bad state %u\n",
@@ -324,11 +328,13 @@ static void rxperf_deliver_to_call(struct work_struct *work)
case -ENOMEM:
case -EFAULT:
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
- RXGEN_SS_UNMARSHAL, ret, "GUM");
+ RXGEN_SS_UNMARSHAL, ret,
+ rxperf_abort_unmarshal_error);
goto call_complete;
default:
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
- RX_CALL_DEAD, ret, "GER");
+ RX_CALL_DEAD, ret,
+ rxperf_abort_general_error);
goto call_complete;
}
}
@@ -523,7 +529,8 @@ static int rxperf_process_call(struct rxperf_call *call)
if (n == -ENOMEM)
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
- RXGEN_SS_MARSHAL, -ENOMEM, "GOM");
+ RXGEN_SS_MARSHAL, -ENOMEM,
+ rxperf_abort_oom);
return n;
}
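The rxperf hunks above switch the last argument of rxrpc_kernel_abort_call() from a free-form 3-letter string to an enum rxrpc_abort_reason, made visible to the module by defining RXRPC_TRACE_ONLY_DEFINE_ENUMS before including the trace header. A minimal sketch of the updated calling convention (the wrapper is hypothetical; rxperf_socket corresponds to the module-level socket used in the file above):

#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
#include <trace/events/rxrpc.h>

static void abort_bad_opcode(struct socket *rxperf_sock,
			     struct rxrpc_call *rxcall)
{
	rxrpc_kernel_abort_call(rxperf_sock, rxcall, RXGEN_OPCODE,
				-EOPNOTSUPP, rxperf_abort_op_not_supported);
}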
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 209f2c25a0da..cb8dd1d3b1d4 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -67,13 +67,13 @@ const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
*/
int rxrpc_init_client_call_security(struct rxrpc_call *call)
{
- const struct rxrpc_security *sec;
+ const struct rxrpc_security *sec = &rxrpc_no_security;
struct rxrpc_key_token *token;
struct key *key = call->key;
int ret;
if (!key)
- return 0;
+ goto found;
ret = key_validate(key);
if (ret < 0)
@@ -88,7 +88,7 @@ int rxrpc_init_client_call_security(struct rxrpc_call *call)
found:
call->security = sec;
- _leave(" = 0");
+ call->security_ix = sec->security_index;
return 0;
}
@@ -97,38 +97,31 @@ found:
*/
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
{
- const struct rxrpc_security *sec;
struct rxrpc_key_token *token;
struct key *key = conn->key;
- int ret;
+ int ret = 0;
_enter("{%d},{%x}", conn->debug_id, key_serial(key));
- if (!key)
- return 0;
-
- ret = key_validate(key);
- if (ret < 0)
- return ret;
-
for (token = key->payload.data[0]; token; token = token->next) {
- sec = rxrpc_security_lookup(token->security_index);
- if (sec)
+ if (token->security_index == conn->security->security_index)
goto found;
}
return -EKEYREJECTED;
found:
- conn->security = sec;
-
- ret = conn->security->init_connection_security(conn, token);
- if (ret < 0) {
- conn->security = &rxrpc_no_security;
- return ret;
+ mutex_lock(&conn->security_lock);
+ if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+ ret = conn->security->init_connection_security(conn, token);
+ if (ret == 0) {
+ spin_lock(&conn->state_lock);
+ if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
+ conn->state = RXRPC_CONN_CLIENT;
+ spin_unlock(&conn->state_lock);
+ }
}
-
- _leave(" = 0");
- return 0;
+ mutex_unlock(&conn->security_lock);
+ return ret;
}
/*
@@ -144,21 +137,15 @@ const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
sec = rxrpc_security_lookup(sp->hdr.securityIndex);
if (!sec) {
- trace_rxrpc_abort(0, "SVS",
- sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EKEYREJECTED);
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
- skb->priority = RX_INVALID_OPERATION;
+ rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security,
+ RX_INVALID_OPERATION, -EKEYREJECTED);
return NULL;
}
if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
!rx->securities) {
- trace_rxrpc_abort(0, "SVR",
- sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EKEYREJECTED);
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
- skb->priority = sec->no_key_abort;
+ rxrpc_direct_abort(skb, rxrpc_abort_no_service_key,
+ sec->no_key_abort, -EKEYREJECTED);
return NULL;
}
@@ -191,9 +178,9 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
sprintf(kdesc, "%u:%u",
sp->hdr.serviceId, sp->hdr.securityIndex);
- rcu_read_lock();
+ read_lock(&conn->local->services_lock);
- rx = rcu_dereference(conn->local->service);
+ rx = conn->local->service;
if (!rx)
goto out;
@@ -215,6 +202,6 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
}
out:
- rcu_read_unlock();
+ read_unlock(&conn->local->services_lock);
return key;
}
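The security.c hunk above defers client connection security set-up: the work is serialised by conn->security_lock, and the UNSECURED -> CLIENT transition is re-checked under conn->state_lock so that only one racing securer wins. A sketch of the caller side this enables (mirroring the sendmsg.c hunk later in this patch; the wrapper name is illustrative):

static int ensure_conn_secured(struct rxrpc_connection *conn)
{
	if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
		return rxrpc_init_client_conn_security(conn);
	return 0;	/* already secured, or never needed securing */
}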
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 9fa7e37f7155..da49fcf1c456 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -18,6 +18,81 @@
#include "ar-internal.h"
/*
+ * Propose an abort to be made in the I/O thread.
+ */
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+ enum rxrpc_abort_reason why)
+{
+ _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
+
+ if (!call->send_abort && !rxrpc_call_is_complete(call)) {
+ call->send_abort_why = why;
+ call->send_abort_err = error;
+ call->send_abort_seq = 0;
+ /* Request abort locklessly vs rxrpc_input_call_event(). */
+ smp_store_release(&call->send_abort, abort_code);
+ rxrpc_poke_call(call, rxrpc_call_poke_abort);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Wait for a call to become connected. Interruption here doesn't cause the
+ * call to be aborted.
+ */
+static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+{
+ DECLARE_WAITQUEUE(myself, current);
+ int ret = 0;
+
+ _enter("%d", call->debug_id);
+
+ if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+ return call->error;
+
+ add_wait_queue_exclusive(&call->waitq, &myself);
+
+ for (;;) {
+ ret = call->error;
+ if (ret < 0)
+ break;
+
+ switch (call->interruptibility) {
+ case RXRPC_INTERRUPTIBLE:
+ case RXRPC_PREINTERRUPTIBLE:
+ set_current_state(TASK_INTERRUPTIBLE);
+ break;
+ case RXRPC_UNINTERRUPTIBLE:
+ default:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ break;
+ }
+ if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
+ ret = call->error;
+ break;
+ }
+ if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+ call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
+ signal_pending(current)) {
+ ret = sock_intr_errno(*timeo);
+ break;
+ }
+ *timeo = schedule_timeout(*timeo);
+ }
+
+ remove_wait_queue(&call->waitq, &myself);
+ __set_current_state(TASK_RUNNING);
+
+ if (ret == 0 && rxrpc_call_is_complete(call))
+ ret = call->error;
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
* Return true if there's sufficient Tx queue space.
*/
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
@@ -39,7 +114,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
if (rxrpc_check_tx_space(call, NULL))
return 0;
- if (call->state >= RXRPC_CALL_COMPLETE)
+ if (rxrpc_call_is_complete(call))
return call->error;
if (signal_pending(current))
@@ -74,7 +149,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
if (rxrpc_check_tx_space(call, &tx_win))
return 0;
- if (call->state >= RXRPC_CALL_COMPLETE)
+ if (rxrpc_call_is_complete(call))
return call->error;
if (timeout == 0 &&
@@ -103,7 +178,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
if (rxrpc_check_tx_space(call, NULL))
return 0;
- if (call->state >= RXRPC_CALL_COMPLETE)
+ if (rxrpc_call_is_complete(call))
return call->error;
trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
@@ -168,7 +243,6 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
rxrpc_notify_end_tx_t notify_end_tx)
{
- unsigned long now;
rxrpc_seq_t seq = txb->seq;
bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
@@ -191,36 +265,10 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
poke = list_empty(&call->tx_sendmsg);
list_add_tail(&txb->call_link, &call->tx_sendmsg);
call->tx_prepared = seq;
+ if (last)
+ rxrpc_notify_end_tx(rx, call, notify_end_tx);
spin_unlock(&call->tx_lock);
- if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
- _debug("________awaiting reply/ACK__________");
- write_lock(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
- rxrpc_notify_end_tx(rx, call, notify_end_tx);
- break;
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- call->state = RXRPC_CALL_SERVER_SEND_REPLY;
- now = jiffies;
- WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
- if (call->ackr_reason == RXRPC_ACK_DELAY)
- call->ackr_reason = 0;
- trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
- if (!last)
- break;
- fallthrough;
- case RXRPC_CALL_SERVER_SEND_REPLY:
- call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
- rxrpc_notify_end_tx(rx, call, notify_end_tx);
- break;
- default:
- break;
- }
- write_unlock(&call->state_lock);
- }
-
if (poke)
rxrpc_poke_call(call, rxrpc_call_poke_start);
}
@@ -245,6 +293,16 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ ret = rxrpc_wait_to_be_connected(call, &timeo);
+ if (ret < 0)
+ return ret;
+
+ if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+ ret = rxrpc_init_client_conn_security(call->conn);
+ if (ret < 0)
+ return ret;
+ }
+
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -252,15 +310,20 @@ reload:
ret = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto maybe_error;
- state = READ_ONCE(call->state);
+ state = rxrpc_call_state(call);
ret = -ESHUTDOWN;
if (state >= RXRPC_CALL_COMPLETE)
goto maybe_error;
ret = -EPROTO;
if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- state != RXRPC_CALL_SERVER_SEND_REPLY)
+ state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ /* Request phase complete for this client call */
+ trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
+ call->cid, call->call_id, call->rx_consumed,
+ 0, -EPROTO);
goto maybe_error;
+ }
ret = -EMSGSIZE;
if (call->tx_total_len != -1) {
@@ -329,7 +392,7 @@ reload:
/* check for the far side aborting the call or a network error
* occurring */
- if (call->state == RXRPC_CALL_COMPLETE)
+ if (rxrpc_call_is_complete(call))
goto call_terminated;
/* add the packet to the send queue if it's now full */
@@ -354,12 +417,9 @@ reload:
success:
ret = copied;
- if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
- read_lock(&call->state_lock);
- if (call->error < 0)
- ret = call->error;
- read_unlock(&call->state_lock);
- }
+ if (rxrpc_call_is_complete(call) &&
+ call->error < 0)
+ ret = call->error;
out:
call->tx_pending = txb;
_leave(" = %d", ret);
@@ -543,7 +603,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
atomic_inc_return(&rxrpc_debug_id));
/* The socket is now unlocked */
- rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
_leave(" = %p\n", call);
return call;
}
@@ -556,7 +615,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
- enum rxrpc_call_state state;
struct rxrpc_call *call;
unsigned long now, j;
bool dropped_lock = false;
@@ -598,10 +656,10 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
return PTR_ERR(call);
/* ... and we have the call lock. */
ret = 0;
- if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
+ if (rxrpc_call_is_complete(call))
goto out_put_unlock;
} else {
- switch (READ_ONCE(call->state)) {
+ switch (rxrpc_call_state(call)) {
case RXRPC_CALL_UNINITIALISED:
case RXRPC_CALL_CLIENT_AWAIT_CONN:
case RXRPC_CALL_SERVER_PREALLOC:
@@ -625,7 +683,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
- goto error_put;
+ goto out_put_unlock;
call->tx_total_len = p.call.tx_total_len;
}
}
@@ -655,17 +713,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
break;
}
- state = READ_ONCE(call->state);
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, state, call->conn);
-
- if (state >= RXRPC_CALL_COMPLETE) {
+ if (rxrpc_call_is_complete(call)) {
/* it's too late for this call */
ret = -ESHUTDOWN;
} else if (p.command == RXRPC_CMD_SEND_ABORT) {
+ rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
+ rxrpc_abort_call_sendmsg);
ret = 0;
- if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
- ret = rxrpc_send_abort_packet(call);
} else if (p.command != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else {
@@ -705,34 +759,17 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
bool dropped_lock = false;
int ret;
- _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+ _enter("{%d},", call->debug_id);
ASSERTCMP(msg->msg_name, ==, NULL);
ASSERTCMP(msg->msg_control, ==, NULL);
mutex_lock(&call->user_mutex);
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
-
- switch (READ_ONCE(call->state)) {
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- case RXRPC_CALL_SERVER_SEND_REPLY:
- ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
- notify_end_tx, &dropped_lock);
- break;
- case RXRPC_CALL_COMPLETE:
- read_lock(&call->state_lock);
+ ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
+ notify_end_tx, &dropped_lock);
+ if (ret == -ESHUTDOWN)
ret = call->error;
- read_unlock(&call->state_lock);
- break;
- default:
- /* Request phase complete for this client call */
- trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
- ret = -EPROTO;
- break;
- }
if (!dropped_lock)
mutex_unlock(&call->user_mutex);
@@ -747,24 +784,20 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
* @call: The call to be aborted
* @abort_code: The abort code to stick into the ABORT packet
* @error: Local error value
- * @why: 3-char string indicating why.
+ * @why: Indication as to why.
*
* Allow a kernel service to abort a call, if it's still in an abortable state
* and return true if the call was aborted, false if it was already complete.
*/
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
- u32 abort_code, int error, const char *why)
+ u32 abort_code, int error, enum rxrpc_abort_reason why)
{
bool aborted;
- _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
+ _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
mutex_lock(&call->user_mutex);
-
- aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
- if (aborted)
- rxrpc_send_abort_packet(call);
-
+ aborted = rxrpc_propose_abort(call, abort_code, error, why);
mutex_unlock(&call->user_mutex);
return aborted;
}
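In rxrpc_propose_abort() above, the abort code is published last with smp_store_release() so that the auxiliary fields (send_abort_why, send_abort_err, send_abort_seq) are visible before a non-zero code is. A schematic of the assumed reader side in the I/O thread (not shown in this section); the acquire pairing is an assumption based on the comment in the hunk, and handle_requested_abort() is hypothetical:

static void check_for_requested_abort(struct rxrpc_call *call)
{
	/* Pairs with smp_store_release() in rxrpc_propose_abort(). */
	s32 code = smp_load_acquire(&call->send_abort);

	if (code)
		handle_requested_abort(call, code, call->send_abort_err,
				       call->send_abort_why);
}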
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 54f1b13b2360..904ab3d457ef 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -26,7 +26,7 @@ static struct tc_action_ops act_gact_ops;
static int gact_net_rand(struct tcf_gact *gact)
{
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
- if (prandom_u32_max(gact->tcfg_pval))
+ if (get_random_u32_below(gact->tcfg_pval))
return gact->tcf_action;
return gact->tcfg_paction;
}
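The prandom_u32_max() -> get_random_u32_below() conversions here and in the hunks that follow are one-for-one renames: both return a value uniformly distributed in [0, n). A sketch of the common 1-in-rate idiom (should_sample() is illustrative only):

static bool should_sample(u32 rate)
{
	/* A uniform draw from [0, rate) hits zero once in 'rate' tries. */
	return get_random_u32_below(rate) == 0;
}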
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 98dea08c1764..f7416b5598e0 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -170,7 +170,7 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
psample_group = rcu_dereference_bh(s->psample_group);
/* randomly sample packets according to rate */
- if (psample_group && (prandom_u32_max(s->rate) == 0)) {
+ if (psample_group && (get_random_u32_below(s->rate) == 0)) {
if (!skb_at_tc_ingress(skb)) {
md.in_ifindex = skb->skb_iif;
md.out_ifindex = skb->dev->ifindex;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 535668e1f748..6ab317b48d6c 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -369,7 +369,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
static void __flow_destroy_filter(struct flow_filter *f)
{
- del_timer_sync(&f->perturb_timer);
+ timer_shutdown_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index eb0e9458e722..ee2a050c887b 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -333,7 +333,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
struct tcindex_filter_result *r, struct nlattr **tb,
struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
{
- struct tcindex_filter_result new_filter_result, *old_r = r;
+ struct tcindex_filter_result new_filter_result;
struct tcindex_data *cp = NULL, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
struct tcf_result cr = {};
@@ -402,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = tcindex_filter_result_init(&new_filter_result, cp, net);
if (err < 0)
goto errout_alloc;
- if (old_r)
+ if (r)
cr = r->res;
err = -EBUSY;
@@ -479,14 +479,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
tcf_bind_filter(tp, &cr, base);
}
- if (old_r && old_r != r) {
- err = tcindex_filter_result_init(old_r, cp, net);
- if (err < 0) {
- kfree(f);
- goto errout_alloc;
- }
- }
-
oldp = p;
r->res = cr;
tcf_exts_change(&r->exts, &e);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 4ce681361851..5c1235e6076a 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) {
+ if (em->ops->datalen > 0)
+ goto errout;
if (data_len < sizeof(u32))
goto errout;
em->data = *(u32 *) data;
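The added check above matters because TCF_EM_SIMPLE stores the match value inline as a u32 in the header, which is only valid for ematch kinds whose ops declare no separate payload. A standalone sketch of the validation rule (the function is illustrative, condensing the logic in tcf_em_validate()):

static int validate_simple_ematch(const struct tcf_ematch *em, int data_len)
{
	if (em->ops->datalen > 0)	/* kind expects a kmalloc'd payload */
		return -EINVAL;		/* ...so TCF_EM_SIMPLE is invalid */
	if (data_len < sizeof(u32))	/* need the inline 32-bit value */
		return -EINVAL;
	return 0;
}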
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index f52255fea652..4a981ca90b0b 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -393,10 +393,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
result = tcf_classify(skb, NULL, fl, &res, true);
if (result < 0)
continue;
+ if (result == TC_ACT_SHOT)
+ goto done;
+
flow = (struct atm_flow_data *)res.class;
if (!flow)
flow = lookup_flow(sch, res.classid);
- goto done;
+ goto drop;
}
}
flow = NULL;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6568e17c4c63..36db5f6782f2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,6 +230,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
result = tcf_classify(skb, NULL, fl, &res, true);
if (!fl || result < 0)
goto fallback;
+ if (result == TC_ACT_SHOT)
+ return NULL;
cl = (void *)res.class;
if (!cl) {
@@ -250,8 +252,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
fallthrough;
- case TC_ACT_SHOT:
- return NULL;
case TC_ACT_RECLASSIFY:
return cbq_reclassify(skb, cl);
}
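The sch_atm and sch_cbq hunks share one fix: a TC_ACT_SHOT verdict is now handled before res.class is consulted, since on a drop verdict that field need not point at a valid class. A schematic of the corrected ordering (the helper is illustrative):

static void *classify_class_safely(struct sk_buff *skb, struct tcf_proto *fl,
				   struct tcf_result *res)
{
	int result = tcf_classify(skb, NULL, fl, res, true);

	if (result < 0)
		return NULL;		/* classification error */
	if (result == TC_ACT_SHOT)
		return NULL;		/* drop: res->class may be garbage */
	return (void *)res->class;	/* only now safe to dereference */
}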
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3ac3e5c80b6f..19c851125901 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -183,7 +183,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
int retrys = 3;
do {
- *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
+ *pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index e5b4bbf3ce3d..2238edece1a4 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -199,8 +199,14 @@ static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
return (unsigned long)htb_find(handle, sch);
}
+
+#define HTB_DIRECT ((struct htb_class *)-1L)
+
/**
* htb_classify - classify a packet into class
+ * @skb: the socket buffer
+ * @sch: the active queue discipline
+ * @qerr: pointer for returned status code
*
* It returns NULL if the packet should be dropped or -1 if the packet
* should be passed directly through. In all other cases a leaf class is returned.
@@ -211,8 +217,6 @@ static unsigned long htb_search(struct Qdisc *sch, u32 handle)
* have no valid leaf we try to use the MAJOR:default leaf. If that is still
* unsuccessful, we finish and return the direct queue.
*/
-#define HTB_DIRECT ((struct htb_class *)-1L)
-
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
{
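The kernel-doc above documents three outcomes for htb_classify(): NULL (drop), HTB_DIRECT (bypass to the direct queue) or a leaf class. A condensed sketch of how the enqueue path consumes them; the three helpers are hypothetical stand-ins for the real logic in htb_enqueue():

static int htb_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT)
		return enqueue_direct(skb, sch);	/* classless bypass */
	if (!cl)
		return dropped(skb, ret);		/* filter said drop/steal */
	return enqueue_on_leaf(skb, cl);		/* normal leaf-class path */
}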
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb00ac40ecb7..6ef3021e1169 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto finish_segs;
}
- skb->data[prandom_u32_max(skb_headlen(skb))] ^=
- 1<<prandom_u32_max(8);
+ skb->data[get_random_u32_below(skb_headlen(skb))] ^=
+ 1<<get_random_u32_below(8);
}
if (unlikely(sch->q.qlen >= sch->limit)) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5acbdf0d38f3..84021a6c4f9d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8323,7 +8323,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32_max(remaining) + low;
+ rover = get_random_u32_below(remaining) + low;
do {
rover++;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index f8fd98784977..ca1eba95c293 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -199,7 +199,7 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
if ((time_before(transport->hb_timer.expires, expires) ||
!timer_pending(&transport->hb_timer)) &&
!mod_timer(&transport->hb_timer,
- expires + prandom_u32_max(transport->rto)))
+ expires + get_random_u32_below(transport->rto)))
sctp_transport_hold(transport);
}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 1472f31480d8..dfb9797f7bc6 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -673,7 +673,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
*/
krflags = MSG_PEEK | MSG_WAITALL;
clc_sk->sk_rcvtimeo = timeout;
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1,
sizeof(struct smc_clc_msg_hdr));
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (signal_pending(current)) {
@@ -720,7 +720,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
} else {
recvlen = datlen;
}
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
krflags = MSG_WAITALL;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
@@ -737,7 +737,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
/* receive remaining proposal message */
recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
SMC_CLC_RECV_BUF_LEN : datlen;
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
len = sock_recvmsg(smc->clcsock, &msg, krflags);
datlen -= len;
}
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 64dedffe9d26..f4b6a71ac488 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -308,7 +308,7 @@ int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
iov.iov_base = kaddr + offset;
iov.iov_len = size;
- iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
rc = smc_tx_sendmsg(smc, &msg, size);
kunmap(page);
return rc;
diff --git a/net/socket.c b/net/socket.c
index 55c5d536e5f6..888cd618a968 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -750,7 +750,7 @@ EXPORT_SYMBOL(sock_sendmsg);
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
- iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);
@@ -776,7 +776,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
if (!sock->ops->sendmsg_locked)
return sock_no_sendmsg_locked(sk, msg, size);
- iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg));
}
@@ -1034,7 +1034,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size, int flags)
{
msg->msg_control_is_user = false;
- iov_iter_kvec(&msg->msg_iter, READ, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, ITER_DEST, vec, num, size);
return sock_recvmsg(sock, msg, flags);
}
EXPORT_SYMBOL(kernel_recvmsg);
@@ -2092,7 +2092,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
struct iovec iov;
int fput_needed;
- err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
+ err = import_single_range(ITER_SOURCE, buff, len, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -2157,7 +2157,7 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
int err, err2;
int fput_needed;
- err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
+ err = import_single_range(ITER_DEST, ubuf, size, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -2411,7 +2411,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
if (err)
return err;
- err = import_iovec(save_addr ? READ : WRITE,
+ err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
msg.msg_iov, msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
return err < 0 ? err : 0;
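
Note that in copy_msghdr_from_user() the direction follows the data flow, not the syscall name: recvmsg() (save_addr set) imports the user iovec as ITER_DEST, sendmsg() as ITER_SOURCE. Kernel callers can let the kernel_sendmsg()/kernel_recvmsg() wrappers shown above make that choice; a sketch of the send side, with the buffer names invented:

    #include <linux/net.h>
    #include <linux/uio.h>

    /* Send one kernel buffer on a kernel socket. kernel_sendmsg()
     * performs the ITER_SOURCE iov_iter_kvec() setup seen above.
     */
    static int send_buf(struct socket *sock, void *buf, size_t len)
    {
            struct msghdr msg = {};
            struct kvec vec = { .iov_base = buf, .iov_len = len };

            return kernel_sendmsg(sock, &msg, &vec, 1, len);
    }
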
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bcd74dddbe2d..148bb0a7fa5b 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -49,11 +49,36 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/gss_krb5.h>
#include <trace/events/rpcgss.h>
#include "gss_rpc_upcall.h"
+/*
+ * Unfortunately there isn't a maximum checksum size exported via the
+ * GSS API. Manufacture one based on GSS mechanisms supported by this
+ * implementation.
+ */
+#define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)
+
+/*
+ * This value may be increased in the future to accommodate other
+ * usage of the scratch buffer.
+ */
+#define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE
+
+struct gss_svc_data {
+ /* decoded gss client cred: */
+ struct rpc_gss_wire_cred clcred;
+ /* save a pointer to the beginning of the encoded verifier,
+ * for use in encryption/checksumming in svcauth_gss_release: */
+ __be32 *verf_start;
+ struct rsc *rsci;
+
+ /* for temporary results */
+ u8 gsd_scratch[GSS_SCRATCH_SIZE];
+};
/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
* into replies.
@@ -887,13 +912,11 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
static int
unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
+ struct gss_svc_data *gsd = rqstp->rq_auth_data;
u32 integ_len, rseqno, maj_stat;
- int stat = -EINVAL;
struct xdr_netobj mic;
struct xdr_buf integ_buf;
- mic.data = NULL;
-
/* NFS READ normally uses splice to send data in-place. However
* the data in cache can change after the reply's MIC is computed
* but before the RPC reply is sent. To prevent the client from
@@ -917,11 +940,9 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
/* copy out mic... */
if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
goto unwrap_failed;
- if (mic.len > RPC_MAX_AUTH_SIZE)
- goto unwrap_failed;
- mic.data = kmalloc(mic.len, GFP_KERNEL);
- if (!mic.data)
+ if (mic.len > sizeof(gsd->gsd_scratch))
goto unwrap_failed;
+ mic.data = gsd->gsd_scratch;
if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
goto unwrap_failed;
maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
@@ -932,20 +953,17 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
goto bad_seqno;
/* trim off the mic and padding at the end before returning */
xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
- stat = 0;
-out:
- kfree(mic.data);
- return stat;
+ return 0;
unwrap_failed:
trace_rpcgss_svc_unwrap_failed(rqstp);
- goto out;
+ return -EINVAL;
bad_seqno:
trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno);
- goto out;
+ return -EINVAL;
bad_mic:
trace_rpcgss_svc_mic(rqstp, maj_stat);
- goto out;
+ return -EINVAL;
}
static inline int
@@ -1023,15 +1041,6 @@ bad_unwrap:
return -EINVAL;
}
-struct gss_svc_data {
- /* decoded gss client cred: */
- struct rpc_gss_wire_cred clcred;
- /* save a pointer to the beginning of the encoded verifier,
- * for use in encryption/checksumming in svcauth_gss_release: */
- __be32 *verf_start;
- struct rsc *rsci;
-};
-
static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
@@ -1162,18 +1171,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
return res;
inlen = svc_getnl(argv);
- if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
+ if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
+ kfree(in_handle->data);
return SVC_DENIED;
+ }
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
- if (!in_token->pages)
+ if (!in_token->pages) {
+ kfree(in_handle->data);
return SVC_DENIED;
+ }
in_token->page_base = 0;
in_token->page_len = inlen;
for (i = 0; i < pages; i++) {
in_token->pages[i] = alloc_page(GFP_KERNEL);
if (!in_token->pages[i]) {
+ kfree(in_handle->data);
gss_free_in_token_pages(in_token);
return SVC_DENIED;
}
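
Two independent fixes land in svcauth_gss.c: unwrap_integ_data() stops kmalloc()ing a short-lived MIC buffer and instead uses a scratch area embedded in the per-request gss_svc_data, bounded by GSS_MAX_CKSUMSIZE (so the bound check becomes a sizeof() test and the error paths no longer need kfree()); and gss_read_proxy_verf() gains kfree(in_handle->data) on its early exits to close a leak. A small, runnable sketch of the scratch-buffer idea, all names hypothetical:

    #include <stdint.h>
    #include <string.h>

    #define SCRATCH_SIZE 64         /* stand-in for GSS_SCRATCH_SIZE */

    struct request {
            uint8_t scratch[SCRATCH_SIZE];  /* reused for temporary results */
    };

    /* Copy a variable-length checksum into preallocated per-request
     * storage. The size check replaces both the old RPC_MAX_AUTH_SIZE
     * test and the kmalloc()/kfree() pair.
     */
    static int copy_mic(struct request *rq, const uint8_t *mic, size_t len)
    {
            if (len > sizeof(rq->scratch))
                    return -1;
            memcpy(rq->scratch, mic, len);
            return 0;
    }
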
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f075a9fb5ccc..95ff74706104 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -677,7 +677,7 @@ static void cache_limit_defers(void)
/* Consider removing either the first or the last */
if (cache_defer_cnt > DFR_MAX) {
- if (prandom_u32_max(2))
+ if (get_random_u32_below(2))
discard = list_entry(cache_defer_list.next,
struct cache_deferred_req, recent);
else
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 993acf38af87..0b0b9f1eed46 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1442,7 +1442,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
break;
default:
err = -EAFNOSUPPORT;
- goto out;
+ goto out_release;
}
if (err < 0) {
dprintk("RPC: can't bind UDP socket (%d)\n", err);
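
The rpc_sockname() change is a socket leak fix: the -EAFNOSUPPORT branch jumped to out, bypassing the sock_release() performed under out_release. A compact, runnable sketch of the usual unwind convention, with hypothetical resources:

    #include <stdlib.h>

    /* Each failure jumps to the label that frees exactly what has been
     * acquired so far; later acquisitions unwind through earlier labels.
     */
    static int do_work(void)
    {
            int err = -1;
            char *a, *b;

            a = malloc(16);
            if (!a)
                    goto out;
            b = malloc(16);
            if (!b)
                    goto out_free_a;

            /* ... use a and b ... */
            err = 0;

            free(b);
    out_free_a:
            free(a);
    out:
            return err;
    }
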
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 71ba4cf513bc..1b2b84feeec6 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -214,14 +214,14 @@ static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t seek)
{
- iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
+ iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
return xprt_sendmsg(sock, msg, seek);
}
static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
struct xdr_buf *xdr, size_t base)
{
- iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
+ iov_iter_bvec(&msg->msg_iter, ITER_SOURCE, xdr->bvec, xdr_buf_pagecount(xdr),
xdr->page_len + xdr->page_base);
return xprt_sendmsg(sock, msg, base + xdr->page_base);
}
@@ -244,7 +244,7 @@ static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
};
size_t len = iov[0].iov_len + iov[1].iov_len;
- iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
+ iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, iov, 2, len);
return xprt_sendmsg(sock, msg, base);
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 149171774bc6..85f0c3cfc877 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -567,7 +567,7 @@ svc_destroy(struct kref *ref)
struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
- del_timer_sync(&serv->sv_temptimer);
+ timer_shutdown_sync(&serv->sv_temptimer);
/*
* The last user is gone and thus all sockets have to be destroyed to
@@ -638,7 +638,6 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
return rqstp;
__set_bit(RQ_BUSY, &rqstp->rq_flags);
- spin_lock_init(&rqstp->rq_lock);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -1281,8 +1280,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
/* Also give the program a chance to reject this call: */
if (auth_res == SVC_OK && progp)
auth_res = progp->pg_authenticate(rqstp);
- if (auth_res != SVC_OK)
- trace_svc_authenticate(rqstp, auth_res);
+ trace_svc_authenticate(rqstp, auth_res);
switch (auth_res) {
case SVC_OK:
break;
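
svc_destroy() moves from del_timer_sync() to timer_shutdown_sync(), new in this cycle: besides waiting out a running handler, it marks the timer shut down so later mod_timer()/add_timer() calls become no-ops, which is the right semantics immediately before freeing the containing object. The same conversion appears below in tipc_disc_delete() and tipc_mon_delete(), while xprt.c drops the del_singleshot_timer_sync() alias in favor of plain del_timer_sync(). A kernel-style sketch with a hypothetical object:

    #include <linux/slab.h>
    #include <linux/timer.h>

    struct foo {
            struct timer_list timer;
            /* ... */
    };

    /* After timer_shutdown_sync() returns, no handler is running and
     * any attempt to rearm the timer is ignored, so freeing is safe.
     */
    static void foo_destroy(struct foo *foo)
    {
            timer_shutdown_sync(&foo->timer);
            kfree(foo);
    }
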
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 2fc98fea59b4..015714398007 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -260,7 +260,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
rqstp->rq_respages = &rqstp->rq_pages[i];
rqstp->rq_next_page = rqstp->rq_respages + 1;
- iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen);
+ iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
if (seek) {
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
@@ -874,7 +874,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index c1f559892ae8..1e05a2d723f4 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -31,7 +31,7 @@ static void rpc_sysfs_object_release(struct kobject *kobj)
}
static const struct kobj_ns_type_operations *
-rpc_sysfs_object_child_ns_type(struct kobject *kobj)
+rpc_sysfs_object_child_ns_type(const struct kobject *kobj)
{
return &net_ns_type_operations;
}
@@ -381,17 +381,17 @@ static void rpc_sysfs_xprt_release(struct kobject *kobj)
kfree(xprt);
}
-static const void *rpc_sysfs_client_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_client_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_client, kobject)->net;
}
-static const void *rpc_sysfs_xprt_switch_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_xprt_switch_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_xprt_switch, kobject)->net;
}
-static const void *rpc_sysfs_xprt_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_xprt,
kobject)->xprt->xprt_net;
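
The rpc_sysfs callbacks pick up const because the driver core constified the kobject namespace hooks this cycle; the callbacks only read the kobject, and the prototypes now say so (wiphy_namespace() below gets the same treatment for struct device). A sketch of a const-correct namespace callback, with the containing type hypothetical:

    #include <linux/kobject.h>

    struct my_obj {
            struct kobject kobject;
            const void *net;        /* hypothetical namespace tag */
    };

    /* container_of() accepts the const pointer; taking const in the
     * prototype documents and enforces that the callback only reads.
     */
    static const void *my_obj_namespace(const struct kobject *kobj)
    {
            return container_of(kobj, struct my_obj, kobject)->net;
    }
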
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 336a7c7833e4..f7767bf22406 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1224,30 +1224,34 @@ EXPORT_SYMBOL(xdr_restrict_buflen);
/**
* xdr_write_pages - Insert a list of pages into an XDR buffer for sending
* @xdr: pointer to xdr_stream
- * @pages: list of pages
- * @base: offset of first byte
- * @len: length of data in bytes
+ * @pages: array of pages to insert
+ * @base: starting offset of first data byte in @pages
+ * @len: number of data bytes in @pages to insert
*
+ * After the @pages are added, the tail iovec is instantiated pointing to
+ * the end of the head buffer, and the stream is set up to encode
+ * subsequent items into the tail.
*/
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct kvec *iov = buf->tail;
+ struct kvec *tail = buf->tail;
+
buf->pages = pages;
buf->page_base = base;
buf->page_len = len;
- iov->iov_base = (char *)xdr->p;
- iov->iov_len = 0;
- xdr->iov = iov;
+ tail->iov_base = xdr->p;
+ tail->iov_len = 0;
+ xdr->iov = tail;
if (len & 3) {
unsigned int pad = 4 - (len & 3);
BUG_ON(xdr->p >= xdr->end);
- iov->iov_base = (char *)xdr->p + (len & 3);
- iov->iov_len += pad;
+ tail->iov_base = (char *)xdr->p + (len & 3);
+ tail->iov_len += pad;
len += pad;
*xdr->p++ = 0;
}
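
The rewritten tail setup keeps XDR's quad-alignment rule: variable-length data is padded to a multiple of 4 bytes, so a page payload of len bytes needs 4 - (len & 3) pad bytes whenever len & 3 is nonzero. A runnable check of that arithmetic:

    #include <assert.h>

    /* XDR pads variable-length items to 4-byte alignment. */
    static unsigned int quad_pad(unsigned int len)
    {
            return (len & 3) ? 4 - (len & 3) : 0;
    }

    int main(void)
    {
            assert(quad_pad(8) == 0);
            assert(quad_pad(9) == 3);
            assert(quad_pad(10) == 2);
            assert(quad_pad(11) == 1);
            return 0;
    }
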
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 656cec208371..ab453ede54f0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1164,7 +1164,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
spin_unlock(&xprt->queue_lock);
/* Turn off autodisconnect */
- del_singleshot_timer_sync(&xprt->timer);
+ del_timer_sync(&xprt->timer);
return 0;
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 44b87e4274b4..b098fde373ab 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -831,7 +831,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
return req;
out3:
- kfree(req->rl_sendbuf);
+ rpcrdma_regbuf_free(req->rl_sendbuf);
out2:
kfree(req);
out1:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 915b9902f673..aaa5b2741b79 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -364,7 +364,7 @@ static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
struct kvec *kvec, size_t count, size_t seek)
{
- iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
+ iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
@@ -373,7 +373,7 @@ xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
struct bio_vec *bvec, unsigned long nr, size_t count,
size_t seek)
{
- iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
+ iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
@@ -381,7 +381,7 @@ static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
size_t count)
{
- iov_iter_discard(&msg->msg_iter, READ, count);
+ iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
return sock_recvmsg(sock, msg, flags);
}
@@ -1619,7 +1619,7 @@ static int xs_get_random_port(void)
if (max < min)
return -EADDRINUSE;
range = max - min + 1;
- rand = prandom_u32_max(range);
+ rand = get_random_u32_below(range);
return rand + min;
}
@@ -1882,6 +1882,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_write_space = xs_udp_write_space;
sk->sk_state_change = xs_local_state_change;
sk->sk_error_report = xs_error_report;
+ sk->sk_use_task_frag = false;
xprt_clear_connected(xprt);
@@ -2082,6 +2083,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_data_ready;
sk->sk_write_space = xs_udp_write_space;
+ sk->sk_use_task_frag = false;
xprt_set_connected(xprt);
@@ -2249,6 +2251,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
sk->sk_error_report = xs_error_report;
+ sk->sk_use_task_frag = false;
/* socket options */
sock_reset_flag(sk, SOCK_LINGER);
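
The three sk_use_task_frag = false additions belong to a 6.2 series: a kernel socket that transmits from arbitrary process context must not borrow current->task_frag, or it can deadlock with the user task that owns it; clearing the new flag makes the socket use its own sk->sk_frag instead. A kernel-style sketch mirroring espintcp_init_sk() below, with the setup function name hypothetical:

    #include <net/sock.h>

    /* Configure a kernel-owned socket so transmit paths never touch
     * the calling task's page_frag.
     */
    static void kernel_sock_setup(struct sock *sk)
    {
            sk->sk_allocation = GFP_ATOMIC;  /* may send in atomic context */
            sk->sk_use_task_frag = false;    /* fall back to sk->sk_frag */
    }
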
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index e8dcdf267c0c..685389d4b245 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -388,7 +388,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
*/
void tipc_disc_delete(struct tipc_discoverer *d)
{
- del_timer_sync(&d->timer);
+ timer_shutdown_sync(&d->timer);
kfree_skb(d->skb);
kfree(d);
}
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 9618e4429f0f..77a3d016cade 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -700,7 +700,7 @@ void tipc_mon_delete(struct net *net, int bearer_id)
}
mon->self = NULL;
write_unlock_bh(&mon->lock);
- del_timer_sync(&mon->timer);
+ timer_shutdown_sync(&mon->timer);
kfree(self->domain);
kfree(self);
kfree(mon);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 49ddc484c4fe..5e000fde8067 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1179,8 +1179,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool addr_match = false;
bool sign_match = false;
bool link_up = false;
+ bool link_is_reset = false;
bool accept_addr = false;
- bool reset = true;
+ bool reset = false;
char *if_name;
unsigned long intv;
u16 session;
@@ -1200,14 +1201,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
/* Prepare to validate requesting node's signature and media address */
l = le->link;
link_up = l && tipc_link_is_up(l);
+ link_is_reset = l && tipc_link_is_reset(l);
addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
sign_match = (signature == n->signature);
/* These three flags give us eight permutations: */
if (sign_match && addr_match && link_up) {
- /* All is fine. Do nothing. */
- reset = false;
+ /* All is fine. Ignore requests. */
/* Peer node is not a container/local namespace */
if (!n->peer_hash_mix)
n->peer_hash_mix = hash_mixes;
@@ -1232,6 +1233,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*/
accept_addr = true;
*respond = true;
+ reset = true;
} else if (!sign_match && addr_match && link_up) {
/* Peer node rebooted. Two possibilities:
* - Delayed re-discovery; this link endpoint has already
@@ -1263,6 +1265,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
n->signature = signature;
accept_addr = true;
*respond = true;
+ reset = true;
}
if (!accept_addr)
@@ -1291,6 +1294,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
tipc_link_fsm_evt(l, LINK_RESET_EVT);
if (n->state == NODE_FAILINGOVER)
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ link_is_reset = tipc_link_is_reset(l);
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
@@ -1303,7 +1307,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
tipc_node_write_unlock(n);
- if (reset && l && !tipc_link_is_reset(l))
+ if (reset && !link_is_reset)
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
}
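
The tipc_node_check_dest() rework avoids touching the link after the lock is dropped: the old exit path re-evaluated tipc_link_is_reset(l) after tipc_node_write_unlock(), where the link may change or be torn down; the new code samples link_is_reset while the lock is held and flips the default for reset to false, so only branches that request it take the link down. A minimal sketch of the snapshot-under-lock pattern, names hypothetical:

    #include <linux/spinlock.h>

    struct node_state {
            spinlock_t lock;
            bool link_reset;        /* protected by @lock */
    };

    /* Sample the flag under the lock, then act on the snapshot; do not
     * reread state that the lock no longer protects.
     */
    static bool should_take_link_down(struct node_state *n)
    {
            bool link_is_reset;

            spin_lock(&n->lock);
            link_is_reset = n->link_reset;
            spin_unlock(&n->lock);

            return !link_is_reset;
    }
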
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e902b01ea3cb..b35c8701876a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3010,7 +3010,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
- u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
+ u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
while (remaining--) {
portid++;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index e3b427a70398..69c88cc03887 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -396,7 +396,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
iov.iov_base = &s;
iov.iov_len = sizeof(s);
msg.msg_name = NULL;
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a03d66046ca3..6c593788dc25 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -620,7 +620,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
kaddr = kmap(page);
iov.iov_base = kaddr + offset;
iov.iov_len = size;
- iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
+ iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size);
iter_offset.msg_iter = &msg_iter;
rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
NULL);
@@ -697,7 +697,7 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
union tls_iter_offset iter;
struct iov_iter msg_iter;
- iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
+ iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0);
iter.msg_iter = &msg_iter;
return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b3545fc68097..f0c2293f1d3b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1999,13 +1999,20 @@ restart_locked:
unix_state_lock(sk);
err = 0;
- if (unix_peer(sk) == other) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ /* We get here only when racing with unix_release_sock(), which
+ * is clearing @other. Unlike the SOCK_DGRAM case below, never
+ * change the state to TCP_CLOSE.
+ */
+ unix_state_unlock(sk);
+ err = -EPIPE;
+ } else if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
- sk->sk_state = TCP_CLOSE;
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
@@ -3738,6 +3745,7 @@ static int __init af_unix_init(void)
rc = proto_register(&unix_stream_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
+ proto_unregister(&unix_dgram_proto);
goto out;
}
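
The af_unix hunks carry two fixes: sending on a disconnecting SOCK_SEQPACKET socket now returns -EPIPE without mutating state (the old code could race with unix_release_sock() and take the peer-clearing path meant for SOCK_DGRAM, and the TCP_CLOSE store now happens under unix_state_lock()), and af_unix_init() unregisters unix_dgram_proto if registering unix_stream_proto fails. A sketch of that second, partial-registration pattern, with hypothetical protos:

    #include <linux/init.h>
    #include <net/sock.h>

    extern struct proto proto_a, proto_b;   /* hypothetical */

    /* If the second registration fails, undo the first before bailing
     * out so a failed init leaves nothing registered.
     */
    static int __init my_af_init(void)
    {
            int rc = proto_register(&proto_a, 1);

            if (rc)
                    return rc;
            rc = proto_register(&proto_b, 1);
            if (rc)
                    proto_unregister(&proto_a);
            return rc;
    }
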
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 884eca7f6743..d593d5b6d4b1 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -626,8 +626,7 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
struct sockaddr_vm new_addr;
if (!port)
- port = LAST_RESERVED_PORT + 1 +
- prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+ port = get_random_u32_above(LAST_RESERVED_PORT);
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 842c94286d31..36eb16a40745 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue(
if (!dg)
return -ENOMEM;
- memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+ err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+ if (err) {
+ kfree(dg);
+ return err;
+ }
dg->dst = vmci_make_handle(remote_addr->svm_cid,
remote_addr->svm_port);
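
vmci_transport_dgram_enqueue() previously ignored the return value of memcpy_from_msg(); copying from a user-supplied msghdr can fault, so the fix frees the freshly allocated datagram and propagates the error rather than sending uninitialized payload. The shape of the fix, as a kernel-style sketch with hypothetical names:

    #include <linux/skbuff.h>
    #include <linux/slab.h>

    /* memcpy_from_msg() can fail with -EFAULT; unwind the allocation
     * and return the error instead of transmitting garbage.
     */
    static int build_payload(struct msghdr *msg, size_t len, void **out)
    {
            void *payload = kmalloc(len, GFP_KERNEL);
            int err;

            if (!payload)
                    return -ENOMEM;
            err = memcpy_from_msg(payload, msg, len);
            if (err) {
                    kfree(payload);
                    return err;
            }
            *out = payload;
            return 0;
    }
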
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0c3f05c9be27..cdb638647e0b 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -148,7 +148,7 @@ static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume);
#define WIPHY_PM_OPS NULL
#endif
-static const void *wiphy_namespace(struct device *d)
+static const void *wiphy_namespace(const struct device *d)
{
struct wiphy *wiphy = container_of(d, struct wiphy, dev);
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 29a540dcb5a7..74a54295c164 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -354,7 +354,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
*((__be16 *)buf) = cpu_to_be16(msglen);
pfx_iov.iov_base = buf;
pfx_iov.iov_len = sizeof(buf);
- iov_iter_kvec(&pfx_iter, WRITE, &pfx_iov, 1, pfx_iov.iov_len);
+ iov_iter_kvec(&pfx_iter, ITER_SOURCE, &pfx_iov, 1, pfx_iov.iov_len);
err = sk_msg_memcopy_from_iter(sk, &pfx_iter, &emsg->skmsg,
pfx_iov.iov_len);
@@ -489,6 +489,7 @@ static int espintcp_init_sk(struct sock *sk)
/* avoid using task_frag */
sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_use_task_frag = false;
return 0;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index cc1d0ea42672..89c731f4f0c7 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2235,7 +2235,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
} else {
u32 spi = 0;
for (h = 0; h < high-low+1; h++) {
- spi = low + prandom_u32_max(high - low + 1);
+ spi = get_random_u32_inclusive(low, high);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
newspi = htonl(spi);
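
The xfrm_alloc_spi() conversion folds the open-coded low + prandom_u32_max(high - low + 1) into get_random_u32_inclusive(low, high), uniform over [low, high]; the vsock hunk above likewise replaces LAST_RESERVED_PORT + 1 + prandom_u32_max(U32_MAX - LAST_RESERVED_PORT) with get_random_u32_above(LAST_RESERVED_PORT). A runnable check that the two formulations cover the same range, using a deliberately simple stand-in generator (not the kernel's):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Stand-in for get_random_u32_below(): value in [0, ceil).
     * rand() % ceil is biased; it only serves to exercise the range math.
     */
    static uint32_t u32_below(uint32_t ceil)
    {
            return (uint32_t)rand() % ceil;
    }

    /* Same range as get_random_u32_inclusive(low, high): [low, high]. */
    static uint32_t u32_inclusive(uint32_t low, uint32_t high)
    {
            return low + u32_below(high - low + 1);
    }

    int main(void)
    {
            for (int i = 0; i < 100000; i++) {
                    uint32_t v = u32_inclusive(256, 65535);
                    assert(v >= 256 && v <= 65535);
            }
            return 0;
    }
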