Diffstat (limited to 'net')
-rw-r--r--  net/802/garp.c                       2
-rw-r--r--  net/802/mrp.c                        2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c          4
-rw-r--r--  net/batman-adv/bat_v_elp.c           2
-rw-r--r--  net/batman-adv/bat_v_ogm.c           4
-rw-r--r--  net/batman-adv/network-coding.c      2
-rw-r--r--  net/bluetooth/mgmt.c                 4
-rw-r--r--  net/can/j1939/socket.c               2
-rw-r--r--  net/can/j1939/transport.c            2
-rw-r--r--  net/ceph/mon_client.c                2
-rw-r--r--  net/ceph/osd_client.c                2
-rw-r--r--  net/core/neighbour.c                 4
-rw-r--r--  net/core/pktgen.c                   28
-rw-r--r--  net/core/stream.c                    2
-rw-r--r--  net/ipv4/icmp.c                      2
-rw-r--r--  net/ipv4/igmp.c                      6
-rw-r--r--  net/ipv4/inet_connection_sock.c      2
-rw-r--r--  net/ipv4/inet_hashtables.c           2
-rw-r--r--  net/ipv4/route.c                     4
-rw-r--r--  net/ipv4/tcp_bbr.c                   2
-rw-r--r--  net/ipv4/tcp_input.c                 3
-rw-r--r--  net/ipv6/addrconf.c                  8
-rw-r--r--  net/ipv6/mcast.c                    10
-rw-r--r--  net/ipv6/route.c                     2
-rw-r--r--  net/netfilter/ipvs/ip_vs_twos.c      4
-rw-r--r--  net/netfilter/nf_conntrack_core.c    4
-rw-r--r--  net/netfilter/nf_nat_helper.c        2
-rw-r--r--  net/netlink/af_netlink.c             2
-rw-r--r--  net/packet/af_packet.c               4
-rw-r--r--  net/sched/act_gact.c                 2
-rw-r--r--  net/sched/act_sample.c               2
-rw-r--r--  net/sched/sch_choke.c                2
-rw-r--r--  net/sched/sch_netem.c                4
-rw-r--r--  net/sctp/socket.c                    2
-rw-r--r--  net/sctp/transport.c                 2
-rw-r--r--  net/sunrpc/cache.c                   2
-rw-r--r--  net/sunrpc/xprtsock.c                2
-rw-r--r--  net/tipc/socket.c                    2
-rw-r--r--  net/vmw_vsock/af_vsock.c             2
-rw-r--r--  net/xfrm/xfrm_state.c                2
40 files changed, 72 insertions, 71 deletions
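Every hunk below is the same mechanical substitution: prandom_u32_max(ceil) becomes get_random_u32_below(ceil), and both return a uniform value in the half-open interval [0, ceil). A minimal userspace sketch of that contract, assuming nothing about the kernel internals (rand() is only a stand-in; the real helper draws from the kernel's CRNG and avoids modulo bias):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative model only: uniform in [0, ceil), like the kernel helper.
 * The in-kernel implementation avoids the modulo bias shown here. */
static uint32_t get_random_u32_below(uint32_t ceil)
{
        return (uint32_t)rand() % ceil;
}

The snippets accompanying later hunks reuse this userspace model.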
diff --git a/net/802/garp.c b/net/802/garp.c
index fc9eb02a912f..77aac2763835 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -407,7 +407,7 @@ static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
- delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
+ delay = get_random_u32_below(msecs_to_jiffies(garp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 155f74d8b14f..8c6f0381023b 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -592,7 +592,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
- delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
+ delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7f6a7c96ac92..114ee5da261f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -280,7 +280,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
unsigned int msecs;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
return jiffies + msecs_to_jiffies(msecs);
}
@@ -288,7 +288,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
/* when do we schedule a ogm packet to be sent */
static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
- return jiffies + msecs_to_jiffies(prandom_u32_max(BATADV_JITTER / 2));
+ return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
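The OGM scheduling above uses the common jitter idiom: interval - JITTER + uniform[0, 2*JITTER), which centers the draw on the configured interval. A worked sketch with hypothetical values (orig_interval = 1000 ms, BATADV_JITTER = 20):

unsigned int msecs = 1000 - 20;              /* 980                    */
msecs += get_random_u32_below(2 * 20);       /* + uniform over [0, 39] */
/* msecs is now uniform in [980, 1019]: the interval +/- one jitter.   */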
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index f1741fbfb617..f9a58fb5442e 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -51,7 +51,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
unsigned int msecs;
msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
msecs_to_jiffies(msecs));
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 033639df96d8..addfd8c4fe95 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -90,7 +90,7 @@ static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface)
unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;
/* msecs * [0.9, 1.1] */
- msecs += prandom_u32_max(msecs / 5) - (msecs / 10);
+ msecs += get_random_u32_below(msecs / 5) - (msecs / 10);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
msecs_to_jiffies(msecs / 1000));
}
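The "msecs * [0.9, 1.1]" comment is implemented in fixed point: adding uniform[0, msecs/5) and then subtracting msecs/10 yields an offset in [-msecs/10, +msecs/10). Worked with a hypothetical msecs of 100000 (the aggregation window scaled by 1000 for finer jitter resolution):

unsigned int msecs = 100000;
msecs += get_random_u32_below(msecs / 5)     /* + uniform over [0, 19999] */
         - (msecs / 10);                     /* - 10000                   */
/* msecs is now uniform in [90000, 109999], i.e. 0.9x .. 1.1x; the   */
/* code divides it back by 1000 before msecs_to_jiffies().           */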
@@ -109,7 +109,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
return;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
- msecs += prandom_u32_max(2 * BATADV_JITTER);
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
msecs_to_jiffies(msecs));
}
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 5f4aeeb60dc4..bf29fba4dde5 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1009,7 +1009,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
static u8 batadv_nc_random_weight_tq(u8 tq)
{
/* randomize the estimated packet loss (max TQ - estimated TQ) */
- u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
+ u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
/* convert to (randomized) estimated tq again */
return BATADV_TQ_MAX_VALUE - rand_tq;
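Here the draw spans [0, BATADV_TQ_MAX_VALUE + 1 - tq), so the randomized value can reach but never undercut the measured estimate. With BATADV_TQ_MAX_VALUE == 255 and a hypothetical tq of 200, reusing the userspace model:

uint8_t rand_tq = get_random_u32_below(255 + 1 - 200); /* uniform over [0, 55]    */
uint8_t out = 255 - rand_tq;                           /* uniform over [200, 255] */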
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index a92e7e485feb..b2f9679066c4 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7374,8 +7374,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
* calculate conn info age as random value between min/max set in hdev.
*/
conn_info_age = hdev->conn_info_min_age +
- prandom_u32_max(hdev->conn_info_max_age -
- hdev->conn_info_min_age);
+ get_random_u32_below(hdev->conn_info_max_age -
+ hdev->conn_info_min_age);
/* Query controller to refresh cached values if they are too old or were
* never read.
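This is the bounded-range idiom that recurs throughout the series: min + get_random_u32_below(max - min) is uniform over [min, max). With hypothetical hdev limits of min = 1000 ms and max = 3000 ms:

uint32_t conn_info_age = 1000 + get_random_u32_below(3000 - 1000);
/* uniform over [1000, 2999]; the maximum itself is excluded. */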
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index b670ba03a675..7e90f9e61d9b 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -189,7 +189,7 @@ activate_next:
int time_ms = 0;
if (err)
- time_ms = 10 + prandom_u32_max(16);
+ time_ms = 10 + get_random_u32_below(16);
j1939_tp_schedule_txtimer(first, time_ms);
}
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 55f29c9f9e08..67d36776aff4 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1168,7 +1168,7 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
session->tx_retry++;
j1939_tp_schedule_txtimer(session,
- 10 + prandom_u32_max(16));
+ 10 + get_random_u32_below(16));
} else {
netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
__func__, session);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index db60217f911b..faabad6603db 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -222,7 +222,7 @@ static void pick_new_mon(struct ceph_mon_client *monc)
max--;
}
- n = prandom_u32_max(max);
+ n = get_random_u32_below(max);
if (o >= 0 && n >= o)
n++;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 4e4f1e4bc265..11c04e7d928e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1479,7 +1479,7 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
static int pick_random_replica(const struct ceph_osds *acting)
{
- int i = prandom_u32_max(acting->size);
+ int i = get_random_u32_below(acting->size);
dout("%s picked osd%d, primary osd%d\n", __func__,
acting->osds[i], acting->primary);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a77a85e357e0..ba92762de525 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -111,7 +111,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return base ? prandom_u32_max(base) + (base >> 1) : 0;
+ return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
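neigh_rand_reach_time() adds half the base to a draw over [0, base), so the result is uniform over [base/2, 3*base/2). A sketch with a hypothetical base of 30 (units are irrelevant to the arithmetic):

unsigned long base = 30;                            /* illustrative value */
unsigned long t = get_random_u32_below(base) + (base >> 1);
/* t is uniform over [15, 44]: the randomized reachable time. */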
@@ -1652,7 +1652,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
unsigned long sched_next = jiffies +
- prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
+ get_random_u32_below(NEIGH_VAR(p, PROXY_DELAY));
if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c3763056c554..95da2ddc1c20 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2324,7 +2324,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
pkt_dev->curfl = 0; /*reset */
}
} else {
- flow = prandom_u32_max(pkt_dev->cflows);
+ flow = get_random_u32_below(pkt_dev->cflows);
pkt_dev->curfl = flow;
if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2380,8 +2380,8 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
- t = prandom_u32_max(pkt_dev->queue_map_max -
- pkt_dev->queue_map_min + 1) +
+ t = get_random_u32_below(pkt_dev->queue_map_max -
+ pkt_dev->queue_map_min + 1) +
pkt_dev->queue_map_min;
} else {
t = pkt_dev->cur_queue_map + 1;
@@ -2411,7 +2411,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACSRC_RND)
- mc = prandom_u32_max(pkt_dev->src_mac_count);
+ mc = get_random_u32_below(pkt_dev->src_mac_count);
else {
mc = pkt_dev->cur_src_mac_offset++;
if (pkt_dev->cur_src_mac_offset >=
@@ -2437,7 +2437,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACDST_RND)
- mc = prandom_u32_max(pkt_dev->dst_mac_count);
+ mc = get_random_u32_below(pkt_dev->dst_mac_count);
else {
mc = pkt_dev->cur_dst_mac_offset++;
@@ -2469,16 +2469,16 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
}
if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
- pkt_dev->vlan_id = prandom_u32_max(4096);
+ pkt_dev->vlan_id = get_random_u32_below(4096);
}
if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
- pkt_dev->svlan_id = prandom_u32_max(4096);
+ pkt_dev->svlan_id = get_random_u32_below(4096);
}
if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
if (pkt_dev->flags & F_UDPSRC_RND)
- pkt_dev->cur_udp_src = prandom_u32_max(
+ pkt_dev->cur_udp_src = get_random_u32_below(
pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
pkt_dev->udp_src_min;
@@ -2491,7 +2491,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
if (pkt_dev->flags & F_UDPDST_RND) {
- pkt_dev->cur_udp_dst = prandom_u32_max(
+ pkt_dev->cur_udp_dst = get_random_u32_below(
pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
pkt_dev->udp_dst_min;
} else {
@@ -2508,7 +2508,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (imn < imx) {
__u32 t;
if (pkt_dev->flags & F_IPSRC_RND)
- t = prandom_u32_max(imx - imn) + imn;
+ t = get_random_u32_below(imx - imn) + imn;
else {
t = ntohl(pkt_dev->cur_saddr);
t++;
@@ -2530,7 +2530,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->flags & F_IPDST_RND) {
do {
- t = prandom_u32_max(imx - imn) +
+ t = get_random_u32_below(imx - imn) +
imn;
s = htonl(t);
} while (ipv4_is_loopback(s) ||
@@ -2578,8 +2578,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
__u32 t;
if (pkt_dev->flags & F_TXSIZE_RND) {
- t = prandom_u32_max(pkt_dev->max_pkt_size -
- pkt_dev->min_pkt_size) +
+ t = get_random_u32_below(pkt_dev->max_pkt_size -
+ pkt_dev->min_pkt_size) +
pkt_dev->min_pkt_size;
} else {
t = pkt_dev->cur_pkt_size + 1;
@@ -2589,7 +2589,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
pkt_dev->cur_pkt_size = t;
} else if (pkt_dev->n_imix_entries > 0) {
struct imix_pkt *entry;
- __u32 t = prandom_u32_max(IMIX_PRECISION);
+ __u32 t = get_random_u32_below(IMIX_PRECISION);
__u8 entry_index = pkt_dev->imix_distribution[t];
entry = &pkt_dev->imix_entries[entry_index];
diff --git a/net/core/stream.c b/net/core/stream.c
index 75fded8495f5..5b1fe2b82eac 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -123,7 +123,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
DEFINE_WAIT_FUNC(wait, woken_wake_function);
if (sk_stream_memory_free(sk))
- current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
+ current_timeo = vm_wait = get_random_u32_below(HZ / 5) + 2;
add_wait_queue(sk_sleep(sk), &wait);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d5d745c3e345..46aa2d65e40a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -263,7 +263,7 @@ bool icmp_global_allow(void)
/* We want to use a credit of one in average, but need to randomize
* it for security reasons.
*/
- credit = max_t(int, credit - prandom_u32_max(3), 0);
+ credit = max_t(int, credit - get_random_u32_below(3), 0);
rc = true;
}
WRITE_ONCE(icmp_global.credit, credit);
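get_random_u32_below(3) is uniform over {0, 1, 2}, so the bucket pays one credit per allowed packet on average while the exact balance stays unpredictable to an observer. A sketch of the accounting, reusing the userspace model:

int credit = 5;                               /* hypothetical balance */
unsigned int cost = get_random_u32_below(3);  /* 0, 1 or 2            */
credit = credit > (int)cost ? credit - (int)cost : 0;
/* mean decrement: (0 + 1 + 2) / 3 == 1 credit, matching the comment. */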
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 81be3e0f0e70..c920aa9a62a9 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -213,7 +213,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
- int tv = prandom_u32_max(max_delay);
+ int tv = get_random_u32_below(max_delay);
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
@@ -222,7 +222,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
- int tv = prandom_u32_max(in_dev->mr_maxdelay);
+ int tv = get_random_u32_below(in_dev->mr_maxdelay);
unsigned long exp = jiffies + tv + 2;
if (in_dev->mr_gq_running &&
@@ -236,7 +236,7 @@ static void igmp_gq_start_timer(struct in_device *in_dev)
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
- int tv = prandom_u32_max(delay);
+ int tv = get_random_u32_below(delay);
if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
in_dev_hold(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4e84ed21d16f..f22051219b50 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -314,7 +314,7 @@ other_half_scan:
if (likely(remaining > 1))
remaining &= ~1U;
- offset = prandom_u32_max(remaining);
+ offset = get_random_u32_below(remaining);
/* __inet_hash_connect() favors ports having @low parity
* We do the opposite to not pollute connect() users.
*/
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index d3dc28156622..a879ec1a267d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1037,7 +1037,7 @@ ok:
* on low contention the randomness is maximal and on high contention
* it may be inexistent.
*/
- i = max_t(int, i, prandom_u32_max(8) * 2);
+ i = max_t(int, i, get_random_u32_below(8) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd1fa9f70f1a..de6e3515ab4f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -471,7 +471,7 @@ static u32 ip_idents_reserve(u32 hash, int segs)
old = READ_ONCE(*p_tstamp);
if (old != now && cmpxchg(p_tstamp, old, now) == old)
- delta = prandom_u32_max(now - old);
+ delta = get_random_u32_below(now - old);
/* If UBSAN reports an error there, please make sure your compiler
* supports -fno-strict-overflow before reporting it that was a bug
@@ -689,7 +689,7 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
} else {
/* Randomize max depth to avoid some side channels attacks. */
int max_depth = FNHE_RECLAIM_DEPTH +
- prandom_u32_max(FNHE_RECLAIM_DEPTH);
+ get_random_u32_below(FNHE_RECLAIM_DEPTH);
while (depth > max_depth) {
fnhe_remove_oldest(hash);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 54eec33c6e1c..d2c470524e58 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -618,7 +618,7 @@ static void bbr_reset_probe_bw_mode(struct sock *sk)
struct bbr *bbr = inet_csk_ca(sk);
bbr->mode = BBR_PROBE_BW;
- bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
+ bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0640453fce54..3b076e5ba932 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3646,7 +3646,8 @@ static void tcp_send_challenge_ack(struct sock *sk)
u32 half = (ack_limit + 1) >> 1;
WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
- WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
+ WRITE_ONCE(net->ipv4.tcp_challenge_count,
+ half + get_random_u32_below(ack_limit));
}
count = READ_ONCE(net->ipv4.tcp_challenge_count);
if (count > 0) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9c3f5202a97b..daf89a2eb492 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -104,7 +104,7 @@ static inline u32 cstamp_delta(unsigned long cstamp)
static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
- u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
+ u64 tmp = (900000 + get_random_u32_below(200001)) * (u64)irt;
do_div(tmp, 1000000);
return (s32)tmp;
}
@@ -112,11 +112,11 @@ static inline s32 rfc3315_s14_backoff_init(s32 irt)
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
- u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
+ u64 tmp = (1900000 + get_random_u32_below(200001)) * (u64)rt;
do_div(tmp, 1000000);
if ((s32)tmp > mrt) {
/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
- tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
+ tmp = (900000 + get_random_u32_below(200001)) * (u64)mrt;
do_div(tmp, 1000000);
}
return (s32)tmp;
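Both branches use the same millionths fixed-point trick: 900000 + uniform[0, 200001) spans [900000, 1100000] inclusive, so dividing by 1000000 scales the timeout by 0.9 .. 1.1. Worked with a hypothetical irt of 1000 ms:

uint64_t tmp = (900000ULL + get_random_u32_below(200001)) * 1000;
/* numerator is uniform in [900000000, 1100000000]               */
tmp /= 1000000;  /* do_div() in the kernel; result in [900, 1100] ms */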
@@ -3967,7 +3967,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
+ rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ?: 1);
nonce = 0;
if (idev->cnf.enhanced_dad ||
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7860383295d8..1c02160cf7a4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1050,7 +1050,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
/* called with mc_lock */
static void mld_gq_start_work(struct inet6_dev *idev)
{
- unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
+ unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
idev->mc_gq_running = 1;
if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
@@ -1068,7 +1068,7 @@ static void mld_gq_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32_max(delay);
+ unsigned long tv = get_random_u32_below(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
in6_dev_hold(idev);
@@ -1085,7 +1085,7 @@ static void mld_ifc_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32_max(delay);
+ unsigned long tv = get_random_u32_below(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
in6_dev_hold(idev);
@@ -1130,7 +1130,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
}
if (delay >= resptime)
- delay = prandom_u32_max(resptime);
+ delay = get_random_u32_below(resptime);
if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
refcount_inc(&ma->mca_refcnt);
@@ -2574,7 +2574,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
+ delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
if (cancel_delayed_work(&ma->mca_work)) {
refcount_dec(&ma->mca_refcnt);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2f355f0ec32a..e74e0361fd92 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1713,7 +1713,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
net->ipv6.rt6_stats->fib_rt_cache++;
/* Randomize max depth to avoid some side channels attacks. */
- max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
+ max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
while (bucket->depth > max_depth)
rt6_exception_remove_oldest(bucket);
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index f2579fc9c75b..3308e4cc740a 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -71,8 +71,8 @@ static struct ip_vs_dest *ip_vs_twos_schedule(struct ip_vs_service *svc,
* from 0 to total_weight
*/
total_weight += 1;
- rweight1 = prandom_u32_max(total_weight);
- rweight2 = prandom_u32_max(total_weight);
+ rweight1 = get_random_u32_below(total_weight);
+ rweight2 = get_random_u32_below(total_weight);
/* Pick two weighted servers */
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
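The scheduler draws two marks over [0, total_weight] and then chooses between the two weighted candidates (the two-choices heuristic). A userspace sketch of one weighted pick over hypothetical weights; the real code walks svc->destinations under RCU rather than an array:

static int pick_weighted(const unsigned int *weight, int n, uint32_t total)
{
        uint32_t mark = get_random_u32_below(total + 1); /* [0, total] */
        uint32_t sum = 0;
        int i;

        for (i = 0; i < n; i++) {
                sum += weight[i];
                if (sum > mark)
                        return i;        /* server i owns this slice */
        }
        return n - 1;                    /* mark == total edge case  */
}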
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f97bda06d2a9..8703812405eb 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -906,7 +906,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
- max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+ max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
@@ -1227,7 +1227,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
- max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+ max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index a95a25196943..bf591e6af005 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -223,7 +223,7 @@ u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port)
if (res != -EBUSY || (--attempts_left < 0))
break;
- port = min + prandom_u32_max(range);
+ port = min + get_random_u32_below(range);
}
return 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a662e8a5ff84..7a401d94463a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -835,7 +835,7 @@ retry:
/* Bind collision, search negative portid values. */
if (rover == -4096)
/* rover will be in range [S32_MIN, -4097] */
- rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+ rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
else if (rover >= -4096)
rover = -4097;
portid = rover--;
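The wrap arithmetic checks out: -4096 - S32_MIN evaluates to 2147479552, so the draw covers [0, 2147479551] and the sum is uniform over [S32_MIN, -4097], exactly the range the in-line comment promises:

int32_t rover = INT32_MIN + get_random_u32_below(-4096 - INT32_MIN);
/* INT32_MIN + [0, 2147479551] == [-2147483648, -4097] */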
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6ce8dd19f33c..51a47ade92e8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1350,7 +1350,7 @@ static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
if (READ_ONCE(history[i]) == rxhash)
count++;
- victim = prandom_u32_max(ROLLOVER_HLEN);
+ victim = get_random_u32_below(ROLLOVER_HLEN);
/* Avoid dirtying the cache line if possible */
if (READ_ONCE(history[victim]) != rxhash)
@@ -1386,7 +1386,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return prandom_u32_max(num);
+ return get_random_u32_below(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 62d682b96b88..be267ffaaba7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -25,7 +25,7 @@ static struct tc_action_ops act_gact_ops;
static int gact_net_rand(struct tcf_gact *gact)
{
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
- if (prandom_u32_max(gact->tcfg_pval))
+ if (get_random_u32_below(gact->tcfg_pval))
return gact->tcf_action;
return gact->tcfg_paction;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 7a25477f5d99..4194480746b0 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -168,7 +168,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
psample_group = rcu_dereference_bh(s->psample_group);
/* randomly sample packets according to rate */
- if (psample_group && (prandom_u32_max(s->rate) == 0)) {
+ if (psample_group && (get_random_u32_below(s->rate) == 0)) {
if (!skb_at_tc_ingress(skb)) {
md.in_ifindex = skb->skb_iif;
md.out_ifindex = skb->dev->ifindex;
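Comparing the bounded draw against zero turns it into a Bernoulli trial: get_random_u32_below(rate) == 0 holds for exactly one of rate equally likely outcomes. With a hypothetical rate of 100:

uint32_t rate = 100;
int sample_this = get_random_u32_below(rate) == 0;  /* P = 1/100 */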
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3ac3e5c80b6f..19c851125901 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -183,7 +183,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
int retrys = 3;
do {
- *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
+ *pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb00ac40ecb7..6ef3021e1169 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto finish_segs;
}
- skb->data[prandom_u32_max(skb_headlen(skb))] ^=
- 1<<prandom_u32_max(8);
+ skb->data[get_random_u32_below(skb_headlen(skb))] ^=
+ 1<<get_random_u32_below(8);
}
if (unlikely(sch->q.qlen >= sch->limit)) {
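The corruption path flips a single random bit in a random byte of the packet's linear header. A userspace model of just that step (the buffer stands in for skb->data, its size for skb_headlen()):

uint8_t data[64];                        /* stand-in for the skb head */
uint32_t headlen = sizeof(data);         /* skb_headlen(skb)          */
data[get_random_u32_below(headlen)] ^= 1u << get_random_u32_below(8);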
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 83628c347744..cfe72085fdc4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8319,7 +8319,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32_max(remaining) + low;
+ rover = get_random_u32_below(remaining) + low;
do {
rover++;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index f8fd98784977..ca1eba95c293 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -199,7 +199,7 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
if ((time_before(transport->hb_timer.expires, expires) ||
!timer_pending(&transport->hb_timer)) &&
!mod_timer(&transport->hb_timer,
- expires + prandom_u32_max(transport->rto)))
+ expires + get_random_u32_below(transport->rto)))
sctp_transport_hold(transport);
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f075a9fb5ccc..95ff74706104 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -677,7 +677,7 @@ static void cache_limit_defers(void)
/* Consider removing either the first or the last */
if (cache_defer_cnt > DFR_MAX) {
- if (prandom_u32_max(2))
+ if (get_random_u32_below(2))
discard = list_entry(cache_defer_list.next,
struct cache_deferred_req, recent);
else
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 915b9902f673..2e4987dcba29 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1619,7 +1619,7 @@ static int xs_get_random_port(void)
if (max < min)
return -EADDRINUSE;
range = max - min + 1;
- rand = prandom_u32_max(range);
+ rand = get_random_u32_below(range);
return rand + min;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e902b01ea3cb..b35c8701876a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3010,7 +3010,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
- u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
+ u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
while (remaining--) {
portid++;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 884eca7f6743..ff38c5a4d174 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -627,7 +627,7 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
if (!port)
port = LAST_RESERVED_PORT + 1 +
- prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+ get_random_u32_below(U32_MAX - LAST_RESERVED_PORT);
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 3d2fe7712ac5..40f831854774 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2072,7 +2072,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
} else {
u32 spi = 0;
for (h = 0; h < high-low+1; h++) {
- spi = low + prandom_u32_max(high - low + 1);
+ spi = low + get_random_u32_below(high - low + 1);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
newspi = htonl(spi);