// SPDX-License-Identifier: GPL-2.0-only
/*
 * ratelimit.c - Generic rate limiting of events/callbacks.
 *
 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
 *
 * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
 * parameter. Now every user can use their own standalone ratelimit_state.
 */

#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/export.h>

/*
 * __ratelimit - rate limiting
 * @rs: ratelimit_state data
 * @func: name of calling function
 *
 * This enforces a rate limit: not more than @rs->burst callbacks
 * in every @rs->interval.
 *
 * RETURNS:
 * 0 means callbacks will be suppressed.
 * 1 means go ahead and do it.
 */
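
/*
 * A minimal usage sketch (illustrative only; the state name and message
 * are made up). Callers normally use the __ratelimit() macro from
 * <linux/ratelimit.h>, which supplies __func__ for @func:
 *
 *	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);
 *
 *	if (__ratelimit(&my_rs))
 *		pr_warn("at most 10 of these every 5 seconds\n");
 */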
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	/*
	 * Paired with WRITE_ONCE() in .proc_handler().
	 * Changing the two values separately could be inconsistent
	 * and some messages could be lost. (See: net_ratelimit_state).
	 */
	int interval = READ_ONCE(rs->interval);
	int burst = READ_ONCE(rs->burst);
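	/*
	 * (Illustrative sketch, not code from this file: an updater such
	 * as a sysctl .proc_handler is assumed to publish new values with
	 *
	 *	WRITE_ONCE(rs->interval, new_interval);
	 *	WRITE_ONCE(rs->burst, new_burst);
	 *
	 * so each of the two reads above is tear-free, even though the
	 * pair may still be observed mid-update.)
	 */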
	unsigned long flags;
	int ret = 0;

	/*
	 * A zero interval means never limit; otherwise a non-positive
	 * burst means always limit.
	 */
	if (interval <= 0 || burst <= 0) {
		WARN_ONCE(interval < 0 || burst < 0,
			  "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n",
			  interval, burst);
		ret = interval == 0 || burst > 0;
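		/*
		 * e.g. interval == 0 yields ret = 1 (never limited), while
		 * interval > 0 with burst <= 0 yields ret = 0 (always
		 * limited).
		 */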
		if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) ||
		    (!interval && !burst) ||
		    !raw_spin_trylock_irqsave(&rs->lock, flags))
			goto nolock_ret;

		/* Force re-initialization once re-enabled. */
		rs->flags &= ~RATELIMIT_INITIALIZED;
		goto unlock_ret;
	}

	/*
	 * If we contend on this state's lock then just check whether
	 * the current burst has been exhausted. This might produce a
	 * false positive when we are past the interval and the current
	 * lock owner is just about to reset it.
	 */
	if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
		if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
		    atomic_read(&rs->rs_n_left) > 0 &&
		    atomic_dec_return(&rs->rs_n_left) >= 0)
			ret = 1;

		goto nolock_ret;
	}

	if (!(rs->flags & RATELIMIT_INITIALIZED)) {
		rs->begin = jiffies;
		rs->flags |= RATELIMIT_INITIALIZED;
		atomic_set(&rs->rs_n_left, rs->burst);
	}

	if (time_is_before_jiffies(rs->begin + interval)) {
		int m;

		/*
		 * Reset rs_n_left ASAP to reduce false positives
		 * in parallel calls, see above.
		 */
		atomic_set(&rs->rs_n_left, rs->burst);
		rs->begin = jiffies;

		if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
			m = ratelimit_state_reset_miss(rs);
			if (m) {
				printk_deferred(KERN_WARNING
						"%s: %d callbacks suppressed\n",
						func, m);
			}
		}
	}

	/* Note that the burst might be taken by a parallel call. */
	if (atomic_read(&rs->rs_n_left) > 0 &&
	    atomic_dec_return(&rs->rs_n_left) >= 0)
		ret = 1;

unlock_ret:
	raw_spin_unlock_irqrestore(&rs->lock, flags);

nolock_ret:
	if (!ret)
		ratelimit_state_inc_miss(rs);

	return ret;
}
EXPORT_SYMBOL(___ratelimit);
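
/*
 * A hedged sketch of dynamic initialization, for a ratelimit_state
 * embedded in another structure (the names below are illustrative):
 *
 *	struct ratelimit_state rs;
 *
 *	ratelimit_state_init(&rs, HZ, 100);
 *	ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
 *
 * With RATELIMIT_MSG_ON_RELEASE set, the "callbacks suppressed" report
 * is skipped above and deferred to ratelimit_state_exit().
 */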