Diffstat (limited to 'include/rv')
-rw-r--r--  include/rv/da_monitor.h   172
-rw-r--r--  include/rv/ltl_monitor.h  186
2 files changed, 272 insertions(+), 86 deletions(-)
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 510c88bfabd4..17fa4f6e5ea6 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -19,45 +19,22 @@
#ifdef CONFIG_RV_REACTORS
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static char REACT_MSG_##name[1024]; \
- \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- snprintf(REACT_MSG_##name, 1024, \
- "rv: monitor %s does not allow event %s on state %s\n", \
- #name, \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(curr_state)); \
- return REACT_MSG_##name; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
- if (rv_##name.react) \
- rv_##name.react(msg); \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return rv_reacting_on(); \
+ if (!rv_reacting_on() || !rv_##name.react) \
+ return; \
+ rv_##name.react("rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
}
#else /* CONFIG_RV_REACTOR */
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- return NULL; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
return; \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return 0; \
}
#endif
@@ -78,23 +55,6 @@ static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
} \
\
/* \
- * da_monitor_curr_state_##name - return the current state \
- */ \
-static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
-{ \
- return da_mon->curr_state; \
-} \
- \
-/* \
- * da_monitor_set_state_##name - set the new current state \
- */ \
-static inline void \
-da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
-{ \
- da_mon->curr_state = state; \
-} \
- \
-/* \
* da_monitor_start_##name - start monitoring \
* \
* The monitor will ignore all events until monitoring is set to true. This \
@@ -150,65 +110,81 @@ static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon)
* Event handler for implicit monitors. Implicit monitor is the one which the
* handler does not need to specify which da_monitor to manipulate. Examples
* of implicit monitor are the per_cpu or the global ones.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
\
static inline bool \
da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s", \
+ model_get_event_name_##name(event), #name); \
return false; \
} \
/*
* Event handler for per_task monitors.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
\
static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s", \
+ model_get_event_name_##name(event), #name); \
return false; \
}
@@ -512,6 +488,30 @@ da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event)
__da_handle_event_##name(da_mon, tsk, event); \
\
return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling event. \
+ */ \
+static inline bool \
+da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
}
/*
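
For readers following the change in da_event_##name() above: the new code is the usual bounded compare-and-swap retry loop. The stand-alone user-space sketch below is illustrative only and not part of the patch; the helper names and the retry bound stand in for model_get_next_state_##name(), try_cmpxchg() and MAX_DA_RETRY_RACING_EVENTS.

/* Illustrative sketch: read state, compute next state, retry the CAS a bounded number of times. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 3	/* stands in for MAX_DA_RETRY_RACING_EVENTS */

static _Atomic int curr_state;

/* Stands in for model_get_next_state_##name(); negative means no valid transition. */
static int next_state(int state, int event)
{
	return (state + event < 4) ? state + event : -1;
}

static bool handle_event(int event)
{
	int old = atomic_load(&curr_state);

	for (int i = 0; i < MAX_RETRIES; i++) {
		int new = next_state(old, event);

		if (new < 0)
			return false;	/* the kernel code reacts and traces the error here */
		/* On failure, 'old' is refreshed with the value another CPU installed. */
		if (atomic_compare_exchange_strong(&curr_state, &old, new))
			return true;	/* the kernel code emits trace_event_##name() here */
	}
	fprintf(stderr, "retries exhausted\n");
	return false;
}

int main(void)
{
	return handle_event(1) ? 0 : 1;
}
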
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
new file mode 100644
index 000000000000..67031a774e3d
--- /dev/null
+++ b/include/rv/ltl_monitor.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * This file must be combined with the $(MODEL_NAME).h file generated by
+ * tools/verification/rvgen.
+ */
+
+#include <linux/args.h>
+#include <linux/rv.h>
+#include <linux/stringify.h>
+#include <linux/seq_buf.h>
+#include <rv/instrumentation.h>
+#include <trace/events/task.h>
+#include <trace/events/sched.h>
+
+#ifndef MONITOR_NAME
+#error "Please include $(MODEL_NAME).h generated by rvgen"
+#endif
+
+#ifdef CONFIG_RV_REACTORS
+#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
+static struct rv_monitor RV_MONITOR_NAME;
+
+static void rv_cond_react(struct task_struct *task)
+{
+ if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
+ return;
+ RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
+ task->comm, task->pid);
+}
+#else
+static void rv_cond_react(struct task_struct *task)
+{
+}
+#endif
+
+static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+
+static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
+static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
+
+static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
+{
+ return &task->rv[ltl_monitor_slot].ltl_mon;
+}
+
+static void ltl_task_init(struct task_struct *task, bool task_creation)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ memset(&mon->states, 0, sizeof(mon->states));
+
+ for (int i = 0; i < LTL_NUM_ATOM; ++i)
+ __set_bit(i, mon->unknown_atoms);
+
+ ltl_atoms_init(task, mon, task_creation);
+ ltl_atoms_fetch(task, mon);
+}
+
+static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
+{
+ ltl_task_init(task, true);
+}
+
+static int ltl_monitor_init(void)
+{
+ struct task_struct *g, *p;
+ int ret, cpu;
+
+ ret = rv_get_task_monitor_slot();
+ if (ret < 0)
+ return ret;
+
+ ltl_monitor_slot = ret;
+
+ rv_attach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ read_lock(&tasklist_lock);
+
+ for_each_process_thread(g, p)
+ ltl_task_init(p, false);
+
+ for_each_present_cpu(cpu)
+ ltl_task_init(idle_task(cpu), false);
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+static void ltl_monitor_destroy(void)
+{
+ rv_detach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ rv_put_task_monitor_slot(ltl_monitor_slot);
+ ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+}
+
+static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(task);
+ rv_cond_react(task);
+}
+
+static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
+{
+ if (rv_ltl_all_atoms_known(mon))
+ ltl_start(task, mon);
+}
+
+static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
+{
+ __clear_bit(atom, mon->unknown_atoms);
+ if (value)
+ __set_bit(atom, mon->atoms);
+ else
+ __clear_bit(atom, mon->atoms);
+}
+
+static void
+ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
+{
+ const char *format_str = "%s";
+ DECLARE_SEQ_BUF(atoms, 64);
+ char states[32], next[32];
+ int i;
+
+ if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
+ return;
+
+ snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
+ snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);
+
+ for (i = 0; i < LTL_NUM_ATOM; ++i) {
+ if (test_bit(i, mon->atoms)) {
+ seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
+ format_str = ",%s";
+ }
+ }
+
+ CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
+}
+
+static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
+{
+ DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};
+
+ if (!rv_ltl_valid_state(mon))
+ return;
+
+ for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
+ if (test_bit(i, mon->states))
+ ltl_possible_next_states(mon, i, next_states);
+ }
+
+ ltl_trace_event(task, mon, next_states);
+
+ memcpy(mon->states, next_states, sizeof(next_states));
+
+ if (!rv_ltl_valid_state(mon))
+ ltl_illegal_state(task, mon);
+}
+
+static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_set(mon, atom, value);
+ ltl_atoms_fetch(task, mon);
+
+ if (!rv_ltl_valid_state(mon)) {
+ ltl_attempt_start(task, mon);
+ return;
+ }
+
+ ltl_validate(task, mon);
+}
+
+static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_update(task, atom, value);
+
+ ltl_atom_set(mon, atom, !value);
+ ltl_validate(task, mon);
+}
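
The helpers above are meant to be driven from tracepoint handlers in the rvgen-generated monitor. The fragment below is a hypothetical illustration only: the atom names and handler prototypes are invented here, and a real monitor takes its atoms from the generated $(MODEL_NAME).h.

/* Hypothetical monitor glue, illustration only. */
static void handle_sched_switch(void *data, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	/* Level-triggered atom: record its new value and re-validate the run. */
	ltl_atom_update(prev, LTL_RUNNING, false);
	ltl_atom_update(next, LTL_RUNNING, true);
}

static void handle_sched_wakeup(void *data, struct task_struct *task)
{
	/* Edge-triggered atom: assert it for one step, then drop it again. */
	ltl_atom_pulse(task, LTL_WAKEUP, true);
}

Such handlers would be attached from the monitor's enable function with rv_attach_trace_probe(), next to ltl_monitor_init(), in the same way ltl_monitor_init() itself attaches handle_task_newtask above.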