Diffstat (limited to 'include/linux/backing-dev-defs.h')
-rw-r--r--  include/linux/backing-dev-defs.h | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 420de5c7c7f9..b7c7be6f5986 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -45,6 +45,28 @@ enum wb_stat_item {
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_VMSCAN,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FREE_MORE_MEM,
+ WB_REASON_FS_FREE_SPACE,
+ /*
+ * The bdi forker thread no longer exists and the work is now done
+ * by an emergency worker.  However, this value is visible to
+ * userland through tracepoints and we keep exposing exactly the
+ * same information, so the name stays even though it no longer
+ * matches the implementation.
+ */
+ WB_REASON_FORKER_THREAD,
+
+ WB_REASON_MAX,
+};
+
+/*
* For cgroup writeback, multiple wb's may map to the same blkcg. Those
* wb's can operate mostly independently but should share the congested
* state. To facilitate such sharing, the congested state is tracked using
@@ -116,6 +138,7 @@ struct bdi_writeback {
struct fprop_local_percpu completions;
int dirty_exceeded;
+ enum wb_reason start_all_reason;
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
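
For readers following the tracepoint angle mentioned in the comment above, here is a minimal standalone sketch in plain C (userspace, not kernel code) of how the wb_reason values added by this patch can be mapped to human-readable labels of the kind a trace consumer would see. The label strings and the wb_reason_name table are illustrative assumptions for this sketch, not text taken from the kernel's trace headers.

/*
 * Standalone sketch: map each wb_reason value to a short label similar
 * to what writeback tracepoints expose to userland.  The labels below
 * are illustrative, not copied from the kernel's trace headers.
 */
#include <stdio.h>

enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	WB_REASON_FORKER_THREAD,	/* name kept for userland visibility */

	WB_REASON_MAX,
};

static const char *const wb_reason_name[WB_REASON_MAX] = {
	[WB_REASON_BACKGROUND]		= "background",
	[WB_REASON_VMSCAN]		= "vmscan",
	[WB_REASON_SYNC]		= "sync",
	[WB_REASON_PERIODIC]		= "periodic",
	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
	[WB_REASON_FORKER_THREAD]	= "forker_thread",
};

int main(void)
{
	/* Print every reason alongside the label a trace consumer might see. */
	for (int i = 0; i < WB_REASON_MAX; i++)
		printf("%d -> %s\n", i, wb_reason_name[i]);
	return 0;
}

The second hunk's start_all_reason field appears intended to remember which of these reasons triggered a "start all" writeback request on a bdi_writeback, so the worker that eventually runs can report the original cause; that reading is inferred from the field name and this diff alone, not confirmed here by the rest of the patch series.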