diff options
| author | Chris Mason <clm@fb.com> | 2017-01-11 06:26:12 -0800 | 
|---|---|---|
| committer | Chris Mason <clm@fb.com> | 2017-01-11 06:26:12 -0800 | 
| commit | 0bf70aebf12d8fa0d06967b72ca4b257eb6adf06 (patch) | |
| tree | 13f6063275339627603ed85ae25d5898bf5981a7 /fs/btrfs/async-thread.c | |
| parent | 3dda13a8ad787f3d4c4f18c8c05f8eebc7ea135a (diff) | |
| parent | 562a7a07bf61e2949f7cbdb6ac7537ad9e2794d1 (diff) | |
Merge branch 'tracepoint-updates-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.10
Diffstat (limited to 'fs/btrfs/async-thread.c')
| -rw-r--r-- | fs/btrfs/async-thread.c | 15 | 
1 files changed, 11 insertions, 4 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d197724519..ff0b0be92d61 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 	unsigned long flags;
 
 	while (1) {
+		void *wtag;
+
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		spin_unlock_irqrestore(lock, flags);
 
 		/*
-		 * we don't want to call the ordered free functions
-		 * with the lock held though
+		 * We don't want to call the ordered free functions with the
+		 * lock held though. Save the work as tag for the trace event,
+		 * because the callback could free the structure.
 		 */
+		wtag = work;
 		work->ordered_free(work);
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 	}
 	spin_unlock_irqrestore(lock, flags);
 }
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 static void normal_work_helper(struct btrfs_work *work)
 {
 	struct __btrfs_workqueue *wq;
+	void *wtag;
 	int need_order = 0;
 
 	/*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
 	if (work->ordered_func)
 		need_order = 1;
 	wq = work->wq;
+	/* Safe for tracepoints in case work gets freed by the callback */
+	wtag = work;
 
 	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
 		run_ordered_work(wq);
 	}
 	if (!need_order)
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
