| author | Arnd Bergmann <arnd@arndb.de> | 2012-03-15 16:11:36 +0000 | 
|---|---|---|
| committer | Arnd Bergmann <arnd@arndb.de> | 2012-03-15 16:11:40 +0000 | 
| commit | 86a30bece9ad4cc91c393a829a7b128291e0fb65 (patch) | |
| tree | 6cb3e6ad413d74118535f77436056c8d3cfae0eb /fs/aio.c | |
| parent | 243d58ec5792299fa212d05a4113c0ebac2df6a3 (diff) | |
| parent | a323f66439c04d1c3ae4dc20cc2d44d52ee43c9f (diff) | |
Merge branch 'fixes-non-critical' of git://github.com/hzhuang1/linux into next/maintainers
* 'fixes-non-critical' of git://github.com/hzhuang1/linux:
  MAINTAINERS: update MAINTAINERS email entry
  MAINTAINERS: update maintainer entry for pxa/hx4700
(update to v3.3-rc7)
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'fs/aio.c')
| -rw-r--r-- | fs/aio.c | 24 | 
1 file changed, 12 insertions, 12 deletions
```diff
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 	return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
 
-	atomic_set(&ctx->users, 1);
+	atomic_set(&ctx->users, 2);
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->ring_info.ring_lock);
 	init_waitqueue_head(&ctx->wait);
@@ -490,6 +484,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 		kmem_cache_free(kiocb_cachep, req);
 		ctx->reqs_active--;
 	}
+	if (unlikely(!ctx->reqs_active && ctx->dead))
+		wake_up_all(&ctx->wait);
 	spin_unlock_irq(&ctx->ctx_lock);
 }
 
@@ -607,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
 			fput(req->ki_filp);
 
 		/* Link the iocb into the context's free list */
+		rcu_read_lock();
 		spin_lock_irq(&ctx->ctx_lock);
 		really_put_req(ctx, req);
+		/*
+		 * at that point ctx might've been killed, but actual
+		 * freeing is RCU'd
+		 */
 		spin_unlock_irq(&ctx->ctx_lock);
+		rcu_read_unlock();
 
-		put_ioctx(ctx);
 		spin_lock_irq(&fput_lock);
 	}
 	spin_unlock_irq(&fput_lock);
@@ -642,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
 	if (unlikely(!fput_atomic(req->ki_filp))) {
-		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
@@ -1336,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret)
+		if (!ret) {
+			put_ioctx(ioctx);
 			return 0;
-
-		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+		}
 		io_destroy(ioctx);
 	}
 
```
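The fs/aio.c changes above drop the unconditional get_ioctx() helper: ioctx_alloc() now starts the refcount at 2 (io_setup() drops one reference right after copying the id to user space), lookups go through try_get_ioctx()'s atomic_inc_not_zero() so a context whose count has already hit zero can never be revived, and the final freeing stays deferred through RCU. Purely as an illustration of that try-get refcounting pattern, here is a minimal user-space sketch using C11 atomics; the obj_* names are hypothetical and not taken from fs/aio.c:

```c
/*
 * User-space sketch of the "try-get" refcount pattern, using C11
 * atomics instead of the kernel's atomic_t. obj_tryget() refuses to
 * take a reference once the count has dropped to zero, so an object
 * that is already being torn down is never resurrected.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int users;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));
	if (!o)
		return NULL;
	/* Two references: one for the creator, one for the lookup table. */
	atomic_init(&o->users, 2);
	return o;
}

/* Rough analogue of try_get_ioctx(): an "increment unless zero". */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->users);
	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
			return true;	/* took a reference */
	}
	return false;		/* object already dying */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->users, 1) == 1)
		free(o);	/* last reference dropped */
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (obj_tryget(o))	/* lookup succeeds while users > 0 */
		obj_put(o);

	obj_put(o);		/* creator's reference */
	obj_put(o);		/* lookup-table reference: frees the object */
	return 0;
}
```

The property mirrored here is that the "get" can fail: a stale pointer found in a lookup structure never brings an object back to life after its last reference has been dropped, which is the race the patch closes for io_setup()/io_destroy().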
