Diffstat (limited to 'fs/ceph/locks.c')
-rw-r--r--	fs/ceph/locks.c	212
1 files changed, 128 insertions, 84 deletions
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 9dae2ec7e1fa..dd764f9c64b9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -7,6 +7,7 @@
 
 #include "super.h"
 #include "mds_client.h"
+#include <linux/filelock.h>
 #include <linux/ceph/pagelist.h>
 
 static u64 lock_secret;
@@ -32,20 +33,36 @@ void __init ceph_flock_init(void)
 
 static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
-	struct inode *inode = file_inode(src->fl_file);
+	struct inode *inode = file_inode(dst->c.flc_file);
 	atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+	dst->fl_u.ceph.inode = igrab(inode);
 }
 
+/*
+ * Do not use the 'fl->fl_file' in release function, which
+ * is possibly already released by another thread.
+ */
 static void ceph_fl_release_lock(struct file_lock *fl)
 {
-	struct inode *inode = file_inode(fl->fl_file);
-	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct inode *inode = fl->fl_u.ceph.inode;
+	struct ceph_inode_info *ci;
+
+	/*
+	 * If inode is NULL it should be a request file_lock,
+	 * nothing we can do.
+	 */
+	if (!inode)
+		return;
+
+	ci = ceph_inode(inode);
 	if (atomic_dec_and_test(&ci->i_filelock_ref)) {
 		/* clear error when all locks are released */
 		spin_lock(&ci->i_ceph_lock);
 		ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
 		spin_unlock(&ci->i_ceph_lock);
 	}
+	fl->fl_u.ceph.inode = NULL;
+	iput(inode);
 }
 
 static const struct file_lock_operations ceph_fl_lock_ops = {
@@ -53,13 +70,14 @@ static const struct file_lock_operations ceph_fl_lock_ops = {
 	.fl_release_private = ceph_fl_release_lock,
 };
 
-/**
+/*
  * Implement fcntl and flock locking functions.
  */
 static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 			     int cmd, u8 wait, struct file_lock *fl)
 {
-	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+	struct ceph_client *cl = mdsc->fsc->client;
 	struct ceph_mds_request *req;
 	int err;
 	u64 length = 0;
@@ -73,7 +91,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 		 * window. Caller function will decrease the counter.
 		 */
 		fl->fl_ops = &ceph_fl_lock_ops;
-		atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+		fl->fl_ops->fl_copy_lock(fl, NULL);
 	}
 
 	if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
@@ -92,34 +110,34 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 	else
 		length = fl->fl_end - fl->fl_start + 1;
 
-	owner = secure_addr(fl->fl_owner);
+	owner = secure_addr(fl->c.flc_owner);
 
-	dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
-	     "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
-	     (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
-	     wait, fl->fl_type);
+	doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, "
+	      "start: %llu, length: %llu, wait: %d, type: %d\n",
+	      (int)lock_type, (int)operation, owner,
+	      (u64) fl->c.flc_pid,
+	      fl->fl_start, length, wait, fl->c.flc_type);
 
 	req->r_args.filelock_change.rule = lock_type;
 	req->r_args.filelock_change.type = cmd;
 	req->r_args.filelock_change.owner = cpu_to_le64(owner);
-	req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
+	req->r_args.filelock_change.pid = cpu_to_le64((u64) fl->c.flc_pid);
 	req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
 	req->r_args.filelock_change.length = cpu_to_le64(length);
 	req->r_args.filelock_change.wait = wait;
 
-	if (wait)
-		req->r_wait_for_completion = ceph_lock_wait_for_completion;
-
-	err = ceph_mdsc_do_request(mdsc, inode, req);
-
-	if (operation == CEPH_MDS_OP_GETFILELOCK) {
-		fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
+	err = ceph_mdsc_submit_request(mdsc, inode, req);
+	if (!err)
+		err = ceph_mdsc_wait_request(mdsc, req, wait ?
+					ceph_lock_wait_for_completion : NULL);
+	if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
+		fl->c.flc_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
 		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
-			fl->fl_type = F_RDLCK;
+			fl->c.flc_type = F_RDLCK;
 		else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
-			fl->fl_type = F_WRLCK;
+			fl->c.flc_type = F_WRLCK;
 		else
-			fl->fl_type = F_UNLCK;
+			fl->c.flc_type = F_UNLCK;
 
 		fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
 		length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
@@ -131,16 +149,17 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 
 	}
 	ceph_mdsc_put_request(req);
-	dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
-	     "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
-	     (int)operation, (u64)fl->fl_pid, fl->fl_start,
-	     length, wait, fl->fl_type, err);
+	doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, "
+	      "length: %llu, wait: %d, type: %d, err code %d\n",
+	      (int)lock_type, (int)operation, (u64) fl->c.flc_pid,
+	      fl->fl_start, length, wait, fl->c.flc_type, err);
 	return err;
 }
 
 static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
 					 struct ceph_mds_request *req)
 {
+	struct ceph_client *cl = mdsc->fsc->client;
 	struct ceph_mds_request *intr_req;
 	struct inode *inode = req->r_inode;
 	int err, lock_type;
@@ -158,8 +177,7 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
 	if (!err)
 		return 0;
 
-	dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
-	     req->r_tid);
+	doutc(cl, "request %llu was interrupted\n", req->r_tid);
 
 	mutex_lock(&mdsc->mutex);
 	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
@@ -203,11 +221,29 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
 	if (err && err != -ERESTARTSYS)
 		return err;
 
-	wait_for_completion_killable(&req->r_safe_completion);
+	err = wait_for_completion_killable(&req->r_safe_completion);
+	if (err)
+		return err;
+
 	return 0;
 }
 
-/**
+static int try_unlock_file(struct file *file, struct file_lock *fl)
+{
+	int err;
+	unsigned int orig_flags = fl->c.flc_flags;
+	fl->c.flc_flags |= FL_EXISTS;
+	err = locks_lock_file_wait(file, fl);
+	fl->c.flc_flags = orig_flags;
+	if (err == -ENOENT) {
+		if (!(orig_flags & FL_EXISTS))
+			err = 0;
+		return err;
+	}
+	return 1;
+}
+
+/*
  * Attempt to set an fcntl lock.
  * For now, this just goes away to the server. Later it may be more awesome.
  */
@@ -215,18 +251,19 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_client *cl = ceph_inode_to_client(inode);
 	int err = 0;
 	u16 op = CEPH_MDS_OP_SETFILELOCK;
 	u8 wait = 0;
 	u8 lock_cmd;
 
-	if (!(fl->fl_flags & FL_POSIX))
-		return -ENOLCK;
-	/* No mandatory locks */
-	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
+	if (!(fl->c.flc_flags & FL_POSIX))
 		return -ENOLCK;
 
-	dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
+	if (ceph_inode_is_shutdown(inode))
+		return -ESTALE;
+
+	doutc(cl, "fl_owner: %p\n", fl->c.flc_owner);
 
 	/* set wait bit as appropriate, then make command as Ceph expects it*/
 	if (IS_GETLK(cmd))
@@ -237,34 +274,31 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
 		err = -EIO;
-	} else if (op == CEPH_MDS_OP_SETFILELOCK) {
-		/*
-		 * increasing i_filelock_ref closes race window between
-		 * handling request reply and adding file_lock struct to
-		 * inode. Otherwise, i_auth_cap may get trimmed in the
-		 * window. Caller function will decrease the counter.
-		 */
-		fl->fl_ops = &ceph_fl_lock_ops;
-		atomic_inc(&ci->i_filelock_ref);
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	if (err < 0) {
-		if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
+		if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl))
 			posix_lock_file(file, fl, NULL);
 		return err;
 	}
 
-	if (F_RDLCK == fl->fl_type)
+	if (lock_is_read(fl))
 		lock_cmd = CEPH_LOCK_SHARED;
-	else if (F_WRLCK == fl->fl_type)
+	else if (lock_is_write(fl))
 		lock_cmd = CEPH_LOCK_EXCL;
 	else
 		lock_cmd = CEPH_LOCK_UNLOCK;
 
+	if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl)) {
+		err = try_unlock_file(file, fl);
+		if (err <= 0)
+			return err;
+	}
+
 	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
 	if (!err) {
-		if (op == CEPH_MDS_OP_SETFILELOCK) {
-			dout("mds locked, locking locally\n");
+		if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->c.flc_type) {
+			doutc(cl, "locking locally\n");
 			err = posix_lock_file(file, fl, NULL);
 			if (err) {
 				/* undo! This should only happen if
@@ -272,8 +306,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 				 * deadlock. */
 				ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
 						  CEPH_LOCK_UNLOCK, 0, fl);
-				dout("got %d on posix_lock_file, undid lock\n",
-				     err);
+				doutc(cl, "got %d on posix_lock_file, undid lock\n",
+				      err);
 			}
 		}
 	}
@@ -284,29 +318,26 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_client *cl = ceph_inode_to_client(inode);
 	int err = 0;
 	u8 wait = 0;
 	u8 lock_cmd;
 
-	if (!(fl->fl_flags & FL_FLOCK))
+	if (!(fl->c.flc_flags & FL_FLOCK))
 		return -ENOLCK;
-	/* No mandatory locks */
-	if (fl->fl_type & LOCK_MAND)
-		return -EOPNOTSUPP;
 
-	dout("ceph_flock, fl_file: %p\n", fl->fl_file);
+	if (ceph_inode_is_shutdown(inode))
+		return -ESTALE;
+
+	doutc(cl, "fl_file: %p\n", fl->c.flc_file);
 
 	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
 		err = -EIO;
-	} else {
-		/* see comment in ceph_lock */
-		fl->fl_ops = &ceph_fl_lock_ops;
-		atomic_inc(&ci->i_filelock_ref);
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	if (err < 0) {
-		if (F_UNLCK == fl->fl_type)
+		if (lock_is_unlock(fl))
 			locks_lock_file_wait(file, fl);
 		return err;
 	}
@@ -314,22 +345,29 @@
 	if (IS_SETLKW(cmd))
 		wait = 1;
 
-	if (F_RDLCK == fl->fl_type)
+	if (lock_is_read(fl))
 		lock_cmd = CEPH_LOCK_SHARED;
-	else if (F_WRLCK == fl->fl_type)
+	else if (lock_is_write(fl))
 		lock_cmd = CEPH_LOCK_EXCL;
 	else
 		lock_cmd = CEPH_LOCK_UNLOCK;
 
+	if (lock_is_unlock(fl)) {
+		err = try_unlock_file(file, fl);
+		if (err <= 0)
+			return err;
+	}
+
 	err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
 				inode, lock_cmd, wait, fl);
-	if (!err) {
+	if (!err && F_UNLCK != fl->c.flc_type) {
 		err = locks_lock_file_wait(file, fl);
 		if (err) {
 			ceph_lock_message(CEPH_LOCK_FLOCK,
 					  CEPH_MDS_OP_SETFILELOCK,
 					  inode, CEPH_LOCK_UNLOCK, 0, fl);
-			dout("got %d on locks_lock_file_wait, undid lock\n", err);
+			doutc(cl, "got %d on locks_lock_file_wait, undid lock\n",
+			      err);
 		}
 	}
 	return err;
@@ -341,39 +379,43 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
+	struct ceph_client *cl = ceph_inode_to_client(inode);
 	struct file_lock *lock;
 	struct file_lock_context *ctx;
 
 	*fcntl_count = 0;
 	*flock_count = 0;
 
-	ctx = inode->i_flctx;
+	ctx = locks_inode_context(inode);
 	if (ctx) {
 		spin_lock(&ctx->flc_lock);
-		list_for_each_entry(lock, &ctx->flc_posix, fl_list)
+		for_each_file_lock(lock, &ctx->flc_posix)
 			++(*fcntl_count);
-		list_for_each_entry(lock, &ctx->flc_flock, fl_list)
+		for_each_file_lock(lock, &ctx->flc_flock)
 			++(*flock_count);
 		spin_unlock(&ctx->flc_lock);
 	}
-	dout("counted %d flock locks and %d fcntl locks\n",
-	     *flock_count, *fcntl_count);
+	doutc(cl, "counted %d flock locks and %d fcntl locks\n",
+	      *flock_count, *fcntl_count);
 }
 
 /*
  * Given a pointer to a lock, convert it to a ceph filelock
 */
-static int lock_to_ceph_filelock(struct file_lock *lock,
+static int lock_to_ceph_filelock(struct inode *inode,
+				 struct file_lock *lock,
 				 struct ceph_filelock *cephlock)
 {
+	struct ceph_client *cl = ceph_inode_to_client(inode);
 	int err = 0;
+
 	cephlock->start = cpu_to_le64(lock->fl_start);
 	cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
 	cephlock->client = cpu_to_le64(0);
-	cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
-	cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
+	cephlock->pid = cpu_to_le64((u64) lock->c.flc_pid);
+	cephlock->owner = cpu_to_le64(secure_addr(lock->c.flc_owner));
 
-	switch (lock->fl_type) {
+	switch (lock->c.flc_type) {
 	case F_RDLCK:
 		cephlock->type = CEPH_LOCK_SHARED;
 		break;
@@ -384,14 +426,15 @@ static int lock_to_ceph_filelock(struct file_lock *lock,
 		cephlock->type = CEPH_LOCK_UNLOCK;
 		break;
 	default:
-		dout("Have unknown lock type %d\n", lock->fl_type);
+		doutc(cl, "Have unknown lock type %d\n",
+		      lock->c.flc_type);
 		err = -EINVAL;
 	}
 
 	return err;
 }
 
-/**
+/*
  * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
@@ -401,37 +444,38 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
 				int num_fcntl_locks, int num_flock_locks)
 {
 	struct file_lock *lock;
-	struct file_lock_context *ctx = inode->i_flctx;
+	struct file_lock_context *ctx = locks_inode_context(inode);
+	struct ceph_client *cl = ceph_inode_to_client(inode);
 	int err = 0;
 	int seen_fcntl = 0;
 	int seen_flock = 0;
 	int l = 0;
 
-	dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
-	     num_fcntl_locks);
+	doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks,
+	      num_fcntl_locks);
 
 	if (!ctx)
 		return 0;
 
 	spin_lock(&ctx->flc_lock);
-	list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
+	for_each_file_lock(lock, &ctx->flc_posix) {
 		++seen_fcntl;
 		if (seen_fcntl > num_fcntl_locks) {
 			err = -ENOSPC;
 			goto fail;
 		}
-		err = lock_to_ceph_filelock(lock, &flocks[l]);
+		err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
 		if (err)
 			goto fail;
 		++l;
 	}
-	list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+	for_each_file_lock(lock, &ctx->flc_flock) {
 		++seen_flock;
 		if (seen_flock > num_flock_locks) {
 			err = -ENOSPC;
 			goto fail;
 		}
-		err = lock_to_ceph_filelock(lock, &flocks[l]);
+		err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
 		if (err)
 			goto fail;
 		++l;
@@ -441,7 +485,7 @@ fail:
 	return err;
 }
 
-/**
+/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
