author    Ilya Dryomov <idryomov@gmail.com>  2023-07-22 20:28:08 +0200
committer Ilya Dryomov <idryomov@gmail.com>  2023-07-26 15:08:21 +0200
commit    588159009d5b7a09c3e5904cffddbe4a4e170301 (patch)
tree      8a818cecf49f7918c52b09a3bef1843a13d525db
parent    8ff2c64c9765446c3cef804fb99da04916603e27 (diff)
rbd: retrieve and check lock owner twice before blocklisting
An attempt to acquire exclusive lock can race with the current lock owner closing the image:

1. lock is held by client123, rbd_lock() returns -EBUSY
2. get_lock_owner_info() returns client123 instance details
3. client123 closes the image, lock is released
4. find_watcher() returns 0 as there is no matching watcher anymore
5. client123 instance gets erroneously blocklisted

Particularly impacted is mirror snapshot scheduler in snapshot-based mirroring since it happens to open and close images a lot (images are opened only for as long as it takes to take the next mirror snapshot, the same client instance is used for all images).

To reduce the potential for erroneous blocklisting, retrieve the lock owner again after find_watcher() returns 0. If it's still there, make sure it matches the previously detected lock owner.

Cc: stable@vger.kernel.org # f38cb9d9c204: rbd: make get_lock_owner_info() return a single locker or NULL
Cc: stable@vger.kernel.org # 8ff2c64c9765: rbd: harden get_lock_owner_info() a bit
Cc: stable@vger.kernel.org
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
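The hunks below add this second lookup to rbd_try_lock(). As a standalone illustration of the pattern, here is a minimal user-space sketch; fetch_lock_owner(), has_watcher() and the other names are hypothetical stand-ins for get_lock_owner_info()/find_watcher(), not the kernel or libceph API, and the stubs simply simulate the owner releasing the lock between the two lookups.

/*
 * Minimal sketch of the "check the lock owner twice before blocklisting"
 * pattern.  Hypothetical names throughout -- not the kernel rbd code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lock_owner {
	char name[32];		/* entity name, e.g. "client123" */
	char cookie[32];	/* lock cookie */
};

/*
 * Stubbed environment: the owner closes the image (and thus releases
 * the lock) between the first and second lookup, so the second call
 * returns NULL.
 */
static int lookups;

static struct lock_owner *fetch_lock_owner(void)
{
	struct lock_owner *o;

	if (++lookups > 1)
		return NULL;		/* lock already released */

	o = calloc(1, sizeof(*o));
	strcpy(o->name, "client123");
	strcpy(o->cookie, "auto 123");
	return o;
}

static bool has_watcher(const struct lock_owner *o)
{
	(void)o;
	return false;			/* watcher is already gone */
}

static bool owner_equal(const struct lock_owner *a, const struct lock_owner *b)
{
	return !strcmp(a->name, b->name) && !strcmp(a->cookie, b->cookie);
}

int main(void)
{
	struct lock_owner *owner, *refreshed = NULL;

	owner = fetch_lock_owner();
	if (owner && !has_watcher(owner)) {
		/*
		 * The owner looks dead, but that conclusion may already be
		 * stale.  Re-fetch the owner and only blocklist if the very
		 * same owner still holds the lock; otherwise just retry
		 * acquiring the lock.
		 */
		refreshed = fetch_lock_owner();
		if (refreshed && owner_equal(owner, refreshed))
			printf("blocklisting %s\n", owner->name);
		else
			printf("owner gone or changed -- retry, no blocklist\n");
	}

	free(refreshed);
	free(owner);
	return 0;
}

The same reasoning is what the refreshed_locker / locker_equal() hunks below implement against the real ceph_locker structure.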
-rw-r--r--  drivers/block/rbd.c | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94629e826369..24afcc93ac01 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3849,6 +3849,15 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
+static bool locker_equal(const struct ceph_locker *lhs,
+ const struct ceph_locker *rhs)
+{
+ return lhs->id.name.type == rhs->id.name.type &&
+ lhs->id.name.num == rhs->id.name.num &&
+ !strcmp(lhs->id.cookie, rhs->id.cookie) &&
+ ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
+}
+
static void free_locker(struct ceph_locker *locker)
{
if (locker)
@@ -3969,11 +3978,11 @@ out:
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
struct ceph_client *client = rbd_dev->rbd_client->client;
- struct ceph_locker *locker;
+ struct ceph_locker *locker, *refreshed_locker;
int ret;
for (;;) {
- locker = NULL;
+ locker = refreshed_locker = NULL;
ret = rbd_lock(rbd_dev);
if (ret != -EBUSY)
@@ -3993,6 +4002,16 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
if (ret)
goto out; /* request lock or error */
+ refreshed_locker = get_lock_owner_info(rbd_dev);
+ if (IS_ERR(refreshed_locker)) {
+ ret = PTR_ERR(refreshed_locker);
+ refreshed_locker = NULL;
+ goto out;
+ }
+ if (!refreshed_locker ||
+ !locker_equal(locker, refreshed_locker))
+ goto again;
+
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
ENTITY_NAME(locker->id.name));
@@ -4014,10 +4033,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
}
again:
+ free_locker(refreshed_locker);
free_locker(locker);
}
out:
+ free_locker(refreshed_locker);
free_locker(locker);
return ret;
}