Diffstat (limited to 'drivers/md/bcache/journal.c')
-rw-r--r--  drivers/md/bcache/journal.c  75
1 file changed, 45 insertions, 30 deletions
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7eafdf09a0ae..18039affc306 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -44,11 +44,11 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
closure_init_stack(&cl);
- pr_debug("reading %llu", (uint64_t) bucket);
+ pr_debug("reading %u", bucket_index);
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
- len = min_t(unsigned, left, PAGE_SECTORS * 8);
+ len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
@@ -74,19 +74,28 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(&ca->sb))
+ if (j->magic != jset_magic(&ca->sb)) {
+ pr_debug("%u: bad magic", bucket_index);
return ret;
+ }
- if (bytes > left << 9)
+ if (bytes > left << 9 ||
+ bytes > PAGE_SIZE << JSET_BITS) {
+ pr_info("%u: too big, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
if (bytes > len << 9)
goto reread;
- if (j->csum != csum_set(j))
+ if (j->csum != csum_set(j)) {
+ pr_info("%u: bad csum, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
- blocks = set_blocks(j, ca->set);
+ blocks = set_blocks(j, block_bytes(ca->set));
while (!list_empty(list)) {
i = list_first_entry(list,
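
The hunk above tightens the entry validation in journal_read_bucket(): an unexpected magic is just the normal end of the journal (debug-level message), while an entry that can never fit, or whose checksum fails, is reported at info level with its bucket, size and offset. The size check now also bounds the entry against the read buffer itself (PAGE_SIZE << JSET_BITS, i.e. 8 pages, matching the PAGE_SECTORS << JSET_BITS read length earlier in the hunk). A standalone userspace sketch of that check ordering, with stand-in types and helpers (only the 8-page bound is taken from the patch):

/*
 * Illustrative model of the validation order in journal_read_bucket();
 * types and helpers are simplified stand-ins, not the kernel's.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define JSET_BITS  3	/* journal read buffer = 8 pages, per the hunk above */

enum jset_status { JSET_END, JSET_BAD, JSET_REREAD, JSET_OK };

struct jset_hdr {
	uint64_t magic;
	uint64_t csum;
	size_t   bytes;		/* on-disk size of this journal entry */
};

static enum jset_status check_entry(const struct jset_hdr *j,
				    uint64_t want_magic, uint64_t want_csum,
				    size_t left_bytes,	/* bytes left in the bucket */
				    size_t read_bytes)	/* bytes read so far */
{
	if (j->magic != want_magic)
		return JSET_END;		/* normal end of the journal */

	if (j->bytes > left_bytes ||
	    j->bytes > (size_t)PAGE_SIZE << JSET_BITS)
		return JSET_BAD;		/* can never fit: give up */

	if (j->bytes > read_bytes)
		return JSET_REREAD;		/* fits, but read more first */

	if (j->csum != want_csum)
		return JSET_BAD;

	return JSET_OK;
}

int main(void)
{
	struct jset_hdr j = { .magic = 0x1234, .csum = 42, .bytes = 512 };

	return check_entry(&j, 0x1234, 42, 2048, 1024) == JSET_OK ? 0 : 1;
}
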
@@ -275,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
unsigned j;
@@ -313,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
n, i->j.seq - 1, start, end);
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
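
The two hunks above are part of the mechanical end() -> bset_bkey_last() rename in bch_journal_mark() and bch_journal_replay(); the iteration idiom itself is unchanged. A self-contained userspace sketch of that idiom, with a simplified key layout standing in for the kernel's bkey (real bkeys carry their size in a header field, which is all bkey_next() needs to advance):

/* Walk over the packed, variable-length keys of a journal entry. */
#include <stdint.h>
#include <stdio.h>

struct bkey {
	uint64_t u64s;		/* 64-bit words in this key, header included */
	uint64_t payload[];
};

struct jset {
	uint64_t keys;		/* total 64-bit words of key data */
	uint64_t d[];		/* the packed keys */
};

#define bset_bkey_last(j)  ((struct bkey *)((j)->d + (j)->keys))
#define bkey_next(k)	   ((struct bkey *)((uint64_t *)(k) + (k)->u64s))

static void walk_keys(struct jset *j)
{
	for (struct bkey *k = (struct bkey *)j->d;
	     k < bset_bkey_last(j);
	     k = bkey_next(k))
		printf("key spanning %llu words\n",
		       (unsigned long long)k->u64s);
}

int main(void)
{
	/* one three-word key followed by one two-word key */
	uint64_t buf[1 + 5] = { 5, 3, 0, 0, 2, 0 };

	walk_keys((struct jset *)buf);
	return 0;
}
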
@@ -555,6 +564,14 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, system_wq);
}
+static void journal_write_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+ c->journal.io_in_flight = 0;
+ spin_unlock(&c->journal.lock);
+}
+
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
@@ -562,22 +579,15 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
- unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+ unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+ c->sb.block_size;
struct bio *bio;
struct bio_list list;
bio_list_init(&list);
if (!w->need_write) {
- /*
- * XXX: have to unlock closure before we unlock journal lock,
- * else we race with bch_journal(). But this way we race
- * against cache set unregister. Doh.
- */
- set_closure_fn(cl, NULL, NULL);
- closure_sub(cl, CLOSURE_RUNNING + 1);
- spin_unlock(&c->journal.lock);
- return;
+ closure_return_with_destructor(cl, journal_write_unlock);
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -586,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write, system_wq);
}
- c->journal.blocks_free -= set_blocks(w->data, c);
+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
w->data->btree_level = c->root->level;
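
The set_blocks() calls in the hunks above now take the block size in bytes (block_bytes(c)) instead of the whole cache_set; the sectors figure is the entry size rounded up to whole blocks and converted back to 512-byte sectors via c->sb.block_size. A quick arithmetic sketch under that reading (helper names and the header size are assumptions, not the kernel's definitions):

/* Rounding sketch: entry bytes -> whole blocks -> sectors. */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static size_t entry_bytes(size_t hdr_bytes, size_t key_u64s)
{
	return hdr_bytes + key_u64s * sizeof(uint64_t);
}

static size_t entry_blocks(size_t bytes, size_t block_bytes)
{
	return DIV_ROUND_UP(bytes, block_bytes);
}

int main(void)
{
	size_t block_bytes = 4096;			/* one block, in bytes */
	size_t block_sectors = block_bytes >> 9;	/* sb.block_size analogue */
	size_t bytes = entry_bytes(64, 100);		/* hypothetical entry */
	size_t sectors = entry_blocks(bytes, block_bytes) * block_sectors;

	printf("%zu bytes -> %zu sectors\n", bytes, sectors);
	return 0;
}
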
@@ -653,10 +663,12 @@ static void journal_try_write(struct cache_set *c)
w->need_write = true;
- if (closure_trylock(cl, &c->cl))
- journal_write_unlocked(cl);
- else
+ if (!c->journal.io_in_flight) {
+ c->journal.io_in_flight = 1;
+ closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+ } else {
spin_unlock(&c->journal.lock);
+ }
}
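
Together with the new journal_write_unlock() destructor earlier in the diff, this hunk replaces the closure trylock with an explicit io_in_flight flag guarded by journal.lock: a write is only launched when none is pending, and the completion/destructor path clears the flag before dropping the lock. A minimal pthreads sketch of that single-writer-in-flight shape (names and the collapsed synchronous completion are illustrative, not bcache's):

#include <pthread.h>
#include <stdbool.h>

struct journal_model {
	pthread_mutex_t lock;
	bool io_in_flight;
};

static void write_done(struct journal_model *j)
{
	/* mirrors journal_write_unlock(): clear the flag, drop the lock */
	j->io_in_flight = false;
	pthread_mutex_unlock(&j->lock);
}

static void start_write(struct journal_model *j)
{
	/* submit the I/O here; for the sketch, completion is synchronous
	 * and runs write_done() with the lock still held */
	write_done(j);
}

static void try_write(struct journal_model *j)
{
	pthread_mutex_lock(&j->lock);
	if (!j->io_in_flight) {
		j->io_in_flight = true;
		start_write(j);		/* consumes the lock, like the closure call */
	} else {
		pthread_mutex_unlock(&j->lock);
	}
}

int main(void)
{
	struct journal_model j = { PTHREAD_MUTEX_INITIALIZER, false };

	try_write(&j);		/* launches: nothing in flight */
	try_write(&j);		/* would skip if a write were still pending */
	return 0;
}
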
static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -664,6 +676,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
{
size_t sectors;
struct closure cl;
+ bool wait = false;
closure_init_stack(&cl);
@@ -673,16 +686,19 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- c) * c->sb.block_size;
+ block_bytes(c)) * c->sb.block_size;
if (sectors <= min_t(size_t,
c->journal.blocks_free * c->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
- /* XXX: tracepoint */
+ if (wait)
+ closure_wait(&c->journal.wait, &cl);
+
if (!journal_full(&c->journal)) {
- trace_bcache_journal_entry_full(c);
+ if (wait)
+ trace_bcache_journal_entry_full(c);
/*
* XXX: If we were inserting so many keys that they
@@ -692,12 +708,11 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
*/
BUG_ON(!w->data->keys);
- closure_wait(&w->wait, &cl);
journal_try_write(c); /* unlocks */
} else {
- trace_bcache_journal_full(c);
+ if (wait)
+ trace_bcache_journal_full(c);
- closure_wait(&c->journal.wait, &cl);
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -706,6 +721,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
closure_sync(&cl);
spin_lock(&c->journal.lock);
+ wait = true;
}
}
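
The reworked loop in journal_wait_for_write() now parks on c->journal.wait for both the entry-full and journal-full branches, and does so, and fires the tracepoints, only after a first non-blocking pass has failed (the new wait flag flips to true at the bottom of the loop). A userspace sketch of that try-once-then-block shape, using a condition variable in place of the closure waitlist (names and the space accounting are illustrative, and a real user would need a producer that broadcasts more_space when freeing space):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct space_model {
	pthread_mutex_t lock;
	pthread_cond_t	more_space;	/* stands in for c->journal.wait */
	size_t		free_sectors;
};

/* Returns with the lock held once 'want' sectors are available. */
static void wait_for_space(struct space_model *m, size_t want)
{
	bool wait = false;

	pthread_mutex_lock(&m->lock);
	while (m->free_sectors < want) {
		if (wait) {
			/* second and later passes: actually sleep, like
			 * closure_wait() + closure_sync() in the kernel code */
			pthread_cond_wait(&m->more_space, &m->lock);
		} else {
			/* first pass: retry without sleeping, standing in for
			 * journal_try_write()/journal_reclaim() dropping and
			 * retaking the lock */
			pthread_mutex_unlock(&m->lock);
			pthread_mutex_lock(&m->lock);
		}
		wait = true;
	}
}

int main(void)
{
	struct space_model m = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.more_space = PTHREAD_COND_INITIALIZER,
		.free_sectors = 16,
	};

	wait_for_space(&m, 8);		/* enough space: returns immediately */
	pthread_mutex_unlock(&m.lock);
	return 0;
}
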
@@ -736,7 +752,7 @@ atomic_t *bch_journal(struct cache_set *c,
w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
w->data->keys += bch_keylist_nkeys(keys);
ret = &fifo_back(&c->journal.pin);
@@ -780,7 +796,6 @@ int bch_journal_alloc(struct cache_set *c)
{
struct journal *j = &c->journal;
- closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);