Diffstat (limited to 'drivers/md/dm-kcopyd.c')
-rw-r--r--  drivers/md/dm-kcopyd.c | 136
1 file changed, 74 insertions(+), 62 deletions(-)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bbe4a34ef4c..6ea75436a433 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 Sistina Software (UK) Limited.
* Copyright (C) 2006 Red Hat GmbH
@@ -34,14 +35,14 @@
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024
-static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
-module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
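For context (not part of the patch): checkpatch prefers literal octal permissions over the S_* macros, and 0644 is exactly S_IRUGO | S_IWUSR (0444 | 0200), i.e. world-readable, owner-writable. A minimal sketch of the idiom, with a hypothetical parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int demo_param = 42;	/* hypothetical, for illustration */

/* 0644 == S_IRUGO | S_IWUSR: world-readable, owner-writable in sysfs. */
module_param(demo_param, uint, 0644);
MODULE_PARM_DESC(demo_param, "Sketch of the octal-permission idiom");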
-static unsigned dm_get_kcopyd_subjob_size(void)
+static unsigned int dm_get_kcopyd_subjob_size(void)
{
- unsigned sub_job_size_kb;
+ unsigned int sub_job_size_kb;
sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
DEFAULT_SUB_JOB_SIZE_KB,
@@ -50,15 +51,17 @@ static unsigned dm_get_kcopyd_subjob_size(void)
return sub_job_size_kb << 1;
}
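For context: dm-kcopyd measures sub-jobs in 512-byte sectors, so the trailing << 1 converts the KiB value (1 KiB = 2 sectors); the default of 512 KiB therefore becomes 1024 sectors. A one-line sketch of the conversion:

/* Sketch only: KiB -> 512-byte sectors, e.g. 512 KiB -> 1024 sectors. */
static inline unsigned int kb_to_sectors(unsigned int kb)
{
	return kb << 1;
}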
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client {
struct page_list *pages;
- unsigned nr_reserved_pages;
- unsigned nr_free_pages;
- unsigned sub_job_size;
+ unsigned int nr_reserved_pages;
+ unsigned int nr_free_pages;
+ unsigned int sub_job_size;
struct dm_io_client *io_client;
@@ -109,7 +112,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
* The reason for this is unknown but possibly due to jiffies rounding errors
* or read/write cache inside the disk.
*/
-#define SLEEP_MSEC 100
+#define SLEEP_USEC 100000
/*
* Maximum number of sleep events. There is a theoretical livelock if more
@@ -119,7 +122,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
static void io_job_start(struct dm_kcopyd_throttle *t)
{
- unsigned throttle, now, difference;
+ unsigned int throttle, now, difference;
int slept = 0, skew;
if (unlikely(!t))
@@ -148,6 +151,7 @@ try_again:
if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+
t->total_period >>= shift;
t->io_period >>= shift;
}
@@ -157,7 +161,7 @@ try_again:
if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
slept++;
spin_unlock_irq(&throttle_spinlock);
- msleep(SLEEP_MSEC);
+ fsleep(SLEEP_USEC);
goto try_again;
}
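For context: fsleep() ("flexible sleep", from <linux/delay.h>) takes microseconds and picks the most suitable delay primitive for the requested duration; for 100000 us it falls through to msleep(100), so the change should be behaviour-preserving while using the preferred interface. A hedged sketch of that equivalence:

#include <linux/delay.h>

/*
 * Sketch: 100000 us is well above fsleep()'s usleep_range cutoff, so it
 * degenerates to msleep(100) here, matching the code it replaces.
 */
static void throttle_backoff(void)
{
	fsleep(100000);
}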
@@ -182,7 +186,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
goto skip_limit;
if (!t->num_io_jobs) {
- unsigned now, difference;
+ unsigned int now, difference;
now = jiffies;
difference = now - t->last_jiffies;
@@ -219,7 +223,7 @@ static struct page_list *alloc_pl(gfp_t gfp)
if (!pl)
return NULL;
- pl->page = alloc_page(gfp);
+ pl->page = alloc_page(gfp | __GFP_HIGHMEM);
if (!pl->page) {
kfree(pl);
return NULL;
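For context: these pages are only ever attached to bios as data payload and, presumably for that reason, never need a permanent kernel mapping, so __GFP_HIGHMEM lets 32-bit configurations satisfy the allocation from highmem. A sketch of the allocation rule under that assumption:

#include <linux/gfp.h>

/*
 * Sketch: payload-only pages may live in highmem; a caller that did need
 * to touch the data would have to go through kmap_local_page().
 */
static struct page *alloc_payload_page(gfp_t gfp)
{
	return alloc_page(gfp | __GFP_HIGHMEM);
}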
@@ -303,9 +307,9 @@ static void drop_pages(struct page_list *pl)
/*
* Allocate and reserve nr_pages for the use of a specific client.
*/
-static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{
- unsigned i;
+ unsigned int i;
struct page_list *pl = NULL, *next;
for (i = 0; i < nr_pages; i++) {
@@ -333,15 +337,17 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* kcopyd_jobs need to be allocated by the *clients* of kcopyd,
* for this reason we use a mempool to prevent the client from
* ever having to do io (which could cause a deadlock).
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
- unsigned long flags;
+ unsigned int flags;
/*
* Error state of the job.
@@ -350,9 +356,9 @@ struct kcopyd_job {
unsigned long write_err;
/*
- * Either READ or WRITE
+ * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES.
*/
- int rw;
+ enum req_op op;
struct dm_io_region source;
/*
@@ -418,7 +424,8 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs,
* constraint and sequential writes that are at the right position.
*/
list_for_each_entry(job, jobs, list) {
- if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+ if (job->op == REQ_OP_READ ||
+ !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
list_del(&job->list);
return job;
}
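For context: job->flags moves from unsigned long (required by the atomic set_bit()/test_bit() bitops) to a plain unsigned int manipulated with BIT() masks; this is safe assuming, as the rest of the patch suggests, that a job's flags are fully set before it is queued and never modified concurrently. A side-by-side sketch of the two idioms:

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch contrasting the flag idioms swapped in this patch. */
static void flags_idioms(void)
{
	unsigned long atomic_flags = 0;
	unsigned int plain_flags = 0;
	bool set;

	/* Old: atomic bitops; the bitmap must be an unsigned long. */
	set_bit(0, &atomic_flags);
	set = test_bit(0, &atomic_flags);

	/* New: ordinary bitwise ops via BIT(); works on unsigned int. */
	plain_flags |= BIT(0);
	set = plain_flags & BIT(0);
	(void)set;
}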
@@ -437,9 +444,8 @@ static struct kcopyd_job *pop(struct list_head *jobs,
struct dm_kcopyd_client *kc)
{
struct kcopyd_job *job = NULL;
- unsigned long flags;
- spin_lock_irqsave(&kc->job_lock, flags);
+ spin_lock_irq(&kc->job_lock);
if (!list_empty(jobs)) {
if (jobs == &kc->io_jobs)
@@ -449,7 +455,7 @@ static struct kcopyd_job *pop(struct list_head *jobs,
list_del(&job->list);
}
}
- spin_unlock_irqrestore(&kc->job_lock, flags);
+ spin_unlock_irq(&kc->job_lock);
return job;
}
@@ -467,12 +473,11 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
- unsigned long flags;
struct dm_kcopyd_client *kc = job->kc;
- spin_lock_irqsave(&kc->job_lock, flags);
+ spin_lock_irq(&kc->job_lock);
list_add(&job->list, jobs);
- spin_unlock_irqrestore(&kc->job_lock, flags);
+ spin_unlock_irq(&kc->job_lock);
}
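For context: spin_lock_irqsave() saves and restores the caller's interrupt state, which is needed only when the lock may be taken with interrupts already disabled; these kcopyd paths run in process or workqueue context, so the plain spin_lock_irq()/spin_unlock_irq() pair suffices and is slightly cheaper. A sketch of the two idioms, assuming a process-context caller:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context: IRQs known to be enabled, no need to save state. */
static void demo_process_ctx(void)
{
	spin_lock_irq(&demo_lock);
	/* ... critical section ... */
	spin_unlock_irq(&demo_lock);
}

/* A caller with unknown IRQ state would still need the irqsave variant. */
static void demo_any_ctx(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}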
/*
@@ -514,29 +519,29 @@ static int run_complete_job(struct kcopyd_job *job)
static void complete_io(unsigned long error, void *context)
{
- struct kcopyd_job *job = (struct kcopyd_job *) context;
+ struct kcopyd_job *job = context;
struct dm_kcopyd_client *kc = job->kc;
io_job_finish(kc->throttle);
if (error) {
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err |= error;
else
job->read_err = 1;
- if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+ if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
push(&kc->complete_jobs, job);
wake(kc);
return;
}
}
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
push(&kc->complete_jobs, job);
else {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
push(&kc->io_jobs, job);
}
@@ -551,8 +556,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_op = job->rw,
- .bi_op_flags = 0,
+ .bi_opf = job->op,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = 0,
@@ -565,7 +569,7 @@ static int run_io_job(struct kcopyd_job *job)
* If we need to write sequentially and some reads or writes failed,
* no point in continuing.
*/
- if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+ if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
job->master_job->write_err) {
job->write_err = job->master_job->write_err;
return -EIO;
@@ -573,10 +577,10 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
- if (job->rw == READ)
- r = dm_io(&io_req, 1, &job->source, NULL);
+ if (job->op == REQ_OP_READ)
+ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
else
- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
return r;
}
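For context: this hunk reflects two dm-io interface changes, the merge of .bi_op/.bi_op_flags into a single .bi_opf field and an I/O-priority argument appended to dm_io(), where IOPRIO_DEFAULT keeps the default priority. A hedged sketch of a synchronous read with the updated interface (the helper name is illustrative):

#include <linux/blk_types.h>
#include <linux/dm-io.h>
#include <linux/ioprio.h>

static int read_region_sync(struct dm_io_client *client,
			    struct dm_io_region *where, struct page_list *pl)
{
	struct dm_io_request io_req = {
		.bi_opf		= REQ_OP_READ,	/* was .bi_op + .bi_op_flags */
		.mem.type	= DM_IO_PAGE_LIST,
		.mem.ptr.pl	= pl,
		.mem.offset	= 0,
		.client		= client,
	};

	/* NULL notify.fn means synchronous; IOPRIO_DEFAULT = no override. */
	return dm_io(&io_req, 1, where, NULL, IOPRIO_DEFAULT);
}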
@@ -584,7 +588,7 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
- unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
+ unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
@@ -605,7 +609,7 @@ static int run_pages_job(struct kcopyd_job *job)
* of successful jobs.
*/
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
- int (*fn) (struct kcopyd_job *))
+ int (*fn)(struct kcopyd_job *))
{
struct kcopyd_job *job;
int r, count = 0;
@@ -616,7 +620,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -648,7 +652,6 @@ static void do_work(struct work_struct *work)
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
struct blk_plug plug;
- unsigned long flags;
/*
* The order that these are called is *very* important.
@@ -657,9 +660,9 @@ static void do_work(struct work_struct *work)
* list. io jobs call wake when they complete and it all
* starts again.
*/
- spin_lock_irqsave(&kc->job_lock, flags);
+ spin_lock_irq(&kc->job_lock);
list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
- spin_unlock_irqrestore(&kc->job_lock, flags);
+ spin_unlock_irq(&kc->job_lock);
blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
@@ -676,6 +679,7 @@ static void do_work(struct work_struct *work)
static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
+
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
@@ -692,7 +696,7 @@ static void segment_complete(int read_err, unsigned long write_err,
/* FIXME: tidy this function */
sector_t progress = 0;
sector_t count = 0;
- struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+ struct kcopyd_job *sub_job = context;
struct kcopyd_job *job = sub_job->master_job;
struct dm_kcopyd_client *kc = job->kc;
@@ -709,7 +713,7 @@ static void segment_complete(int read_err, unsigned long write_err,
* Only dispatch more work if there hasn't been an error.
*/
if ((!job->read_err && !job->write_err) ||
- test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+ job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) {
/* get the next chunk of work */
progress = job->progress;
count = job->source.count - progress;
@@ -801,10 +805,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* we need to write sequentially. If one of the destination is a
* host-aware device, then leave it to the caller to choose what to do.
*/
- if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+ if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
for (i = 0; i < job->num_dests; i++) {
- if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
- set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
+ if (bdev_is_zoned(dests[i].bdev)) {
+ job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
break;
}
}
@@ -813,26 +817,26 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
/*
* If we need to write sequentially, errors cannot be ignored.
*/
- if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
- test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
- clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);
+ if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
+ job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))
+ job->flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR);
if (from) {
job->source = *from;
job->pages = NULL;
- job->rw = READ;
+ job->op = REQ_OP_READ;
} else {
- memset(&job->source, 0, sizeof job->source);
+ memset(&job->source, 0, sizeof(job->source));
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
/*
* Use WRITE ZEROES to optimize zeroing if all dests support it.
*/
- job->rw = REQ_OP_WRITE_ZEROES;
+ job->op = REQ_OP_WRITE_ZEROES;
for (i = 0; i < job->num_dests; i++)
if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
break;
}
}
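For context: REQ_OP_WRITE_ZEROES can only be used when every destination supports the offload; bdev_write_zeroes_sectors() returns 0 for devices that do not, in which case the job falls back to plain writes of the shared zero page list. The selection rule above, restated as a standalone sketch:

#include <linux/blkdev.h>
#include <linux/dm-io.h>

static enum req_op pick_zeroing_op(struct dm_io_region *dests,
				   unsigned int num_dests)
{
	unsigned int i;

	for (i = 0; i < num_dests; i++)
		if (!bdev_write_zeroes_sectors(dests[i].bdev))
			return REQ_OP_WRITE;	/* no offload: write zero pages */

	return REQ_OP_WRITE_ZEROES;	/* every destination can zero-offload */
}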
@@ -852,8 +856,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
EXPORT_SYMBOL(dm_kcopyd_copy);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context)
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
@@ -903,13 +907,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
}
#endif /* 0 */
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Client setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
- unsigned reserve_pages;
+ unsigned int reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
@@ -983,3 +989,9 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
+
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc)
+{
+ flush_workqueue(kc->kcopyd_wq);
+}
+EXPORT_SYMBOL(dm_kcopyd_client_flush);
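
For context: the new export simply drains the client's workqueue, so all work items queued at the time of the call have completed when it returns. A hypothetical usage sketch for a dm target that must quiesce copies (my_target_postsuspend is illustrative, not from the patch):

#include <linux/dm-kcopyd.h>

/*
 * Hypothetical target hook: wait for currently queued kcopyd work to
 * settle before tearing state down.
 */
static void my_target_postsuspend(struct dm_kcopyd_client *kc)
{
	dm_kcopyd_client_flush(kc);
}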