Diffstat (limited to 'block')
 block/blk-cgroup.c        | 13
 block/blk-cgroup.h        | 22
 block/blk-core.c          |  6
 block/blk-ioc.c           |  2
 block/blk-iopoll.c        |  3
 block/blk-map.c           |  2
 block/blk-mq-cpumap.c     | 10
 block/blk-mq-sysfs.c      | 31
 block/blk-mq.c            | 78
 block/blk-mq.h            |  2
 block/blk-softirq.c       | 14
 block/blk-throttle.c      |  8
 block/blk.h               |  2
 block/cfq-iosched.c       | 15
 block/deadline-iosched.c  |  8
 block/elevator.c          |  2
 block/partitions/atari.h  |  4
 block/partitions/efi.h    |  9
 block/partitions/karma.c  |  3
 19 files changed, 153 insertions(+), 81 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e491d9b5292..e4a4145926f6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* under queue_lock. If it's not pointing to @blkg now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
@@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, css, tset) {
+ cgroup_taskset_for_each(task, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -906,17 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
return ret;
}
-struct cgroup_subsys blkio_subsys = {
- .name = "blkio",
+struct cgroup_subsys blkio_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.can_attach = blkcg_can_attach,
- .subsys_id = blkio_subsys_id,
.base_cftypes = blkcg_files,
- .module = THIS_MODULE,
};
-EXPORT_SYMBOL_GPL(blkio_subsys);
+EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
/**
* blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1106,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)
/* everything is in place, add intf files for the new policy */
if (pol->cftypes)
- WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+ WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
ret = 0;
out_unlock:
mutex_unlock(&blkcg_pol_mutex);
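A note on the rcu_access_pointer() conversion above: rcu_dereference_raw() implies the returned pointer will be dereferenced, while rcu_access_pointer() only fetches the pointer value, making it the lighter-weight (and sparse-clean) choice when the value is merely compared. A minimal sketch of the hint-clearing pattern, assuming a hypothetical RCU-protected hint pointer:

static struct blkcg_gq __rcu *hint;	/* hypothetical RCU-protected hint */

static void clear_hint_if_dying(struct blkcg_gq *victim)
{
	/*
	 * Only the pointer value is inspected, never dereferenced,
	 * so rcu_access_pointer() suffices here.
	 */
	if (rcu_access_pointer(hint) == victim)
		rcu_assign_pointer(hint, NULL);
}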
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 86154eab9523..371fe8e92ab5 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return css_to_blkcg(task_css(tsk, blkio_subsys_id));
+ return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
*/
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
- int ret;
+ char *p;
- ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
- if (ret)
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
strncpy(buf, "<unavailable>", buflen);
- return ret;
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
}
/**
@@ -435,9 +439,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
uint64_t v;
do {
- start = u64_stats_fetch_begin_bh(&stat->syncp);
+ start = u64_stats_fetch_begin_irq(&stat->syncp);
v = stat->cnt;
- } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
return v;
}
@@ -508,9 +512,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
struct blkg_rwstat tmp;
do {
- start = u64_stats_fetch_begin_bh(&rwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&rwstat->syncp);
tmp = *rwstat;
- } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
return tmp;
}
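The reworked blkg_path() reflects cgroup_path() now building the path from the end of the caller's buffer and returning a pointer to its start (or NULL on overflow), so the caller must slide the string to the front. A standalone sketch of that move, with path_from_end() standing in for cgroup_path() (hypothetical helper, userspace for illustration):

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for cgroup_path(): builds "/a/b" from the end
 * of the buffer and returns a pointer to the start of the result, or
 * NULL if the buffer is too small.
 */
static char *path_from_end(const char * const *comp, int n,
			   char *buf, int buflen)
{
	char *p = buf + buflen;

	*--p = '\0';
	while (n--) {
		int len = strlen(comp[n]);

		if (p - buf < len + 1)
			return NULL;
		p -= len;
		memcpy(p, comp[n], len);
		*--p = '/';
	}
	return p;
}

int main(void)
{
	const char *comp[] = { "blkio", "grp" };
	char buf[32];
	char *p = path_from_end(comp, 2, buf, sizeof(buf));

	if (!p)
		return 1;
	/*
	 * Same move as the new blkg_path(): the result sits at the tail
	 * of the buffer, and buf + buflen - p bytes (including the
	 * terminating NUL) are shifted to the front.
	 */
	memmove(buf, p, buf + sizeof(buf) - p);
	printf("%s\n", buf);	/* prints /blkio/grp */
	return 0;
}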
diff --git a/block/blk-core.c b/block/blk-core.c
index bfe16d5af9f9..a0e3096c4bb5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(!hlist_unhashed(&req->hash));
+ BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
freed_request(rl, flags);
@@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(submit_bio);
* in some cases below, so export this function.
* Request stacking drivers like request-based dm may change the queue
* limits while requests are in the queue (e.g. dm's table swapping).
- * Such request stacking drivers should check those requests agaist
+ * Such request stacking drivers should check those requests against
* the new queue limits again when they dispatch those requests,
* although such checkings are also done against the old queue limits
* when submitting requests.
@@ -2353,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 242df01413f6..1a27f45ec776 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* under queue_lock. If it's not pointing to @icq now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ if (rcu_access_pointer(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
ioc_exit_icq(icq);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf51edb0..c11d24e379e2 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
#include "blk.h"
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
static unsigned int blk_iopoll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
diff --git a/block/blk-map.c b/block/blk-map.c
index 86d93779c066..f7b22bc21518 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used. Can be called multple times to append multple
+ * buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f8721278601c..097921329619 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -9,15 +9,6 @@
#include "blk.h"
#include "blk-mq.h"
-static void show_map(unsigned int *map, unsigned int nr)
-{
- int i;
-
- pr_info("blk-mq: CPU -> queue map\n");
- for_each_online_cpu(i)
- pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
-}
-
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
@@ -85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
map[i] = map[first_sibling];
}
- show_map(map, nr_cpus);
free_cpumask_var(cpus);
return 0;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b91ce75bd35d..b0ba264b0522 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ unsigned int i, queue_num, first = 1;
+ ssize_t ret = 0;
+
+ blk_mq_disable_hotplug();
+
+ for_each_online_cpu(i) {
+ queue_num = hctx->queue->mq_map[i];
+ if (queue_num != hctx->queue_num)
+ continue;
+
+ if (first)
+ ret += sprintf(ret + page, "%u", i);
+ else
+ ret += sprintf(ret + page, ", %u", i);
+
+ first = 0;
+ }
+
+ blk_mq_enable_hotplug();
+
+ ret += sprintf(ret + page, "\n");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+ .attr = {.name = "cpu_list", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_cpus_show,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
+ &blk_mq_hw_sysfs_cpus.attr,
NULL,
};
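The new cpu_list attribute walks the online CPUs under blk_mq_disable_hotplug() (which, per the blk-mq.c hunk below, simply takes all_q_mutex) and emits the CPUs mapped to this hardware context. The formatting idiom is worth noting: keep a running length, sprintf() at the current offset into the page, and use a "first" flag to decide whether a separator is needed. A userspace sketch of the same idiom:

#include <stdio.h>

int main(void)
{
	unsigned int cpus[] = { 0, 2, 5 };	/* CPUs mapped to one hctx */
	char page[64];
	int ret = 0;
	int first = 1;

	for (int i = 0; i < 3; i++) {
		/* Separator only before the second and later entries. */
		ret += sprintf(page + ret, first ? "%u" : ", %u", cpus[i]);
		first = 0;
	}
	ret += sprintf(page + ret, "\n");
	fputs(page, stdout);	/* prints: 0, 2, 5 */
	return 0;
}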
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 883f72089015..1d2a9bdbee57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,7 +320,7 @@ void __blk_mq_complete_request(struct request *rq)
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
- __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
rq->q->softirq_done_fn(rq);
}
@@ -514,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list);
int bit, queued;
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
hctx->run++;
@@ -603,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
if (!async)
@@ -623,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
queue_for_each_hw_ctx(q, hctx, i) {
if ((!blk_mq_hctx_has_pending(hctx) &&
list_empty_careful(&hctx->dispatch)) ||
- test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu)
{
struct blk_mq_hw_ctx *hctx = data;
+ struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
/*
* Move ctx entries to new CPU, if this one is going away.
*/
- ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ ctx = __blk_mq_get_ctx(q, cpu);
spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
if (list_empty(&tmp))
return;
- ctx = blk_mq_get_ctx(hctx->queue);
+ ctx = blk_mq_get_ctx(q);
spin_lock(&ctx->lock);
while (!list_empty(&tmp)) {
@@ -988,14 +989,55 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
list_move_tail(&rq->queuelist, &ctx->rq_list);
}
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(ctx);
+
+ blk_mq_run_hw_queue(hctx, true);
+}
+
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ ret = init(data, hctx, rq, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+int blk_mq_init_commands(struct request_queue *q,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_init_hw_commands(hctx, init, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1004,12 +1046,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
for (i = 0; i < hctx->queue_depth; i++) {
struct request *rq = hctx->rqs[i];
- init(data, hctx, rq, i);
+ free(data, hctx, rq, i);
}
}
-void blk_mq_init_commands(struct request_queue *q,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1017,9 +1059,9 @@ void blk_mq_init_commands(struct request_queue *q,
unsigned int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_init_hw_commands(hctx, init, data);
+ blk_mq_free_hw_commands(hctx, free, data);
}
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);
static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
{
@@ -1430,6 +1472,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+void blk_mq_disable_hotplug(void)
+{
+ mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+ mutex_unlock(&all_q_mutex);
+}
+
static int __init blk_mq_init(void)
{
blk_mq_cpu_init();
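blk_mq_init_commands() and its per-hctx helper now take an init callback returning int, so per-request setup can fail and stop the iteration early, and the old void-returning variant has been repurposed as blk_mq_free_commands(). A sketch of how a driver of this era might pair the two; my_dev, my_setup_cmd() and my_teardown_cmd() are hypothetical, not taken from an in-tree driver:

static int my_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
		       struct request *rq, unsigned int index)
{
	struct my_dev *dev = data;

	/* Returning non-zero aborts blk_mq_init_commands() early. */
	return my_setup_cmd(dev, rq, index);
}

static void my_free_cmd(void *data, struct blk_mq_hw_ctx *hctx,
			struct request *rq, unsigned int index)
{
	my_teardown_cmd(data, rq, index);
}

static int my_alloc_queue_resources(struct my_dev *dev)
{
	int ret = blk_mq_init_commands(dev->queue, my_init_cmd, dev);

	if (ret)	/* note: also visits requests whose init never ran */
		blk_mq_free_commands(dev->queue, my_free_cmd, dev);
	return ret;
}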
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 72beba1f9d55..ebbe6bac9d61 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -39,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1a97eb..53b1737e978d 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, ipi_list);
+ list_del_init(&rq->ipi_list);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,9 +45,9 @@ static void trigger_softirq(void *data)
local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ list_add_tail(&rq->ipi_list, list);
- if (list->next == &rq->csd.list)
+ if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -65,7 +65,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
+ smp_call_function_single_async(cpu, data);
return 0;
}
@@ -136,7 +136,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list_add_tail(&req->ipi_list, list);
/*
* if the list only contains our just added request,
@@ -144,7 +144,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
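With smp_call_function_single_async() now owning the csd, completions are chained through a dedicated rq->ipi_list instead. The recurring idiom in this file: append to the per-CPU done list and raise BLOCK_SOFTIRQ only if the entry just added is the first one, since otherwise the softirq is already pending. A standalone sketch of that "raise only on first insertion" test, using a list_head-style circular list:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Circular doubly-linked list, as in include/linux/list.h. */
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

int main(void)
{
	struct list_head done, rq1, rq2;

	INIT_LIST_HEAD(&done);

	list_add_tail(&rq1, &done);
	if (done.next == &rq1)		/* first entry: raise the softirq */
		puts("raise BLOCK_SOFTIRQ");

	list_add_tail(&rq2, &done);
	if (done.next == &rq2)		/* not first: already pending */
		puts("raise again");	/* never printed */
	return 0;
}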
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1474c3ab7e72..033745cd7fba 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1408,13 +1408,13 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
}
static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
- const char *buf)
+ char *buf)
{
return tg_set_conf(css, cft, buf, true);
}
static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
- const char *buf)
+ char *buf)
{
return tg_set_conf(css, cft, buf, false);
}
@@ -1425,28 +1425,24 @@ static struct cftype throtl_files[] = {
.private = offsetof(struct throtl_grp, bps[READ]),
.seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
- .max_write_len = 256,
},
{
.name = "throttle.write_bps_device",
.private = offsetof(struct throtl_grp, bps[WRITE]),
.seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
- .max_write_len = 256,
},
{
.name = "throttle.read_iops_device",
.private = offsetof(struct throtl_grp, iops[READ]),
.seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
- .max_write_len = 256,
},
{
.name = "throttle.write_iops_device",
.private = offsetof(struct throtl_grp, iops[WRITE]),
.seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
- .max_write_len = 256,
},
{
.name = "throttle.io_service_bytes",
diff --git a/block/blk.h b/block/blk.h
index d23b415b8a28..1d880f1f957f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 744833b630c6..e0985f1955e7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1701,13 +1701,13 @@ static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
}
static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft, const char *buf)
+ struct cftype *cft, char *buf)
{
return __cfqg_set_weight_device(css, cft, buf, false);
}
static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft, const char *buf)
+ struct cftype *cft, char *buf)
{
return __cfqg_set_weight_device(css, cft, buf, true);
}
@@ -1838,7 +1838,6 @@ static struct cftype cfq_blkcg_files[] = {
.flags = CFTYPE_ONLY_ON_ROOT,
.seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
- .max_write_len = 256,
},
{
.name = "weight",
@@ -1853,7 +1852,6 @@ static struct cftype cfq_blkcg_files[] = {
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cfqg_print_weight_device,
.write_string = cfqg_set_weight_device,
- .max_write_len = 256,
},
{
.name = "weight",
@@ -1866,7 +1864,6 @@ static struct cftype cfq_blkcg_files[] = {
.name = "leaf_weight_device",
.seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
- .max_write_len = 256,
},
{
.name = "leaf_weight",
@@ -2367,10 +2364,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ time_before(next->fifo_time, rq->fifo_time) &&
cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}
if (cfqq->next_rq == next)
@@ -2814,7 +2811,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3924,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef66406c625..a753df2b3fc2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;
return 0;
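Both CFQ and deadline now store the expiry directly in a new rq->fifo_time field; the old rq_fifo_time() accessors had stashed it in the request's csd field, which this series repurposes for async IPI completion. Expiry tests still go through the wraparound-safe jiffies helpers. A standalone illustration of the time_after_eq() comparison:

#include <stdio.h>
#include <limits.h>

/* Wraparound-safe comparison, as in include/linux/jiffies.h. */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = ULONG_MAX - 15;		/* about to wrap */
	unsigned long fifo_time = jiffies + 100;	/* wraps past zero */

	/*
	 * Numerically fifo_time < jiffies, yet the signed difference
	 * shows the deadline is still 100 ticks away, so the request
	 * is correctly reported as unexpired.
	 */
	printf("expired: %d\n", time_after_eq(jiffies, fifo_time)); /* 0 */
	return 0;
}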
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7d6714..1e01b66a0b92 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
+ rq->cmd_flags &= ~REQ_HASHED;
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+ rq->cmd_flags |= REQ_HASHED;
}
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
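ELV_ON_HASH() switches from hash_hashed() on the hlist node to a REQ_HASHED bit in rq->cmd_flags, kept in sync by elv_rqhash_add() and __elv_rqhash_del(). The likely motivation in this series is that the hash node's storage is shared with the new ipi_list, so node state alone is no longer a reliable indicator (see the matching BUG_ON change in blk-core.c above). The flag-tracking pattern in miniature; the bit value here is illustrative, the real one lives in blk_types.h:

#include <stdio.h>

#define REQ_HASHED	(1u << 0)	/* illustrative bit position */
#define ELV_ON_HASH(f)	((f) & REQ_HASHED)

int main(void)
{
	unsigned int cmd_flags = 0;

	cmd_flags |= REQ_HASHED;	/* as in elv_rqhash_add() */
	printf("hashed: %d\n", !!ELV_ON_HASH(cmd_flags));	/* 1 */

	cmd_flags &= ~REQ_HASHED;	/* as in __elv_rqhash_del() */
	printf("hashed: %d\n", !!ELV_ON_HASH(cmd_flags));	/* 0 */
	return 0;
}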
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f36..f2ec43bfeec1 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
* by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
*/
+#include <linux/compiler.h>
+
struct partition_info
{
u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
u32 bsl_st; /* start of bad sector list */
u32 bsl_cnt; /* length of bad sector list */
u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;
int atari_partition(struct parsed_partitions *state);
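The partition headers switch from the open-coded attribute to the __packed macro from <linux/compiler.h>, hence the new include. Packing matters here because these structs mirror on-disk layouts byte for byte. A userspace illustration of the effect, using the underlying GCC attribute directly:

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((__packed__))	/* as in linux/compiler.h */

struct padded  { uint8_t flg; uint32_t st; };		/* ABI inserts padding */
struct on_disk { uint8_t flg; uint32_t st; } __packed;	/* matches the medium */

int main(void)
{
	/*
	 * Typically prints 8 and 5: packing drops the three padding
	 * bytes after 'flg', keeping 'st' at byte offset 1 as on disk.
	 */
	printf("%zu %zu\n", sizeof(struct padded), sizeof(struct on_disk));
	return 0;
}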
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index 4efcafba7e64..abd0b19288a6 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,6 +32,7 @@
#include <linux/major.h>
#include <linux/string.h>
#include <linux/efi.h>
+#include <linux/compiler.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;
typedef struct _gpt_entry_attributes {
u64 required_to_function:1;
u64 reserved:47;
u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;
typedef struct _gpt_entry {
efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
__le64 ending_lba;
gpt_entry_attributes attributes;
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;
typedef struct _gpt_mbr_record {
u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
__le16 unknown;
gpt_mbr_record partition_record[4];
__le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;
/* Functions */
extern int efi_partition(struct parsed_partitions *state);
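gpt_entry_attributes shows the same conversion applied to a bitfield struct: three fields carving up a single 64-bit word, with __packed guaranteeing no trailing padding. A quick standalone check that the layout stays at exactly eight bytes:

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((__packed__))

typedef struct {
	uint64_t required_to_function:1;
	uint64_t reserved:47;
	uint64_t type_guid_specific:16;
} __packed gpt_entry_attributes;

int main(void)
{
	gpt_entry_attributes a = { .required_to_function = 1,
				   .type_guid_specific = 0xbeef };

	/* 1 + 47 + 16 bits fill the u64 exactly; sizeof stays 8. */
	printf("%zu\n", sizeof(a));	/* 8 */
	return 0;
}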
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706b..9721fa589bb1 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
#include "check.h"
#include "karma.h"
+#include <linux/compiler.h>
int karma_partition(struct parsed_partitions *state)
{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
} d_partitions[2];
u8 d_blank[208];
__le16 d_magic;
- } __attribute__((packed)) *label;
+ } __packed *label;
struct d_partition *p;
data = read_part_sector(state, 0, &sect);