Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--  drivers/block/ll_rw_blk.c | 193
1 file changed, 55 insertions(+), 138 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index baedac522945..0af73512b9a8 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_activity_fn(q, NULL, NULL);
-
- INIT_LIST_HEAD(&q->drain_list);
}
EXPORT_SYMBOL(blk_queue_make_request);
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q;
+ elv_completed_request(q, flush_rq);
+
rq->flags |= REQ_BAR_PREFLUSH;
if (!flush_rq->errors)
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q;
+ elv_completed_request(q, flush_rq);
+
rq->flags |= REQ_BAR_POSTFLUSH;
q->end_flush_fn(q, flush_rq);
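Both flush completion hooks now call elv_completed_request() before continuing the barrier sequence: the queue's flush_rq completes through its own end_io callback rather than __blk_put_request(), so it has to notify the io scheduler itself. A minimal sketch of the shared pattern (hypothetical function name, barrier bookkeeping elided):

        static void flush_end_io_sketch(struct request *flush_rq)
        {
                struct request *rq = flush_rq->end_io_data; /* the barrier request */
                request_queue_t *q = rq->q;

                /* balance the elevator's completion accounting for flush_rq */
                elv_completed_request(q, flush_rq);

                /* ... then advance the barrier state for rq ... */
        }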
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
- elv_deactivate_request(q, rq);
-
flush_rq->end_io_data = rq;
flush_rq->end_io = blk_pre_flush_end_io;
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
static char *rq_flags[] = {
"REQ_RW",
"REQ_FAILFAST",
+ "REQ_SORTED",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
"REQ_CMD",
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = {
"REQ_STARTED",
"REQ_DONTPREP",
"REQ_QUEUED",
+ "REQ_ELVPRIV",
"REQ_PC",
"REQ_BLOCK_PC",
"REQ_SENSE",
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q)
rl->count[READ] = rl->count[WRITE] = 0;
rl->starved[READ] = rl->starved[WRITE] = 0;
+ rl->elvpriv = 0;
init_waitqueue_head(&rl->wait[READ]);
init_waitqueue_head(&rl->wait[WRITE]);
- init_waitqueue_head(&rl->drain);
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, request_cachep, q->node);
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q)
static int __make_request(request_queue_t *, struct bio *);
-request_queue_t *blk_alloc_queue(int gfp_mask)
+request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);
-request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
request_queue_t *q;
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(request_queue_t *q, struct request *rq)
{
- elv_put_request(q, rq);
+ if (rq->flags & REQ_ELVPRIV)
+ elv_put_request(q, rq);
mempool_free(rq, q->rq.rq_pool);
}
static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
+ int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, bio, gfp_mask))
- return rq;
+ if (priv) {
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+ mempool_free(rq, q->rq.rq_pool);
+ return NULL;
+ }
+ rq->flags |= REQ_ELVPRIV;
+ }
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
+ return rq;
}
/*
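The two hunks above form a matched pair: blk_alloc_request() attaches io-scheduler private data only when the caller grants priv, recording that fact in REQ_ELVPRIV, and blk_free_request() calls elv_put_request() only when the flag is set. Condensed from the hunks, with surrounding context trimmed:

        /* allocation side: elevator data is opt-in */
        rq->flags = rw;
        if (priv) {
                if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
                rq->flags |= REQ_ELVPRIV;
        }

        /* free side: undo exactly what allocation did */
        if (rq->flags & REQ_ELVPRIV)
                elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);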
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(request_queue_t *q, int rw)
+static void freed_request(request_queue_t *q, int rw, int priv)
{
struct request_list *rl = &q->rq;
rl->count[rw]--;
+ if (priv)
+ rl->elvpriv--;
__freed_request(q, rw);
if (unlikely(rl->starved[rw ^ 1]))
__freed_request(q, rw ^ 1);
-
- if (!rl->count[READ] && !rl->count[WRITE]) {
- smp_mb();
- if (unlikely(waitqueue_active(&rl->drain)))
- wake_up(&rl->drain);
- }
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
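The new rl->elvpriv field counts allocated requests that hold io-scheduler private data; freed_request() decrements it when such a request dies. The matching increment appears in the get_rq hunk further down: priv is granted only while no elevator switch is in progress, and the counter moves under q->queue_lock on both sides. Side by side:

        /* producer, in get_request() below */
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
                rl->elvpriv++;

        /* consumer, in freed_request() above */
        if (priv)
                rl->elvpriv--;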
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw)
* Returns !NULL on success, with queue_lock *not held*.
*/
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = current_io_context(GFP_ATOMIC);
-
- if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
- goto out;
+ int priv;
if (rl->count[rw]+1 >= q->nr_requests) {
/*
@@ -1937,9 +1939,14 @@ get_rq:
rl->starved[rw] = 0;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
+
+ priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ if (priv)
+ rl->elvpriv++;
+
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, bio, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -1949,7 +1956,7 @@ get_rq:
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(q, rw);
+ freed_request(q, rw, priv);
/*
* in the very unlikely event that allocation failed and no
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
return rq;
}
-struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
{
struct request *rq;
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @gfp_mask: memory allocation flags
*/
int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
- unsigned int len, unsigned int gfp_mask)
+ unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk)
{
unsigned long now = jiffies;
- __disk_stat_add(disk, time_in_queue,
- disk->in_flight * (now - disk->stamp));
- disk->stamp = now;
+ if (now == disk->stamp)
+ return;
- if (disk->in_flight)
- __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
- disk->stamp_idle = now;
+ if (disk->in_flight) {
+ __disk_stat_add(disk, time_in_queue,
+ disk->in_flight * (now - disk->stamp));
+ __disk_stat_add(disk, io_ticks, (now - disk->stamp));
+ }
+ disk->stamp = now;
}
/*
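The disk_round_stats() rewrite collapses the separate stamp/stamp_idle timestamps into a single stamp, bails out when called twice within the same jiffy, and charges both counters from the same interval, only while requests are in flight. A worked example of the new arithmetic (illustrative numbers):

        /* disk->stamp = 1000, now = 1010, disk->in_flight = 3:
         *
         *      time_in_queue += 3 * (1010 - 1000) = 30  request-jiffies waited
         *      io_ticks      +=     (1010 - 1000) = 10  jiffies the disk was busy
         *      disk->stamp    = 1010
         *
         * With in_flight == 0 nothing is charged, and a second call during
         * the same jiffy (now == disk->stamp) returns immediately. */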
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
if (unlikely(--req->ref_count))
return;
+ elv_completed_request(q, req);
+
req->rq_status = RQ_INACTIVE;
req->rl = NULL;
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
*/
if (rl) {
int rw = rq_data_dir(req);
-
- elv_completed_request(q, req);
+ int priv = req->flags & REQ_ELVPRIV;
BUG_ON(!list_empty(&req->queuelist));
blk_free_request(q, req);
- freed_request(q, rw);
+ freed_request(q, rw, priv);
}
}
void blk_put_request(struct request *req)
{
+ unsigned long flags;
+ request_queue_t *q = req->q;
+
/*
- * if req->rl isn't set, this request didnt originate from the
- * block layer, so it's safe to just disregard it
+ * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
+ * following if (q) test.
*/
- if (req->rl) {
- unsigned long flags;
- request_queue_t *q = req->q;
-
+ if (q) {
spin_lock_irqsave(q->queue_lock, flags);
__blk_put_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
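blk_put_request() now keys off req->q rather than req->rl, so a request that never came from the request list still travels through __blk_put_request() and hits the elv_completed_request() call added above; the request-list bookkeeping stays guarded by the rl check inside. Caller usage is unchanged; a minimal sketch:

        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
        if (rq) {
                /* ... set up and issue the request ... */
                blk_put_request(rq);    /* takes q->queue_lock, drops the last
                                         * reference, notifies the elevator */
        }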
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio)
}
}
-void blk_finish_queue_drain(request_queue_t *q)
-{
- struct request_list *rl = &q->rq;
- struct request *rq;
- int requeued = 0;
-
- spin_lock_irq(q->queue_lock);
- clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
- while (!list_empty(&q->drain_list)) {
- rq = list_entry_rq(q->drain_list.next);
-
- list_del_init(&rq->queuelist);
- elv_requeue_request(q, rq);
- requeued++;
- }
-
- if (requeued)
- q->request_fn(q);
-
- spin_unlock_irq(q->queue_lock);
-
- wake_up(&rl->wait[0]);
- wake_up(&rl->wait[1]);
- wake_up(&rl->drain);
-}
-
-static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
-{
- int wait = rl->count[READ] + rl->count[WRITE];
-
- if (dispatch)
- wait += !list_empty(&q->queue_head);
-
- return wait;
-}
-
-/*
- * We rely on the fact that only requests allocated through blk_alloc_request()
- * have io scheduler private data structures associated with them. Any other
- * type of request (allocated on stack or through kmalloc()) should not go
- * to the io scheduler core, but be attached to the queue head instead.
- */
-void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
-{
- struct request_list *rl = &q->rq;
- DEFINE_WAIT(wait);
-
- spin_lock_irq(q->queue_lock);
- set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
- while (wait_drain(q, rl, wait_dispatch)) {
- prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
-
- if (wait_drain(q, rl, wait_dispatch)) {
- __generic_unplug_device(q);
- spin_unlock_irq(q->queue_lock);
- io_schedule();
- spin_lock_irq(q->queue_lock);
- }
-
- finish_wait(&rl->drain, &wait);
- }
-
- spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * block waiting for the io scheduler being started again.
- */
-static inline void block_wait_queue_running(request_queue_t *q)
-{
- DEFINE_WAIT(wait);
-
- while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
- struct request_list *rl = &q->rq;
-
- prepare_to_wait_exclusive(&rl->drain, &wait,
- TASK_UNINTERRUPTIBLE);
-
- /*
- * re-check the condition. avoids using prepare_to_wait()
- * in the fast path (queue is running)
- */
- if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
- io_schedule();
-
- finish_wait(&rl->drain, &wait);
- }
-}
-
static void handle_bad_sector(struct bio *bio)
{
char b[BDEVNAME_SIZE];
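The entire drain apparatus above, blk_finish_queue_drain(), blk_wait_queue_drained() and block_wait_queue_running(), is deleted rather than repaired: submitters no longer sleep on QUEUE_FLAG_DRAIN (the call site goes away in the following hunk). Its replacement is the rl->elvpriv count: while QUEUE_FLAG_ELVSWITCH is set, new requests simply bypass the io scheduler, and the code performing the switch waits for the count of elevator-owned requests to reach zero. That wait loop lives in elevator.c, outside this diff; an assumed sketch of its shape:

        /* assumed shape of the switch side; not part of this file */
        spin_lock_irq(q->queue_lock);
        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        while (q->rq.elvpriv) {
                q->request_fn(q);       /* push queued work to the driver */
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
        }
        spin_unlock_irq(q->queue_lock);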
@@ -2983,8 +2902,6 @@ end_io:
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
- block_wait_queue_running(q);
-
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
@@ -3393,7 +3310,7 @@ void exit_io_context(void)
* but since the current task itself holds a reference, the context can be
* used in general code, so long as it stays within `current` context.
*/
-struct io_context *current_io_context(int gfp_flags)
+struct io_context *current_io_context(gfp_t gfp_flags)
{
struct task_struct *tsk = current;
struct io_context *ret;
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context);
*
* This is always called in the context of the task which submitted the I/O.
*/
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags)
{
struct io_context *ret;
ret = current_io_context(gfp_flags);