* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
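/*
 * Per-request cfq state is stashed in the request's elevator private
 * pointers: RQ_CIC() yields the owning cfq_io_context, RQ_CFQQ() the
 * cfq_queue the request belongs to.
 */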
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
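/*
 * ioc_count tracks live cfq_io_contexts; cfq_exit() points ioc_gone at an
 * on-stack completion and waits for the last context to be freed.
 */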
int queued[2];
/* currently allocated requests */
int allocated[2];
+ /* pending metadata requests */
+ int meta_pending;
/* fifo list of requests in sort_list */
struct list_head fifo;
return !cfqd->busy_queues;
}
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
- if (rw == READ || rw == WRITE_SYNC)
+ /*
+ * Use the per-process queue for read requests and synchronous writes
+ */
+ if (!(rw & REQ_RW) || is_sync)
return task->pid;
return CFQ_KEY_ASYNC;
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
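+ /*
+ * Prefer the request marked as filesystem metadata, since other IO
+ * may be blocked waiting on it.
+ */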
+ if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ return rq1;
+ else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ return rq2;
s1 = rq1->sector;
s2 = rq2->sector;
*/
while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
cfq_dispatch_insert(cfqd->queue, __alias);
+
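+ /*
+ * The queue now has work pending, so make sure it is on the
+ * round-robin list of busy queues.
+ */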
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
}
static inline void
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
struct task_struct *tsk = current;
- pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+ pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
struct cfq_queue *cfqq;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
+
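+ /*
+ * Undo the pending-metadata accounting done when the request
+ * was queued.
+ */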
+ if (rq_is_meta(rq)) {
+ WARN_ON(!cfqq->meta_pending);
+ cfqq->meta_pending--;
+ }
}
static int
cfq_remove_request(next);
}
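+/*
+ * Hooked up as ->elevator_allow_merge_fn below: the elevator core asks
+ * before merging @bio into the already queued @rq.
+ */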
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ const int rw = bio_data_dir(bio);
+ struct cfq_queue *cfqq;
+ pid_t key;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+ if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+ return 0;
+
+ /*
+ * Lookup the cfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+ */
+ key = cfq_queue_pid(current, rw, bio_sync(bio));
+ cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+
+ if (cfqq == RQ_CFQQ(rq))
+ return 1;
+
+ return 0;
+}
+
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
+ unsigned long flags;
if (unlikely(!cfqd))
return;
- spin_lock(cfqd->queue->queue_lock);
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
cfqq = cic->cfqq[ASYNC];
if (cfqq) {
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
- spin_unlock(cfqd->queue->queue_lock);
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
static void cfq_ioc_set_ioprio(struct io_context *ioc)
struct rb_node **p;
struct rb_node *parent;
struct cfq_io_context *__cic;
+ unsigned long flags;
void *k;
cic->ioc = ioc;
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
- spin_lock_irq(cfqd->queue->queue_lock);
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
list_add(&cic->queue_list, &cfqd->cic_list);
- spin_unlock_irq(cfqd->queue->queue_lock);
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
}
static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
*/
if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
return 0;
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if (rq_is_meta(rq) && !cfqq->meta_pending)
+ return 1;
return 0;
}
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- struct cfq_queue *__cfqq, *next;
-
- list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
- cfq_resort_rr_list(__cfqq, 1);
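+ /*
+ * Expire the current slice so the preempting queue is picked on the
+ * next dispatch.
+ */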
+ cfq_slice_expired(cfqd, 1);
if (!cfqq->slice_left)
cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
+ /*
+ * Put the new queue at the front of the current list,
+ * so we know that it will be selected next.
+ */
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ list_move(&cfqq->cfq_list, &cfqd->cur_rr);
+
cfqq->slice_end = cfqq->slice_left + jiffies;
- cfq_slice_expired(cfqd, 1);
- __cfq_set_active_queue(cfqd, cfqq);
}
/*
{
struct cfq_io_context *cic = RQ_CIC(rq);
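+ /*
+ * Count pending metadata requests, so cfq_should_preempt() can tell
+ * whether the active queue is still doing metadata IO.
+ */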
+ if (rq_is_meta(rq))
+ cfqq->meta_pending++;
+
/*
* check if this request is a better next-serve candidate
*/
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
cfq_add_rq_rb(rq);
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct cfq_queue *cfqq;
+ unsigned int key;
+
+ key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
/*
* don't force setup of a queue from here, as a call to may_queue
* does not necessarily imply that a request actually will be queued.
* so just lookup a possibly existing queue, or return 'may queue'
* if that fails
*/
- cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+ cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
if (cfqq) {
cfq_init_prio_data(cfqq);
cfq_prio_boost(cfqq);
/*
* queue lock held here
*/
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct task_struct *tsk = current;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- pid_t key = cfq_queue_pid(tsk, rw);
+ const int is_sync = rq_is_sync(rq);
+ pid_t key = cfq_queue_pid(tsk, rw, is_sync);
struct cfq_queue *cfqq;
unsigned long flags;
- int is_sync = key != CFQ_KEY_ASYNC;
might_sleep_if(gfp_mask & __GFP_WAIT);
return 1;
}
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
{
- request_queue_t *q = data;
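+ /*
+ * With the work_struct API the handler is passed the work item itself,
+ * so recover the owning cfq_data (and its queue) via container_of().
+ */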
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
.elevator_merge_fn = cfq_merge,
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
+ .elevator_allow_merge_fn = cfq_allow_merge,
.elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
static void __exit cfq_exit(void)
{
- DECLARE_COMPLETION(all_gone);
+ DECLARE_COMPLETION_ONSTACK(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */