q->nr_requests = nr;
blk_queue_congestion_threshold(q);
- if (rl->count[READ] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, READ);
- else if (rl->count[READ] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, READ);
-
- if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
- blk_set_queue_congested(q, WRITE);
- else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
- blk_clear_queue_congested(q, WRITE);
-
- if (rl->count[READ] >= q->nr_requests) {
- blk_set_queue_full(q, READ);
- } else if (rl->count[READ]+1 <= q->nr_requests) {
- blk_clear_queue_full(q, READ);
- wake_up(&rl->wait[READ]);
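+ /*
+ * Recompute congestion and "queue full" state for both request lists
+ * against the new nr_requests; note the split is now sync vs. async
+ * rather than READ vs. WRITE.
+ */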
+ if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_SYNC);
+ else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+ if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, BLK_RW_ASYNC);
+ else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_SYNC);
+ } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
}
- if (rl->count[WRITE] >= q->nr_requests) {
- blk_set_queue_full(q, WRITE);
- } else if (rl->count[WRITE]+1 <= q->nr_requests) {
- blk_clear_queue_full(q, WRITE);
- wake_up(&rl->wait[WRITE]);
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_queue_full(q, BLK_RW_ASYNC);
+ } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
}
spin_unlock_irq(q->queue_lock);
return ret;
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count);
- spin_lock_irq(q->queue_lock);
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
return ret;
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = q->max_hw_sectors >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
return -EINVAL;
- /*
- * Take the queue lock to update the readahead and max_sectors
- * values synchronously:
- */
+
spin_lock_irq(q->queue_lock);
q->max_sectors = max_sectors_kb << 1;
spin_unlock_irq(q->queue_lock);
return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
int max_hw_sectors_kb = q->max_hw_sectors >> 1;

return queue_var_show(max_hw_sectors_kb, (page));
}
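
+/*
+ * Note the deliberate inversion below: the sysfs file is "rotational" while
+ * the internal flag is QUEUE_FLAG_NONROT, so show and store both negate.
+ * E.g. "echo 0 > /sys/block/<dev>/queue/rotational" marks a device (such as
+ * an SSD) non-rotational.
+ */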
+static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(!blk_queue_nonrot(q), page);
+}
+
+static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (nm)
+ queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_set(QUEUE_FLAG_NONROT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
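+/*
+ * "nomerges" suppresses request merging when set, e.g. for low-latency
+ * devices where the merge overhead is not worth it:
+ * "echo 1 > /sys/block/<dev>/queue/nomerges".
+ */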
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (nm)
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
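+/*
+ * "rq_affinity" sets QUEUE_FLAG_SAME_COMP so that completions are steered
+ * back to the CPU that submitted the request. Without
+ * CONFIG_USE_GENERIC_SMP_HELPERS the store below rejects writes with
+ * -EINVAL.
+ */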
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+ unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+
+ return queue_var_show(set != 0, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+ ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ unsigned long val;
+
+ ret = queue_var_store(&val, page, count);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ spin_unlock_irq(q->queue_lock);
+#endif
+ return ret;
+}
+
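+/*
+ * "iostats" toggles QUEUE_FLAG_IO_STAT, which gates whether the block layer
+ * accounts per-disk I/O statistics for this queue (the counters surfaced in
+ * /proc/diskstats and /sys/block/<dev>/stat).
+ */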
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long stats;
+ ssize_t ret = queue_var_store(&stats, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (stats)
+ queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
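+
+/*
+ * Each attribute added above also needs a queue_sysfs_entry descriptor and
+ * a slot in default_attrs[] (both below) to appear under
+ * /sys/block/<dev>/queue/.
+ */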
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
.show = queue_hw_sector_size_show,
};
+static struct queue_sysfs_entry queue_nonrot_entry = {
+ .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nonrot_show,
+ .store = queue_nonrot_store,
+};
+
+static struct queue_sysfs_entry queue_nomerges_entry = {
+ .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nomerges_show,
+ .store = queue_nomerges_store,
+};
+
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+ .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_rq_affinity_show,
+ .store = queue_rq_affinity_store,
+};
+
+static struct queue_sysfs_entry queue_iostats_entry = {
+ .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_iostats_show,
+ .store = queue_iostats_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
+ &queue_nonrot_entry.attr,
+ &queue_nomerges_entry.attr,
+ &queue_rq_affinity_entry.attr,
+ &queue_iostats_entry.attr,
NULL,
};
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- struct request_queue *q = container_of(kobj, struct request_queue, kobj);
-
+ struct request_queue *q;
ssize_t res;
if (!entry->store)
return -EIO;
+
+ q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
res = entry->store(q, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}

int blk_register_queue(struct gendisk *disk)
{
int ret;

struct request_queue *q = disk->queue;
- if (!q || !q->request_fn)
+ if (WARN_ON(!q))
return -ENXIO;
- ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
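+ /*
+ * A queue without a ->request_fn (e.g. a bio-based, stacked driver) has
+ * no request-based sysfs state to expose, so registration becomes a
+ * successful no-op rather than an -ENXIO failure.
+ */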
+ if (!q->request_fn)
+ return 0;
+
+ ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
"%s", "queue");
if (ret < 0)
return ret;
kobject_uevent(&q->kobj, KOBJ_ADD);

ret = elv_register_queue(q);
if (ret) {
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
return ret;
}

return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
- if (q && q->request_fn) {
+ if (WARN_ON(!q))
+ return;
+
+ if (q->request_fn) {
elv_unregister_queue(q);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
- kobject_put(&disk->dev.kobj);
+ kobject_put(&disk_to_dev(disk)->kobj);
}
}