#ifndef IOCONTEXT_H
#define IOCONTEXT_H
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
- spinlock_t lock;
-
- void (*dtor)(struct as_io_context *aic); /* destructor */
- void (*exit)(struct as_io_context *aic); /* called on task exit */
-
- unsigned long state;
- atomic_t nr_queued; /* queued reads & sync writes */
- atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
- /* IO History tracking */
- /* Thinktime */
- unsigned long last_end_request;
- unsigned long ttime_total;
- unsigned long ttime_samples;
- unsigned long ttime_mean;
- /* Layout pattern */
- unsigned int seek_samples;
- sector_t last_request_pos;
- u64 seek_total;
- sector_t seek_mean;
-};
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
struct cfq_queue;
struct cfq_io_context {
- struct rb_node rb_node;
void *key;
+ unsigned long dead_key;
struct cfq_queue *cfqq[2];
struct io_context *ioc;
unsigned long last_end_request;
- sector_t last_request_pos;
unsigned long ttime_total;
unsigned long ttime_samples;
unsigned long ttime_mean;
- unsigned int seek_samples;
- u64 seek_total;
- sector_t seek_mean;
-
struct list_head queue_list;
+ struct hlist_node cic_list;
void (*dtor)(struct io_context *); /* destructor */
void (*exit)(struct io_context *); /* called on task exit */
+
+ struct rcu_head rcu_head;
};
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
*/
struct io_context {
- atomic_t refcount;
+ atomic_long_t refcount;
atomic_t nr_tasks;
	/* all the fields below are protected by this lock */
	spinlock_t lock;

unsigned short ioprio;
unsigned short ioprio_changed;
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+ unsigned short cgroup_changed;
+#endif
+
/*
* For request batching
*/
- unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
+ unsigned long last_waited; /* Time last woken after wait for request */
- struct as_io_context *aic;
- struct rb_root cic_root;
+ struct radix_tree_root radix_root;
+ struct hlist_head cic_list;
void *ioc_data;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
* a race).
*/
- if (ioc && atomic_inc_not_zero(&ioc->refcount))
+ if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+ atomic_inc(&ioc->nr_tasks);
return ioc;
+ }
return NULL;
}
+struct task_struct;
+#ifdef CONFIG_BLOCK
+int put_io_context(struct io_context *ioc);
+void exit_io_context(struct task_struct *task);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+#else
+static inline void exit_io_context(struct task_struct *task)
+{
+}
+
+struct io_context;
+static inline int put_io_context(struct io_context *ioc)
+{
+ return 1;
+}
+#endif
+
#endif
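
For context only: the sketch below shows roughly how a CLONE_IO fork path could share a parent's io_context using the ioc_task_link() helper declared above. share_io_context(), the task_struct fields, and the simplified error handling are illustrative assumptions, not the actual kernel/fork.c code.

/*
 * Illustrative sketch, not part of this header: share the parent's
 * io_context with a child task.
 */
static int share_io_context(struct task_struct *parent,
			    struct task_struct *child)
{
	struct io_context *ioc = parent->io_context;

	if (!ioc)
		return 0;	/* parent has no I/O context to share */

	/*
	 * Pin it: ioc_task_link() bumps both refcount and nr_tasks, and
	 * fails if the context is already being torn down.
	 */
	child->io_context = ioc_task_link(ioc);
	if (!child->io_context)
		return -ENOMEM;

	return 0;
}

/* exit_io_context() later drops the nr_tasks/refcount pair taken here. */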