__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_RW_SYNC, /* request is sync (O_DIRECT) */
+ __REQ_ALLOCED, /* request came from our alloc pool */
__REQ_NR_BITS, /* stops here */
};
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
+#define REQ_ALLOCED (1 << __REQ_ALLOCED)
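
The new REQ_ALLOCED bit marks requests that came out of the queue's reserved mempool, so the put path can tell them apart from requests built elsewhere (a driver may carve one out of its own storage). A minimal sketch of that check, assuming the q->rq.rq_pool mempool of this era's ll_rw_blk.c; the helper name is illustrative:

	/* Only recycle a request into the mempool if it came from it;
	 * a request built outside the block core is left to its owner. */
	static void example_put_request(request_queue_t *q, struct request *rq)
	{
		if (rq->cmd_flags & REQ_ALLOCED)
			mempool_free(rq, q->rq.rq_pool);
	}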
#define BLK_MAX_CDB 16
struct bio *biotail;
struct hlist_node hash; /* merge hash */
+ struct rb_node rb_node; /* sort/lookup */
+ /*
+ * Two pointers are available to the IO schedulers; if they need
+ * more, they have to allocate it dynamically (see the sketch
+ * after the struct).
+ */
void *elevator_private;
+ void *elevator_private2;
+
void *completion_data;
- int rq_status; /* should split this into a few status bits */
- int errors;
struct gendisk *rq_disk;
unsigned long start_time;
unsigned short ioprio;
- int tag;
-
- int ref_count;
request_queue_t *q;
- struct request_list *rl;
- struct completion *waiting;
void *special;
char *buffer;
+ int tag;
+ int errors;
+
+ int ref_count;
+
/*
* when request is used as a packet command carrier
*/
int retries;
/*
- * completion callback. end_io_data should be folded in with waiting
+ * completion callback.
*/
rq_end_io_fn *end_io;
void *end_io_data;
struct mutex sysfs_lock;
};
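
Between them, the new rb_node and the pair of elevator_private pointers cover what a typical I/O scheduler needs per request: an embedded sort/lookup key plus one per-request object and a back-pointer, with anything larger allocated on the side. A minimal sketch of a set_request-style pair of hooks under those assumptions (the scheduler name, hook signatures, and foo_data struct are all illustrative, not the actual elevator API):

	/* Hypothetical per-request state for a scheduler "foo". */
	struct foo_data {
		unsigned long expire;	/* e.g. when this request goes stale */
	};

	static int foo_set_request(request_queue_t *q, struct request *rq,
				   gfp_t gfp_mask)
	{
		struct foo_data *fd = kmalloc(sizeof(*fd), gfp_mask);

		if (!fd)
			return -ENOMEM;

		fd->expire = jiffies + HZ;
		rq->elevator_private  = fd;		/* per-request state */
		rq->elevator_private2 = q->elevator;	/* back-pointer */
		return 0;
	}

	static void foo_put_request(request_queue_t *q, struct request *rq)
	{
		kfree(rq->elevator_private);
		rq->elevator_private = NULL;
	}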
-#define RQ_INACTIVE (-1)
-#define RQ_ACTIVE 1
-
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+/*
+ * We regard a request as sync if it's a READ or a SYNC write.
+ */
+#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+
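
rq_is_sync() folds the transfer direction and the new REQ_RW_SYNC bit into one predicate: every READ counts as sync, a WRITE only when REQ_RW_SYNC (e.g. O_DIRECT) is set. A small illustrative use, with a hypothetical counter struct:

	struct io_counts {
		unsigned long sync;
		unsigned long async;
	};

	static void count_request(struct io_counts *ioc, struct request *rq)
	{
		if (rq_is_sync(rq))
			ioc->sync++;
		else
			ioc->async++;
	}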
static inline int blk_queue_full(struct request_queue *q, int rw)
{
if (rw == READ)