regulator: fix header file missing kernel-doc
[safe/jmp/linux-2.6] include/linux/blkdev.h
index e9bb73f..2755d5c 100644
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -39,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+       /*
+        * count[], starved[], and wait[] are indexed by
+        * BLK_RW_SYNC/BLK_RW_ASYNC
+        */
        int count[2];
        int starved[2];
        int elvpriv;
@@ -67,6 +70,11 @@ enum rq_cmd_type_bits {
        REQ_TYPE_ATA_PC,
 };
 
+enum {
+       BLK_RW_ASYNC    = 0,
+       BLK_RW_SYNC     = 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
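
The BLK_RW_ASYNC/BLK_RW_SYNC constants above are what now index the count[], starved[] and wait[] arrays in struct request_list, replacing the old READ/WRITE indexing. As a rough sketch of the pattern, here is a hypothetical helper loosely modelled on the request-freeing path in blk-core.c; it is illustrative only and assumes the declarations from <linux/blkdev.h> and <linux/wait.h>:

    /* Illustrative sketch, not a kernel function: release accounting
     * for one pool of a struct request_list, indexed by BLK_RW_*. */
    static void example_rl_put(struct request_list *rl, bool is_sync)
    {
            int idx = is_sync ? BLK_RW_SYNC : BLK_RW_ASYNC;

            rl->count[idx]--;
            if (waitqueue_active(&rl->wait[idx]))
                    wake_up(&rl->wait[idx]);
    }
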
@@ -104,11 +112,13 @@ enum rq_flag_bits {
        __REQ_QUIET,            /* don't worry about errors */
        __REQ_PREEMPT,          /* set for "ide_preempt" requests */
        __REQ_ORDERED_COLOR,    /* is before or after barrier */
-       __REQ_RW_SYNC,          /* request is sync (O_DIRECT) */
+       __REQ_RW_SYNC,          /* request is sync (sync write or read) */
        __REQ_ALLOCED,          /* request came from our alloc pool */
        __REQ_RW_META,          /* metadata io request */
        __REQ_COPY_USER,        /* contains copies of user pages */
        __REQ_INTEGRITY,        /* integrity metadata has been remapped */
+       __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
+       __REQ_IO_STAT,          /* account I/O stat */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -135,6 +145,8 @@ enum rq_flag_bits {
 #define REQ_RW_META    (1 << __REQ_RW_META)
 #define REQ_COPY_USER  (1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY  (1 << __REQ_INTEGRITY)
+#define REQ_NOIDLE     (1 << __REQ_NOIDLE)
+#define REQ_IO_STAT    (1 << __REQ_IO_STAT)
 
 #define BLK_MAX_CDB    16
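
REQ_NOIDLE asks the I/O scheduler not to idle waiting for further I/O once this request completes, and REQ_IO_STAT marks a request for I/O accounting. The blk_rq_io_stat() and rq_noidle() wrappers added further down in this patch test exactly these bits; a hypothetical completion-side check could look like this (illustrative only, not code from this patch):

    /* Illustrative sketch: consult the new flags at completion time. */
    static void example_complete_checks(struct request *rq)
    {
            if (!blk_rq_io_stat(rq))
                    return;         /* not subject to I/O accounting */

            if (rq_noidle(rq))
                    pr_debug("submitter does not expect more I/O soon\n");
    }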
 
@@ -313,7 +325,7 @@ struct request_queue
         */
        struct list_head        queue_head;
        struct request          *last_merge;
-       elevator_t              *elevator;
+       struct elevator_queue   *elevator;
 
        /*
         * the queue request freelist, one for reads and one for writes
@@ -437,8 +449,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER     0       /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED     2       /* queue is stopped */
-#define        QUEUE_FLAG_READFULL     3       /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL   4       /* write queue has been filled */
+#define        QUEUE_FLAG_SYNCFULL     3       /* sync request queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL   4       /* async request queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
 #define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
@@ -450,6 +462,11 @@ struct request_queue
 #define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
+                                (1 << QUEUE_FLAG_CLUSTER) |            \
+                                (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
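
QUEUE_FLAG_DEFAULT collects the flags a newly allocated queue is expected to start with, so clustering, request stacking and I/O statistics default to on. A hypothetical initialization snippet, assuming the queue_flag_*_unlocked() helpers from this header (this is not the actual blk_alloc_queue() code):

    /* Illustrative sketch: start from the default flag set, then adjust. */
    static void example_init_queue_flags(struct request_queue *q, bool io_stat)
    {
            q->queue_flags = QUEUE_FLAG_DEFAULT;

            /* Callers that do not want per-partition accounting can opt out. */
            if (!io_stat)
                    queue_flag_clear_unlocked(QUEUE_FLAG_IO_STAT, q);
    }
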
@@ -523,22 +540,32 @@ enum {
         * TAG_FLUSH    : ordering by tag w/ pre and post flushes
         * TAG_FUA      : ordering by tag w/ pre flush and FUA write
         */
-       QUEUE_ORDERED_NONE      = 0x00,
-       QUEUE_ORDERED_DRAIN     = 0x01,
-       QUEUE_ORDERED_TAG       = 0x02,
-
-       QUEUE_ORDERED_PREFLUSH  = 0x10,
-       QUEUE_ORDERED_POSTFLUSH = 0x20,
-       QUEUE_ORDERED_FUA       = 0x40,
-
-       QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-       QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_TAG_FUA   = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+       QUEUE_ORDERED_BY_DRAIN          = 0x01,
+       QUEUE_ORDERED_BY_TAG            = 0x02,
+       QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
+       QUEUE_ORDERED_DO_BAR            = 0x20,
+       QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
+       QUEUE_ORDERED_DO_FUA            = 0x80,
+
+       QUEUE_ORDERED_NONE              = 0x00,
+
+       QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
+
+       QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
 
        /*
         * Ordered operation sequence
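
The reworked constants split the ordering method (QUEUE_ORDERED_BY_DRAIN vs. QUEUE_ORDERED_BY_TAG) from the individual actions a barrier needs (the QUEUE_ORDERED_DO_* bits), and the composite modes are simply ORs of those bits. Barrier-processing code can then test q->ordered bit by bit; the two helpers below are hypothetical examples, not the actual blk-barrier.c logic:

    /* Illustrative sketch: which steps does the current ordered mode need? */
    static bool example_needs_preflush(struct request_queue *q)
    {
            return (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) != 0;
    }

    static bool example_needs_fua_write(struct request_queue *q)
    {
            return (q->ordered & QUEUE_ORDERED_DO_FUA) != 0;
    }
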
@@ -556,6 +583,7 @@ enum {
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)    test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 #define blk_queue_stackable(q) \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
@@ -572,6 +600,7 @@ enum {
                                 blk_failfast_transport(rq) ||  \
                                 blk_failfast_driver(rq))
 #define blk_rq_started(rq)     ((rq)->cmd_flags & REQ_STARTED)
+#define blk_rq_io_stat(rq)     ((rq)->cmd_flags & REQ_IO_STAT)
 
 #define blk_account_rq(rq)     (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
 
@@ -586,7 +615,6 @@ enum {
 #define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)     ((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)                ((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)  (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)      (!list_empty(&(rq)->queuelist))
 
@@ -595,32 +623,42 @@ enum {
 #define rq_data_dir(rq)                ((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync if it is either a read or a sync write.
  */
-#define rq_is_sync(rq)         (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+       return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+       return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)         ((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)          ((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-       return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+       if (sync)
+               return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+       return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               queue_flag_set(QUEUE_FLAG_READFULL, q);
+       if (sync)
+               queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
        else
-               queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+               queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-       if (rw == READ)
-               queue_flag_clear(QUEUE_FLAG_READFULL, q);
+       if (sync)
+               queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+               queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
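
With rw_is_sync() and the sync/async variants of blk_queue_full(), the request-allocation path can manage its two request pools by synchronicity instead of by data direction. A simplified sketch of that pattern follows; it is a hypothetical helper that only follows the shape of the allocator in blk-core.c rather than quoting it:

    /* Illustrative sketch: mark and report congestion for the right pool. */
    static bool example_pool_congested(struct request_queue *q,
                                       unsigned int rw_flags)
    {
            const bool is_sync = rw_is_sync(rw_flags); /* BLK_RW_SYNC/ASYNC */

            if (q->rq.count[is_sync] >= q->nr_requests)
                    blk_set_queue_full(q, is_sync);

            return blk_queue_full(q, is_sync);
    }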
 
 
@@ -682,6 +720,8 @@ struct rq_map_data {
        struct page **pages;
        int page_order;
        int nr_entries;
+       unsigned long offset;
+       int null_mapped;
 };
 
 struct req_iterator {
@@ -690,6 +730,8 @@ struct req_iterator {
 };
 
 /* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio)             \
+       for (; _bio; _bio = _bio->bi_next)
 #define __rq_for_each_bio(_bio, rq)    \
        if ((rq->bio))                  \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
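
for_each_bio() walks a chain of bios through bi_next, and __rq_for_each_bio() applies that walk to every bio attached to a request; as the comment notes, ordinary users should reach for rq_for_each_segment() instead. A small hypothetical helper using the request-level macro:

    /* Illustrative sketch: count the bios queued on a request. */
    static unsigned int example_count_bios(struct request *rq)
    {
            struct bio *bio;
            unsigned int nr = 0;

            __rq_for_each_bio(bio, rq)
                    nr++;

            return nr;
    }
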
@@ -856,10 +898,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
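
blk_do_ordered() and blk_ordered_complete_seq() now return bool rather than int. blk_do_ordered() may substitute or hold back the request it is given as part of barrier sequencing; a hypothetical dispatch-side caller could use it roughly like this (illustrative only, not the kernel's __elv_next_request()):

    /* Illustrative sketch: only hand the request out when ordering allows. */
    static struct request *example_pick_request(struct request_queue *q,
                                                struct request *rq)
    {
            if (!blk_do_ordered(q, &rq))
                    return NULL;    /* held back for barrier sequencing */

            return rq;
    }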