libata: implement HORKAGE_1_5_GBPS and apply it to WD My Book
[safe/jmp/linux-2.6] / include/linux/blkdev.h
index 9cc7cc5..d08c4b8 100644 (file)
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -109,6 +108,7 @@ enum rq_flag_bits {
        __REQ_RW_META,          /* metadata io request */
        __REQ_COPY_USER,        /* contains copies of user pages */
        __REQ_INTEGRITY,        /* integrity metadata has been remapped */
+       __REQ_UNPLUG,           /* unplug queue on submission */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -135,6 +135,7 @@ enum rq_flag_bits {
 #define REQ_RW_META    (1 << __REQ_RW_META)
 #define REQ_COPY_USER  (1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY  (1 << __REQ_INTEGRITY)
+#define REQ_UNPLUG     (1 << __REQ_UNPLUG)
 
 #define BLK_MAX_CDB    16
 
@@ -313,7 +314,7 @@ struct request_queue
         */
        struct list_head        queue_head;
        struct request          *last_merge;
-       elevator_t              *elevator;
+       struct elevator_queue   *elevator;
 
        /*
         * the queue request freelist, one for reads and one for writes
@@ -449,6 +450,12 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO     12      /* fake timeout */
 #define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
+                                (1 << QUEUE_FLAG_CLUSTER) |            \
+                                (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +529,32 @@ enum {
         * TAG_FLUSH    : ordering by tag w/ pre and post flushes
         * TAG_FUA      : ordering by tag w/ pre flush and FUA write
         */
-       QUEUE_ORDERED_NONE      = 0x00,
-       QUEUE_ORDERED_DRAIN     = 0x01,
-       QUEUE_ORDERED_TAG       = 0x02,
-
-       QUEUE_ORDERED_PREFLUSH  = 0x10,
-       QUEUE_ORDERED_POSTFLUSH = 0x20,
-       QUEUE_ORDERED_FUA       = 0x40,
-
-       QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-       QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_TAG_FUA   = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+       QUEUE_ORDERED_BY_DRAIN          = 0x01,
+       QUEUE_ORDERED_BY_TAG            = 0x02,
+       QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
+       QUEUE_ORDERED_DO_BAR            = 0x20,
+       QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
+       QUEUE_ORDERED_DO_FUA            = 0x80,
+
+       QUEUE_ORDERED_NONE              = 0x00,
+
+       QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
+
+       QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
 
        /*
         * Ordered operation sequence
@@ -555,6 +572,7 @@ enum {
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)    test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 #define blk_queue_stackable(q) \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
@@ -585,7 +603,6 @@ enum {
 #define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)     ((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)                ((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)  (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)      (!list_empty(&(rq)->queuelist))
 
@@ -662,6 +679,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
  * default timeout for SG_IO if none specified
  */
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT     (7 * HZ)
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
@@ -680,6 +698,8 @@ struct rq_map_data {
        struct page **pages;
        int page_order;
        int nr_entries;
+       unsigned long offset;
+       int null_mapped;
 };
 
 struct req_iterator {
@@ -854,10 +874,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -918,6 +938,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_SEGMENT_SIZE       65536
 
+#define BLK_SEG_BOUNDARY_MASK  0xFFFFFFFFUL
+
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
 static inline int queue_hardsect_size(struct request_queue *q)
@@ -974,7 +996,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))