#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
+#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
+#include <linux/bsg.h>
#include <asm/scatterlist.h>
+#ifdef CONFIG_LBD
+# include <asm/div64.h>
+# define sector_div(a, b) do_div(a, b)
+#else
+# define sector_div(n, b)( \
+{ \
+ int _res; \
+ _res = (n) % (b); \
+ (n) /= (b); \
+ _res; \
+} \
+)
+#endif
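Not part of the patch: sector_div() divides its first argument in place and returns the remainder, which keeps 64-bit sector arithmetic cheap on 32-bit kernels under CONFIG_LBD (a plain 64-bit `%` would need libgcc helpers). A minimal usage sketch; the striping helper below is hypothetical:

```c
/* Hypothetical helper: split an absolute sector into a chunk number
 * and an offset within the chunk. sector_div() modifies '*sector' in
 * place (it becomes the chunk number) and returns the remainder. */
static unsigned int stripe_offset(sector_t *sector, unsigned int chunk_sects)
{
	return sector_div(*sector, chunk_sects);
}
```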
+
+#ifdef CONFIG_BLOCK
+
struct scsi_ioctl_command;
struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
+struct request;
+struct sg_io_hdr;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
unsigned long last_end_request;
sector_t last_request_pos;
- unsigned long last_queue;
unsigned long ttime_total;
unsigned long ttime_samples;
atomic_t refcount;
struct task_struct *task;
- int (*set_ioprio)(struct io_context *, unsigned int);
+ unsigned int ioprio_changed;
/*
* For request batching
struct as_io_context *aic;
struct rb_root cic_root;
+ void *ioc_data;
};
void put_io_context(struct io_context *ioc);
void exit_io_context(void);
-struct io_context *current_io_context(gfp_t gfp_flags);
-struct io_context *get_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
REQ_TYPE_ATA_CMD,
REQ_TYPE_ATA_TASK,
REQ_TYPE_ATA_TASKFILE,
+ REQ_TYPE_ATA_PC,
};
/*
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_ALLOCED, /* request came from our alloc pool */
+ __REQ_RW_META, /* metadata io request */
__REQ_NR_BITS, /* stops here */
};
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
+#define REQ_RW_META (1 << __REQ_RW_META)
#define BLK_MAX_CDB 16
struct list_head queuelist;
struct list_head donelist;
+ struct request_queue *q;
+
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
*/
sector_t sector; /* next sector to submit */
+ sector_t hard_sector; /* next sector to complete */
unsigned long nr_sectors; /* no. of sectors left to submit */
+ unsigned long hard_nr_sectors; /* no. of sectors left to complete */
/* no. of sectors left to submit in the current segment */
unsigned int current_nr_sectors;
- sector_t hard_sector; /* next sector to complete */
- unsigned long hard_nr_sectors; /* no. of sectors left to complete */
/* no. of sectors left to complete in the current segment */
unsigned int hard_cur_sectors;
struct bio *biotail;
struct hlist_node hash; /* merge hash */
- struct rb_node rb_node; /* sort/lookup */
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ void *completion_data;
+ };
/*
* two pointers are available for the IO schedulers, if they need
void *elevator_private;
void *elevator_private2;
- void *completion_data;
-
struct gendisk *rq_disk;
unsigned long start_time;
unsigned short ioprio;
- request_queue_t *q;
-
void *special;
char *buffer;
*/
rq_end_io_fn *end_io;
void *end_io_data;
+
+ /* for bidi */
+ struct request *next_rq;
};
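Not part of the patch: the union introduced above lets rb_node and completion_data share storage because their lifetimes never overlap (a request sits in the elevator's rbtree, or it has been dispatched and carries completion data, never both). A minimal illustration of the same pattern, with hypothetical names:

```c
/* Illustration only: two fields with disjoint lifetimes sharing
 * storage via an anonymous union. 'sort_node' is live only while the
 * item is queued in an rbtree; once dispatched, the same bytes hold
 * the driver's completion cookie. */
struct example_item {
	union {
		struct rb_node sort_node;	/* elevator-only */
		void *completion_cookie;	/* dispatch/completion-only */
	};
};
```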
/*
#include <linux/elevator.h>
-typedef int (merge_request_fn) (request_queue_t *, struct request *,
- struct bio *);
-typedef int (merge_requests_fn) (request_queue_t *, struct request *,
- struct request *);
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef void (activity_fn) (void *data, int rw);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
enum blk_queue_state {
struct request_list rq;
request_fn_proc *request_fn;
- merge_request_fn *back_merge_fn;
- merge_request_fn *front_merge_fn;
- merge_requests_fn *merge_requests_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
- activity_fn *activity_fn;
issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
*/
void *queuedata;
- void *activity_data;
-
/*
* queue needs bounce pages for pages above this limit
*/
unsigned int bi_size;
struct mutex sysfs_lock;
+
+#if defined(CONFIG_BLK_DEV_BSG)
+ struct bsg_class_device bsg_dev;
+#endif
};
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */
+#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
enum {
/*
#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
+#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
* We regard a request as sync, if it's a READ or a SYNC write.
*/
#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
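Not part of the patch: a hedged sketch of how an I/O scheduler could consume these predicates. The ranking function is hypothetical, though CFQ does use rq_is_meta() to favour filesystem metadata such as journal writes:

```c
/* Hypothetical scheduler helper: rank a request for service order. */
static int example_rq_class(struct request *rq)
{
	if (rq_is_meta(rq))
		return 2;	/* metadata (REQ_RW_META) first */
	if (rq_is_sync(rq))
		return 1;	/* reads and O_DIRECT/sync writes */
	return 0;		/* async writeback last */
}
```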
static inline int blk_queue_full(struct request_queue *q, int rw)
{
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
/*
- * noop, requests are automagically marked as active/inactive by I/O
- * scheduler -- see elv_next_request
- */
-#define blk_queue_headactive(q, head_active)
-
-/*
* q->prep_rq_fn return values
*/
#define BLKPREP_OK 0 /* serve it */
#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
-#ifdef CONFIG_MMU
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+
+#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
return 0;
}
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_BOUNCE */
if ((rq->bio)) \
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-struct sec_size {
- unsigned block_size;
- unsigned block_size_bits;
-};
-
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
+extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
+ struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
struct gendisk *, struct scsi_ioctl_command __user *);
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+
+/*
+ * Temporary export, until SCSI gets fixed up.
+ */
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+ struct bio *);
+
+/*
+ * A queue has just exited congestion. Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+{
+ clear_bdi_congested(&q->backing_dev_info, rw);
+}
+
+/*
+ * A queue has just entered congestion. Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+{
+ set_bdi_congested(&q->backing_dev_info, rw);
+}
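Not part of the patch: the block core flips these flags as its request pool crosses congestion watermarks. A sketch of that call pattern with illustrative thresholds (the real ones come from queue_congestion_{on,off}_threshold() in ll_rw_blk.c):

```c
/* Sketch (illustrative thresholds): flag and clear congestion as the
 * number of allocated requests for one direction rises and falls. */
static void example_update_congestion(struct request_queue *q, int rw,
				      int count)
{
	if (count >= q->nr_requests)		/* pool nearly exhausted */
		blk_set_queue_congested(q, rw);
	else if (count < q->nr_requests / 2)	/* comfortably drained */
		blk_clear_queue_congested(q, rw);
}
```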
+
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct bio *, unsigned int);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_unmap_user(struct bio *);
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct sg_iovec *, int, unsigned int);
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
+extern int blk_verify_command(unsigned char *, int);
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue;
}
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);
-static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
-{
- if (blk_fs_request(rq))
- return (nr_bytes >= (rq->hard_nr_sectors << 9));
- else if (blk_pc_request(rq))
- return nr_bytes >= rq->data_len;
-
- return 0;
-}
-
/*
* end_that_request_first/chunk() takes an uptodate argument. we account
- * any value <= as an io error. 0 means -EIO for compatability reasons,
+ * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
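The full convention (the comment continues past this hunk in the original header) is: an uptodate of 1 reports success, 0 maps to -EIO, and any other negative value passes through as the error code. Not part of the patch, a hedged sketch of a driver honoring it; 'hw_error' and the function name are illustrative:

```c
/* Hypothetical driver completion using the uptodate convention:
 * 1 == success, 0 == -EIO, other negative values pass through. */
static void example_complete(struct request *rq, int hw_error,
			     unsigned int bytes)
{
	int uptodate = hw_error ? (hw_error < 0 ? hw_error : 0) : 1;

	if (!end_that_request_first(rq, uptodate, bytes >> 9)) {
		blkdev_dequeue_request(rq);
		end_that_request_last(rq, uptodate);
	}
}
```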
/*
* Access functions for manipulating queue properties
*/
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
/*
* tag stuff
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
-extern long blk_congestion_wait(int rw, long timeout);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
-extern void blk_congestion_end(int rw);
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
+ int tag)
+{
+ if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+ return NULL;
+ return bqt->tag_index[tag];
+}
+
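Not part of the patch: a sketch of the lookup in a tagged driver's completion path (queue lock assumed held, as blk_queue_end_tag() requires; the function name is illustrative):

```c
/* Sketch: the hardware reports a completed tag; map it back to the
 * request and release the tag for reuse. */
static struct request *example_complete_tag(struct request_queue *q, int tag)
{
	struct request *rq = blk_map_queue_find_tag(q->queue_tags, tag);

	if (unlikely(!rq))
		return NULL;		/* stale or spurious tag */

	blk_queue_end_tag(q, rq);	/* release the tag */
	return rq;			/* caller completes the request */
}
```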
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define MAX_PHYS_SEGMENTS 128
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
{
int retval = 512;
return queue_hardsect_size(bdev_get_queue(bdev));
}
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
{
int retval = 511;
return retval;
}
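Not part of the patch: queue_dma_alignment() returns a mask (511 by default), and paths such as blk_rq_map_user() test user buffers against it before mapping them directly. A sketch of that check; the helper is hypothetical:

```c
/* Sketch: a buffer that fails this test cannot be mapped directly
 * for DMA and needs a bounce copy instead. */
static int example_dma_aligned(struct request_queue *q,
			       unsigned long uaddr, unsigned long len)
{
	int mask = queue_dma_alignment(q);

	return !((uaddr & mask) || (len & mask));
}
```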
-static inline int bdev_dma_aligment(struct block_device *bdev)
-{
- return queue_dma_alignment(bdev_get_queue(bdev));
-}
-
-#define blk_finished_io(nsects) do { } while (0)
-#define blk_started_io(nsects) do { } while (0)
-
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
-void kblockd_flush(void);
-
-#ifdef CONFIG_LBD
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
- int _res; \
- _res = (n) % (b); \
- (n) /= (b); \
- _res; \
-} \
-)
-#endif
+void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
MODULE_ALIAS("block-major-" __stringify(major) "-*")
+#else /* CONFIG_BLOCK */
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+
+static inline void exit_io_context(void)
+{
+}
+
+#endif /* CONFIG_BLOCK */
+
#endif