enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
- /* Maintain bio traversal state for part by part I/O submission.
- * hard_* are block layer internals, no driver should touch them!
- */
-
- sector_t sector; /* next sector to submit */
- sector_t hard_sector; /* next sector to complete */
- unsigned long nr_sectors; /* no. of sectors left to submit */
- unsigned long hard_nr_sectors; /* no. of sectors left to complete */
- /* no. of sectors left to submit in the current segment */
- unsigned int current_nr_sectors;
-
- /* no. of sectors left to complete in the current segment */
- unsigned int hard_cur_sectors;
+ /* the following two fields are internal, NEVER access directly */
+ sector_t __sector; /* sector cursor */
+ unsigned int __data_len; /* total data len */
struct bio *bio;
struct bio *biotail;
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
- unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len;
unsigned int resid_len; /* residual count */
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-struct blk_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
+struct queue_limits {
+ unsigned long bounce_pfn;
+ unsigned long seg_boundary_mask;
+
+ unsigned int max_hw_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+
+ unsigned short logical_block_size;
+ unsigned short max_hw_segments;
+ unsigned short max_phys_segments;
+
+ unsigned char misaligned;
+ unsigned char no_cluster;
};
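
These values end up embedded in struct request_queue as q->limits (added further down in this patch). A minimal sketch, with a hypothetical my_dump_limits() for illustration only, of where the consolidated values now live; real drivers should prefer the queue_*() accessors added later in this patch over poking q->limits directly:

	#include <linux/blkdev.h>

	static void my_dump_limits(struct request_queue *q)
	{
		struct queue_limits *lim = &q->limits;

		printk(KERN_INFO "lbs=%u pbs=%u io_min=%u io_opt=%u align=%u\n",
		       (unsigned int)lim->logical_block_size,
		       lim->physical_block_size, lim->io_min,
		       lim->io_opt, lim->alignment_offset);
	}
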
struct request_queue
/*
* queue needs bounce pages for pages above this limit
*/
- unsigned long bounce_pfn;
gfp_t bounce_gfp;
/*
unsigned int nr_congestion_off;
unsigned int nr_batching;
- unsigned int max_sectors;
- unsigned int max_hw_sectors;
- unsigned short max_phys_segments;
- unsigned short max_hw_segments;
- unsigned short hardsect_size;
- unsigned int max_segment_size;
-
- unsigned long seg_boundary_mask;
void *dma_drain_buffer;
unsigned int dma_drain_size;
unsigned int dma_pad_mask;
struct list_head tag_busy_list;
unsigned int nr_sorted;
- unsigned int in_flight;
+ unsigned int in_flight[2];
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
+ struct queue_limits limits;
+
/*
* sg stuff
*/
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
#endif
- struct blk_cmd_filter cmd_filter;
};
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
__clear_bit(flag, &q->queue_flags);
}
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
+#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
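
A rough sketch of how a request-stacking driver might combine the new clone helpers; the clone is assumed to be driver-allocated and initialized with blk_rq_init(), and my_clone_and_issue() plus its error handling are invented for the example rather than copied from any in-tree user:

	#include <linux/blkdev.h>

	static int my_clone_and_issue(struct request_queue *lower_q,
				      struct request *clone,
				      struct request *rq_src,
				      struct bio_set *bs)
	{
		int ret;

		/* clone the bios of @rq_src into @clone and copy over the
		 * relevant request fields */
		ret = blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC, NULL, NULL);
		if (ret)
			return ret;

		/* hand the prepared clone to the lower-level queue */
		ret = blk_insert_cloned_request(lower_q, clone);
		if (ret)
			blk_rq_unprep_clone(clone);	/* free the cloned bios again */
		return ret;
	}
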
extern void blk_plug_device(struct request_queue *);
struct scsi_ioctl_command __user *);
/*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
- struct bio *bio);
-
-/*
 * A queue has just exited congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
-extern void blkdev_dequeue_request(struct request *req);
-
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
- return rq->hard_sector;
+ return rq->__sector;
}
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
- return rq->hard_nr_sectors;
+ return blk_rq_bytes(rq) >> 9;
}
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
- return rq->hard_cur_sectors;
+ return blk_rq_cur_bytes(rq) >> 9;
}
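
With the sector/nr_sectors/current_nr_sectors fields gone, drivers read the same information through these accessors. A small conversion sketch; my_fill_cmd() is hypothetical:

	#include <linux/blkdev.h>

	static void my_fill_cmd(struct request *rq, sector_t *lba,
				unsigned int *nsect, unsigned int *cur)
	{
		*lba   = blk_rq_pos(rq);		/* previously rq->sector */
		*nsect = blk_rq_sectors(rq);		/* previously rq->nr_sectors */
		*cur   = blk_rq_cur_sectors(rq);	/* previously rq->current_nr_sectors */
	}
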
/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
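
A sketch of a request_fn built on the new issue helpers; the my_hw_*() hooks are hypothetical. blk_peek_request() returns the head request without dequeueing it, blk_start_request() dequeues it and starts the timeout, and blk_fetch_request() simply combines the two:

	#include <linux/blkdev.h>

	extern bool my_hw_has_free_slot(void);		/* hypothetical */
	extern void my_hw_submit(struct request *rq);	/* hypothetical */

	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_peek_request(q)) != NULL) {
			if (!my_hw_has_free_slot())
				break;			/* leave it queued */
			blk_start_request(rq);		/* dequeue + start timeout */
			my_hw_submit(rq);
		}
	}
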
+/*
* Request completion related functions.
*
* blk_update_request() completes given number of bytes and updates
*/
extern bool blk_update_request(struct request *rq, int error,
unsigned int nr_bytes);
-extern bool blk_end_bidi_request(struct request *rq, int error,
- unsigned int nr_bytes,
- unsigned int bidi_bytes);
-extern bool __blk_end_bidi_request(struct request *rq, int error,
- unsigned int nr_bytes,
- unsigned int bidi_bytes);
-
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static inline bool blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes)
-{
- return blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-
-/**
- * blk_end_request_all - Helper function for drives to finish the request.
- * @rq: the request to finish
- * @err: %0 for success, < %0 for error
- *
- * Description:
- * Completely finish @rq.
- */
-static inline void blk_end_request_all(struct request *rq, int error)
-{
- bool pending;
-
- pending = blk_end_request(rq, error, blk_rq_bytes(rq));
- BUG_ON(pending);
-}
-
-/**
- * blk_end_request_cur - Helper function to finish the current request chunk.
- * @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
- *
- * Description:
- * Complete the current consecutively mapped chunk from @rq.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- */
-static inline bool blk_end_request_cur(struct request *rq, int error)
-{
- return blk_end_request(rq, error, rq->hard_cur_sectors << 9);
-}
-
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static inline bool __blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes)
-{
- return __blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-
-/**
- * __blk_end_request_all - Helper function for drives to finish the request.
- * @rq: the request to finish
- * @err: %0 for success, < %0 for error
- *
- * Description:
- * Completely finish @rq. Must be called with queue lock held.
- */
-static inline void __blk_end_request_all(struct request *rq, int error)
-{
- bool pending;
-
- pending = __blk_end_request(rq, error, blk_rq_bytes(rq));
- BUG_ON(pending);
-}
-
-/**
- * __blk_end_request_cur - Helper function to finish the current request chunk.
- * @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
- *
- * Description:
- * Complete the current consecutively mapped chunk from @rq. Must
- * be called with queue lock held.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- */
-static inline bool __blk_end_request_cur(struct request *rq, int error)
-{
- return __blk_end_request(rq, error, rq->hard_cur_sectors << 9);
-}
+extern bool blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
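
The end_request helpers move out of line with this change. A brief usage sketch, with my_*() names invented for the example: the blk_end_request*() variants take the queue lock internally, while the __blk_end_request*() variants expect the caller to hold it already (e.g. inside the request_fn):

	#include <linux/blkdev.h>

	/* Per-chunk completion from IRQ context; the lock is taken internally.
	 * Returns true while buffers are still pending for @rq. */
	static bool my_chunk_done(struct request *rq, int error)
	{
		return blk_end_request_cur(rq, error);
	}

	/* Whole-request completion from inside the request_fn, where the
	 * queue lock is already held. */
	static void my_fail_request(struct request *rq, int error)
	{
		__blk_end_request_all(rq, error);
	}
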
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_default_limits(struct queue_limits *lim);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
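
An illustrative probe-time sequence for a drive exposing 512-byte logical sectors on top of 4KiB physical sectors; the numbers and my_set_topology() are invented for the example:

	#include <linux/blkdev.h>

	static void my_set_topology(struct request_queue *q)
	{
		blk_queue_logical_block_size(q, 512);
		blk_queue_physical_block_size(q, 4096);
		blk_queue_alignment_offset(q, 0);
		blk_queue_io_min(q, 4096);		/* minimum preferred I/O size */
		blk_queue_io_opt(q, 64 * 1024);		/* optimal (streaming) I/O size */
	}
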
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}
-/*
-* command filter functions
-*/
-extern int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+ return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+ return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
- if (q && q->hardsect_size)
- retval = q->hardsect_size;
+ if (q && q->limits.logical_block_size)
+ retval = q->limits.logical_block_size;
return retval;
}
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+ return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q && q->limits.misaligned)
+ return -1;
+
+ if (q && q->limits.alignment_offset)
+ return q->limits.alignment_offset;
+
+ return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+ sector_t sector)
{
- return queue_hardsect_size(bdev_get_queue(bdev));
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
}
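
For illustration (numbers invented): with io_min = 4096 and alignment_offset = 3584, a partition starting at sector 63 gives ((63 << 9) - 3584) & 4095 = (32256 - 3584) & 4095 = 0, i.e. that partition start is properly aligned, whereas on a device with alignment_offset = 0 the same start sector would be misaligned by 3584 bytes.
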
static inline int queue_dma_alignment(struct request_queue *q)
int (*direct_access) (struct block_device *, sector_t,
void **, unsigned long *);
int (*media_changed) (struct gendisk *);
+ unsigned long long (*set_capacity) (struct gendisk *,
+ unsigned long long);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
struct module *owner;