X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fblkdev.h;h=ba54c834a590b245f486df31def2081bce90dead;hb=4d831f53ddd5c0117ff40f841cf330c78d242e82;hp=5d327313a9f7d2a11edbd7691825818cd78cee4f;hpb=62c4f0a2d5a188f73a94f2cb8ea0dba3e7cf0a7f;p=safe%2Fjmp%2Flinux-2.6 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5d32731..ba54c83 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1,6 +1,9 @@ #ifndef _LINUX_BLKDEV_H #define _LINUX_BLKDEV_H +#ifdef CONFIG_BLOCK + +#include #include #include #include @@ -13,149 +16,190 @@ #include #include #include +#include +#include +#include #include struct scsi_ioctl_command; struct request_queue; -typedef struct request_queue request_queue_t; struct elevator_queue; -typedef struct elevator_queue elevator_t; struct request_pm_state; struct blk_trace; +struct request; +struct sg_io_hdr; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ -/* - * This is the per-process anticipatory I/O scheduler state. - */ -struct as_io_context { - spinlock_t lock; - - void (*dtor)(struct as_io_context *aic); /* destructor */ - void (*exit)(struct as_io_context *aic); /* called on task exit */ - - unsigned long state; - atomic_t nr_queued; /* queued reads & sync writes */ - atomic_t nr_dispatched; /* number of requests gone to the drivers */ - - /* IO History tracking */ - /* Thinktime */ - unsigned long last_end_request; - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - /* Layout pattern */ - unsigned int seek_samples; - sector_t last_request_pos; - u64 seek_total; - sector_t seek_mean; -}; - -struct cfq_queue; -struct cfq_io_context { - struct rb_node rb_node; - void *key; - - struct cfq_queue *cfqq[2]; - - struct io_context *ioc; - - unsigned long last_end_request; - sector_t last_request_pos; - unsigned long last_queue; - - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - - unsigned int seek_samples; - u64 seek_total; - sector_t seek_mean; - - struct list_head queue_list; +struct request; +typedef void (rq_end_io_fn)(struct request *, int); - void (*dtor)(struct io_context *); /* destructor */ - void (*exit)(struct io_context *); /* called on task exit */ +struct request_list { + /* + * count[], starved[], and wait[] are indexed by + * BLK_RW_SYNC/BLK_RW_ASYNC + */ + int count[2]; + int starved[2]; + int elvpriv; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; }; /* - * This is the per-process I/O subsystem state. It is refcounted and - * kmalloc'ed. Currently all fields are modified in process io context - * (apart from the atomic refcount), so require no locking. + * request command types */ -struct io_context { - atomic_t refcount; - struct task_struct *task; - - int (*set_ioprio)(struct io_context *, unsigned int); - +enum rq_cmd_type_bits { + REQ_TYPE_FS = 1, /* fs request */ + REQ_TYPE_BLOCK_PC, /* scsi command */ + REQ_TYPE_SENSE, /* sense request */ + REQ_TYPE_PM_SUSPEND, /* suspend request */ + REQ_TYPE_PM_RESUME, /* resume request */ + REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ + REQ_TYPE_SPECIAL, /* driver defined type */ + REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ /* - * For request batching + * for ATA/ATAPI devices. 
this really doesn't belong here, ide should + * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver + * private REQ_LB opcodes to differentiate what type of request this is */ - unsigned long last_waited; /* Time last woken after wait for request */ - int nr_batch_requests; /* Number of requests left in the batch */ - - struct as_io_context *aic; - struct rb_root cic_root; + REQ_TYPE_ATA_TASKFILE, + REQ_TYPE_ATA_PC, }; -void put_io_context(struct io_context *ioc); -void exit_io_context(void); -struct io_context *current_io_context(gfp_t gfp_flags); -struct io_context *get_io_context(gfp_t gfp_flags); -void copy_io_context(struct io_context **pdst, struct io_context **psrc); -void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; -struct request; -typedef void (rq_end_io_fn)(struct request *, int); +/* + * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being + * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a + * SCSI cdb. + * + * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, + * typically to differentiate REQ_TYPE_SPECIAL requests. + * + */ +enum { + REQ_LB_OP_EJECT = 0x40, /* eject request */ + REQ_LB_OP_FLUSH = 0x41, /* flush request */ + REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ +}; -struct request_list { - int count[2]; - int starved[2]; - int elvpriv; - mempool_t *rq_pool; - wait_queue_head_t wait[2]; +/* + * request type modified bits. first two bits match BIO_RW* bits, important + */ +enum rq_flag_bits { + __REQ_RW, /* not set, read. set, write */ + __REQ_FAILFAST_DEV, /* no driver retries of device errors */ + __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ + __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ + __REQ_DISCARD, /* request to discard sectors */ + __REQ_SORTED, /* elevator knows about this request */ + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ + __REQ_HARDBARRIER, /* may not be passed by drive either */ + __REQ_FUA, /* forced unit access */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_STARTED, /* drive already may have started this one */ + __REQ_DONTPREP, /* don't call prep for this one */ + __REQ_QUEUED, /* uses queueing */ + __REQ_ELVPRIV, /* elevator private data attached */ + __REQ_FAILED, /* set if the request failed */ + __REQ_QUIET, /* don't worry about errors */ + __REQ_PREEMPT, /* set for "ide_preempt" requests */ + __REQ_ORDERED_COLOR, /* is before or after barrier */ + __REQ_RW_SYNC, /* request is sync (sync write or read) */ + __REQ_ALLOCED, /* request came from our alloc pool */ + __REQ_RW_META, /* metadata io request */ + __REQ_COPY_USER, /* contains copies of user pages */ + __REQ_INTEGRITY, /* integrity metadata has been remapped */ + __REQ_NOIDLE, /* Don't anticipate more IO after this one */ + __REQ_NR_BITS, /* stops here */ }; +#define REQ_RW (1 << __REQ_RW) +#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) +#define REQ_DISCARD (1 << __REQ_DISCARD) +#define REQ_SORTED (1 << __REQ_SORTED) +#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) +#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) +#define REQ_FUA (1 << __REQ_FUA) +#define REQ_NOMERGE (1 << __REQ_NOMERGE) +#define REQ_STARTED (1 << __REQ_STARTED) +#define REQ_DONTPREP (1 << __REQ_DONTPREP) +#define REQ_QUEUED (1 << __REQ_QUEUED) +#define REQ_ELVPRIV 
(1 << __REQ_ELVPRIV) +#define REQ_FAILED (1 << __REQ_FAILED) +#define REQ_QUIET (1 << __REQ_QUIET) +#define REQ_PREEMPT (1 << __REQ_PREEMPT) +#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) +#define REQ_RW_SYNC (1 << __REQ_RW_SYNC) +#define REQ_ALLOCED (1 << __REQ_ALLOCED) +#define REQ_RW_META (1 << __REQ_RW_META) +#define REQ_COPY_USER (1 << __REQ_COPY_USER) +#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) +#define REQ_NOIDLE (1 << __REQ_NOIDLE) + #define BLK_MAX_CDB 16 /* - * try to put the fields that are referenced together in the same cacheline + * try to put the fields that are referenced together in the same cacheline. + * if you modify this structure, be sure to check block/blk-core.c:rq_init() + * as well! */ struct request { struct list_head queuelist; - struct list_head donelist; + struct call_single_data csd; + int cpu; - unsigned long flags; /* see REQ_ bits below */ + struct request_queue *q; + + unsigned int cmd_flags; + enum rq_cmd_type_bits cmd_type; + unsigned long atomic_flags; /* Maintain bio traversal state for part by part I/O submission. * hard_* are block layer internals, no driver should touch them! */ sector_t sector; /* next sector to submit */ + sector_t hard_sector; /* next sector to complete */ unsigned long nr_sectors; /* no. of sectors left to submit */ + unsigned long hard_nr_sectors; /* no. of sectors left to complete */ /* no. of sectors left to submit in the current segment */ unsigned int current_nr_sectors; - sector_t hard_sector; /* next sector to complete */ - unsigned long hard_nr_sectors; /* no. of sectors left to complete */ /* no. of sectors left to complete in the current segment */ unsigned int hard_cur_sectors; struct bio *bio; struct bio *biotail; - void *elevator_private; - void *completion_data; + struct hlist_node hash; /* merge hash */ + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + void *completion_data; + }; - unsigned short ioprio; + /* + * two pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. + */ + void *elevator_private; + void *elevator_private2; - int rq_status; /* should split this into a few status bits */ struct gendisk *rq_disk; - int errors; unsigned long start_time; /* Number of scatter-gather DMA addr+len pairs after @@ -163,116 +207,51 @@ struct request { */ unsigned short nr_phys_segments; - /* Number of scatter-gather addr+len pairs after - * physical and DMA remapping hardware coalescing is performed. - * This is the number of scatter-gather entries the driver - * will actually have to deal with after DMA mapping is done. 
- */ - unsigned short nr_hw_segments; + unsigned short ioprio; - int tag; + void *special; char *buffer; - int ref_count; - request_queue_t *q; - struct request_list *rl; + int tag; + int errors; - struct completion *waiting; - void *special; + int ref_count; /* * when request is used as a packet command carrier */ - unsigned int cmd_len; - unsigned char cmd[BLK_MAX_CDB]; + unsigned short cmd_len; + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; unsigned int data_len; - void *data; - + unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; + void *data; void *sense; + unsigned long deadline; + struct list_head timeout_list; unsigned int timeout; int retries; /* - * For Power Management requests - */ - struct request_pm_state *pm; - - /* - * completion callback. end_io_data should be folded in with waiting + * completion callback. */ rq_end_io_fn *end_io; void *end_io_data; -}; -/* - * first three bits match BIO_RW* bits, important - */ -enum rq_flag_bits { - __REQ_RW, /* not set, read. set, write */ - __REQ_FAILFAST, /* no low level driver retries */ - __REQ_SORTED, /* elevator knows about this request */ - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ - __REQ_HARDBARRIER, /* may not be passed by drive either */ - __REQ_FUA, /* forced unit access */ - __REQ_CMD, /* is a regular fs rw request */ - __REQ_NOMERGE, /* don't touch this for merging */ - __REQ_STARTED, /* drive already may have started this one */ - __REQ_DONTPREP, /* don't call prep for this one */ - __REQ_QUEUED, /* uses queueing */ - __REQ_ELVPRIV, /* elevator private data attached */ - /* - * for ATA/ATAPI devices - */ - __REQ_PC, /* packet command (special) */ - __REQ_BLOCK_PC, /* queued down pc from block layer */ - __REQ_SENSE, /* sense retrival */ - - __REQ_FAILED, /* set if the request failed */ - __REQ_QUIET, /* don't worry about errors */ - __REQ_SPECIAL, /* driver suplied command */ - __REQ_DRIVE_CMD, - __REQ_DRIVE_TASK, - __REQ_DRIVE_TASKFILE, - __REQ_PREEMPT, /* set for "ide_preempt" requests */ - __REQ_PM_SUSPEND, /* suspend request */ - __REQ_PM_RESUME, /* resume request */ - __REQ_PM_SHUTDOWN, /* shutdown request */ - __REQ_ORDERED_COLOR, /* is before or after barrier */ - __REQ_NR_BITS, /* stops here */ + /* for bidi */ + struct request *next_rq; }; -#define REQ_RW (1 << __REQ_RW) -#define REQ_FAILFAST (1 << __REQ_FAILFAST) -#define REQ_SORTED (1 << __REQ_SORTED) -#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) -#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) -#define REQ_FUA (1 << __REQ_FUA) -#define REQ_CMD (1 << __REQ_CMD) -#define REQ_NOMERGE (1 << __REQ_NOMERGE) -#define REQ_STARTED (1 << __REQ_STARTED) -#define REQ_DONTPREP (1 << __REQ_DONTPREP) -#define REQ_QUEUED (1 << __REQ_QUEUED) -#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) -#define REQ_PC (1 << __REQ_PC) -#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC) -#define REQ_SENSE (1 << __REQ_SENSE) -#define REQ_FAILED (1 << __REQ_FAILED) -#define REQ_QUIET (1 << __REQ_QUIET) -#define REQ_SPECIAL (1 << __REQ_SPECIAL) -#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD) -#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK) -#define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE) -#define REQ_PREEMPT (1 << __REQ_PREEMPT) -#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND) -#define REQ_PM_RESUME (1 << __REQ_PM_RESUME) -#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) -#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} /* - * State 
information carried for REQ_PM_SUSPEND and REQ_PM_RESUME + * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME * requests. Some step values could eventually be made generic. */ struct request_pm_state @@ -286,21 +265,33 @@ struct request_pm_state #include -typedef int (merge_request_fn) (request_queue_t *, struct request *, - struct bio *); -typedef int (merge_requests_fn) (request_queue_t *, struct request *, - struct request *); -typedef void (request_fn_proc) (request_queue_t *q); -typedef int (make_request_fn) (request_queue_t *q, struct bio *bio); -typedef int (prep_rq_fn) (request_queue_t *, struct request *); -typedef void (unplug_fn) (request_queue_t *); +typedef void (request_fn_proc) (struct request_queue *q); +typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); +typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unplug_fn) (struct request_queue *); +typedef int (prepare_discard_fn) (struct request_queue *, struct request *); struct bio_vec; -typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); -typedef void (activity_fn) (void *data, int rw); -typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); -typedef void (prepare_flush_fn) (request_queue_t *, struct request *); +struct bvec_merge_data { + struct block_device *bi_bdev; + sector_t bi_sector; + unsigned bi_size; + unsigned long bi_rw; +}; +typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, + struct bio_vec *); +typedef void (prepare_flush_fn) (struct request_queue *, struct request *); typedef void (softirq_done_fn)(struct request *); +typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); + +enum blk_eh_timer_return { + BLK_EH_NOT_HANDLED, + BLK_EH_HANDLED, + BLK_EH_RESET_TIMER, +}; + +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); enum blk_queue_state { Queue_down, @@ -310,13 +301,21 @@ enum blk_queue_state { struct blk_queue_tag { struct request **tag_index; /* map of busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */ - struct list_head busy_list; /* fifo list of busy tags */ int busy; /* current depth */ int max_depth; /* what we will send to device */ int real_max_depth; /* what the array can hold */ atomic_t refcnt; /* map can be shared */ }; +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct blk_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; + struct request_queue { /* @@ -324,7 +323,7 @@ struct request_queue */ struct list_head queue_head; struct request *last_merge; - elevator_t *elevator; + struct elevator_queue *elevator; /* * the queue request freelist, one for reads and one for writes @@ -332,17 +331,16 @@ struct request_queue struct request_list rq; request_fn_proc *request_fn; - merge_request_fn *back_merge_fn; - merge_request_fn *front_merge_fn; - merge_requests_fn *merge_requests_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; + prepare_discard_fn *prepare_discard_fn; merge_bvec_fn *merge_bvec_fn; - activity_fn *activity_fn; - issue_flush_fn *issue_flush_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; + dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; /* * Dispatch queue sorting @@ -366,8 +364,6 
@@ struct request_queue */ void *queuedata; - void *activity_data; - /* * queue needs bounce pages for pages above this limit */ @@ -408,22 +404,30 @@ struct request_queue unsigned int max_segment_size; unsigned long seg_boundary_mask; + void *dma_drain_buffer; + unsigned int dma_drain_size; + unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; + struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight; + unsigned int rq_timeout; + struct timer_list timeout; + struct list_head timeout_list; + /* * sg stuff */ unsigned int sg_timeout; unsigned int sg_reserved_size; int node; - +#ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace *blk_trace; - +#endif /* * reserved for flush operations */ @@ -431,26 +435,96 @@ struct request_queue int orderr, ordcolor; struct request pre_flush_rq, bar_rq, post_flush_rq; struct request *orig_bar_rq; - unsigned int bi_size; struct mutex sysfs_lock; -}; -#define RQ_INACTIVE (-1) -#define RQ_ACTIVE 1 -#define RQ_SCSI_BUSY 0xffff -#define RQ_SCSI_DONE 0xfffe -#define RQ_SCSI_DISCONNECTING 0xffe0 +#if defined(CONFIG_BLK_DEV_BSG) + struct bsg_class_device bsg_dev; +#endif + struct blk_cmd_filter cmd_filter; +}; #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ -#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */ -#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */ +#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ +#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */ +#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ +#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ +#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_CLUSTER) | \ + (1 << QUEUE_FLAG_STACKABLE)) + +static inline int queue_is_locked(struct request_queue *q) +{ +#ifdef CONFIG_SMP + spinlock_t *lock = q->queue_lock; + return lock && spin_is_locked(lock); +#else + return 1; +#endif +} + +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + __set_bit(flag, &q->queue_flags); +} + +static inline int queue_flag_test_and_clear(unsigned int flag, + struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + + if (test_bit(flag, &q->queue_flags)) { + __clear_bit(flag, &q->queue_flags); + return 1; + } + + return 0; +} + +static inline int queue_flag_test_and_set(unsigned int flag, + struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + + if (!test_bit(flag, &q->queue_flags)) { + __set_bit(flag, &q->queue_flags); + return 0; + } + + return 1; +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) 
+{ + __clear_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + __clear_bit(flag, &q->queue_flags); +} enum { /* @@ -464,22 +538,32 @@ enum { * TAG_FLUSH : ordering by tag w/ pre and post flushes * TAG_FUA : ordering by tag w/ pre flush and FUA write */ - QUEUE_ORDERED_NONE = 0x00, - QUEUE_ORDERED_DRAIN = 0x01, - QUEUE_ORDERED_TAG = 0x02, - - QUEUE_ORDERED_PREFLUSH = 0x10, - QUEUE_ORDERED_POSTFLUSH = 0x20, - QUEUE_ORDERED_FUA = 0x40, - - QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, - QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, + QUEUE_ORDERED_BY_DRAIN = 0x01, + QUEUE_ORDERED_BY_TAG = 0x02, + QUEUE_ORDERED_DO_PREFLUSH = 0x10, + QUEUE_ORDERED_DO_BAR = 0x20, + QUEUE_ORDERED_DO_POSTFLUSH = 0x40, + QUEUE_ORDERED_DO_FUA = 0x80, + + QUEUE_ORDERED_NONE = 0x00, + + QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, + + QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, /* * Ordered operation sequence @@ -495,49 +579,83 @@ enum { #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) +#define blk_queue_stackable(q) \ + test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) + +#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) +#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) +#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) +#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) + +#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV) +#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT) +#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER) +#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \ + blk_failfast_transport(rq) || \ + blk_failfast_driver(rq)) +#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) + +#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) + +#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) +#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) +#define blk_pm_request(rq) \ + (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) -#define blk_fs_request(rq) ((rq)->flags & REQ_CMD) -#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) -#define blk_noretry_request(rq) ((rq)->flags & 
REQ_FAILFAST) -#define blk_rq_started(rq) ((rq)->flags & REQ_STARTED) +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) +#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) +#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) +#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) +#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) +#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) -#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) +#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND) -#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME) -#define blk_pm_request(rq) \ - ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME)) +#define rq_data_dir(rq) ((rq)->cmd_flags & 1) -#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) -#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) -#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA) +/* + * We regard a request as sync, if either a read or a sync write + */ +static inline bool rw_is_sync(unsigned int rw_flags) +{ + return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); +} -#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) +static inline bool rq_is_sync(struct request *rq) +{ + return rw_is_sync(rq->cmd_flags); +} -#define rq_data_dir(rq) ((rq)->flags & 1) +#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) +#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) -static inline int blk_queue_full(struct request_queue *q, int rw) +static inline int blk_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + if (sync) + return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); + return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); } -static inline void blk_set_queue_full(struct request_queue *q, int rw) +static inline void blk_set_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - set_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + if (sync) + queue_flag_set(QUEUE_FLAG_SYNCFULL, q); else - set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); } -static inline void blk_clear_queue_full(struct request_queue *q, int rw) +static inline void blk_clear_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + if (sync) + queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); else - clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); } @@ -548,13 +666,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw) #define RQ_NOMERGE_FLAGS \ (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) #define rq_mergeable(rq) \ - (!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) - -/* - * noop, requests are automagically marked as active/inactive by I/O - * scheduler -- see elv_next_request - */ -#define blk_queue_headactive(q, head_active) + (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ + (blk_discard_rq(rq) || blk_fs_request((rq)))) /* * q->prep_rq_fn return values @@ -572,64 +685,131 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; * BLK_BOUNCE_ANY : don't bounce anything * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary */ + +#if BITS_PER_LONG == 32 #define BLK_BOUNCE_HIGH 
((u64)blk_max_low_pfn << PAGE_SHIFT) -#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) -#ifdef CONFIG_MMU +/* + * default timeout for SG_IO if none specified + */ +#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) +#define BLK_MIN_SG_TIMEOUT (7 * HZ) + +#ifdef CONFIG_BOUNCE extern int init_emergency_isa_pool(void); -extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); +extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); #else static inline int init_emergency_isa_pool(void) { return 0; } -static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) +static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) { } #endif /* CONFIG_MMU */ -#define rq_for_each_bio(_bio, rq) \ +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; +}; + +struct req_iterator { + int i; + struct bio *bio; +}; + +/* This should not be used directly - use rq_for_each_segment */ +#define for_each_bio(_bio) \ + for (; _bio; _bio = _bio->bi_next) +#define __rq_for_each_bio(_bio, rq) \ if ((rq->bio)) \ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) -struct sec_size { - unsigned block_size; - unsigned block_size_bits; -}; +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.i) + +#define rq_iter_last(rq, _iter) \ + (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern void register_disk(struct gendisk *dev); extern void generic_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); -extern void __blk_put_request(request_queue_t *, struct request *); -extern void blk_end_sync_rq(struct request *rq, int error); -extern struct request *blk_get_request(request_queue_t *, int, gfp_t); -extern void blk_insert_request(request_queue_t *, struct request *, int, void *); -extern void blk_requeue_request(request_queue_t *, struct request *); -extern void blk_plug_device(request_queue_t *); -extern int blk_remove_plug(request_queue_t *); -extern void blk_recount_segments(request_queue_t *, struct bio *); -extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *); -extern int sg_scsi_ioctl(struct file *, struct request_queue *, - struct gendisk *, struct scsi_ioctl_command __user *); -extern void blk_start_queue(request_queue_t *q); -extern void blk_stop_queue(request_queue_t *q); +extern void __blk_put_request(struct request_queue *, struct request *); +extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern void blk_insert_request(struct request_queue *, struct request *, int, void *); +extern void blk_requeue_request(struct request_queue *, struct request *); +extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_insert_cloned_request(struct request_queue *q, + struct request *rq); +extern void blk_plug_device(struct request_queue *); +extern void blk_plug_device_unlocked(struct request_queue *); +extern int blk_remove_plug(struct request_queue *); +extern void blk_recount_segments(struct request_queue *, struct bio *); +extern int 
scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, + unsigned int, void __user *); +extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, + struct scsi_ioctl_command __user *); + +/* + * Temporary export, until SCSI gets fixed up. + */ +extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); + +/* + * A queue has just exitted congestion. Note this in the global counter of + * congested queues, and wake up anyone who was waiting for requests to be + * put back. + */ +static inline void blk_clear_queue_congested(struct request_queue *q, int rw) +{ + clear_bdi_congested(&q->backing_dev_info, rw); +} + +/* + * A queue has just entered congestion. Flag that in the queue's VM-visible + * state flags and increment the global gounter of congested queues. + */ +static inline void blk_set_queue_congested(struct request_queue *q, int rw) +{ + set_bdi_congested(&q->backing_dev_info, rw); +} + +extern void blk_start_queue(struct request_queue *q); +extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); -extern void __blk_stop_queue(request_queue_t *q); -extern void blk_run_queue(request_queue_t *); -extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *); -extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int); -extern int blk_rq_unmap_user(struct bio *, unsigned int); -extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); -extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int); -extern int blk_execute_rq(request_queue_t *, struct gendisk *, +extern void __blk_stop_queue(struct request_queue *q); +extern void __blk_run_queue(struct request_queue *); +extern void blk_run_queue(struct request_queue *); +extern void blk_start_queueing(struct request_queue *); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, + gfp_t); +extern int blk_rq_unmap_user(struct bio *); +extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); +extern int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, struct sg_iovec *, int, + unsigned int, gfp_t); +extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); -extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); +extern void blk_unplug(struct request_queue *q); -static inline request_queue_t *bdev_get_queue(struct block_device *bdev) +static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { return bdev->bd_disk->queue; } @@ -647,115 +827,130 @@ static inline void blk_run_address_space(struct address_space *mapping) blk_run_backing_dev(mapping->backing_dev_info, NULL); } +extern void blkdev_dequeue_request(struct request *req); + /* - * end_request() and friends. Must be called with the request queue spinlock - * acquired. All functions called within end_request() _must_be_ atomic. + * blk_end_request() and friends. + * __blk_end_request() and end_request() must be called with + * the request queue spinlock acquired. * * Several drivers define their own end_request and call - * end_that_request_first() and end_that_request_last() - * for parts of the original function. 
This prevents - * code duplication in drivers. + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. */ -extern int end_that_request_first(struct request *, int, int); -extern int end_that_request_chunk(struct request *, int, int); -extern void end_that_request_last(struct request *, int); -extern void end_request(struct request *req, int uptodate); +extern int blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes); +extern void end_request(struct request *, int); +extern int blk_end_request_callback(struct request *rq, int error, + unsigned int nr_bytes, + int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); - -static inline int rq_all_done(struct request *rq, unsigned int nr_bytes) -{ - if (blk_fs_request(rq)) - return (nr_bytes >= (rq->hard_nr_sectors << 9)); - else if (blk_pc_request(rq)) - return nr_bytes >= rq->data_len; - - return 0; -} +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); +extern void blk_abort_queue(struct request_queue *); +extern void blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); /* - * end_that_request_first/chunk() takes an uptodate argument. we account - * any value <= as an io error. 0 means -EIO for compatability reasons, - * any other < 0 value is the direct error type. An uptodate value of - * 1 indicates successful io completion + * blk_end_request() takes bytes instead of sectors as a complete size. + * blk_rq_bytes() returns bytes left to complete in the entire request. + * blk_rq_cur_bytes() returns bytes left to complete in the current segment. 
*/ -#define end_io_error(uptodate) (unlikely((uptodate) <= 0)) - -static inline void blkdev_dequeue_request(struct request *req) -{ - elv_dequeue_request(req->q, req); -} - -/* - * This should be in elevator.h, but that requires pulling in rq and q - */ -static inline void elv_dispatch_add_tail(struct request_queue *q, - struct request *rq) -{ - if (q->last_merge == rq) - q->last_merge = NULL; - q->nr_sorted--; - - q->end_sector = rq_end_sector(rq); - q->boundary_rq = rq; - list_add_tail(&rq->queuelist, &q->queue_head); -} +extern unsigned int blk_rq_bytes(struct request *rq); +extern unsigned int blk_rq_cur_bytes(struct request *rq); /* * Access functions for manipulating queue properties */ -extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, +extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); -extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); -extern void blk_cleanup_queue(request_queue_t *); -extern void blk_queue_make_request(request_queue_t *, make_request_fn *); -extern void blk_queue_bounce_limit(request_queue_t *, u64); -extern void blk_queue_max_sectors(request_queue_t *, unsigned int); -extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short); -extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short); -extern void blk_queue_max_segment_size(request_queue_t *, unsigned int); -extern void blk_queue_hardsect_size(request_queue_t *, unsigned short); -extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b); -extern void blk_queue_segment_boundary(request_queue_t *, unsigned long); -extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn); -extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); -extern void blk_queue_dma_alignment(request_queue_t *, int); -extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *); +extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_max_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_dma_pad(struct request_queue *, unsigned int); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); +extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); +extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_set_discard(struct request_queue *, 
prepare_discard_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); -extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *); -extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); -extern int blk_do_ordered(request_queue_t *, struct request **); -extern unsigned blk_ordered_cur_seq(request_queue_t *); +extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); +extern bool blk_do_ordered(struct request_queue *, struct request **); +extern unsigned blk_ordered_cur_seq(struct request_queue *); extern unsigned blk_ordered_req_seq(struct request *); -extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int); +extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); -extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); -extern void generic_unplug_device(request_queue_t *); -extern void __generic_unplug_device(request_queue_t *); +extern void generic_unplug_device(struct request_queue *); extern long nr_blockdev_pages(void); -int blk_get_queue(request_queue_t *); -request_queue_t *blk_alloc_queue(gfp_t); -request_queue_t *blk_alloc_queue_node(gfp_t, int); -extern void blk_put_queue(request_queue_t *); +int blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t, int); +extern void blk_put_queue(struct request_queue *); /* * tag stuff */ -#define blk_queue_tag_depth(q) ((q)->queue_tags->busy) -#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) -#define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED) -extern int blk_queue_start_tag(request_queue_t *, struct request *); -extern struct request *blk_queue_find_tag(request_queue_t *, int); -extern void blk_queue_end_tag(request_queue_t *, struct request *); -extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *); -extern void blk_queue_free_tags(request_queue_t *); -extern int blk_queue_resize_tags(request_queue_t *, int); -extern void blk_queue_invalidate_tags(request_queue_t *); -extern long blk_congestion_wait(int rw, long timeout); - -extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); +#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) +extern int blk_queue_start_tag(struct request_queue *, struct request *); +extern struct request *blk_queue_find_tag(struct request_queue *, int); +extern void blk_queue_end_tag(struct request_queue *, struct request *); +extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); +extern void blk_queue_free_tags(struct request_queue *); +extern int blk_queue_resize_tags(struct request_queue *, int); +extern void blk_queue_invalidate_tags(struct request_queue *); +extern struct blk_queue_tag *blk_init_tags(int); +extern void blk_free_tags(struct blk_queue_tag *); + +static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, + int tag) +{ + if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) + return NULL; + return bqt->tag_index[tag]; +} + extern int blkdev_issue_flush(struct block_device *, sector_t *); +extern int 
blkdev_issue_discard(struct block_device *, + sector_t sector, sector_t nr_sects, gfp_t); + +static inline int sb_issue_discard(struct super_block *sb, + sector_t block, sector_t nr_blocks) +{ + block <<= (sb->s_blocksize_bits - 9); + nr_blocks <<= (sb->s_blocksize_bits - 9); + return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); +} + +/* +* command filter functions +*/ +extern int blk_verify_command(struct blk_cmd_filter *filter, + unsigned char *cmd, fmode_t has_write_perm); +extern void blk_unregister_filter(struct gendisk *disk); +extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); #define MAX_PHYS_SEGMENTS 128 #define MAX_HW_SEGMENTS 128 @@ -764,9 +959,11 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *); #define MAX_SEGMENT_SIZE 65536 +#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL + #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) -static inline int queue_hardsect_size(request_queue_t *q) +static inline int queue_hardsect_size(struct request_queue *q) { int retval = 512; @@ -781,24 +978,18 @@ static inline int bdev_hardsect_size(struct block_device *bdev) return queue_hardsect_size(bdev_get_queue(bdev)); } -static inline int queue_dma_alignment(request_queue_t *q) +static inline int queue_dma_alignment(struct request_queue *q) { - int retval = 511; - - if (q && q->dma_alignment) - retval = q->dma_alignment; - - return retval; + return q ? q->dma_alignment : 511; } -static inline int bdev_dma_aligment(struct block_device *bdev) +static inline int blk_rq_aligned(struct request_queue *q, void *addr, + unsigned int len) { - return queue_dma_alignment(bdev_get_queue(bdev)); + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !((unsigned long)addr & alignment) && !(len & alignment); } -#define blk_finished_io(nsects) do { } while (0) -#define blk_started_io(nsects) do { } while (0) - /* assumes size > 256 */ static inline unsigned int blksize_bits(unsigned int size) { @@ -825,27 +1016,113 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -int kblockd_schedule_work(struct work_struct *work); -void kblockd_flush(void); - -#ifdef CONFIG_LBD -# include -# define sector_div(a, b) do_div(a, b) -#else -# define sector_div(n, b)( \ -{ \ - int _res; \ - _res = (n) % (b); \ - (n) /= (b); \ - _res; \ -} \ -) -#endif +int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ +#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ + +struct blk_integrity_exchg { + void *prot_buf; + void *data_buf; + sector_t sector; + unsigned int data_size; + unsigned short sector_size; + const char *disk_name; +}; + +typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); +typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); +typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); +typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); + +struct blk_integrity { + integrity_gen_fn *generate_fn; + integrity_vrfy_fn *verify_fn; + integrity_set_tag_fn *set_tag_fn; + integrity_get_tag_fn *get_tag_fn; + + unsigned short flags; + unsigned short tuple_size; + unsigned short sector_size; + unsigned 
short tag_size; + + const char *name; + + struct kobject kobj; +}; + +extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); +extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); +extern int blk_rq_count_integrity_sg(struct request *); + +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +{ + return bdev->bd_disk->integrity; +} + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return disk->integrity; +} + +static inline int blk_integrity_rq(struct request *rq) +{ + if (rq->bio == NULL) + return 0; + + return bio_integrity(rq->bio); +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +#define blk_integrity_rq(rq) (0) +#define blk_rq_count_integrity_sg(a) (0) +#define blk_rq_map_integrity_sg(a, b) (0) +#define bdev_get_integrity(a) (0) +#define blk_get_integrity(a) (0) +#define blk_integrity_compare(a, b) (0) +#define blk_integrity_register(a, b) (0) +#define blk_integrity_unregister(a) do { } while (0); + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + int (*release) (struct gendisk *, fmode_t); + int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*direct_access) (struct block_device *, sector_t, + void **, unsigned long *); + int (*media_changed) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + struct module *owner; +}; + +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +#else /* CONFIG_BLOCK */ +/* + * stubs for when the block layer is configured out + */ +#define buffer_heads_over_limit 0 + +static inline long nr_blockdev_pages(void) +{ + return 0; +} + +#endif /* CONFIG_BLOCK */ #endif
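
To make the driver-visible changes above concrete — request_queue_t giving way to struct request_queue, rq->flags splitting into cmd_type/cmd_flags behind blk_fs_request() and friends, rq_for_each_segment() replacing the old rq_for_each_bio() pattern, and the end_that_request_*() helpers giving way to the end_request()/blk_end_request() family — here is a minimal sketch of a simple driver written against a kernel of this vintage. It is illustrative only: my_request_fn(), my_transfer() and my_count_bytes() are hypothetical names, and elv_next_request() comes from the elevator interface rather than this header.

/*
 * Minimal era-appropriate sketch (not part of the patch above).
 */
#include <linux/blkdev.h>

/* hypothetical device-specific data movement for one contiguous chunk */
static void my_transfer(sector_t sector, unsigned int nr_sectors,
			char *buffer, int write)
{
	/* move nr_sectors starting at 'sector' to/from 'buffer' here */
}

/* count the bytes in a request segment by segment, using the new iterator */
static unsigned int my_count_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	/* replaces the old rq_for_each_bio() + bio_for_each_segment() pair */
	rq_for_each_segment(bvec, req, iter)
		bytes += bvec->bv_len;

	return bytes;
}

static void my_request_fn(struct request_queue *q)	/* was request_queue_t */
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		/* blk_fs_request() now tests rq->cmd_type, not rq->flags */
		if (!blk_fs_request(req)) {
			end_request(req, 0);	/* fail non-fs requests */
			continue;
		}

		/* req->buffer covers only the current segment */
		my_transfer(req->sector, req->current_nr_sectors,
			    req->buffer, rq_data_dir(req) == WRITE);

		end_request(req, 1);	/* complete the current chunk */
	}
}

Drivers that complete a whole request in one go would instead use blk_end_request()/__blk_end_request(), which take an explicit byte count (typically blk_rq_bytes()) and an error code rather than the old "uptodate" flag.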