diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8d7e2f4..8a3e309 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,7 +1,6 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H

-#include
 #include
 #include
 #include
@@ -17,11 +16,14 @@
 #include

+struct scsi_ioctl_command;
+
 struct request_queue;
 typedef struct request_queue request_queue_t;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;

 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -54,16 +56,29 @@ struct as_io_context {

 struct cfq_queue;
 struct cfq_io_context {
-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
+	struct rb_node rb_node;
+	void *key;
+
+	struct cfq_queue *cfqq[2];

 	struct io_context *ioc;

-	/*
-	 * circular list of cfq_io_contexts belonging to a process io context
-	 */
-	struct list_head list;
-	struct cfq_queue *cfqq;
+	unsigned long last_end_request;
+	sector_t last_request_pos;
+	unsigned long last_queue;
+
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	unsigned int seek_samples;
+	u64 seek_total;
+	sector_t seek_mean;
+
+	struct list_head queue_list;
+
+	void (*dtor)(struct io_context *); /* destructor */
+	void (*exit)(struct io_context *); /* called on task exit */
 };

 /*
@@ -73,7 +88,9 @@ struct cfq_io_context {
  */
 struct io_context {
 	atomic_t refcount;
-	pid_t pid;
+	struct task_struct *task;
+
+	int (*set_ioprio)(struct io_context *, unsigned int);

 	/*
	 * For request batching
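A process's cfq_io_context entries now hang off its io_context in an rb_tree (cic_root, in the next hunk) keyed by ->key, replacing the old circular list. A minimal sketch of the lookup this enables; the helper name is hypothetical (the real walker lives in block/cfq-iosched.c) and the caller is assumed to hold the appropriate lock:

static struct cfq_io_context *cic_lookup(struct io_context *ioc, void *key)
{
	struct rb_node *n = ioc->cic_root.rb_node;

	/* binary search on the opaque key (CFQ uses the queue pointer) */
	while (n) {
		struct cfq_io_context *cic;

		cic = rb_entry(n, struct cfq_io_context, rb_node);
		if (key < cic->key)
			n = n->rb_left;
		else if (key > cic->key)
			n = n->rb_right;
		else
			return cic;
	}
	return NULL;
}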
@@ -81,39 +98,121 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;	/* Number of requests left in the batch */

-	spinlock_t lock;
-
 	struct as_io_context *aic;
-	struct cfq_io_context *cic;
+	struct rb_root cic_root;
 };

 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *get_io_context(int gfp_flags);
+struct io_context *current_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);

 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);

 struct request_list {
 	int count[2];
 	int starved[2];
+	int elvpriv;
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
-	wait_queue_head_t drain;
 };

+/*
+ * request command types
+ */
+enum rq_cmd_type_bits {
+	REQ_TYPE_FS		= 1,	/* fs request */
+	REQ_TYPE_BLOCK_PC,		/* scsi command */
+	REQ_TYPE_SENSE,			/* sense request */
+	REQ_TYPE_PM_SUSPEND,		/* suspend request */
+	REQ_TYPE_PM_RESUME,		/* resume request */
+	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
+	REQ_TYPE_FLUSH,			/* flush request */
+	REQ_TYPE_SPECIAL,		/* driver defined type */
+	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
+	/*
+	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
+	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
+	 * private REQ_LB opcodes to differentiate what type of request this is
+	 */
+	REQ_TYPE_ATA_CMD,
+	REQ_TYPE_ATA_TASK,
+	REQ_TYPE_ATA_TASKFILE,
+};
+
+/*
+ * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
+ * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
+ * SCSI cdb).
+ *
+ * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
+ * typically to differentiate REQ_TYPE_SPECIAL requests.
+ *
+ */
+enum {
+	/*
+	 * just examples for now
+	 */
+	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+};
+
+/*
+ * request type modifier bits. first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+	__REQ_RW,		/* not set, read. set, write */
+	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_ALLOCED,		/* request came from our alloc pool */
+	__REQ_NR_BITS,		/* stops here */
+};
+
+#define REQ_RW		(1 << __REQ_RW)
+#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
+#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
+#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
+#define REQ_STARTED	(1 << __REQ_STARTED)
+#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
+#define REQ_QUEUED	(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
+#define REQ_FAILED	(1 << __REQ_FAILED)
+#define REQ_QUIET	(1 << __REQ_QUIET)
+#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
+#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
+
 #define BLK_MAX_CDB	16

 /*
  * try to put the fields that are referenced together in the same cacheline
  */
 struct request {
-	struct list_head queuelist; /* looking for ->queue? you must _not_
-				     * access it directly, use
-				     * blkdev_dequeue_request! */
-	unsigned long flags;		/* see REQ_ bits below */
+	struct list_head queuelist;
+	struct list_head donelist;
+
+	unsigned int cmd_flags;
+	enum rq_cmd_type_bits cmd_type;

 	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
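With the old rq->flags split into cmd_type and cmd_flags, a REQ_TYPE_LINUX_BLOCK message carries its opcode in rq->cmd[0], as the comment above describes. A hedged sketch of how a driver might dispatch the two example opcodes; mydev_eject() and mydev_flush() are hypothetical:

static void mydev_handle_lb(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_LINUX_BLOCK)
		return;

	switch (rq->cmd[0]) {
	case REQ_LB_OP_EJECT:
		mydev_eject(rq->rq_disk);
		break;
	case REQ_LB_OP_FLUSH:
		mydev_flush(rq->rq_disk);
		break;
	default:
		/* 0x00 -> 0x3f are driver private */
		break;
	}
}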
@@ -132,11 +231,19 @@ struct request {
 	struct bio *bio;
 	struct bio *biotail;

+	struct hlist_node hash;	/* merge hash */
+	struct rb_node rb_node;	/* sort/lookup */
+
+	/*
+	 * two pointers are available for the IO schedulers, if they need
+	 * more they have to dynamically allocate it.
+	 */
 	void *elevator_private;
+	void *elevator_private2;
+
+	void *completion_data;

-	int rq_status;	/* should split this into a few status bits */
 	struct gendisk *rq_disk;
-	int errors;
 	unsigned long start_time;

 	/* Number of scatter-gather DMA addr+len pairs after
@@ -151,15 +258,17 @@ struct request {
 	 */
 	unsigned short nr_hw_segments;

-	int tag;
-	char *buffer;
+	unsigned short ioprio;

-	int ref_count;
 	request_queue_t *q;
-	struct request_list *rl;

-	struct completion *waiting;
 	void *special;
+	char *buffer;
+
+	int tag;
+	int errors;
+
+	int ref_count;

 	/*
 	 * when request is used as a packet command carrier
@@ -168,89 +277,22 @@ struct request {
 	unsigned char cmd[BLK_MAX_CDB];

 	unsigned int data_len;
-	void *data;
-
 	unsigned int sense_len;
+	void *data;
 	void *sense;

 	unsigned int timeout;
+	int retries;

 	/*
-	 * For Power Management requests
-	 */
-	struct request_pm_state	*pm;
-
-	/*
-	 * completion callback. end_io_data should be folded in with waiting
+	 * completion callback.
 	 */
 	rq_end_io_fn *end_io;
 	void *end_io_data;
 };

 /*
- * first three bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_CMD,		/* is a regular fs rw request */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	/*
-	 * for ATA/ATAPI devices
-	 */
-	__REQ_PC,		/* packet command (special) */
-	__REQ_BLOCK_PC,		/* queued down pc from block layer */
-	__REQ_SENSE,		/* sense retrival */
-
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_SPECIAL,		/* driver suplied command */
-	__REQ_DRIVE_CMD,
-	__REQ_DRIVE_TASK,
-	__REQ_DRIVE_TASKFILE,
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_PM_SUSPEND,	/* suspend request */
-	__REQ_PM_RESUME,	/* resume request */
-	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
-	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
-	__REQ_BAR_FLUSH,	/* rq is the flush request */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW		(1 << __REQ_RW)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
-#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
-#define REQ_CMD		(1 << __REQ_CMD)
-#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
-#define REQ_STARTED	(1 << __REQ_STARTED)
-#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
-#define REQ_QUEUED	(1 << __REQ_QUEUED)
-#define REQ_PC		(1 << __REQ_PC)
-#define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
-#define REQ_SENSE	(1 << __REQ_SENSE)
-#define REQ_FAILED	(1 << __REQ_FAILED)
-#define REQ_QUIET	(1 << __REQ_QUIET)
-#define REQ_SPECIAL	(1 << __REQ_SPECIAL)
-#define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
-#define REQ_DRIVE_TASK	(1 << __REQ_DRIVE_TASK)
-#define REQ_DRIVE_TASKFILE	(1 << __REQ_DRIVE_TASKFILE)
-#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
-#define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
-#define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
-#define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH	(1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH	(1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH	(1 << __REQ_BAR_FLUSH)
-
-/*
- * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
+ * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
  */
 struct request_pm_state
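The block of REQ_* bits removed above multiplexed the request type and its attributes in one word; type tests become equality checks on rq->cmd_type, while genuine modifiers stay in rq->cmd_flags. A sketch of the translation using only this header's helper macros (the function itself is hypothetical):

static const char *mydev_rq_kind(struct request *rq)
{
	if (blk_fs_request(rq))		/* was: rq->flags & REQ_CMD */
		return "fs";
	if (blk_pc_request(rq))		/* was: rq->flags & REQ_BLOCK_PC */
		return "pc";
	if (blk_pm_request(rq))		/* was: REQ_PM_SUSPEND | REQ_PM_RESUME */
		return "pm";
	return "other";
}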
@@ -277,23 +319,21 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef void (softirq_done_fn)(struct request *);

 enum blk_queue_state {
 	Queue_down,
 	Queue_up,
 };

-#define BLK_TAGS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BLK_TAGS_MASK		(BLK_TAGS_PER_LONG - 1)
-
 struct blk_queue_tag {
 	struct request **tag_index;	/* map of busy tags */
 	unsigned long *tag_map;		/* bit map of free/busy tags */
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
+	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };

@@ -322,7 +362,13 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
-	end_flush_fn		*end_flush_fn;
+	softirq_done_fn		*softirq_done_fn;
+
+	/*
+	 * Dispatch queue sorting
+	 */
+	sector_t		end_sector;
+	struct request		*boundary_rq;

 	/*
 	 * Auto-unplugging state
@@ -346,7 +392,7 @@ struct request_queue
 	 * queue needs bounce pages for pages above this limit
 	 */
 	unsigned long		bounce_pfn;
-	unsigned int		bounce_gfp;
+	gfp_t			bounce_gfp;

 	/*
 	 * various queue flags, see QUEUE_* below
@@ -374,8 +420,8 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;

-	unsigned short		max_sectors;
-	unsigned short		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
 	unsigned short		hardsect_size;
@@ -386,8 +432,7 @@ struct request_queue

 	struct blk_queue_tag	*queue_tags;

-	atomic_t		refcnt;
-
+	unsigned int		nr_sorted;
 	unsigned int		in_flight;

 	/*
@@ -396,28 +441,21 @@ struct request_queue
 	unsigned int		sg_timeout;
 	unsigned int		sg_reserved_size;
 	int			node;
-
-	struct list_head	drain_list;
-
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+	struct blk_trace	*blk_trace;
+#endif
 	/*
 	 * reserved for flush operations
 	 */
-	struct request		*flush_rq;
-	unsigned char		ordered;
-};
+	unsigned int		ordered, next_ordered, ordseq;
+	int			orderr, ordcolor;
+	struct request		pre_flush_rq, bar_rq, post_flush_rq;
+	struct request		*orig_bar_rq;
+	unsigned int		bi_size;

-enum {
-	QUEUE_ORDERED_NONE,
-	QUEUE_ORDERED_TAG,
-	QUEUE_ORDERED_FLUSH,
+	struct mutex		sysfs_lock;
 };

-#define RQ_INACTIVE		(-1)
-#define RQ_ACTIVE		1
-#define RQ_SCSI_BUSY		0xffff
-#define RQ_SCSI_DONE		0xfffe
-#define RQ_SCSI_DISCONNECTING	0xffe0
-
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
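The QUEUE_FLAG_* values are bit numbers within q->queue_flags rather than masks, so they are tested and updated with the regular bitops, exactly as the blk_queue_* macros in the next hunk do. A trivial sketch, assuming nothing beyond this header (the function name is hypothetical):

static int mydev_queue_is_tagged(request_queue_t *q)
{
	return test_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}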
@@ -426,33 +464,80 @@ enum {
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_DRAIN	8	/* draining queue for sched switch */
-#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
+#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
+
+enum {
+	/*
+	 * Hardbarrier is supported with one of the following methods.
+	 *
+	 * NONE		: hardbarrier unsupported
+	 * DRAIN	: ordering by draining is enough
+	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
+	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
+	 * TAG		: ordering by tag is enough
+	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
+	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
+	 */
+	QUEUE_ORDERED_NONE	= 0x00,
+	QUEUE_ORDERED_DRAIN	= 0x01,
+	QUEUE_ORDERED_TAG	= 0x02,
+
+	QUEUE_ORDERED_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_POSTFLUSH	= 0x20,
+	QUEUE_ORDERED_FUA	= 0x40,
+
+	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+	/*
+	 * Ordered operation sequence
+	 */
+	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
+	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
+	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
+	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
+	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
+	QUEUE_ORDSEQ_DONE	= 0x20,
+};

 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q)	((q)->ordseq)
+
+#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
+#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
+#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
+#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

-#define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
-#define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
-#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
-#define blk_rq_started(rq)	((rq)->flags & REQ_STARTED)
+#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

 #define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

-#define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
+#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
+#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)

 #define blk_pm_request(rq)	\
-	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

-#define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
+#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
+#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)

 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

-#define rq_data_dir(rq)		((rq)->flags & 1)
+#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
+
+/*
+ * We regard a request as sync if it's a READ or a SYNC write.
+ */
+#define rq_is_sync(rq)	(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)

 static inline int blk_queue_full(struct request_queue *q, int rw)
 {
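The composite QUEUE_ORDERED_* modes above are built by OR-ing a draining strategy with the flush/FUA bits. A hedged sketch of a driver with a volatile write cache and no FUA support advertising drain-plus-flush ordering; the opcode and both helper names are hypothetical, and prepare_flush_fn turns the pre-allocated request into a cache-flush command:

static void mydev_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_FLUSH;
	rq->cmd[0] = 0x35;	/* hypothetical flush opcode */
}

static void mydev_setup_ordering(request_queue_t *q)
{
	/* drain the queue, with a flush before and after the barrier write */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydev_prepare_flush);
}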
@@ -485,7 +570,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

 /*
  * noop, requests are automagically marked as active/inactive by I/O
@@ -540,27 +625,31 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void blk_end_sync_rq(struct request *rq);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
-extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void __blk_put_request(request_queue_t *, struct request *);
+extern void blk_end_sync_rq(struct request *rq, int error);
+extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct file *, struct request_queue *,
+		struct gendisk *, struct scsi_ioctl_command __user *);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
+extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+			  struct request *, int);
+extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+				  struct request *, int, rq_end_io_fn *);

 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
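The mapping helpers now take the request as an argument, and the allocation and execution entry points grew gfp_t and at_head parameters. A hedged sketch of issuing a packet command synchronously with the declarations above; error handling is abbreviated and the mydev naming is hypothetical:

static int mydev_send_pc(request_queue_t *q, struct gendisk *disk,
			 unsigned char *cdb, unsigned int cdb_len,
			 void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->timeout = 60 * HZ;

	err = len ? blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL) : 0;
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);	/* 0: queue at tail */

	blk_put_request(rq);
	return err;
}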
@@ -591,8 +680,19 @@ static inline void blk_run_address_space(struct address_space *mapping)
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
+extern void blk_complete_request(struct request *);
+
+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
+{
+	if (blk_fs_request(rq))
+		return (nr_bytes >= (rq->hard_nr_sectors << 9));
+	else if (blk_pc_request(rq))
+		return nr_bytes >= rq->data_len;
+
+	return 0;
+}

 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
@@ -604,12 +704,7 @@ extern void end_request(struct request *req, int uptodate);

 static inline void blkdev_dequeue_request(struct request *req)
 {
-	BUG_ON(list_empty(&req->queuelist));
-
-	list_del_init(&req->queuelist);
-
-	if (req->rl)
-		elv_remove_request(req->q, req);
+	elv_dequeue_request(req->q, req);
 }

 /*
@@ -621,7 +716,7 @@ extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
+extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
 extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
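end_that_request_last() now takes the completion status, and blk_complete_request() defers final accounting to the block softirq via the queue's softirq_done_fn. A hedged sketch of the split completion this enables; the mydev_* helpers are hypothetical and the request is assumed to have been dequeued already:

/* registered at init time with blk_queue_softirq_done(q, mydev_softirq_done) */
static void mydev_softirq_done(struct request *rq)
{
	int uptodate = !rq->errors;

	/* finish all sectors, then do the final accounting and free */
	if (!end_that_request_first(rq, uptodate, rq->nr_sectors))
		end_that_request_last(rq, uptodate);
}

static irqreturn_t mydev_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct request *rq = mydev_current_rq(dev_id);	/* hypothetical */

	rq->errors = mydev_read_status(dev_id);		/* hypothetical */
	blk_complete_request(rq);			/* punt to softirq */
	return IRQ_HANDLED;
}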
@@ -631,33 +726,32 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);

 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
 extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
-extern void blk_wait_queue_drained(request_queue_t *, int);
-extern void blk_finish_queue_drain(request_queue_t *);

 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int gfp_mask);
-request_queue_t *blk_alloc_queue_node(int,int);
-#define blk_put_queue(q) blk_cleanup_queue((q))
+request_queue_t *blk_alloc_queue(gfp_t);
+request_queue_t *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(request_queue_t *);

 /*
  * tag stuff
  */
 #define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
-#define blk_rq_tagged(rq)		((rq)->flags & REQ_QUEUED)
+#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(request_queue_t *, struct request *);
 extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
@@ -666,20 +760,22 @@ extern void blk_queue_free_tags(request_queue_t *);
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern long blk_congestion_wait(int rw, long timeout);
+extern struct blk_queue_tag *blk_init_tags(int);
+extern void blk_free_tags(struct blk_queue_tag *);
+extern void blk_congestion_end(int rw);

 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);

 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
-#define MAX_SECTORS 255
+#define SAFE_MAX_SECTORS 255
+#define BLK_DEF_MAX_SECTORS 1024

 #define MAX_SEGMENT_SIZE	65536

 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

-extern void drive_stat_acct(struct request *, int, int);
-
 static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;
@@ -724,7 +820,7 @@ static inline unsigned int blksize_bits(unsigned int size)
 	return bits;
 }

-extern inline unsigned int block_size(struct block_device *bdev)
+static inline unsigned int block_size(struct block_device *bdev)
 {
 	return bdev->bd_block_size;
 }
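The new blk_init_tags()/blk_free_tags() pair lets drivers share one tag map across several queues; the per-request flow is unchanged. A hedged sketch of the classic tagged request_fn loop, assuming the queue was set up with blk_queue_init_tags() and that the tag is released with blk_queue_end_tag() on completion; mydev_issue() is hypothetical and elv_next_request() comes from elevator.h:

static void mydev_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* assigns rq->tag, sets REQ_QUEUED and dequeues rq */
		if (blk_queue_start_tag(q, rq))
			break;	/* tags exhausted, retry on next completion */
		mydev_issue(rq);	/* hypothetical hardware submit */
	}
}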