X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fblkdev.h;h=c09696a90d6a2149e22088a8ead1cecb9f5a082b;hb=ac9fafa1243640349aa481adf473db283a695766;hp=b32564a1e105c4220d14d988e6530a141b46be76;hpb=abae1fde63fcdd2a3abaa0d7930938d8326f83d2;p=safe%2Fjmp%2Flinux-2.6 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b32564a..c09696a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1,6 +1,8 @@ #ifndef _LINUX_BLKDEV_H #define _LINUX_BLKDEV_H +#ifdef CONFIG_BLOCK + #include #include #include @@ -18,26 +20,10 @@ #include -#ifdef CONFIG_LBD -# include -# define sector_div(a, b) do_div(a, b) -#else -# define sector_div(n, b)( \ -{ \ - int _res; \ - _res = (n) % (b); \ - (n) /= (b); \ - _res; \ -} \ -) -#endif - -#ifdef CONFIG_BLOCK - struct scsi_ioctl_command; struct request_queue; -typedef struct request_queue request_queue_t; +typedef struct request_queue request_queue_t __deprecated; struct elevator_queue; typedef struct elevator_queue elevator_t; struct request_pm_state; @@ -48,85 +34,11 @@ struct sg_io_hdr; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ -/* - * This is the per-process anticipatory I/O scheduler state. - */ -struct as_io_context { - spinlock_t lock; - - void (*dtor)(struct as_io_context *aic); /* destructor */ - void (*exit)(struct as_io_context *aic); /* called on task exit */ - - unsigned long state; - atomic_t nr_queued; /* queued reads & sync writes */ - atomic_t nr_dispatched; /* number of requests gone to the drivers */ - - /* IO History tracking */ - /* Thinktime */ - unsigned long last_end_request; - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - /* Layout pattern */ - unsigned int seek_samples; - sector_t last_request_pos; - u64 seek_total; - sector_t seek_mean; -}; - -struct cfq_queue; -struct cfq_io_context { - struct rb_node rb_node; - void *key; - - struct cfq_queue *cfqq[2]; - - struct io_context *ioc; - - unsigned long last_end_request; - sector_t last_request_pos; - - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - - unsigned int seek_samples; - u64 seek_total; - sector_t seek_mean; - - struct list_head queue_list; - - void (*dtor)(struct io_context *); /* destructor */ - void (*exit)(struct io_context *); /* called on task exit */ -}; - -/* - * This is the per-process I/O subsystem state. It is refcounted and - * kmalloc'ed. Currently all fields are modified in process io context - * (apart from the atomic refcount), so require no locking. 
- */ -struct io_context { - atomic_t refcount; - struct task_struct *task; - - unsigned int ioprio_changed; - - /* - * For request batching - */ - unsigned long last_waited; /* Time last woken after wait for request */ - int nr_batch_requests; /* Number of requests left in the batch */ - - struct as_io_context *aic; - struct rb_root cic_root; - void *ioc_data; -}; - -void put_io_context(struct io_context *ioc); +int put_io_context(struct io_context *ioc); void exit_io_context(void); struct io_context *get_io_context(gfp_t gfp_flags, int node); +struct io_context *alloc_io_context(gfp_t gfp_flags, int node); void copy_io_context(struct io_context **pdst, struct io_context **psrc); -void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); struct request; typedef void (rq_end_io_fn)(struct request *, int); @@ -157,8 +69,6 @@ enum rq_cmd_type_bits { * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver * private REQ_LB opcodes to differentiate what type of request this is */ - REQ_TYPE_ATA_CMD, - REQ_TYPE_ATA_TASK, REQ_TYPE_ATA_TASKFILE, REQ_TYPE_ATA_PC, }; @@ -202,6 +112,7 @@ enum rq_flag_bits { __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_RW_META, /* metadata io request */ + __REQ_COPY_USER, /* contains copies of user pages */ __REQ_NR_BITS, /* stops here */ }; @@ -223,17 +134,20 @@ enum rq_flag_bits { #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) #define REQ_ALLOCED (1 << __REQ_ALLOCED) #define REQ_RW_META (1 << __REQ_RW_META) +#define REQ_COPY_USER (1 << __REQ_COPY_USER) #define BLK_MAX_CDB 16 /* - * try to put the fields that are referenced together in the same cacheline + * try to put the fields that are referenced together in the same cacheline. + * if you modify this structure, be sure to check block/blk-core.c:rq_init() + * as well! 
*/ struct request { struct list_head queuelist; struct list_head donelist; - request_queue_t *q; + struct request_queue *q; unsigned int cmd_flags; enum rq_cmd_type_bits cmd_type; @@ -301,10 +215,12 @@ struct request { /* * when request is used as a packet command carrier */ - unsigned int cmd_len; - unsigned char cmd[BLK_MAX_CDB]; + unsigned short cmd_len; + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; unsigned int data_len; + unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; void *data; void *sense; @@ -337,16 +253,16 @@ struct request_pm_state #include -typedef void (request_fn_proc) (request_queue_t *q); -typedef int (make_request_fn) (request_queue_t *q, struct bio *bio); -typedef int (prep_rq_fn) (request_queue_t *, struct request *); -typedef void (unplug_fn) (request_queue_t *); +typedef void (request_fn_proc) (struct request_queue *q); +typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); +typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unplug_fn) (struct request_queue *); struct bio_vec; -typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); -typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); -typedef void (prepare_flush_fn) (request_queue_t *, struct request *); +typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); +typedef void (prepare_flush_fn) (struct request_queue *, struct request *); typedef void (softirq_done_fn)(struct request *); +typedef int (dma_drain_needed_fn)(struct request *); enum blk_queue_state { Queue_down, @@ -356,7 +272,6 @@ enum blk_queue_state { struct blk_queue_tag { struct request **tag_index; /* map of busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */ - struct list_head busy_list; /* fifo list of busy tags */ int busy; /* current depth */ int max_depth; /* what we will send to device */ int real_max_depth; /* what the array can hold */ @@ -382,9 +297,9 @@ struct request_queue prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; merge_bvec_fn *merge_bvec_fn; - issue_flush_fn *issue_flush_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; + dma_drain_needed_fn *dma_drain_needed; /* * Dispatch queue sorting @@ -448,9 +363,13 @@ struct request_queue unsigned int max_segment_size; unsigned long seg_boundary_mask; + void *dma_drain_buffer; + unsigned int dma_drain_size; + unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; + struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight; @@ -471,7 +390,6 @@ struct request_queue int orderr, ordcolor; struct request pre_flush_rq, bar_rq, post_flush_rq; struct request *orig_bar_rq; - unsigned int bi_size; struct mutex sysfs_lock; @@ -483,13 +401,38 @@ struct request_queue #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ -#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */ -#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */ +#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 
#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ + +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + __clear_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); + __clear_bit(flag, &q->queue_flags); +} enum { /* @@ -534,6 +477,7 @@ enum { #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) @@ -555,6 +499,9 @@ enum { #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) +#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) @@ -576,17 +523,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw) static inline void blk_set_queue_full(struct request_queue *q, int rw) { if (rw == READ) - set_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_READFULL, q); else - set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_WRITEFULL, q); } static inline void blk_clear_queue_full(struct request_queue *q, int rw) { if (rw == READ) - clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_READFULL, q); else - clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); } @@ -615,8 +562,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; * BLK_BOUNCE_ANY : don't bounce anything * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary */ + +#if BITS_PER_LONG == 32 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) -#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) /* @@ -624,36 +576,50 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; */ #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) -#ifdef CONFIG_MMU +#ifdef CONFIG_BOUNCE extern int init_emergency_isa_pool(void); -extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); +extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); #else static inline int init_emergency_isa_pool(void) { return 0; } -static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) +static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) { } #endif /* CONFIG_MMU */ -#define rq_for_each_bio(_bio, rq) \ +struct req_iterator { + int i; + struct bio *bio; +}; + +/* This should not be used directly - use rq_for_each_segment */ +#define 
__rq_for_each_bio(_bio, rq) \ if ((rq->bio)) \ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.i) + +#define rq_iter_last(rq, _iter) \ + (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) + extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern void register_disk(struct gendisk *dev); extern void generic_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); -extern void __blk_put_request(request_queue_t *, struct request *); +extern void __blk_put_request(struct request_queue *, struct request *); extern void blk_end_sync_rq(struct request *rq, int error); -extern struct request *blk_get_request(request_queue_t *, int, gfp_t); -extern void blk_insert_request(request_queue_t *, struct request *, int, void *); -extern void blk_requeue_request(request_queue_t *, struct request *); -extern void blk_plug_device(request_queue_t *); -extern int blk_remove_plug(request_queue_t *); -extern void blk_recount_segments(request_queue_t *, struct bio *); +extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern void blk_insert_request(struct request_queue *, struct request *, int, void *); +extern void blk_requeue_request(struct request_queue *, struct request *); +extern void blk_plug_device(struct request_queue *); +extern int blk_remove_plug(struct request_queue *); +extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct request_queue *, struct gendisk *, unsigned int, void __user *); extern int sg_scsi_ioctl(struct file *, struct request_queue *, @@ -662,14 +628,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *, /* * Temporary export, until SCSI gets fixed up. */ -extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *); +extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); /* * A queue has just exitted congestion. Note this in the global counter of * congested queues, and wake up anyone who was waiting for requests to be * put back. */ -static inline void blk_clear_queue_congested(request_queue_t *q, int rw) +static inline void blk_clear_queue_congested(struct request_queue *q, int rw) { clear_bdi_congested(&q->backing_dev_info, rw); } @@ -678,34 +645,31 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw) * A queue has just entered congestion. Flag that in the queue's VM-visible * state flags and increment the global gounter of congested queues. 
*/ -static inline void blk_set_queue_congested(request_queue_t *q, int rw) +static inline void blk_set_queue_congested(struct request_queue *q, int rw) { set_bdi_congested(&q->backing_dev_info, rw); } -extern void blk_start_queue(request_queue_t *q); -extern void blk_stop_queue(request_queue_t *q); +extern void blk_start_queue(struct request_queue *q); +extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); -extern void __blk_stop_queue(request_queue_t *q); -extern void blk_run_queue(request_queue_t *); -extern void blk_start_queueing(request_queue_t *); -extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long); +extern void __blk_stop_queue(struct request_queue *q); +extern void __blk_run_queue(struct request_queue *); +extern void blk_run_queue(struct request_queue *); +extern void blk_start_queueing(struct request_queue *); +extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); extern int blk_rq_unmap_user(struct bio *); -extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); -extern int blk_rq_map_user_iov(request_queue_t *, struct request *, +extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); +extern int blk_rq_map_user_iov(struct request_queue *, struct request *, struct sg_iovec *, int, unsigned int); -extern int blk_execute_rq(request_queue_t *, struct gendisk *, +extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); -extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); -extern int blk_fill_sghdr_rq(request_queue_t *, struct request *, - struct sg_io_hdr *, int); -extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *); -extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *, - struct bio *); extern int blk_verify_command(unsigned char *, int); +extern void blk_unplug(struct request_queue *q); -static inline request_queue_t *bdev_get_queue(struct block_device *bdev) +static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { return bdev->bd_disk->queue; } @@ -724,27 +688,35 @@ static inline void blk_run_address_space(struct address_space *mapping) } /* - * end_request() and friends. Must be called with the request queue spinlock - * acquired. All functions called within end_request() _must_be_ atomic. + * blk_end_request() and friends. + * __blk_end_request() and end_request() must be called with + * the request queue spinlock acquired. * * Several drivers define their own end_request and call - * end_that_request_first() and end_that_request_last() - * for parts of the original function. This prevents - * code duplication in drivers. + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. 
*/ -extern int end_that_request_first(struct request *, int, int); -extern int end_that_request_chunk(struct request *, int, int); -extern void end_that_request_last(struct request *, int); -extern void end_request(struct request *req, int uptodate); +extern int blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes); +extern void end_request(struct request *, int); +extern void end_queued_request(struct request *, int); +extern void end_dequeued_request(struct request *, int); +extern int blk_end_request_callback(struct request *rq, int error, + unsigned int nr_bytes, + int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); /* - * end_that_request_first/chunk() takes an uptodate argument. we account - * any value <= as an io error. 0 means -EIO for compatability reasons, - * any other < 0 value is the direct error type. An uptodate value of - * 1 indicates successful io completion + * blk_end_request() takes bytes instead of sectors as a complete size. + * blk_rq_bytes() returns bytes left to complete in the entire request. + * blk_rq_cur_bytes() returns bytes left to complete in the current segment. */ -#define end_io_error(uptodate) (unlikely((uptodate) <= 0)) +extern unsigned int blk_rq_bytes(struct request *rq); +extern unsigned int blk_rq_cur_bytes(struct request *rq); static inline void blkdev_dequeue_request(struct request *req) { @@ -754,41 +726,45 @@ static inline void blkdev_dequeue_request(struct request *req) /* * Access functions for manipulating queue properties */ -extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, +extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); -extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); -extern void blk_cleanup_queue(request_queue_t *); -extern void blk_queue_make_request(request_queue_t *, make_request_fn *); -extern void blk_queue_bounce_limit(request_queue_t *, u64); -extern void blk_queue_max_sectors(request_queue_t *, unsigned int); -extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short); -extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short); -extern void blk_queue_max_segment_size(request_queue_t *, unsigned int); -extern void blk_queue_hardsect_size(request_queue_t *, unsigned short); -extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b); -extern void blk_queue_segment_boundary(request_queue_t *, unsigned long); -extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn); -extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); -extern void blk_queue_dma_alignment(request_queue_t *, int); -extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *); +extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_max_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_segment_size(struct 
request_queue *, unsigned int); +extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_dma_pad(struct request_queue *, unsigned int); +extern int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); +extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); -extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *); -extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); -extern int blk_do_ordered(request_queue_t *, struct request **); -extern unsigned blk_ordered_cur_seq(request_queue_t *); +extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); +extern int blk_do_ordered(struct request_queue *, struct request **); +extern unsigned blk_ordered_cur_seq(struct request_queue *); extern unsigned blk_ordered_req_seq(struct request *); -extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int); +extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); -extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); -extern void generic_unplug_device(request_queue_t *); -extern void __generic_unplug_device(request_queue_t *); +extern void generic_unplug_device(struct request_queue *); +extern void __generic_unplug_device(struct request_queue *); extern long nr_blockdev_pages(void); -int blk_get_queue(request_queue_t *); -request_queue_t *blk_alloc_queue(gfp_t); -request_queue_t *blk_alloc_queue_node(gfp_t, int); -extern void blk_put_queue(request_queue_t *); +int blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t, int); +extern void blk_put_queue(struct request_queue *); /* * tag stuff @@ -796,13 +772,13 @@ extern void blk_put_queue(request_queue_t *); #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) -extern int blk_queue_start_tag(request_queue_t *, struct request *); -extern struct request *blk_queue_find_tag(request_queue_t *, int); -extern void blk_queue_end_tag(request_queue_t *, struct request *); -extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *); -extern void blk_queue_free_tags(request_queue_t *); -extern int blk_queue_resize_tags(request_queue_t *, int); -extern void blk_queue_invalidate_tags(request_queue_t *); +extern int blk_queue_start_tag(struct request_queue *, struct request *); +extern struct request *blk_queue_find_tag(struct request_queue *, int); +extern void blk_queue_end_tag(struct request_queue *, struct request *); +extern int blk_queue_init_tags(struct request_queue *, int, struct 
blk_queue_tag *); +extern void blk_queue_free_tags(struct request_queue *); +extern int blk_queue_resize_tags(struct request_queue *, int); +extern void blk_queue_invalidate_tags(struct request_queue *); extern struct blk_queue_tag *blk_init_tags(int); extern void blk_free_tags(struct blk_queue_tag *); @@ -814,7 +790,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, return bqt->tag_index[tag]; } -extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); extern int blkdev_issue_flush(struct block_device *, sector_t *); #define MAX_PHYS_SEGMENTS 128 @@ -826,7 +801,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *); #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) -static inline int queue_hardsect_size(request_queue_t *q) +static inline int queue_hardsect_size(struct request_queue *q) { int retval = 512; @@ -841,14 +816,9 @@ static inline int bdev_hardsect_size(struct block_device *bdev) return queue_hardsect_size(bdev_get_queue(bdev)); } -static inline int queue_dma_alignment(request_queue_t *q) +static inline int queue_dma_alignment(struct request_queue *q) { - int retval = 511; - - if (q && q->dma_alignment) - retval = q->dma_alignment; - - return retval; + return q ? q->dma_alignment : 511; } /* assumes size > 256 */ @@ -901,6 +871,13 @@ static inline void exit_io_context(void) { } +struct io_context; +static inline int put_io_context(struct io_context *ioc) +{ + return 1; +} + + #endif /* CONFIG_BLOCK */ #endif
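
Notes on the interface changes above (editorial sketches; not part of the diff):

The old end_that_request_first()/end_that_request_chunk()/end_that_request_last() trio and the end_io_error() macro are gone. Drivers now finish requests with blk_end_request() (or __blk_end_request() when they already hold the queue lock), passing an errno-style error and a byte count instead of an "uptodate" flag and a sector count. A minimal sketch of a completion helper against the declarations above; the my_finish_request() name is invented for illustration:

#include <linux/blkdev.h>

/*
 * Sketch only.  "error" is 0 on success or a negative errno, the size
 * argument is in bytes, and blk_end_request() returns 0 once the
 * request is fully completed (non-zero while bytes remain outstanding).
 * It takes the queue lock itself; use __blk_end_request() if the lock
 * is already held.
 */
static void my_finish_request(struct request *rq, int error)
{
	/* blk_rq_bytes() reports the bytes left in the entire request */
	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
		printk(KERN_WARNING "my_finish_request: partial completion\n");
}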
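
rq_for_each_bio() has been replaced by __rq_for_each_bio() plus rq_for_each_segment(), which walks every bio_vec of every bio in a request through a struct req_iterator, so drivers no longer nest two loops by hand. A hedged sketch of the new iteration; my_copy_request_data() is invented, and it assumes lowmem pages where a real driver would kmap_atomic() rather than use page_address():

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/string.h>

static void my_copy_request_data(struct request *rq, void *dst)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	/* one pass over every segment of every bio in the request */
	rq_for_each_segment(bvec, rq, iter) {
		void *src = page_address(bvec->bv_page) + bvec->bv_offset;

		memcpy(dst, src, bvec->bv_len);
		dst += bvec->bv_len;
	}
}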
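
Queue flags are now manipulated through queue_flag_set()/queue_flag_clear(), which warn unless q->queue_lock is held, with the *_unlocked variants reserved for initialization paths. Combined with the new QUEUE_FLAG_NOMERGES bit, a driver-side sketch could look like the following; my_set_nomerges() is invented, and the flag is normally toggled through the queue's sysfs "nomerges" attribute rather than by drivers:

#include <linux/blkdev.h>

static void my_set_nomerges(struct request_queue *q, int on)
{
	unsigned long flags;

	/* the locked helpers expect q->queue_lock to be held */
	spin_lock_irqsave(q->queue_lock, flags);
	if (on)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}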
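
The new DMA drain/pad hooks (dma_drain_needed_fn, blk_queue_dma_drain(), blk_queue_dma_pad() and the extra_len field in struct request) let a driver ask the block layer to pad transfer lengths and append a drain buffer to the scatterlist of awkward requests, in the way libata handles ATAPI devices. A hedged setup sketch; MY_DRAIN_SIZE, my_dma_drain_needed() and my_setup_dma_quirks() are invented, and a real driver would also reserve an extra hardware segment for the drain entry:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	256	/* hypothetical drain-buffer size */

/* per-request predicate: drain short packet commands */
static int my_dma_drain_needed(struct request *rq)
{
	return blk_pc_request(rq) && rq->data_len < MY_DRAIN_SIZE;
}

static int my_setup_dma_quirks(struct request_queue *q)
{
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* dma_pad_mask: round transfer lengths up to a 4-byte multiple */
	blk_queue_dma_pad(q, 3);

	/* blk_rq_map_sg() appends the drain buffer when the predicate fires */
	return blk_queue_dma_drain(q, my_dma_drain_needed, buf, MY_DRAIN_SIZE);
}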
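
Finally, request_queue_t survives only as a __deprecated alias (every prototype above switched to the bare struct), cmd_len shrank to an unsigned short, and rq->cmd became a pointer that blk_rq_init() aims at the inline rq->__cmd[] buffer, so code indexing rq->cmd[] keeps working. A sketch of issuing a simple BLOCK_PC command under the updated definitions; my_test_unit_ready() and the CDB are purely illustrative:

#include <linux/blkdev.h>

static int my_test_unit_ready(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
	int err;

	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = 6;
	rq->cmd[0] = 0x00;	/* TEST UNIT READY; rq->cmd points at rq->__cmd[] */
	rq->timeout = 10 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}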