diff --git a/block/blk.h b/block/blk.h
index 59776ab..79c85f7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,43 @@ void __blk_queue_free_tags(struct request_queue *q);
 
 void blk_unplug_work(struct work_struct *work);
 void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+	REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+				const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+	return 0;
+}
+#endif
 
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
@@ -33,6 +70,10 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
+void elv_quiesce_start(struct request_queue *q);
+void elv_quiesce_end(struct request_queue *q);
+
+
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested. It includes a little hysteresis to keep the
@@ -51,4 +92,29 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
+	__rq_for_each_bio(_iter.bio, _rq)			\
+		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
+
+#endif /* BLK_DEV_INTEGRITY */
+
+static inline int blk_cpu_to_group(int cpu)
+{
+#ifdef CONFIG_SCHED_MC
+	const struct cpumask *mask = cpu_coregroup_mask(cpu);
+	return cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+	return cpumask_first(topology_thread_cpumask(cpu));
+#else
+	return cpu;
+#endif
+}
+
+static inline int blk_do_io_stat(struct request *rq)
+{
+	return rq->rq_disk && blk_rq_io_stat(rq);
+}
+
 #endif
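
Note: the REQ_ATOM_COMPLETE bit added above is the arbitration point between the error-handling timer and normal IO completion. Whichever path wins the test_and_set_bit() race owns the request; the loser backs off. Below is a minimal standalone sketch of that pattern, not kernel code: struct fake_request, mark_rq_complete() and the two thread functions are made-up stand-ins for struct request, blk_mark_rq_complete() and the real completion/timeout paths.

/*
 * Standalone sketch (not kernel code): a single atomic test-and-set
 * bit lets exactly one of two racing paths "own" a request.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define REQ_ATOM_COMPLETE	0

struct fake_request {
	atomic_ulong atomic_flags;
};

/* Like blk_mark_rq_complete(): returns nonzero if the bit was already set. */
static int mark_rq_complete(struct fake_request *rq)
{
	unsigned long mask = 1UL << REQ_ATOM_COMPLETE;

	return (atomic_fetch_or(&rq->atomic_flags, mask) & mask) != 0;
}

static void *completion_path(void *arg)
{
	if (!mark_rq_complete(arg))
		puts("completion path owns the request");
	return NULL;
}

static void *timeout_path(void *arg)
{
	if (!mark_rq_complete(arg))
		puts("timeout path owns the request");
	return NULL;
}

int main(void)
{
	struct fake_request rq = { .atomic_flags = 0 };
	pthread_t a, b;

	pthread_create(&a, NULL, completion_path, &rq);
	pthread_create(&b, NULL, timeout_path, &rq);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Built with cc -pthread, exactly one of the two lines ever prints, which is the same guarantee blk_mark_rq_complete() gives the timeout and completion paths.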
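
The hysteresis mentioned in the congestion-threshold comment works because nr_congestion_on sits above nr_congestion_off, so the congestion state cannot flap on every request near a single cutoff. Here is a small userspace sketch of the idea with invented threshold values; the kernel derives the real ones from q->nr_requests in blk_queue_congestion_threshold(), declared in the second hunk above.

/*
 * Standalone illustration (not kernel code) of two-threshold
 * hysteresis: congestion turns on at/above nr_congestion_on and only
 * turns off again at/below nr_congestion_off.  The values below are
 * invented for the demo.
 */
#include <stdio.h>

struct fake_queue {
	int nr_congestion_on;	/* mark congested at this many used requests */
	int nr_congestion_off;	/* clear congestion only once usage falls here */
	int congested;
};

static void update_congestion(struct fake_queue *q, int used)
{
	if (!q->congested && used >= q->nr_congestion_on)
		q->congested = 1;
	else if (q->congested && used <= q->nr_congestion_off)
		q->congested = 0;
}

int main(void)
{
	struct fake_queue q = { .nr_congestion_on = 113, .nr_congestion_off = 111 };
	int used[] = { 110, 113, 112, 113, 111 };

	for (int i = 0; i < 5; i++) {
		update_congestion(&q, used[i]);
		printf("used=%3d congested=%d\n", used[i], q.congested);
	}
	/* At used=112 the state stays congested instead of flapping off. */
	return 0;
}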