X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=block%2Fblk-timeout.c;h=1ba7e0aca8781b55ce14fa54491da8f81505b747;hp=9b4ad138bb33a4559a98fec2e0edc2136b8eff52;hb=e071041be037eca208b62b84469a06bdfc692bea;hpb=581d4e28d9195aa8b2231383dbabc288988d615e

diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 9b4ad13..1ba7e0a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -73,18 +73,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  */
 void blk_delete_timer(struct request *req)
 {
-	struct request_queue *q = req->q;
-
-	/*
-	 * Nothing to detach
-	 */
-	if (!q->rq_timed_out_fn || !req->deadline)
-		return;
-
 	list_del_init(&req->timeout_list);
-
-	if (list_empty(&q->timeout_list))
-		del_timer(&q->timeout);
 }
 
 static void blk_rq_timed_out(struct request *req)
@@ -118,7 +107,7 @@ static void blk_rq_timed_out(struct request *req)
 void blk_rq_timed_out_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
-	unsigned long flags, uninitialized_var(next), next_set = 0;
+	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -133,16 +122,17 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
-		}
-		if (!next_set) {
-			next = rq->deadline;
-			next_set = 1;
-		} else if (time_after(next, rq->deadline))
+		} else if (!next || time_after(next, rq->deadline))
 			next = rq->deadline;
 	}
 
-	if (next_set && !list_empty(&q->timeout_list))
-		mod_timer(&q->timeout, round_jiffies(next));
+	/*
+	 * next can never be 0 here with the list non-empty, since we always
+	 * bump ->deadline to 1 so we can detect if the timer was ever added
+	 * or not. See comment in blk_add_timer()
+	 */
+	if (next)
+		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -158,6 +148,8 @@ void blk_rq_timed_out_timer(unsigned long data)
  */
 void blk_abort_request(struct request *req)
 {
+	if (blk_mark_rq_complete(req))
+		return;
 	blk_delete_timer(req);
 	blk_rq_timed_out(req);
 }
@@ -182,31 +174,22 @@ void blk_add_timer(struct request *req)
 	BUG_ON(!list_empty(&req->timeout_list));
 	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 
-	if (req->timeout)
-		req->deadline = jiffies + req->timeout;
-	else {
-		req->deadline = jiffies + q->rq_timeout;
-		/*
-		 * Some LLDs, like scsi, peek at the timeout to prevent
-		 * a command from being retried forever.
-		 */
+	/*
+	 * Some LLDs, like scsi, peek at the timeout to prevent a
+	 * command from being retried forever.
+	 */
+	if (!req->timeout)
 		req->timeout = q->rq_timeout;
-	}
+
+	req->deadline = jiffies + req->timeout;
 	list_add_tail(&req->timeout_list, &q->timeout_list);
 
 	/*
 	 * If the timer isn't already pending or this timeout is earlier
-	 * than an existing one, modify the timer. Round to next nearest
+	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
 	 */
-	expiry = round_jiffies(req->deadline);
-
-	/*
-	 * We use ->deadline == 0 to detect whether a timer was added or
-	 * not, so just increase to next jiffy for that specific case
-	 */
-	if (unlikely(!req->deadline))
-		req->deadline = 1;
+	expiry = round_jiffies_up(req->deadline);
 
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires))
@@ -222,14 +205,34 @@ void blk_abort_queue(struct request_queue *q)
 {
 	unsigned long flags;
 	struct request *rq, *tmp;
+	LIST_HEAD(list);
+
+	/*
+	 * Not a request based block device, nothing to abort
+	 */
+	if (!q->request_fn)
+		return;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	elv_abort_queue(q);
 
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+	/*
+	 * Splice entries to local list, to avoid deadlocking if entries
+	 * get readded to the timeout list by error handling
+	 */
+	list_splice_init(&q->timeout_list, &list);
+
+	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
 		blk_abort_request(rq);
 
+	/*
+	 * Occasionally, blk_abort_request() will return without
+	 * deleting the element from the list. Make sure we add those back
+	 * instead of leaving them on the local stack list.
+	 */
+	list_splice(&list, &q->timeout_list);
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 }