/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work items that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
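
/*
 * Note on the two helpers above: they form a hysteresis pair.  With the
 * default idle_thresh of 32 set in btrfs_init_workers(), a thread moves
 * back onto the idle list once its pending count drops below 16
 * (idle_thresh / 2) and is moved off again once it reaches 32, so a
 * worker does not bounce between the lists on every queued item.
 */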
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}
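
/*
 * Note on the function above: on an ordered pool the work functions
 * themselves may finish in any order, but ordered_func() and
 * ordered_free() are invoked in queued order (high priority items first),
 * because each item stays on its order list as a barrier until every
 * earlier item has had WORK_DONE_BIT set.
 */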
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	/* irqs are already off, a plain spin_lock is enough for the pool lock */
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending)) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}
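
/*
 * Usage sketch (illustrative only, not taken from a real caller): a pool is
 * typically initialized and then given at least one thread before work is
 * queued to it.  The name "example" and the max count of 4 are made-up
 * values for the sketch:
 *
 *	struct btrfs_workers workers;
 *
 *	btrfs_init_workers(&workers, "example", 4);
 *	btrfs_start_workers(&workers, 1);
 *	...
 *	btrfs_stop_workers(&workers);
 *
 * Additional threads are started on demand by find_worker() below, up to
 * max_workers.
 */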
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;

fail:
	btrfs_stop_workers(workers);
	return ret;
}
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * idle.
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
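
/*
 * Usage sketch (illustrative only, not taken from a real caller): a work
 * item is queued by filling in its callback and handing it to
 * btrfs_queue_worker().  The handler name my_func() is made up for the
 * sketch:
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		... do the deferred work, then free or reuse 'work' ...
 *	}
 *
 *	work->func = my_func;
 *	work->flags = 0;
 *	btrfs_set_work_high_prio(work);		(optional, before queueing)
 *	btrfs_queue_worker(&workers, work);
 *
 * For a pool with workers->ordered set, ordered_func and ordered_free must
 * also be filled in; see run_ordered_completions() above.
 */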