* Boston, MA 021110-1307, USA.
*/
-#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
-# include <linux/freezer.h>
+#include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
-
#include "async-thread.h"
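+
+/*
+ * bits in btrfs_work->flags: QUEUED is set while the item waits on a
+ * worker's pending list, DONE once ->func has finished, and
+ * ORDER_DONE once the ordered completion has been called for it
+ */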
+#define WORK_QUEUED_BIT 0
+#define WORK_DONE_BIT 1
+#define WORK_ORDER_DONE_BIT 2
+
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
/* number of things on the pending list */
atomic_t num_pending;
+ unsigned long sequence;
+
/* protects the pending list. */
spinlock_t lock;
};
}
+static noinline int run_ordered_completions(struct btrfs_workers *workers,
+ struct btrfs_work *work)
+{
+ unsigned long flags;
+
+ if (!workers->ordered)
+ return 0;
+
+ set_bit(WORK_DONE_BIT, &work->flags);
+
+ spin_lock_irqsave(&workers->lock, flags);
+
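+ /* walk oldest-first; stop at the first item whose func hasn't finished */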
+ while (!list_empty(&workers->order_list)) {
+ work = list_entry(workers->order_list.next,
+ struct btrfs_work, order_list);
+
+ if (!test_bit(WORK_DONE_BIT, &work->flags))
+ break;
+
+ /* we are going to call the ordered done function, but
+ * we leave the work item on the list as a barrier so
+ * that later work items that are done don't have their
+ * functions called before this one returns
+ */
+ if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
+ break;
+
+ spin_unlock_irqrestore(&workers->lock, flags);
+
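+ /* unlock so ordered_func is free to block or queue more work */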
+ work->ordered_func(work);
+
+ /* now take the lock again and call the freeing code */
+ spin_lock_irqsave(&workers->lock, flags);
+ list_del(&work->order_list);
+ work->ordered_free(work);
+ }
+
+ spin_unlock_irqrestore(&workers->lock, flags);
+ return 0;
+}
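+
+/*
+ * illustrative sketch only (the callback names are hypothetical, not
+ * part of this change): a caller opts into ordered completions by
+ * setting workers->ordered before starting threads, and supplies all
+ * three callbacks on every work item:
+ *
+ *	btrfs_init_workers(&workers, "demo", 4);
+ *	workers.ordered = 1;
+ *	btrfs_start_workers(&workers, 1);
+ *
+ *	work->func = csum_work;           may run on any worker
+ *	work->ordered_func = csum_done;   called strictly in queue order
+ *	work->ordered_free = csum_free;   frees the item, also in order
+ *	btrfs_queue_worker(&workers, work);
+ */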
+
/*
* main loop for servicing work items
*/
struct btrfs_work *work;
do {
spin_lock_irq(&worker->lock);
- while(!list_empty(&worker->pending)) {
+ while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
work = list_entry(cur, struct btrfs_work, list);
list_del(&work->list);
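+ /*
+ * once the bit is clear, btrfs_queue_worker or btrfs_requeue_work
+ * can put this item back on a pending list
+ */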
- clear_bit(0, &work->flags);
+ clear_bit(WORK_QUEUED_BIT, &work->flags);
work->worker = worker;
spin_unlock_irq(&worker->lock);
work->func(work);
atomic_dec(&worker->num_pending);
+ /*
+ * unless this is an ordered work queue, 'work' was probably freed
+ * by func above and must not be touched again.  Ordered queues
+ * keep the item alive until ordered_free runs, and
+ * run_ordered_completions only dereferences it for those.
+ */
+ run_ordered_completions(worker->workers, work);
+
spin_lock_irq(&worker->lock);
check_idle_worker(worker);
}
worker->working = 0;
if (freezing(current)) {
} else {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&worker->lock);
- schedule();
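+ /*
+ * kthread_stop() wakes us after setting the stop flag, and we have
+ * already set TASK_INTERRUPTIBLE, so recheck here to avoid
+ * sleeping forever once a stop has been requested
+ */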
+ if (!kthread_should_stop())
+ schedule();
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
struct btrfs_worker_thread *worker;
list_splice_init(&workers->idle_list, &workers->worker_list);
- while(!list_empty(&workers->worker_list)) {
+ while (!list_empty(&workers->worker_list)) {
cur = workers->worker_list.next;
worker = list_entry(cur, struct btrfs_worker_thread,
worker_list);
workers->num_workers = 0;
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
+ INIT_LIST_HEAD(&workers->order_list);
spin_lock_init(&workers->lock);
workers->max_workers = max;
workers->idle_thresh = 32;
workers->name = name;
+ workers->ordered = 0;
}
spin_lock_irq(&workers->lock);
list_add_tail(&worker->worker_list, &workers->idle_list);
+ worker->idle = 1;
workers->num_workers++;
spin_unlock_irq(&workers->lock);
}
/*
* if we pick a busy task, move the task to the end of the list.
- * hopefully this will keep things somewhat evenly balanced
+ * hopefully this will keep things somewhat evenly balanced.
+ * Do the move in batches based on the sequence number. This groups
+ * requests submitted at roughly the same time onto the same worker.
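+ * e.g. with idle_thresh at its initial value of 32, a busy worker
+ * absorbs 32 consecutive submissions before the rotation moves on.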
*/
next = workers->worker_list.next;
worker = list_entry(next, struct btrfs_worker_thread, worker_list);
- list_move_tail(next, &workers->worker_list);
+ atomic_inc(&worker->num_pending);
+ worker->sequence++;
+
+ if (worker->sequence % workers->idle_thresh == 0)
+ list_move_tail(next, &workers->worker_list);
return worker;
}
+/*
+ * selects a worker thread to take the next job. This will either find
+ * an idle worker, start a new worker up to the max count, or just return
+ * one of the existing busy workers.
+ */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
struct btrfs_worker_thread *worker = work->worker;
unsigned long flags;
- if (test_and_set_bit(0, &work->flags))
+ if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
goto out;
spin_lock_irqsave(&worker->lock, flags);
atomic_inc(&worker->num_pending);
list_add_tail(&work->list, &worker->pending);
- check_busy_worker(worker);
+
+ /* by definition we're busy, take ourselves off the idle
+ * list
+ */
+ if (worker->idle) {
+ /*
+ * irqs are already off and 'flags' belongs to worker->lock,
+ * so don't clobber it with a second irqsave here
+ */
+ spin_lock(&worker->workers->lock);
+ worker->idle = 0;
+ list_move_tail(&worker->worker_list,
+ &worker->workers->worker_list);
+ spin_unlock(&worker->workers->lock);
+ }
+
spin_unlock_irqrestore(&worker->lock, flags);
+
out:
return 0;
}
int wake = 0;
/* don't requeue something already on a list */
- if (test_and_set_bit(0, &work->flags))
+ if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
goto out;
worker = find_worker(workers);
+ if (workers->ordered) {
+ spin_lock_irqsave(&workers->lock, flags);
+ list_add_tail(&work->order_list, &workers->order_list);
+ spin_unlock_irqrestore(&workers->lock, flags);
+ } else {
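+ /* unordered queues never walk order_list, but keep the head sane */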
+ INIT_LIST_HEAD(&work->order_list);
+ }
spin_lock_irqsave(&worker->lock, flags);
atomic_inc(&worker->num_pending);