Btrfs: Lower contention on the csum mutex
fs/btrfs/async-thread.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

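/*
 * For reference, a sketch of what this file relies on from async-thread.h,
 * reconstructed from the fields used below (not a verbatim copy of the
 * header):
 *
 *	struct btrfs_work {
 *		// set by the submitter; called with the work as the only arg
 *		void (*func)(struct btrfs_work *work);
 *
 *		// bit 0 marks the work as queued; must start out zero
 *		unsigned long flags;
 *
 *		// managed by this file
 *		struct btrfs_worker_thread *worker;
 *		struct list_head list;
 *	};
 *
 *	struct btrfs_workers {
 *		int num_workers;	// currently running threads
 *		int max_workers;	// cap enforced by find_worker()
 *		int idle_thresh;	// pending count that makes a worker busy
 *		struct list_head worker_list;	// busy threads
 *		struct list_head idle_list;	// idle threads
 *		spinlock_t lock;	// protects both lists
 *		char *name;		// used in the kthread names
 *	};
 */
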
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* used to batch list rotation in next_worker */
        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}
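
/*
 * Note the hysteresis between the two helpers above: a worker moves to the
 * busy list once num_pending reaches idle_thresh, but does not move back
 * to the idle list until num_pending drops below idle_thresh / 2.  This
 * keeps a worker from bouncing between the two lists when its queue length
 * hovers right at the threshold.
 */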

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;
        do {
                spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        /* bit 0 of flags marks the work as queued */
                        clear_bit(0, &work->flags);

                        work->worker = worker;

                        /* drop the lock while the work function runs */
                        spin_unlock_irq(&worker->lock);

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                worker->working = 0;
                if (freezing(current)) {
                        /* don't sleep in the refrigerator with our lock held */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
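
/*
 * Example (illustrative; the my_* names are hypothetical, not part of
 * btrfs): a work function serviced by the loop above.  It receives its own
 * struct btrfs_work and typically uses container_of() to reach the
 * enclosing job:
 *
 *	struct my_csum_job {
 *		struct btrfs_work work;
 *		struct bio *bio;
 *	};
 *
 *	static void my_csum_fn(struct btrfs_work *work)
 *	{
 *		struct my_csum_job *job =
 *			container_of(work, struct my_csum_job, work);
 *
 *		// checksum job->bio here; this runs in worker thread context
 *	}
 */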

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        /* default busy threshold; callers can tune this after init */
        workers->idle_thresh = 32;
        workers->name = name;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);

                /*
                 * set the pool pointer before the thread starts running.
                 * num_workers already counts the threads created so far,
                 * so on its own it gives each thread a unique index
                 */
                worker->workers = workers;
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers);
                if (IS_ERR(worker->task)) {
                        ret = PTR_ERR(worker->task);
                        kfree(worker);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}
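
/*
 * Example (illustrative; fs_info->workers is just a stand-in for wherever
 * the caller keeps its pool): typical setup is one init call, at least one
 * started thread, and a btrfs_stop_workers() call at teardown.  Note that
 * find_worker() will start more threads on demand, up to the max of 8 here:
 *
 *	btrfs_init_workers(&fs_info->workers, "worker", 8);
 *	ret = btrfs_start_workers(&fs_info->workers, 1);
 *	if (ret)
 *		goto fail;
 *	...
 *	btrfs_stop_workers(&fs_info->workers);
 */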

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced.  The
         * rotation is batched on sequence so that runs of submissions land
         * on the same worker instead of round-robining on every call.
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        atomic_inc(&worker->num_pending);
        worker->sequence++;
        if (worker->sequence % workers->idle_thresh == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}
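
/*
 * Worked example of the batching above: with the default idle_thresh of 32,
 * successive submissions that fall through to the busy path stick to the
 * worker at the head of worker_list until that worker's sequence count
 * reaches the next multiple of 32; only then is it rotated to the tail, and
 * the next batch lands on the new head.  This groups requests submitted at
 * roughly the same time onto the same thread.
 */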

/*
 * selects a worker to take the next job: reuse an idle or lightly loaded
 * worker when next_worker() finds one, otherwise start a new thread while
 * we're below max_workers, or fall back to any existing worker once at the
 * limit.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find a lightly loaded worker and
                         * we're at the thread limit, so fall back to the
                         * first worker we can find, preferring an idle one
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                  struct btrfs_worker_thread, worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.  Since it
 * is called from the work function itself, the worker is already awake and
 * no wake_up_process() is needed here.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}
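
/*
 * Example (illustrative; all names here are hypothetical): a long running
 * work function that processes a bounded chunk per pass and requeues
 * itself until it is finished:
 *
 *	static void my_long_fn(struct btrfs_work *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job, work);
 *
 *		if (!my_make_some_progress(job)) {
 *			// not done yet, go to the back of this worker's queue
 *			btrfs_requeue_work(work);
 *			return;
 *		}
 *		my_finish(job);
 *	}
 */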

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
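
/*
 * Example (illustrative; my_csum_job and my_csum_fn are the hypothetical
 * names from the worker_loop example above, and fs_info->workers is a
 * stand-in for the caller's pool): queueing a job.  The embedded
 * btrfs_work must be zeroed, flags in particular, before its first use:
 *
 *	struct my_csum_job *job = kzalloc(sizeof(*job), GFP_NOFS);
 *
 *	if (!job)
 *		return -ENOMEM;
 *	job->work.func = my_csum_fn;
 *	btrfs_queue_worker(&fs_info->workers, &job->work);
 */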