/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */
#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);
#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif
/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */
#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;
ctl_table slow_work_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "min-threads",
		.data		= &slow_work_min_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_min_threads_sysctl,
		.extra1		= (void *) &slow_work_min_min_threads,
		.extra2		= &slow_work_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max-threads",
		.data		= &slow_work_max_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_max_threads_sysctl,
		.extra1		= &slow_work_min_threads,
		.extra2		= (void *) &slow_work_max_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "vslow-percentage",
		.data		= &vslow_work_proportion,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= (void *) &slow_work_min_vslow,
		.extra2		= (void *) &slow_work_max_vslow,
	},
	{ .ctl_name = 0 }
};
#endif
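/* Illustrative tuning note: assuming this table is exposed under
 * /proc/sys/kernel/slow-work/ (see Documentation/slow-work.txt), an
 * administrator could adjust the pool at runtime with, for example:
 *
 *	echo 8  >/proc/sys/kernel/slow-work/max-threads
 *	echo 25 >/proc/sys/kernel/slow-work/vslow-percentage
 *
 * min-threads may not be lowered below 2, and vslow-percentage is clamped to
 * the 1-99 range by the limits above.
 */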
/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull;	/* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */
/*
 * slow work ID allocation (use slow_work_queue_lock)
 */
static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
/*
 * Unregistration tracking to prevent put_ref() from disappearing during module
 * unload
 */
#ifdef CONFIG_MODULES
static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
static struct module *slow_work_unreg_module;
static struct slow_work *slow_work_unreg_work_item;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
static DEFINE_MUTEX(slow_work_unreg_sync_lock);
#endif
/*
 * The queues of work items and the lock governing access to them.  These are
 * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);
/*
 * The thread controls.  A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);
/*
 * The number of users of the thread pool and its lock.  Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);
/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool.  This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
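/* Worked example (illustrative arithmetic only): with the defaults of 4
 * threads maximum and a 50% very-slow proportion, a fully populated pool
 * gives vsmax = 4 * 50 / 100 = 2, so at most two threads may be tied up in
 * very slow items while the others remain free for ordinary slow work.
 */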
/*
 * Attempt to execute stuff queued on a slow thread.  Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(int id)
{
#ifdef CONFIG_MODULES
	struct module *module;
#endif
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}

#ifdef CONFIG_MODULES
	if (work)
		slow_work_thread_processing[id] = work->owner;
#endif

	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;

	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}

	/* sort out the race between module unloading and put_ref() */
	work->ops->put_ref(work);

#ifdef CONFIG_MODULES
	module = slow_work_thread_processing[id];
	slow_work_thread_processing[id] = NULL;
	smp_mb();
	if (slow_work_unreg_work_item == work ||
	    slow_work_unreg_module == module)
		wake_up_all(&slow_work_unreg_wq);
#endif

	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	slow_work_thread_processing[id] = NULL;
	return true;
}
/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing.  If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations.  The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations.  It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention.  The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute.  This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work);
	BUG_ON(!work->ops);
	BUG_ON(!work->ops->get_ref);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (work->ops->get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
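#if 0
/* Illustrative usage sketch (not compiled): the item type and callbacks below
 * are hypothetical and only show the expected calling pattern; see
 * Documentation/slow-work.txt for the authoritative interface description.
 */
struct my_object {
	struct slow_work work;	/* embedded item; must stay pinned while queued */
	/* ... caller-specific state ... */
};

static int my_get_ref(struct slow_work *work)
{
	/* pin the containing object; return 0 on success */
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	/* unpin the object pinned by my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);

	/* perform the slow operation on obj; this may sleep */
	(void)obj;
}

static const struct slow_work_ops my_slow_work_ops = {
	.owner		= THIS_MODULE,
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static void my_kick(struct my_object *obj)
{
	slow_work_init(&obj->work, &my_slow_work_ops);
	if (slow_work_enqueue(&obj->work) == -EAGAIN) {
		/* get_ref() failed, so the item was not queued */
	}
}
#endif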
/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
	mod_timer(&slow_work_cull_timer,
		  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}
/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			slow_work_schedule_cull();
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}
/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}
/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax, id;

	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	/* allocate ourselves an ID */
	spin_lock_irq(&slow_work_queue_lock);
	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
	__set_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	sprintf(current->comm, "kslowd%03u", id);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
					  TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute(id)) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				slow_work_schedule_cull();
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	spin_lock_irq(&slow_work_queue_lock);
	__clear_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}
/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}
/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
	return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}
/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}
static const struct slow_work_ops slow_work_new_thread_ops = {
	.owner		= THIS_MODULE,
	.get_ref	= slow_work_new_thread_get_ref,
	.put_ref	= slow_work_new_thread_put_ref,
	.execute	= slow_work_new_thread_execute,
};
/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}
#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to start or stop threads */
			n = atomic_read(&slow_work_thread_count) -
				slow_work_min_threads;

			if (n < 0 && !slow_work_may_not_start_new_thread)
				slow_work_enqueue(&slow_work_new_thread);
			else if (n > 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}
	return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to stop threads */
			n = slow_work_max_threads -
				atomic_read(&slow_work_thread_count);

			if (n < 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}
	return ret;
}
#endif /* CONFIG_SYSCTL */
/**
 * slow_work_register_user - Register a user of the facility
 * @module: The module about to make use of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point.  This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(struct module *module)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);
/*
 * wait for all outstanding items from the calling module to complete
 * - note that more items may be queued whilst we're waiting
 */
static void slow_work_wait_for_items(struct module *module)
{
	DECLARE_WAITQUEUE(myself, current);
	struct slow_work *work;
	int loop;

	mutex_lock(&slow_work_unreg_sync_lock);
	add_wait_queue(&slow_work_unreg_wq, &myself);

	for (;;) {
		spin_lock_irq(&slow_work_queue_lock);

		/* first of all, we wait for the last queued item in each list
		 * to be processed */
		list_for_each_entry_reverse(work, &vslow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}
		list_for_each_entry_reverse(work, &slow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}

		/* then we wait for the items being processed to finish */
		slow_work_unreg_module = module;
		smp_mb();
		for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
			if (slow_work_thread_processing[loop] == module)
				goto do_wait;
		}
		spin_unlock_irq(&slow_work_queue_lock);
		break; /* okay, we're done */

	do_wait:
		spin_unlock_irq(&slow_work_queue_lock);
		schedule();
		slow_work_unreg_work_item = NULL;
		slow_work_unreg_module = NULL;
	}

	remove_wait_queue(&slow_work_unreg_wq, &myself);
	mutex_unlock(&slow_work_unreg_sync_lock);
}
/**
 * slow_work_unregister_user - Unregister a user of the facility
 * @module: The module whose items should be cleared
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 *
 * This waits for all the work items belonging to the nominated module to go
 * away before proceeding.
 */
void slow_work_unregister_user(struct module *module)
{
	/* first of all, wait for all outstanding items from the calling module
	 * to complete */
	if (module)
		slow_work_wait_for_items(module);

	/* then we can actually go about shutting down the facility if need
	 * be */
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		del_timer_sync(&slow_work_cull_timer);
		del_timer_sync(&slow_work_oom_timer);
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
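#if 0
/* Illustrative sketch (not compiled): a user of the facility would typically
 * bracket its use of the pool like this.  my_fs_init()/my_fs_exit() are
 * hypothetical module init/exit routines.
 */
static int __init my_fs_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;
	/* ... start queueing slow work items with slow_work_enqueue() ... */
	return 0;
}

static void __exit my_fs_exit(void)
{
	/* ... stop producing new work items ... */
	slow_work_unregister_user(THIS_MODULE);
	/* all of this module's queued and executing items have now completed */
}
#endif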
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (slow_work_max_threads < nr_cpus)
		slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
	if (slow_work_max_max_threads < nr_cpus * 2)
		slow_work_max_max_threads = nr_cpus * 2;
#endif
	return 0;
}

subsys_initcall(init_slow_work);