/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

static int slow_work_min_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */
#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "min-threads",
		.data		= &slow_work_min_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_min_threads_sysctl,
		.extra1		= (void *) &slow_work_min_min_threads,
		.extra2		= &slow_work_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max-threads",
		.data		= &slow_work_max_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_max_threads_sysctl,
		.extra1		= &slow_work_min_threads,
		.extra2		= (void *) &slow_work_max_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "vslow-percentage",
		.data		= &vslow_work_proportion,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= (void *) &slow_work_min_vslow,
		.extra2		= (void *) &slow_work_max_vslow,
	},
	{ .ctl_name = 0 }
};
#endif
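/*
 * Illustrative note, not taken from this file: where CONFIG_SYSCTL is enabled
 * and the architecture wires slow_work_sysctls into the sysctl tree, these
 * knobs normally appear as /proc/sys/kernel/slow-work/min-threads,
 * max-threads and vslow-percentage (see Documentation/slow-work.txt).  An
 * administrator could then, for example, raise the pool ceiling with:
 *
 *	echo 8 >/proc/sys/kernel/slow-work/max-threads
 *
 * Writes are clamped by proc_dointvec_minmax() and the handlers below to the
 * [extra1, extra2] bounds given in the table above.
 */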
/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */
/*
 * slow work ID allocation (use slow_work_queue_lock)
 */
static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
/*
 * Unregistration tracking to prevent put_ref() from disappearing during module
 * unloading
 */
#ifdef CONFIG_MODULES
static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
static struct module *slow_work_unreg_module;
static struct slow_work *slow_work_unreg_work_item;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
static DEFINE_MUTEX(slow_work_unreg_sync_lock);
#endif
/*
 * The queues of work items and the lock governing access to them.  These are
 * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);
/*
 * The thread controls.  A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);
/*
 * The number of users of the thread pool and its lock.  Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);
static inline int slow_work_get_ref(struct slow_work *work)
{
	if (work->ops->get_ref)
		return work->ops->get_ref(work);
	return 0;
}

static inline void slow_work_put_ref(struct slow_work *work)
{
	if (work->ops->put_ref)
		work->ops->put_ref(work);
}
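/*
 * The get_ref/put_ref operations are optional; when a client does not supply
 * them the item is assumed to be pinned by other means.  A hypothetical
 * client-side sketch (the names my_object, my_get_ref, my_put_ref and
 * my_release are illustrative, not part of this API) could pin the containing
 * object with a kref:
 *
 *	struct my_object {
 *		struct kref		ref;
 *		struct slow_work	work;
 *	};
 *
 *	static int my_get_ref(struct slow_work *work)
 *	{
 *		kref_get(&container_of(work, struct my_object, work)->ref);
 *		return 0;
 *	}
 *
 *	static void my_put_ref(struct slow_work *work)
 *	{
 *		kref_put(&container_of(work, struct my_object, work)->ref,
 *			 my_release);
 *	}
 *
 * where my_release() frees the object once the final reference is dropped.
 */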
/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool.  This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;
	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
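/*
 * Worked example (using the default tunables above): with four threads in the
 * pool and vslow_work_proportion at 50, vsmax = 4 * 50 / 100 = 2; that is at
 * least 1 and below the cap of slow_work_max_threads - 1 = 3, so up to two
 * threads may be tied up in very slow items whilst at least one remains free
 * for ordinary slow work.
 */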
/*
 * Attempt to execute stuff queued on a slow thread.  Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(int id)
{
#ifdef CONFIG_MODULES
	struct module *module;
#endif
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();
	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);
	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}
#ifdef CONFIG_MODULES
	if (work)
		slow_work_thread_processing[id] = work->owner;
#endif
	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;
	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();
	work->ops->execute(work);
	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}
	/* sort out the race between module unloading and put_ref() */
	slow_work_put_ref(work);

#ifdef CONFIG_MODULES
	module = slow_work_thread_processing[id];
	slow_work_thread_processing[id] = NULL;
	if (slow_work_unreg_work_item == work ||
	    slow_work_unreg_module == module)
		wake_up_all(&slow_work_unreg_wq);
#endif

	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	slow_work_thread_processing[id] = NULL;
	return true;
}
/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing.  If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations.  The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations.  It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention.  The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute.  This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (slow_work_get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
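/*
 * Illustrative usage sketch (not part of this file; my_item, my_execute and
 * my_ops are hypothetical names).  A client embeds a struct slow_work in its
 * own data, points it at a slow_work_ops and queues it:
 *
 *	static void my_execute(struct slow_work *work)
 *	{
 *		(do the slow operation here; it may sleep)
 *	}
 *
 *	static const struct slow_work_ops my_ops = {
 *		.owner		= THIS_MODULE,
 *		.execute	= my_execute,
 *	};
 *
 *	static struct slow_work my_item;
 *
 *	slow_work_init(&my_item, &my_ops);
 *	if (slow_work_enqueue(&my_item) < 0)
 *		(handle -EAGAIN: the item's reference could not be taken)
 *
 * An item destined for the very slow queue is set up with vslow_work_init()
 * from <linux/slow-work.h> instead, which marks it SLOW_WORK_VERY_SLOW.
 */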
/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
	mod_timer(&slow_work_cull_timer,
		  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}
/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;
		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			slow_work_schedule_cull();
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}
/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}
/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax, id;
	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	/* allocate ourselves an ID */
	spin_lock_irq(&slow_work_queue_lock);
	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
	__set_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	sprintf(current->comm, "kslowd%03u", id);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
					  TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute(id)) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				slow_work_schedule_cull();
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	spin_lock_irq(&slow_work_queue_lock);
	__clear_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}
/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}
/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}
static const struct slow_work_ops slow_work_new_thread_ops = {
	.owner		= THIS_MODULE,
	.execute	= slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}
#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to start or stop threads */
			n = atomic_read(&slow_work_thread_count) -
				slow_work_min_threads;
			if (n < 0 && !slow_work_may_not_start_new_thread)
				slow_work_enqueue(&slow_work_new_thread);
			else if (n > 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}
	return ret;
}
/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to stop threads */
			n = slow_work_max_threads -
				atomic_read(&slow_work_thread_count);
			if (n < 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}
	return ret;
}
#endif /* CONFIG_SYSCTL */
/**
 * slow_work_register_user - Register a user of the facility
 * @module: The module about to make use of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point.  This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(struct module *module)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);

	printk(KERN_ERR "Slow work thread pool:"
	       " Aborting startup on ENOMEM\n");
	slow_work_threads_should_exit = true;
	wake_up_all(&slow_work_thread_wq);
	wait_for_completion(&slow_work_last_thread_exited);
	printk(KERN_ERR "Slow work thread pool: Aborted\n");

	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);
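/*
 * A facility user registers before queueing any items, typically from module
 * initialisation (hypothetical sketch, not part of this file):
 *
 *	static int __init my_module_init(void)
 *	{
 *		int ret = slow_work_register_user(THIS_MODULE);
 *		if (ret < 0)
 *			return ret;
 *		return 0;
 *	}
 */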
/*
 * wait for all outstanding items from the calling module to complete
 * - note that more items may be queued whilst we're waiting
 */
static void slow_work_wait_for_items(struct module *module)
{
	DECLARE_WAITQUEUE(myself, current);
	struct slow_work *work;
	int loop;

	mutex_lock(&slow_work_unreg_sync_lock);
	add_wait_queue(&slow_work_unreg_wq, &myself);

	for (;;) {
		spin_lock_irq(&slow_work_queue_lock);

		/* first of all, we wait for the last queued item in each list
		 * to be processed */
		list_for_each_entry_reverse(work, &vslow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}
		list_for_each_entry_reverse(work, &slow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}

		/* then we wait for the items being processed to finish */
		slow_work_unreg_module = module;
		set_current_state(TASK_UNINTERRUPTIBLE);
		for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
			if (slow_work_thread_processing[loop] == module)
				goto do_wait;
		}
		spin_unlock_irq(&slow_work_queue_lock);
		break; /* okay, we're done */

	do_wait:
		spin_unlock_irq(&slow_work_queue_lock);
		schedule();
		slow_work_unreg_work_item = NULL;
		slow_work_unreg_module = NULL;
	}

	remove_wait_queue(&slow_work_unreg_wq, &myself);
	mutex_unlock(&slow_work_unreg_sync_lock);
}
/**
 * slow_work_unregister_user - Unregister a user of the facility
 * @module: The module whose items should be cleared
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 *
 * This waits for all the work items belonging to the nominated module to go
 * away before proceeding.
 */
void slow_work_unregister_user(struct module *module)
{
	/* first of all, wait for all outstanding items from the calling module
	 * to complete */
	if (module)
		slow_work_wait_for_items(module);

	/* then we can actually go about shutting down the facility if need
	 * be */
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		del_timer_sync(&slow_work_cull_timer);
		del_timer_sync(&slow_work_oom_timer);
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
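/*
 * And the matching teardown, again a hypothetical sketch: a module drops its
 * use of the facility from its exit path, which blocks until all of its queued
 * or executing items have completed:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		slow_work_unregister_user(THIS_MODULE);
 *	}
 */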
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (slow_work_max_threads < nr_cpus)
		slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
	if (slow_work_max_max_threads < nr_cpus * 2)
		slow_work_max_max_threads = nr_cpus * 2;
#endif
	return 0;
}

subsys_initcall(init_slow_work);