X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fcpuset.c;h=cfaf6419d817e0a387f1c39d878f342e279818b6;hb=1c45f60406e61a06631416264e49eb5afd9fc324;hp=6633f3fb641751fb3d37de5c2e66dbcc3ae7d7e6;hpb=18a19cb3047e454ee5ecbc35d7acf3f8e09e0466;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 6633f3f..cfaf641 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -4,22 +4,22 @@ * Processor and Memory placement constraints for sets of tasks. * * Copyright (C) 2003 BULL SA. - * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2004-2007 Silicon Graphics, Inc. + * Copyright (C) 2006 Google, Inc * * Portions derived from Patrick Mochel's sysfs code. * sysfs is Copyright (c) 2001-3 Patrick Mochel - * Portions Copyright (c) 2004 Silicon Graphics, Inc. * - * 2003-10-10 Written by Simon Derr + * 2003-10-10 Written by Simon Derr. * 2003-10-22 Updates by Stephen Hemminger. - * 2004 May-July Rework by Paul Jackson + * 2004 May-July Rework by Paul Jackson. + * 2006 Rework by Paul Menage to use generic cgroups * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ -#include #include #include #include @@ -32,16 +32,19 @@ #include #include #include +#include #include #include #include #include #include +#include #include +#include #include #include +#include #include -#include #include #include #include @@ -51,68 +54,108 @@ #include #include -#include +#include +#include -#define CPUSET_SUPER_MAGIC 0x27e0eb +/* + * Tracks how many cpusets are currently defined in system. + * When there is only one cpuset (the root cpuset) we can + * short circuit some hooks. + */ +int number_of_cpusets __read_mostly; + +/* Retrieve the cpuset from a cgroup */ +struct cgroup_subsys cpuset_subsys; +struct cpuset; + +/* See "Frequency meter" comments, below. */ + +struct fmeter { + int cnt; /* unprocessed events count */ + int val; /* most recent output value */ + time_t time; /* clock (secs) when val computed */ + spinlock_t lock; /* guards read or write of above */ +}; struct cpuset { + struct cgroup_subsys_state css; + unsigned long flags; /* "unsigned long" so bitops work */ cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ - /* - * Count is atomic so can incr (fork) or decr (exit) without a lock. - */ - atomic_t count; /* count tasks using this cpuset */ - - /* - * We link our 'sibling' struct into our parents 'children'. - * Our children link their 'sibling' into our 'children'. - */ - struct list_head sibling; /* my parents children */ - struct list_head children; /* my children */ - struct cpuset *parent; /* my parent */ - struct dentry *dentry; /* cpuset fs entry */ /* * Copy of global cpuset_mems_generation as of the most * recent time this cpuset changed its mems_allowed. 
*/ - int mems_generation; + int mems_generation; + + struct fmeter fmeter; /* memory_pressure filter */ + + /* partition number for rebuild_sched_domains() */ + int pn; }; +/* Retrieve the cpuset for a cgroup */ +static inline struct cpuset *cgroup_cs(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpuset_subsys_id), + struct cpuset, css); +} + +/* Retrieve the cpuset for a task */ +static inline struct cpuset *task_cs(struct task_struct *task) +{ + return container_of(task_subsys_state(task, cpuset_subsys_id), + struct cpuset, css); +} + + /* bits in struct cpuset flags field */ typedef enum { CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, - CS_REMOVED, - CS_NOTIFY_ON_RELEASE + CS_MEMORY_MIGRATE, + CS_SCHED_LOAD_BALANCE, + CS_SPREAD_PAGE, + CS_SPREAD_SLAB, } cpuset_flagbits_t; /* convenient tests for these bits */ static inline int is_cpu_exclusive(const struct cpuset *cs) { - return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags); + return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); } static inline int is_mem_exclusive(const struct cpuset *cs) { - return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags); + return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); +} + +static inline int is_sched_load_balance(const struct cpuset *cs) +{ + return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); +} + +static inline int is_memory_migrate(const struct cpuset *cs) +{ + return test_bit(CS_MEMORY_MIGRATE, &cs->flags); } -static inline int is_removed(const struct cpuset *cs) +static inline int is_spread_page(const struct cpuset *cs) { - return !!test_bit(CS_REMOVED, &cs->flags); + return test_bit(CS_SPREAD_PAGE, &cs->flags); } -static inline int notify_on_release(const struct cpuset *cs) +static inline int is_spread_slab(const struct cpuset *cs) { - return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); + return test_bit(CS_SPREAD_SLAB, &cs->flags); } /* - * Increment this atomic integer everytime any cpuset changes its + * Increment this integer everytime any cpuset changes its * mems_allowed value. Users of cpusets can track this generation * number, and avoid having to lock and reload mems_allowed unless * the cpuset they're using changes generation. @@ -126,82 +169,70 @@ static inline int notify_on_release(const struct cpuset *cs) * on every visit to __alloc_pages(), to efficiently check whether * its current->cpuset->mems_allowed has changed, requiring an update * of its current->mems_allowed. + * + * Since cpuset_mems_generation is guarded by manage_mutex, + * there is no need to mark it atomic. */ -static atomic_t cpuset_mems_generation = ATOMIC_INIT(1); +static int cpuset_mems_generation; static struct cpuset top_cpuset = { .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), .cpus_allowed = CPU_MASK_ALL, .mems_allowed = NODE_MASK_ALL, - .count = ATOMIC_INIT(0), - .sibling = LIST_HEAD_INIT(top_cpuset.sibling), - .children = LIST_HEAD_INIT(top_cpuset.children), - .parent = NULL, - .dentry = NULL, - .mems_generation = 0, }; -static struct vfsmount *cpuset_mount; -static struct super_block *cpuset_sb = NULL; - /* - * We have two global cpuset semaphores below. They can nest. - * It is ok to first take manage_sem, then nest callback_sem. We also + * We have two global cpuset mutexes below. They can nest. + * It is ok to first take manage_mutex, then nest callback_mutex. We also * require taking task_lock() when dereferencing a tasks cpuset pointer. * See "The task_lock() exception", at the end of this comment. * - * A task must hold both semaphores to modify cpusets. 
If a task - * holds manage_sem, then it blocks others wanting that semaphore, - * ensuring that it is the only task able to also acquire callback_sem + * A task must hold both mutexes to modify cpusets. If a task + * holds manage_mutex, then it blocks others wanting that mutex, + * ensuring that it is the only task able to also acquire callback_mutex * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can - * also allocate memory while just holding manage_sem. While it is + * also allocate memory while just holding manage_mutex. While it is * performing these checks, various callback routines can briefly - * acquire callback_sem to query cpusets. Once it is ready to make - * the changes, it takes callback_sem, blocking everyone else. + * acquire callback_mutex to query cpusets. Once it is ready to make + * the changes, it takes callback_mutex, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding - * callback_sem, as that would risk double tripping on callback_sem + * callback_mutex, as that would risk double tripping on callback_mutex * from one of the callbacks into the cpuset code from within * __alloc_pages(). * - * If a task is only holding callback_sem, then it has read-only + * If a task is only holding callback_mutex, then it has read-only * access to cpusets. * * The task_struct fields mems_allowed and mems_generation may only * be accessed in the context of that task, so require no locks. * * Any task can increment and decrement the count field without lock. - * So in general, code holding manage_sem or callback_sem can't rely + * So in general, code holding manage_mutex or callback_mutex can't rely * on the count field not changing. However, if the count goes to - * zero, then only attach_task(), which holds both semaphores, can + * zero, then only attach_task(), which holds both mutexes, can * increment it again. Because a count of zero means that no tasks * are currently attached, therefore there is no way a task attached * to that cpuset can fork (the other way to increment the count). - * So code holding manage_sem or callback_sem can safely assume that + * So code holding manage_mutex or callback_mutex can safely assume that * if the count is zero, it will stay zero. Similarly, if a task - * holds manage_sem or callback_sem on a cpuset with zero count, it + * holds manage_mutex or callback_mutex on a cpuset with zero count, it * knows that the cpuset won't be removed, as cpuset_rmdir() needs - * both of those semaphores. - * - * A possible optimization to improve parallelism would be to make - * callback_sem a R/W semaphore (rwsem), allowing the callback routines - * to proceed in parallel, with read access, until the holder of - * manage_sem needed to take this rwsem for exclusive write access - * and modify some cpusets. + * both of those mutexes. * * The cpuset_common_file_write handler for operations that modify - * the cpuset hierarchy holds manage_sem across the entire operation, + * the cpuset hierarchy holds manage_mutex across the entire operation, * single threading all such cpuset modifications across the system. * - * The cpuset_common_file_read() handlers only hold callback_sem across + * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. 
* * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't - * (usually) take either semaphore. These are the two most performance + * (usually) take either mutex. These are the two most performance * critical pieces of code here. The exception occurs on cpuset_exit(), - * when a task in a notify_on_release cpuset exits. Then manage_sem + * when a task in a notify_on_release cpuset exits. Then manage_mutex * is taken, and if the cpuset count is zero, a usermode call made * to /sbin/cpuset_release_agent with the name of the cpuset (path * relative to the root of cpuset file system) as the argument. @@ -209,7 +240,7 @@ static struct super_block *cpuset_sb = NULL; * A cpuset can only be deleted if both its 'count' of using tasks * is zero, and its list of 'children' cpusets is empty. Since all * tasks in the system use _some_ cpuset, and since there is always at - * least one task in the system (init, pid == 1), therefore, top_cpuset + * least one task in the system (init), therefore, top_cpuset * always has either children cpusets and/or using tasks. So we don't * need a special hack to ensure that top_cpuset cannot be deleted. * @@ -219,307 +250,47 @@ static struct super_block *cpuset_sb = NULL; * * The need for this exception arises from the action of attach_task(), * which overwrites one tasks cpuset pointer with another. It does - * so using both semaphores, however there are several performance + * so using both mutexes, however there are several performance * critical places that need to reference task->cpuset without the - * expense of grabbing a system global semaphore. Therefore except as + * expense of grabbing a system global mutex. Therefore except as * noted below, when dereferencing or, as in attach_task(), modifying * a tasks cpuset pointer we use task_lock(), which acts on a spinlock * (task->alloc_lock) already in the task_struct routinely used for * such matters. + * + * P.S. One more locking exception. RCU is used to guard the + * update of a tasks cpuset pointer by attach_task() and the + * access of task->cpuset->mems_generation via that pointer in + * the routine cpuset_update_task_memory_state(). */ -static DECLARE_MUTEX(manage_sem); -static DECLARE_MUTEX(callback_sem); - -/* - * A couple of forward declarations required, due to cyclic reference loop: - * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file - * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir. - */ - -static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode); -static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry); - -static struct backing_dev_info cpuset_backing_dev_info = { - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, -}; - -static struct inode *cpuset_new_inode(mode_t mode) -{ - struct inode *inode = new_inode(cpuset_sb); - - if (inode) { - inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; - inode->i_blksize = PAGE_CACHE_SIZE; - inode->i_blocks = 0; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info; - } - return inode; -} - -static void cpuset_diput(struct dentry *dentry, struct inode *inode) -{ - /* is dentry a directory ? 
if so, kfree() associated cpuset */ - if (S_ISDIR(inode->i_mode)) { - struct cpuset *cs = dentry->d_fsdata; - BUG_ON(!(is_removed(cs))); - kfree(cs); - } - iput(inode); -} - -static struct dentry_operations cpuset_dops = { - .d_iput = cpuset_diput, -}; - -static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name) -{ - struct dentry *d = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(d)) - d->d_op = &cpuset_dops; - return d; -} - -static void remove_dir(struct dentry *d) -{ - struct dentry *parent = dget(d->d_parent); - - d_delete(d); - simple_rmdir(parent->d_inode, d); - dput(parent); -} - -/* - * NOTE : the dentry must have been dget()'ed - */ -static void cpuset_d_remove_dir(struct dentry *dentry) -{ - struct list_head *node; - - spin_lock(&dcache_lock); - node = dentry->d_subdirs.next; - while (node != &dentry->d_subdirs) { - struct dentry *d = list_entry(node, struct dentry, d_child); - list_del_init(node); - if (d->d_inode) { - d = dget_locked(d); - spin_unlock(&dcache_lock); - d_delete(d); - simple_unlink(dentry->d_inode, d); - dput(d); - spin_lock(&dcache_lock); - } - node = dentry->d_subdirs.next; - } - list_del_init(&dentry->d_child); - spin_unlock(&dcache_lock); - remove_dir(dentry); -} - -static struct super_operations cpuset_ops = { - .statfs = simple_statfs, - .drop_inode = generic_delete_inode, -}; +static DEFINE_MUTEX(callback_mutex); -static int cpuset_fill_super(struct super_block *sb, void *unused_data, - int unused_silent) +/* This is ugly, but preserves the userspace API for existing cpuset + * users. If someone tries to mount the "cpuset" filesystem, we + * silently switch it to mount "cgroup" instead */ +static int cpuset_get_sb(struct file_system_type *fs_type, + int flags, const char *unused_dev_name, + void *data, struct vfsmount *mnt) { - struct inode *inode; - struct dentry *root; - - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = CPUSET_SUPER_MAGIC; - sb->s_op = &cpuset_ops; - cpuset_sb = sb; - - inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR); - if (inode) { - inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - /* directories start off with i_nlink == 2 (for "." entry) */ - inode->i_nlink++; - } else { - return -ENOMEM; - } - - root = d_alloc_root(inode); - if (!root) { - iput(inode); - return -ENOMEM; + struct file_system_type *cgroup_fs = get_fs_type("cgroup"); + int ret = -ENODEV; + if (cgroup_fs) { + char mountopts[] = + "cpuset,noprefix," + "release_agent=/sbin/cpuset_release_agent"; + ret = cgroup_fs->get_sb(cgroup_fs, flags, + unused_dev_name, mountopts, mnt); + put_filesystem(cgroup_fs); } - sb->s_root = root; - return 0; -} - -static struct super_block *cpuset_get_sb(struct file_system_type *fs_type, - int flags, const char *unused_dev_name, - void *data) -{ - return get_sb_single(fs_type, flags, data, cpuset_fill_super); + return ret; } static struct file_system_type cpuset_fs_type = { .name = "cpuset", .get_sb = cpuset_get_sb, - .kill_sb = kill_litter_super, -}; - -/* struct cftype: - * - * The files in the cpuset filesystem mostly have a very simple read/write - * handling, some common function will take care of it. Nevertheless some cases - * (read tasks) are special and therefore I define this structure for every - * kind of file. 
- * - * - * When reading/writing to a file: - * - the cpuset to use in file->f_dentry->d_parent->d_fsdata - * - the 'cftype' of the file is file->f_dentry->d_fsdata - */ - -struct cftype { - char *name; - int private; - int (*open) (struct inode *inode, struct file *file); - ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos); - int (*write) (struct file *file, const char __user *buf, size_t nbytes, - loff_t *ppos); - int (*release) (struct inode *inode, struct file *file); }; -static inline struct cpuset *__d_cs(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -static inline struct cftype *__d_cft(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -/* - * Call with manage_sem held. Writes path of cpuset into buf. - * Returns 0 on success, -errno on error. - */ - -static int cpuset_path(const struct cpuset *cs, char *buf, int buflen) -{ - char *start; - - start = buf + buflen; - - *--start = '\0'; - for (;;) { - int len = cs->dentry->d_name.len; - if ((start -= len) < buf) - return -ENAMETOOLONG; - memcpy(start, cs->dentry->d_name.name, len); - cs = cs->parent; - if (!cs) - break; - if (!cs->parent) - continue; - if (--start < buf) - return -ENAMETOOLONG; - *start = '/'; - } - memmove(buf, start, buf + buflen - start); - return 0; -} - -/* - * Notify userspace when a cpuset is released, by running - * /sbin/cpuset_release_agent with the name of the cpuset (path - * relative to the root of cpuset file system) as the argument. - * - * Most likely, this user command will try to rmdir this cpuset. - * - * This races with the possibility that some other task will be - * attached to this cpuset before it is removed, or that some other - * user task will 'mkdir' a child cpuset of this cpuset. That's ok. - * The presumed 'rmdir' will fail quietly if this cpuset is no longer - * unused, and this cpuset will be reprieved from its death sentence, - * to continue to serve a useful existence. Next time it's released, - * we will get notified again, if it still has 'notify_on_release' set. - * - * The final arg to call_usermodehelper() is 0, which means don't - * wait. The separate /sbin/cpuset_release_agent task is forked by - * call_usermodehelper(), then control in this thread returns here, - * without waiting for the release agent task. We don't bother to - * wait because the caller of this routine has no use for the exit - * status of the /sbin/cpuset_release_agent task, so no sense holding - * our caller up for that. - * - * When we had only one cpuset semaphore, we had to call this - * without holding it, to avoid deadlock when call_usermodehelper() - * allocated memory. With two locks, we could now call this while - * holding manage_sem, but we still don't, so as to minimize - * the time manage_sem is held. - */ - -static void cpuset_release_agent(const char *pathbuf) -{ - char *argv[3], *envp[3]; - int i; - - if (!pathbuf) - return; - - i = 0; - argv[i++] = "/sbin/cpuset_release_agent"; - argv[i++] = (char *)pathbuf; - argv[i] = NULL; - - i = 0; - /* minimal command environment */ - envp[i++] = "HOME=/"; - envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; - envp[i] = NULL; - - call_usermodehelper(argv[0], argv, envp, 0); - kfree(pathbuf); -} - -/* - * Either cs->count of using tasks transitioned to zero, or the - * cs->children list of child cpusets just became empty. 
If this - * cs is notify_on_release() and now both the user count is zero and - * the list of children is empty, prepare cpuset path in a kmalloc'd - * buffer, to be returned via ppathbuf, so that the caller can invoke - * cpuset_release_agent() with it later on, once manage_sem is dropped. - * Call here with manage_sem held. - * - * This check_for_release() routine is responsible for kmalloc'ing - * pathbuf. The above cpuset_release_agent() is responsible for - * kfree'ing pathbuf. The caller of these routines is responsible - * for providing a pathbuf pointer, initialized to NULL, then - * calling check_for_release() with manage_sem held and the address - * of the pathbuf pointer, then dropping manage_sem, then calling - * cpuset_release_agent() with pathbuf, as set by check_for_release(). - */ - -static void check_for_release(struct cpuset *cs, char **ppathbuf) -{ - if (notify_on_release(cs) && atomic_read(&cs->count) == 0 && - list_empty(&cs->children)) { - char *buf; - - buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return; - if (cpuset_path(cs, buf, PAGE_SIZE) < 0) - kfree(buf); - else - *ppathbuf = buf; - } -} - /* * Return in *pmask the portion of a cpusets's cpus_allowed that * are online. If none are online, walk up the cpuset hierarchy @@ -531,7 +302,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf) * One way or another, we guarantee to return some non-empty subset * of cpu_online_map. * - * Call with callback_sem held. + * Call with callback_mutex held. */ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) @@ -547,42 +318,65 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) /* * Return in *pmask the portion of a cpusets's mems_allowed that - * are online. If none are online, walk up the cpuset hierarchy - * until we find one that does have some online mems. If we get - * all the way to the top and still haven't found any online mems, - * return node_online_map. + * are online, with memory. If none are online with memory, walk + * up the cpuset hierarchy until we find one that does have some + * online mems. If we get all the way to the top and still haven't + * found any online mems, return node_states[N_HIGH_MEMORY]. * * One way or another, we guarantee to return some non-empty subset - * of node_online_map. + * of node_states[N_HIGH_MEMORY]. * - * Call with callback_sem held. + * Call with callback_mutex held. */ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) { - while (cs && !nodes_intersects(cs->mems_allowed, node_online_map)) + while (cs && !nodes_intersects(cs->mems_allowed, + node_states[N_HIGH_MEMORY])) cs = cs->parent; if (cs) - nodes_and(*pmask, cs->mems_allowed, node_online_map); + nodes_and(*pmask, cs->mems_allowed, + node_states[N_HIGH_MEMORY]); else - *pmask = node_online_map; - BUG_ON(!nodes_intersects(*pmask, node_online_map)); + *pmask = node_states[N_HIGH_MEMORY]; + BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY])); } -/* - * Refresh current tasks mems_allowed and mems_generation from current - * tasks cpuset. +/** + * cpuset_update_task_memory_state - update task memory placement + * + * If the current tasks cpusets mems_allowed changed behind our + * backs, update current->mems_allowed, mems_generation and task NUMA + * mempolicy to the new value. * - * Call without callback_sem or task_lock() held. May be called with - * or without manage_sem held. Will acquire task_lock() and might - * acquire callback_sem during call. 
+ * Task mempolicy is updated by rebinding it relative to the + * current->cpuset if a task has its memory placement changed. + * Do not call this routine if in_interrupt(). * - * The task_lock() is required to dereference current->cpuset safely. - * Without it, we could pick up the pointer value of current->cpuset - * in one instruction, and then attach_task could give us a different - * cpuset, and then the cpuset we had could be removed and freed, - * and then on our next instruction, we could dereference a no longer - * valid cpuset pointer to get its mems_generation field. + * Call without callback_mutex or task_lock() held. May be + * called with or without manage_mutex held. Thanks in part to + * 'the_top_cpuset_hack', the tasks cpuset pointer will never + * be NULL. This routine also might acquire callback_mutex and + * current->mm->mmap_sem during call. + * + * Reading current->cpuset->mems_generation doesn't need task_lock + * to guard the current->cpuset derefence, because it is guarded + * from concurrent freeing of current->cpuset by attach_task(), + * using RCU. + * + * The rcu_dereference() is technically probably not needed, + * as I don't actually mind if I see a new cpuset pointer but + * an old value of mems_generation. However this really only + * matters on alpha systems using cpusets heavily. If I dropped + * that rcu_dereference(), it would save them a memory barrier. + * For all other arch's, rcu_dereference is a no-op anyway, and for + * alpha systems not using cpusets, another planned optimization, + * avoiding the rcu critical section for tasks in the root cpuset + * which is statically allocated, so can't vanish, will make this + * irrelevant. Better to use RCU as intended, than to engage in + * some cute trick to save a memory barrier that is impossible to + * test, for alpha systems using cpusets heavily, which might not + * even exist. * * This routine is needed to update the per-task mems_allowed data, * within the tasks context, when it is trying to allocate memory @@ -590,24 +384,38 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) * task has been modifying its cpuset. */ -static void refresh_mems(void) +void cpuset_update_task_memory_state(void) { int my_cpusets_mem_gen; + struct task_struct *tsk = current; + struct cpuset *cs; - task_lock(current); - my_cpusets_mem_gen = current->cpuset->mems_generation; - task_unlock(current); - - if (current->cpuset_mems_generation != my_cpusets_mem_gen) { - struct cpuset *cs; + if (task_cs(tsk) == &top_cpuset) { + /* Don't need rcu for top_cpuset. It's never freed. 
*/ + my_cpusets_mem_gen = top_cpuset.mems_generation; + } else { + rcu_read_lock(); + my_cpusets_mem_gen = task_cs(current)->mems_generation; + rcu_read_unlock(); + } - down(&callback_sem); - task_lock(current); - cs = current->cpuset; - guarantee_online_mems(cs, ¤t->mems_allowed); - current->cpuset_mems_generation = cs->mems_generation; - task_unlock(current); - up(&callback_sem); + if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { + mutex_lock(&callback_mutex); + task_lock(tsk); + cs = task_cs(tsk); /* Maybe changed when task not locked */ + guarantee_online_mems(cs, &tsk->mems_allowed); + tsk->cpuset_mems_generation = cs->mems_generation; + if (is_spread_page(cs)) + tsk->flags |= PF_SPREAD_PAGE; + else + tsk->flags &= ~PF_SPREAD_PAGE; + if (is_spread_slab(cs)) + tsk->flags |= PF_SPREAD_SLAB; + else + tsk->flags &= ~PF_SPREAD_SLAB; + task_unlock(tsk); + mutex_unlock(&callback_mutex); + mpol_rebind_task(tsk, &tsk->mems_allowed); } } @@ -616,7 +424,7 @@ static void refresh_mems(void) * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding manage_sem. + * are only set if the other's are set. Call holding manage_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -634,7 +442,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * manage_sem held. + * manage_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -649,24 +457,28 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) static int validate_change(const struct cpuset *cur, const struct cpuset *trial) { + struct cgroup *cont; struct cpuset *c, *par; /* Each of our child cpusets must be a subset of us */ - list_for_each_entry(c, &cur->children, sibling) { - if (!is_cpuset_subset(c, trial)) + list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { + if (!is_cpuset_subset(cgroup_cs(cont), trial)) return -EBUSY; } /* Remaining checks don't apply to root cpuset */ - if ((par = cur->parent) == NULL) + if (cur == &top_cpuset) return 0; + par = cur->parent; + /* We must be a subset of our parent cpuset */ if (!is_cpuset_subset(trial, par)) return -EACCES; /* If either I or some sibling (!= me) is exclusive, we can't overlap */ - list_for_each_entry(c, &par->children, sibling) { + list_for_each_entry(cont, &par->css.cgroup->children, sibling) { + c = cgroup_cs(cont); if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c != cur && cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) @@ -677,250 +489,783 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) return -EINVAL; } + /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ + if (cgroup_task_count(cur->css.cgroup)) { + if (cpus_empty(trial->cpus_allowed) || + nodes_empty(trial->mems_allowed)) { + return -ENOSPC; + } + } + return 0; } /* - * For a given cpuset cur, partition the system as follows - * a. All cpus in the parent cpuset's cpus_allowed that are not part of any - * exclusive child cpusets - * b. 
All cpus in the current cpuset's cpus_allowed that are not part of any - * exclusive child cpusets - * Build these two partitions by calling partition_sched_domains - * - * Call with manage_sem held. May nest a call to the - * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. + * Helper routine for rebuild_sched_domains(). + * Do cpusets a, b have overlapping cpus_allowed masks? */ -static void update_cpu_domains(struct cpuset *cur) +static int cpusets_overlap(struct cpuset *a, struct cpuset *b) { - struct cpuset *c, *par = cur->parent; - cpumask_t pspan, cspan; + return cpus_intersects(a->cpus_allowed, b->cpus_allowed); +} - if (par == NULL || cpus_empty(cur->cpus_allowed)) - return; +/* + * rebuild_sched_domains() + * + * If the flag 'sched_load_balance' of any cpuset with non-empty + * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset + * which has that flag enabled, or if any cpuset with a non-empty + * 'cpus' is removed, then call this routine to rebuild the + * scheduler's dynamic sched domains. + * + * This routine builds a partial partition of the systems CPUs + * (the set of non-overlappping cpumask_t's in the array 'part' + * below), and passes that partial partition to the kernel/sched.c + * partition_sched_domains() routine, which will rebuild the + * schedulers load balancing domains (sched domains) as specified + * by that partial partition. A 'partial partition' is a set of + * non-overlapping subsets whose union is a subset of that set. + * + * See "What is sched_load_balance" in Documentation/cpusets.txt + * for a background explanation of this. + * + * Does not return errors, on the theory that the callers of this + * routine would rather not worry about failures to rebuild sched + * domains when operating in the severe memory shortage situations + * that could cause allocation failures below. + * + * Call with cgroup_mutex held. May take callback_mutex during + * call due to the kfifo_alloc() and kmalloc() calls. May nest + * a call to the get_online_cpus()/put_online_cpus() pair. + * Must not be called holding callback_mutex, because we must not + * call get_online_cpus() while holding callback_mutex. Elsewhere + * the kernel nests callback_mutex inside get_online_cpus() calls. + * So the reverse nesting would risk an ABBA deadlock. + * + * The three key local variables below are: + * q - a kfifo queue of cpuset pointers, used to implement a + * top-down scan of all cpusets. This scan loads a pointer + * to each cpuset marked is_sched_load_balance into the + * array 'csa'. For our purposes, rebuilding the schedulers + * sched domains, we can ignore !is_sched_load_balance cpusets. + * csa - (for CpuSet Array) Array of pointers to all the cpusets + * that need to be load balanced, for convenient iterative + * access by the subsequent code that finds the best partition, + * i.e the set of domains (subsets) of CPUs such that the + * cpus_allowed of every cpuset marked is_sched_load_balance + * is a subset of one of these domains, while there are as + * many such domains as possible, each as small as possible. + * doms - Conversion of 'csa' to an array of cpumasks, for passing to + * the kernel/sched.c routine partition_sched_domains() in a + * convenient format, that can be easily compared to the prior + * value to determine what partition elements (sched domains) + * were changed (added or removed.) 
+ * + * Finding the best partition (set of domains): + * The triple nested loops below over i, j, k scan over the + * load balanced cpusets (using the array of cpuset pointers in + * csa[]) looking for pairs of cpusets that have overlapping + * cpus_allowed, but which don't have the same 'pn' partition + * number and gives them in the same partition number. It keeps + * looping on the 'restart' label until it can no longer find + * any such pairs. + * + * The union of the cpus_allowed masks from the set of + * all cpusets having the same 'pn' value then form the one + * element of the partition (one sched domain) to be passed to + * partition_sched_domains(). + */ - /* - * Get all cpus from parent's cpus_allowed not part of exclusive - * children - */ - pspan = par->cpus_allowed; - list_for_each_entry(c, &par->children, sibling) { - if (is_cpu_exclusive(c)) - cpus_andnot(pspan, pspan, c->cpus_allowed); +static void rebuild_sched_domains(void) +{ + struct kfifo *q; /* queue of cpusets to be scanned */ + struct cpuset *cp; /* scans q */ + struct cpuset **csa; /* array of all cpuset ptrs */ + int csn; /* how many cpuset ptrs in csa so far */ + int i, j, k; /* indices for partition finding loops */ + cpumask_t *doms; /* resulting partition; i.e. sched domains */ + int ndoms; /* number of sched domains in result */ + int nslot; /* next empty doms[] cpumask_t slot */ + + q = NULL; + csa = NULL; + doms = NULL; + + /* Special case for the 99% of systems with one, full, sched domain */ + if (is_sched_load_balance(&top_cpuset)) { + ndoms = 1; + doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!doms) + goto rebuild; + *doms = top_cpuset.cpus_allowed; + goto rebuild; } - if (is_removed(cur) || !is_cpu_exclusive(cur)) { - cpus_or(pspan, pspan, cur->cpus_allowed); - if (cpus_equal(pspan, cur->cpus_allowed)) - return; - cspan = CPU_MASK_NONE; + + q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL); + if (IS_ERR(q)) + goto done; + csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); + if (!csa) + goto done; + csn = 0; + + cp = &top_cpuset; + __kfifo_put(q, (void *)&cp, sizeof(cp)); + while (__kfifo_get(q, (void *)&cp, sizeof(cp))) { + struct cgroup *cont; + struct cpuset *child; /* scans child cpusets of cp */ + if (is_sched_load_balance(cp)) + csa[csn++] = cp; + list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { + child = cgroup_cs(cont); + __kfifo_put(q, (void *)&child, sizeof(cp)); + } + } + + for (i = 0; i < csn; i++) + csa[i]->pn = i; + ndoms = csn; + +restart: + /* Find the best partition (set of sched domains) */ + for (i = 0; i < csn; i++) { + struct cpuset *a = csa[i]; + int apn = a->pn; + + for (j = 0; j < csn; j++) { + struct cpuset *b = csa[j]; + int bpn = b->pn; + + if (apn != bpn && cpusets_overlap(a, b)) { + for (k = 0; k < csn; k++) { + struct cpuset *c = csa[k]; + + if (c->pn == bpn) + c->pn = apn; + } + ndoms--; /* one less element */ + goto restart; + } + } + } + + /* Convert to */ + doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); + if (!doms) + goto rebuild; + + for (nslot = 0, i = 0; i < csn; i++) { + struct cpuset *a = csa[i]; + int apn = a->pn; + + if (apn >= 0) { + cpumask_t *dp = doms + nslot; + + if (nslot == ndoms) { + static int warnings = 10; + if (warnings) { + printk(KERN_WARNING + "rebuild_sched_domains confused:" + " nslot %d, ndoms %d, csn %d, i %d," + " apn %d\n", + nslot, ndoms, csn, i, apn); + warnings--; + } + continue; + } + + cpus_clear(*dp); + for (j = i; j < csn; j++) { + struct cpuset *b = csa[j]; + + if (apn == b->pn) 
{ + cpus_or(*dp, *dp, b->cpus_allowed); + b->pn = -1; + } + } + nslot++; + } + } + BUG_ON(nslot != ndoms); + +rebuild: + /* Have scheduler rebuild sched domains */ + get_online_cpus(); + partition_sched_domains(ndoms, doms); + put_online_cpus(); + +done: + if (q && !IS_ERR(q)) + kfifo_free(q); + kfree(csa); + /* Don't kfree(doms) -- partition_sched_domains() does that. */ +} + +static inline int started_after_time(struct task_struct *t1, + struct timespec *time, + struct task_struct *t2) +{ + int start_diff = timespec_compare(&t1->start_time, time); + if (start_diff > 0) { + return 1; + } else if (start_diff < 0) { + return 0; } else { - if (cpus_empty(pspan)) - return; - cspan = cur->cpus_allowed; /* - * Get all cpus from current cpuset's cpus_allowed not part - * of exclusive children + * Arbitrarily, if two processes started at the same + * time, we'll say that the lower pointer value + * started first. Note that t2 may have exited by now + * so this may not be a valid pointer any longer, but + * that's fine - it still serves to distinguish + * between two tasks started (effectively) + * simultaneously. */ - list_for_each_entry(c, &cur->children, sibling) { - if (is_cpu_exclusive(c)) - cpus_andnot(cspan, cspan, c->cpus_allowed); - } + return t1 > t2; } +} - lock_cpu_hotplug(); - partition_sched_domains(&pspan, &cspan); - unlock_cpu_hotplug(); +static inline int started_after(void *p1, void *p2) +{ + struct task_struct *t1 = p1; + struct task_struct *t2 = p2; + return started_after_time(t1, &t2->start_time, t2); } /* - * Call with manage_sem held. May take callback_sem during call. + * Call with manage_mutex held. May take callback_mutex during call. */ static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; - int retval, cpus_unchanged; + int retval, i; + int is_load_balanced; + struct cgroup_iter it; + struct cgroup *cgrp = cs->css.cgroup; + struct task_struct *p, *dropped; + /* Never dereference latest_task, since it's not refcounted */ + struct task_struct *latest_task = NULL; + struct ptr_heap heap; + struct timespec latest_time = { 0, 0 }; + + /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ + if (cs == &top_cpuset) + return -EACCES; trialcs = *cs; - retval = cpulist_parse(buf, trialcs.cpus_allowed); - if (retval < 0) - return retval; + + /* + * An empty cpus_allowed is ok iff there are no tasks in the cpuset. + * Since cpulist_parse() fails on an empty mask, we special case + * that parsing. The validate_change() call ensures that cpusets + * with tasks have cpus. + */ + buf = strstrip(buf); + if (!*buf) { + cpus_clear(trialcs.cpus_allowed); + } else { + retval = cpulist_parse(buf, trialcs.cpus_allowed); + if (retval < 0) + return retval; + } cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map); - if (cpus_empty(trialcs.cpus_allowed)) - return -ENOSPC; retval = validate_change(cs, &trialcs); if (retval < 0) return retval; - cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); - down(&callback_sem); - cs->cpus_allowed = trialcs.cpus_allowed; - up(&callback_sem); - if (is_cpu_exclusive(cs) && !cpus_unchanged) - update_cpu_domains(cs); - return 0; -} -/* - * Call with manage_sem held. May take callback_sem during call. 
- */ + /* Nothing to do if the cpus didn't change */ + if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) + return 0; + retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after); + if (retval) + return retval; -static int update_nodemask(struct cpuset *cs, char *buf) -{ - struct cpuset trialcs; - int retval; + is_load_balanced = is_sched_load_balance(&trialcs); - trialcs = *cs; - retval = nodelist_parse(buf, trialcs.mems_allowed); - if (retval < 0) - return retval; - nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map); - if (nodes_empty(trialcs.mems_allowed)) - return -ENOSPC; - retval = validate_change(cs, &trialcs); - if (retval == 0) { - down(&callback_sem); - cs->mems_allowed = trialcs.mems_allowed; - atomic_inc(&cpuset_mems_generation); - cs->mems_generation = atomic_read(&cpuset_mems_generation); - up(&callback_sem); + mutex_lock(&callback_mutex); + cs->cpus_allowed = trialcs.cpus_allowed; + mutex_unlock(&callback_mutex); + + again: + /* + * Scan tasks in the cpuset, and update the cpumasks of any + * that need an update. Since we can't call set_cpus_allowed() + * while holding tasklist_lock, gather tasks to be processed + * in a heap structure. If the statically-sized heap fills up, + * overflow tasks that started later, and in future iterations + * only consider tasks that started after the latest task in + * the previous pass. This guarantees forward progress and + * that we don't miss any tasks + */ + heap.size = 0; + cgroup_iter_start(cgrp, &it); + while ((p = cgroup_iter_next(cgrp, &it))) { + /* Only affect tasks that don't have the right cpus_allowed */ + if (cpus_equal(p->cpus_allowed, cs->cpus_allowed)) + continue; + /* + * Only process tasks that started after the last task + * we processed + */ + if (!started_after_time(p, &latest_time, latest_task)) + continue; + dropped = heap_insert(&heap, p); + if (dropped == NULL) { + get_task_struct(p); + } else if (dropped != p) { + get_task_struct(p); + put_task_struct(dropped); + } } - return retval; + cgroup_iter_end(cgrp, &it); + if (heap.size) { + for (i = 0; i < heap.size; i++) { + struct task_struct *p = heap.ptrs[i]; + if (i == 0) { + latest_time = p->start_time; + latest_task = p; + } + set_cpus_allowed(p, cs->cpus_allowed); + put_task_struct(p); + } + /* + * If we had to process any tasks at all, scan again + * in case some of them were in the middle of forking + * children that didn't notice the new cpumask + * restriction. Not the most efficient way to do it, + * but it avoids having to take callback_mutex in the + * fork path + */ + goto again; + } + heap_free(&heap); + if (is_load_balanced) + rebuild_sched_domains(); + + return 0; } /* - * update_flag - read a 0 or a 1 in a file and update associated flag - * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, - * CS_NOTIFY_ON_RELEASE) - * cs: the cpuset to update - * buf: the buffer where we read the 0 or 1 - * - * Call with manage_sem held. + * cpuset_migrate_mm + * + * Migrate memory region from one set of nodes to another. + * + * Temporarilly set tasks mems_allowed to target nodes of migration, + * so that the migration code can allocate pages on these nodes. + * + * Call holding manage_mutex, so our current->cpuset won't change + * during this call, as manage_mutex holds off any attach_task() + * calls. Therefore we don't need to take task_lock around the + * call to guarantee_online_mems(), as we know no one is changing + * our tasks cpuset. 
+ * + * Hold callback_mutex around the two modifications of our tasks + * mems_allowed to synchronize with cpuset_mems_allowed(). + * + * While the mm_struct we are migrating is typically from some + * other task, the task_struct mems_allowed that we are hacking + * is for our current task, which must allocate new pages for that + * migrating memory region. + * + * We call cpuset_update_task_memory_state() before hacking + * our tasks mems_allowed, so that we are assured of being in + * sync with our tasks cpuset, and in particular, callbacks to + * cpuset_update_task_memory_state() from nested page allocations + * won't see any mismatch of our cpuset and task mems_generation + * values, so won't overwrite our hacked tasks mems_allowed + * nodemask. */ -static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) +static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, + const nodemask_t *to) { - int turning_on; - struct cpuset trialcs; - int err, cpu_exclusive_changed; + struct task_struct *tsk = current; - turning_on = (simple_strtoul(buf, NULL, 10) != 0); + cpuset_update_task_memory_state(); - trialcs = *cs; - if (turning_on) - set_bit(bit, &trialcs.flags); - else - clear_bit(bit, &trialcs.flags); + mutex_lock(&callback_mutex); + tsk->mems_allowed = *to; + mutex_unlock(&callback_mutex); - err = validate_change(cs, &trialcs); - if (err < 0) - return err; - cpu_exclusive_changed = - (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); - down(&callback_sem); - if (turning_on) - set_bit(bit, &cs->flags); - else - clear_bit(bit, &cs->flags); - up(&callback_sem); + do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); - if (cpu_exclusive_changed) - update_cpu_domains(cs); - return 0; + mutex_lock(&callback_mutex); + guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed); + mutex_unlock(&callback_mutex); } /* - * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly - * writing the path of the old cpuset in 'ppathbuf' if it needs to be - * notified on release. - * - * Call holding manage_sem. May take callback_sem and task_lock of - * the task 'pid' during call. + * Handle user request to change the 'mems' memory placement + * of a cpuset. Needs to validate the request, update the + * cpusets mems_allowed and mems_generation, and for each + * task in the cpuset, rebind any vma mempolicies and if + * the cpuset is marked 'memory_migrate', migrate the tasks + * pages to the new memory. + * + * Call with manage_mutex held. May take callback_mutex during call. + * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, + * lock each such tasks mm->mmap_sem, scan its vma's and rebind + * their mempolicies to the cpusets new mems_allowed. 
*/ -static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) -{ - pid_t pid; - struct task_struct *tsk; - struct cpuset *oldcs; - cpumask_t cpus; - - if (sscanf(pidbuf, "%d", &pid) != 1) - return -EIO; - if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) - return -ENOSPC; +static void *cpuset_being_rebound; - if (pid) { - read_lock(&tasklist_lock); +static int update_nodemask(struct cpuset *cs, char *buf) +{ + struct cpuset trialcs; + nodemask_t oldmem; + struct task_struct *p; + struct mm_struct **mmarray; + int i, n, ntasks; + int migrate; + int fudge; + int retval; + struct cgroup_iter it; - tsk = find_task_by_pid(pid); - if (!tsk || tsk->flags & PF_EXITING) { - read_unlock(&tasklist_lock); - return -ESRCH; - } + /* + * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; + * it's read-only + */ + if (cs == &top_cpuset) + return -EACCES; - get_task_struct(tsk); - read_unlock(&tasklist_lock); + trialcs = *cs; - if ((current->euid) && (current->euid != tsk->uid) - && (current->euid != tsk->suid)) { - put_task_struct(tsk); - return -EACCES; - } + /* + * An empty mems_allowed is ok iff there are no tasks in the cpuset. + * Since nodelist_parse() fails on an empty mask, we special case + * that parsing. The validate_change() call ensures that cpusets + * with tasks have memory. + */ + buf = strstrip(buf); + if (!*buf) { + nodes_clear(trialcs.mems_allowed); } else { - tsk = current; - get_task_struct(tsk); + retval = nodelist_parse(buf, trialcs.mems_allowed); + if (retval < 0) + goto done; + } + nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, + node_states[N_HIGH_MEMORY]); + oldmem = cs->mems_allowed; + if (nodes_equal(oldmem, trialcs.mems_allowed)) { + retval = 0; /* Too easy - nothing to do */ + goto done; } + retval = validate_change(cs, &trialcs); + if (retval < 0) + goto done; - down(&callback_sem); + mutex_lock(&callback_mutex); + cs->mems_allowed = trialcs.mems_allowed; + cs->mems_generation = cpuset_mems_generation++; + mutex_unlock(&callback_mutex); - task_lock(tsk); - oldcs = tsk->cpuset; - if (!oldcs) { - task_unlock(tsk); - up(&callback_sem); - put_task_struct(tsk); - return -ESRCH; + cpuset_being_rebound = cs; /* causes mpol_copy() rebind */ + + fudge = 10; /* spare mmarray[] slots */ + fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ + retval = -ENOMEM; + + /* + * Allocate mmarray[] to hold mm reference for each task + * in cpuset cs. Can't kmalloc GFP_KERNEL while holding + * tasklist_lock. We could use GFP_ATOMIC, but with a + * few more lines of code, we can retry until we get a big + * enough mmarray[] w/o using GFP_ATOMIC. + */ + while (1) { + ntasks = cgroup_task_count(cs->css.cgroup); /* guess */ + ntasks += fudge; + mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); + if (!mmarray) + goto done; + read_lock(&tasklist_lock); /* block fork */ + if (cgroup_task_count(cs->css.cgroup) <= ntasks) + break; /* got enough */ + read_unlock(&tasklist_lock); /* try again */ + kfree(mmarray); } - atomic_inc(&cs->count); - tsk->cpuset = cs; - task_unlock(tsk); + n = 0; + + /* Load up mmarray[] with mm reference for each task in cpuset. 
*/ + cgroup_iter_start(cs->css.cgroup, &it); + while ((p = cgroup_iter_next(cs->css.cgroup, &it))) { + struct mm_struct *mm; + + if (n >= ntasks) { + printk(KERN_WARNING + "Cpuset mempolicy rebind incomplete.\n"); + break; + } + mm = get_task_mm(p); + if (!mm) + continue; + mmarray[n++] = mm; + } + cgroup_iter_end(cs->css.cgroup, &it); + read_unlock(&tasklist_lock); + + /* + * Now that we've dropped the tasklist spinlock, we can + * rebind the vma mempolicies of each mm in mmarray[] to their + * new cpuset, and release that mm. The mpol_rebind_mm() + * call takes mmap_sem, which we couldn't take while holding + * tasklist_lock. Forks can happen again now - the mpol_copy() + * cpuset_being_rebound check will catch such forks, and rebind + * their vma mempolicies too. Because we still hold the global + * cpuset manage_mutex, we know that no other rebind effort will + * be contending for the global variable cpuset_being_rebound. + * It's ok if we rebind the same mm twice; mpol_rebind_mm() + * is idempotent. Also migrate pages in each mm to new nodes. + */ + migrate = is_memory_migrate(cs); + for (i = 0; i < n; i++) { + struct mm_struct *mm = mmarray[i]; + + mpol_rebind_mm(mm, &cs->mems_allowed); + if (migrate) + cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); + mmput(mm); + } + + /* We're done rebinding vma's to this cpusets new mems_allowed. */ + kfree(mmarray); + cpuset_being_rebound = NULL; + retval = 0; +done: + return retval; +} + +int current_cpuset_is_being_rebound(void) +{ + return task_cs(current) == cpuset_being_rebound; +} + +/* + * Call with manage_mutex held. + */ + +static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) +{ + if (simple_strtoul(buf, NULL, 10) != 0) + cpuset_memory_pressure_enabled = 1; + else + cpuset_memory_pressure_enabled = 0; + return 0; +} + +/* + * update_flag - read a 0 or a 1 in a file and update associated flag + * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, + * CS_SCHED_LOAD_BALANCE, + * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE, + * CS_SPREAD_PAGE, CS_SPREAD_SLAB) + * cs: the cpuset to update + * buf: the buffer where we read the 0 or 1 + * + * Call with manage_mutex held. + */ + +static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) +{ + int turning_on; + struct cpuset trialcs; + int err; + int cpus_nonempty, balance_flag_changed; + + turning_on = (simple_strtoul(buf, NULL, 10) != 0); + + trialcs = *cs; + if (turning_on) + set_bit(bit, &trialcs.flags); + else + clear_bit(bit, &trialcs.flags); + + err = validate_change(cs, &trialcs); + if (err < 0) + return err; + + cpus_nonempty = !cpus_empty(trialcs.cpus_allowed); + balance_flag_changed = (is_sched_load_balance(cs) != + is_sched_load_balance(&trialcs)); + + mutex_lock(&callback_mutex); + cs->flags = trialcs.flags; + mutex_unlock(&callback_mutex); + + if (cpus_nonempty && balance_flag_changed) + rebuild_sched_domains(); + + return 0; +} + +/* + * Frequency meter - How fast is some event occurring? + * + * These routines manage a digitally filtered, constant time based, + * event frequency meter. There are four routines: + * fmeter_init() - initialize a frequency meter. + * fmeter_markevent() - called each time the event happens. + * fmeter_getrate() - returns the recent rate of such events. + * fmeter_update() - internal routine used to update fmeter. + * + * A common data structure is passed to each of these routines, + * which is used to keep track of the state required to manage the + * frequency meter and its digital filter. 
+ * + * The filter works on the number of events marked per unit time. + * The filter is single-pole low-pass recursive (IIR). The time unit + * is 1 second. Arithmetic is done using 32-bit integers scaled to + * simulate 3 decimal digits of precision (multiplied by 1000). + * + * With an FM_COEF of 933, and a time base of 1 second, the filter + * has a half-life of 10 seconds, meaning that if the events quit + * happening, then the rate returned from the fmeter_getrate() + * will be cut in half each 10 seconds, until it converges to zero. + * + * It is not worth doing a real infinitely recursive filter. If more + * than FM_MAXTICKS ticks have elapsed since the last filter event, + * just compute FM_MAXTICKS ticks worth, by which point the level + * will be stable. + * + * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid + * arithmetic overflow in the fmeter_update() routine. + * + * Given the simple 32 bit integer arithmetic used, this meter works + * best for reporting rates between one per millisecond (msec) and + * one per 32 (approx) seconds. At constant rates faster than one + * per msec it maxes out at values just under 1,000,000. At constant + * rates between one per msec, and one per second it will stabilize + * to a value N*1000, where N is the rate of events per second. + * At constant rates between one per second and one per 32 seconds, + * it will be choppy, moving up on the seconds that have an event, + * and then decaying until the next event. At rates slower than + * about one in 32 seconds, it decays all the way back to zero between + * each event. + */ + +#define FM_COEF 933 /* coefficient for half-life of 10 secs */ +#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ +#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ +#define FM_SCALE 1000 /* faux fixed point scale */ + +/* Initialize a frequency meter */ +static void fmeter_init(struct fmeter *fmp) +{ + fmp->cnt = 0; + fmp->val = 0; + fmp->time = 0; + spin_lock_init(&fmp->lock); +} + +/* Internal meter update - process cnt events and update value */ +static void fmeter_update(struct fmeter *fmp) +{ + time_t now = get_seconds(); + time_t ticks = now - fmp->time; + + if (ticks == 0) + return; + + ticks = min(FM_MAXTICKS, ticks); + while (ticks-- > 0) + fmp->val = (FM_COEF * fmp->val) / FM_SCALE; + fmp->time = now; + + fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; + fmp->cnt = 0; +} + +/* Process any previous ticks, then bump cnt by one (times scale). */ +static void fmeter_markevent(struct fmeter *fmp) +{ + spin_lock(&fmp->lock); + fmeter_update(fmp); + fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); + spin_unlock(&fmp->lock); +} + +/* Process any previous ticks, then return current value. 
*/ +static int fmeter_getrate(struct fmeter *fmp) +{ + int val; + + spin_lock(&fmp->lock); + fmeter_update(fmp); + val = fmp->val; + spin_unlock(&fmp->lock); + return val; +} + +static int cpuset_can_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct task_struct *tsk) +{ + struct cpuset *cs = cgroup_cs(cont); + + if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) + return -ENOSPC; + + return security_task_setscheduler(tsk, 0, NULL); +} + +static void cpuset_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct cgroup *oldcont, + struct task_struct *tsk) +{ + cpumask_t cpus; + nodemask_t from, to; + struct mm_struct *mm; + struct cpuset *cs = cgroup_cs(cont); + struct cpuset *oldcs = cgroup_cs(oldcont); + + mutex_lock(&callback_mutex); guarantee_online_cpus(cs, &cpus); set_cpus_allowed(tsk, cpus); + mutex_unlock(&callback_mutex); + + from = oldcs->mems_allowed; + to = cs->mems_allowed; + mm = get_task_mm(tsk); + if (mm) { + mpol_rebind_mm(mm, &to); + if (is_memory_migrate(cs)) + cpuset_migrate_mm(mm, &from, &to); + mmput(mm); + } - up(&callback_sem); - put_task_struct(tsk); - if (atomic_dec_and_test(&oldcs->count)) - check_for_release(oldcs, ppathbuf); - return 0; } /* The various types of files and directories in a cpuset file system */ typedef enum { - FILE_ROOT, - FILE_DIR, + FILE_MEMORY_MIGRATE, FILE_CPULIST, FILE_MEMLIST, FILE_CPU_EXCLUSIVE, FILE_MEM_EXCLUSIVE, - FILE_NOTIFY_ON_RELEASE, - FILE_TASKLIST, + FILE_SCHED_LOAD_BALANCE, + FILE_MEMORY_PRESSURE_ENABLED, + FILE_MEMORY_PRESSURE, + FILE_SPREAD_PAGE, + FILE_SPREAD_SLAB, } cpuset_filetype_t; -static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf, +static ssize_t cpuset_common_file_write(struct cgroup *cont, + struct cftype *cft, + struct file *file, + const char __user *userbuf, size_t nbytes, loff_t *unused_ppos) { - struct cpuset *cs = __d_cs(file->f_dentry->d_parent); - struct cftype *cft = __d_cft(file->f_dentry); + struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; char *buffer; - char *pathbuf = NULL; int retval = 0; /* Crude upper limit on largest legitimate cpulist user might write. 
*/ - if (nbytes > 100 + 6 * NR_CPUS) + if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES)) return -E2BIG; /* +1 for nul-terminator */ @@ -933,9 +1278,9 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us } buffer[nbytes] = 0; /* nul-terminate */ - down(&manage_sem); + cgroup_lock(); - if (is_removed(cs)) { + if (cgroup_is_removed(cont)) { retval = -ENODEV; goto out2; } @@ -953,11 +1298,25 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us case FILE_MEM_EXCLUSIVE: retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); break; - case FILE_NOTIFY_ON_RELEASE: - retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer); + case FILE_SCHED_LOAD_BALANCE: + retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer); + break; + case FILE_MEMORY_MIGRATE: + retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); + break; + case FILE_MEMORY_PRESSURE_ENABLED: + retval = update_memory_pressure_enabled(cs, buffer); break; - case FILE_TASKLIST: - retval = attach_task(cs, buffer, &pathbuf); + case FILE_MEMORY_PRESSURE: + retval = -EACCES; + break; + case FILE_SPREAD_PAGE: + retval = update_flag(CS_SPREAD_PAGE, cs, buffer); + cs->mems_generation = cpuset_mems_generation++; + break; + case FILE_SPREAD_SLAB: + retval = update_flag(CS_SPREAD_SLAB, cs, buffer); + cs->mems_generation = cpuset_mems_generation++; break; default: retval = -EINVAL; @@ -967,30 +1326,12 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us if (retval == 0) retval = nbytes; out2: - up(&manage_sem); - cpuset_release_agent(pathbuf); + cgroup_unlock(); out1: kfree(buffer); return retval; } -static ssize_t cpuset_file_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - ssize_t retval = 0; - struct cftype *cft = __d_cft(file->f_dentry); - if (!cft) - return -ENODEV; - - /* special function ? */ - if (cft->write) - retval = cft->write(file, buf, nbytes, ppos); - else - retval = cpuset_common_file_write(file, buf, nbytes, ppos); - - return retval; -} - /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. If read in smaller @@ -1007,9 +1348,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) { cpumask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); mask = cs->cpus_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); return cpulist_scnprintf(page, PAGE_SIZE, mask); } @@ -1018,24 +1359,26 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) { nodemask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); mask = cs->mems_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); return nodelist_scnprintf(page, PAGE_SIZE, mask); } -static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) +static ssize_t cpuset_common_file_read(struct cgroup *cont, + struct cftype *cft, + struct file *file, + char __user *buf, + size_t nbytes, loff_t *ppos) { - struct cftype *cft = __d_cft(file->f_dentry); - struct cpuset *cs = __d_cs(file->f_dentry->d_parent); + struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; char *page; ssize_t retval = 0; char *s; - if (!(page = (char *)__get_free_page(GFP_KERNEL))) + if (!(page = (char *)__get_free_page(GFP_TEMPORARY))) return -ENOMEM; s = page; @@ -1053,8 +1396,23 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, case FILE_MEM_EXCLUSIVE: *s++ = is_mem_exclusive(cs) ? 
'1' : '0'; break; - case FILE_NOTIFY_ON_RELEASE: - *s++ = notify_on_release(cs) ? '1' : '0'; + case FILE_SCHED_LOAD_BALANCE: + *s++ = is_sched_load_balance(cs) ? '1' : '0'; + break; + case FILE_MEMORY_MIGRATE: + *s++ = is_memory_migrate(cs) ? '1' : '0'; + break; + case FILE_MEMORY_PRESSURE_ENABLED: + *s++ = cpuset_memory_pressure_enabled ? '1' : '0'; + break; + case FILE_MEMORY_PRESSURE: + s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter)); + break; + case FILE_SPREAD_PAGE: + *s++ = is_spread_page(cs) ? '1' : '0'; + break; + case FILE_SPREAD_SLAB: + *s++ = is_spread_slab(cs) ? '1' : '0'; break; default: retval = -EINVAL; @@ -1068,466 +1426,242 @@ out: return retval; } -static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - ssize_t retval = 0; - struct cftype *cft = __d_cft(file->f_dentry); - if (!cft) - return -ENODEV; - - /* special function ? */ - if (cft->read) - retval = cft->read(file, buf, nbytes, ppos); - else - retval = cpuset_common_file_read(file, buf, nbytes, ppos); - - return retval; -} - -static int cpuset_file_open(struct inode *inode, struct file *file) -{ - int err; - struct cftype *cft; - - err = generic_file_open(inode, file); - if (err) - return err; - - cft = __d_cft(file->f_dentry); - if (!cft) - return -ENODEV; - if (cft->open) - err = cft->open(inode, file); - else - err = 0; - - return err; -} - -static int cpuset_file_release(struct inode *inode, struct file *file) -{ - struct cftype *cft = __d_cft(file->f_dentry); - if (cft->release) - return cft->release(inode, file); - return 0; -} - -/* - * cpuset_rename - Only allow simple rename of directories in place. - */ -static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) -{ - if (!S_ISDIR(old_dentry->d_inode->i_mode)) - return -ENOTDIR; - if (new_dentry->d_inode) - return -EEXIST; - if (old_dir != new_dir) - return -EIO; - return simple_rename(old_dir, old_dentry, new_dir, new_dentry); -} - -static struct file_operations cpuset_file_operations = { - .read = cpuset_file_read, - .write = cpuset_file_write, - .llseek = generic_file_llseek, - .open = cpuset_file_open, - .release = cpuset_file_release, -}; - -static struct inode_operations cpuset_dir_inode_operations = { - .lookup = simple_lookup, - .mkdir = cpuset_mkdir, - .rmdir = cpuset_rmdir, - .rename = cpuset_rename, -}; - -static int cpuset_create_file(struct dentry *dentry, int mode) -{ - struct inode *inode; - - if (!dentry) - return -ENOENT; - if (dentry->d_inode) - return -EEXIST; - - inode = cpuset_new_inode(mode); - if (!inode) - return -ENOMEM; - - if (S_ISDIR(mode)) { - inode->i_op = &cpuset_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* start off with i_nlink == 2 (for "." entry) */ - inode->i_nlink++; - } else if (S_ISREG(mode)) { - inode->i_size = 0; - inode->i_fop = &cpuset_file_operations; - } - - d_instantiate(dentry, inode); - dget(dentry); /* Extra count - pin the dentry in core */ - return 0; -} - -/* - * cpuset_create_dir - create a directory for an object. - * cs: the cpuset we create the directory for. - * It must have a valid ->parent field - * And we are going to fill its ->dentry field. - * name: The name to give to the cpuset directory. Will be copied. - * mode: mode to set on new directory. 
- */ - -static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) -{ - struct dentry *dentry = NULL; - struct dentry *parent; - int error = 0; - - parent = cs->parent->dentry; - dentry = cpuset_get_dentry(parent, name); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - error = cpuset_create_file(dentry, S_IFDIR | mode); - if (!error) { - dentry->d_fsdata = cs; - parent->d_inode->i_nlink++; - cs->dentry = dentry; - } - dput(dentry); - - return error; -} - -static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) -{ - struct dentry *dentry; - int error; - - down(&dir->d_inode->i_sem); - dentry = cpuset_get_dentry(dir, cft->name); - if (!IS_ERR(dentry)) { - error = cpuset_create_file(dentry, 0644 | S_IFREG); - if (!error) - dentry->d_fsdata = (void *)cft; - dput(dentry); - } else - error = PTR_ERR(dentry); - up(&dir->d_inode->i_sem); - return error; -} - -/* - * Stuff for reading the 'tasks' file. - * - * Reading this file can return large amounts of data if a cpuset has - * *lots* of attached tasks. So it may need several calls to read(), - * but we cannot guarantee that the information we produce is correct - * unless we produce it entirely atomically. - * - * Upon tasks file open(), a struct ctr_struct is allocated, that - * will have a pointer to an array (also allocated here). The struct - * ctr_struct * is stored in file->private_data. Its resources will - * be freed by release() when the file is closed. The array is used - * to sprintf the PIDs and then used by read(). - */ - -/* cpusets_tasks_read array */ - -struct ctr_struct { - char *buf; - int bufsz; -}; - -/* - * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. - * Return actual number of pids loaded. No need to task_lock(p) - * when reading out p->cpuset, as we don't really care if it changes - * on the next cycle, and we are not going to try to dereference it. - */ -static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) -{ - int n = 0; - struct task_struct *g, *p; - - read_lock(&tasklist_lock); - - do_each_thread(g, p) { - if (p->cpuset == cs) { - pidarray[n++] = p->pid; - if (unlikely(n == npids)) - goto array_full; - } - } while_each_thread(g, p); - -array_full: - read_unlock(&tasklist_lock); - return n; -} - -static int cmppid(const void *a, const void *b) -{ - return *(pid_t *)a - *(pid_t *)b; -} - -/* - * Convert array 'a' of 'npids' pid_t's to a string of newline separated - * decimal pids in 'buf'. Don't write more than 'sz' chars, but return - * count 'cnt' of how many chars would be written if buf were large enough. - */ -static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) -{ - int cnt = 0; - int i; - for (i = 0; i < npids; i++) - cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); - return cnt; -} - -/* - * Handle an open on 'tasks' file. Prepare a buffer listing the - * process id's of tasks currently attached to the cpuset being opened. - * - * Does not require any specific cpuset semaphores, and does not take any. - */ -static int cpuset_tasks_open(struct inode *unused, struct file *file) -{ - struct cpuset *cs = __d_cs(file->f_dentry->d_parent); - struct ctr_struct *ctr; - pid_t *pidarray; - int npids; - char c; - if (!(file->f_mode & FMODE_READ)) - return 0; - ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); - if (!ctr) - goto err0; - - /* - * If cpuset gets more users after we read count, we won't have - * enough space - tough. 
This race is indistinguishable to the - * caller from the case that the additional cpuset users didn't - * show up until sometime later on. - */ - npids = atomic_read(&cs->count); - pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); - if (!pidarray) - goto err1; - - npids = pid_array_load(pidarray, npids, cs); - sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); - - /* Call pid_array_to_buf() twice, first just to get bufsz */ - ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; - ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); - if (!ctr->buf) - goto err2; - ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); - - kfree(pidarray); - file->private_data = ctr; - return 0; - -err2: - kfree(pidarray); -err1: - kfree(ctr); -err0: - return -ENOMEM; -} - -static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct ctr_struct *ctr = file->private_data; - - if (*ppos + nbytes > ctr->bufsz) - nbytes = ctr->bufsz - *ppos; - if (copy_to_user(buf, ctr->buf + *ppos, nbytes)) - return -EFAULT; - *ppos += nbytes; - return nbytes; -} - -static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) -{ - struct ctr_struct *ctr; - - if (file->f_mode & FMODE_READ) { - ctr = file->private_data; - kfree(ctr->buf); - kfree(ctr); - } - return 0; -} /* * for the common functions, 'private' gives the type of file */ -static struct cftype cft_tasks = { - .name = "tasks", - .open = cpuset_tasks_open, - .read = cpuset_tasks_read, - .release = cpuset_tasks_release, - .private = FILE_TASKLIST, -}; - static struct cftype cft_cpus = { .name = "cpus", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_CPULIST, }; static struct cftype cft_mems = { .name = "mems", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEMLIST, }; static struct cftype cft_cpu_exclusive = { .name = "cpu_exclusive", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_CPU_EXCLUSIVE, }; static struct cftype cft_mem_exclusive = { .name = "mem_exclusive", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEM_EXCLUSIVE, }; -static struct cftype cft_notify_on_release = { - .name = "notify_on_release", - .private = FILE_NOTIFY_ON_RELEASE, +static struct cftype cft_sched_load_balance = { + .name = "sched_load_balance", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_SCHED_LOAD_BALANCE, +}; + +static struct cftype cft_memory_migrate = { + .name = "memory_migrate", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_MEMORY_MIGRATE, +}; + +static struct cftype cft_memory_pressure_enabled = { + .name = "memory_pressure_enabled", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_MEMORY_PRESSURE_ENABLED, +}; + +static struct cftype cft_memory_pressure = { + .name = "memory_pressure", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_MEMORY_PRESSURE, }; -static int cpuset_populate_dir(struct dentry *cs_dentry) +static struct cftype cft_spread_page = { + .name = "memory_spread_page", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_SPREAD_PAGE, +}; + +static struct cftype cft_spread_slab = { + .name = "memory_spread_slab", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = 
FILE_SPREAD_SLAB, +}; + +static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) { int err; - if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0) + return err; + if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0) + return err; + if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0) + return err; + if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0) return err; + /* memory_pressure_enabled is in root cpuset only */ + if (err == 0 && !cont->parent) + err = cgroup_add_file(cont, ss, + &cft_memory_pressure_enabled); return 0; } /* + * post_clone() is called at the end of cgroup_clone(). + * 'cgroup' was just created automatically as a result of + * a cgroup_clone(), and the current task is about to + * be moved into 'cgroup'. + * + * Currently we refuse to set up the cgroup - thereby + * refusing the task to be entered, and as a result refusing + * the sys_unshare() or clone() which initiated it - if any + * sibling cpusets have exclusive cpus or mem. + * + * If this becomes a problem for some users who wish to + * allow that scenario, then cpuset_post_clone() could be + * changed to grant parent->cpus_allowed-sibling_cpus_exclusive + * (and likewise for mems) to the new cgroup. + */ +static void cpuset_post_clone(struct cgroup_subsys *ss, + struct cgroup *cgroup) +{ + struct cgroup *parent, *child; + struct cpuset *cs, *parent_cs; + + parent = cgroup->parent; + list_for_each_entry(child, &parent->children, sibling) { + cs = cgroup_cs(child); + if (is_mem_exclusive(cs) || is_cpu_exclusive(cs)) + return; + } + cs = cgroup_cs(cgroup); + parent_cs = cgroup_cs(parent); + + cs->mems_allowed = parent_cs->mems_allowed; + cs->cpus_allowed = parent_cs->cpus_allowed; + return; +} + +/* * cpuset_create - create a cpuset * parent: cpuset that will be parent of the new cpuset. * name: name of the new cpuset. Will be strcpy'ed. 
* mode: mode to set on new inode * - * Must be called with the semaphore on the parent inode held + * Must be called with the mutex on the parent inode held */ -static long cpuset_create(struct cpuset *parent, const char *name, int mode) +static struct cgroup_subsys_state *cpuset_create( + struct cgroup_subsys *ss, + struct cgroup *cont) { struct cpuset *cs; - int err; + struct cpuset *parent; + if (!cont->parent) { + /* This is early initialization for the top cgroup */ + top_cpuset.mems_generation = cpuset_mems_generation++; + return &top_cpuset.css; + } + parent = cgroup_cs(cont->parent); cs = kmalloc(sizeof(*cs), GFP_KERNEL); if (!cs) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - down(&manage_sem); - refresh_mems(); + cpuset_update_task_memory_state(); cs->flags = 0; - if (notify_on_release(parent)) - set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); + if (is_spread_page(parent)) + set_bit(CS_SPREAD_PAGE, &cs->flags); + if (is_spread_slab(parent)) + set_bit(CS_SPREAD_SLAB, &cs->flags); + set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cs->cpus_allowed = CPU_MASK_NONE; cs->mems_allowed = NODE_MASK_NONE; - atomic_set(&cs->count, 0); - INIT_LIST_HEAD(&cs->sibling); - INIT_LIST_HEAD(&cs->children); - atomic_inc(&cpuset_mems_generation); - cs->mems_generation = atomic_read(&cpuset_mems_generation); + cs->mems_generation = cpuset_mems_generation++; + fmeter_init(&cs->fmeter); cs->parent = parent; + number_of_cpusets++; + return &cs->css ; +} - down(&callback_sem); - list_add(&cs->sibling, &cs->parent->children); - up(&callback_sem); +/* + * Locking note on the strange update_flag() call below: + * + * If the cpuset being removed has its flag 'sched_load_balance' + * enabled, then simulate turning sched_load_balance off, which + * will call rebuild_sched_domains(). The get_online_cpus() + * call in rebuild_sched_domains() must not be made while holding + * callback_mutex. Elsewhere the kernel nests callback_mutex inside + * get_online_cpus() calls. So the reverse nesting would risk an + * ABBA deadlock. + */ - err = cpuset_create_dir(cs, name, mode); - if (err < 0) - goto err; +static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct cpuset *cs = cgroup_cs(cont); - /* - * Release manage_sem before cpuset_populate_dir() because it - * will down() this new directory's i_sem and if we race with - * another mkdir, we might deadlock. - */ - up(&manage_sem); + cpuset_update_task_memory_state(); - err = cpuset_populate_dir(cs->dentry); - /* If err < 0, we have a half-filled directory - oh well ;) */ - return 0; -err: - list_del(&cs->sibling); - up(&manage_sem); + if (is_sched_load_balance(cs)) + update_flag(CS_SCHED_LOAD_BALANCE, cs, "0"); + + number_of_cpusets--; kfree(cs); - return err; } -static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) -{ - struct cpuset *c_parent = dentry->d_parent->d_fsdata; +struct cgroup_subsys cpuset_subsys = { + .name = "cpuset", + .create = cpuset_create, + .destroy = cpuset_destroy, + .can_attach = cpuset_can_attach, + .attach = cpuset_attach, + .populate = cpuset_populate, + .post_clone = cpuset_post_clone, + .subsys_id = cpuset_subsys_id, + .early_init = 1, +}; - /* the vfs holds inode->i_sem already */ - return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); -} +/* + * cpuset_init_early - just enough so that the calls to + * cpuset_update_task_memory_state() in early init code + * are harmless. 
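+ *
+ * Rough boot-order sketch (illustration only; the exact call sites
+ * live in init/main.c and may differ by kernel version):
+ *
+ *	cpuset_init_early();	/* mems_generation valid from here on */
+ *	...
+ *	cpuset_init();		/* register fs, flags, number_of_cpusets */
+ *	...
+ *	cpuset_init_smp();	/* snapshot online maps, hotplug hook */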
+ */ -static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) +int __init cpuset_init_early(void) { - struct cpuset *cs = dentry->d_fsdata; - struct dentry *d; - struct cpuset *parent; - char *pathbuf = NULL; - - /* the vfs holds both inode->i_sem already */ - - down(&manage_sem); - refresh_mems(); - if (atomic_read(&cs->count) > 0) { - up(&manage_sem); - return -EBUSY; - } - if (!list_empty(&cs->children)) { - up(&manage_sem); - return -EBUSY; - } - parent = cs->parent; - down(&callback_sem); - set_bit(CS_REMOVED, &cs->flags); - if (is_cpu_exclusive(cs)) - update_cpu_domains(cs); - list_del(&cs->sibling); /* delete my sibling from parent->children */ - spin_lock(&cs->dentry->d_lock); - d = dget(cs->dentry); - cs->dentry = NULL; - spin_unlock(&d->d_lock); - cpuset_d_remove_dir(d); - dput(d); - up(&callback_sem); - if (list_empty(&parent->children)) - check_for_release(parent, &pathbuf); - up(&manage_sem); - cpuset_release_agent(pathbuf); + top_cpuset.mems_generation = cpuset_mems_generation++; return 0; } + /** * cpuset_init - initialize cpusets at system boot * @@ -1536,124 +1670,140 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) int __init cpuset_init(void) { - struct dentry *root; - int err; + int err = 0; top_cpuset.cpus_allowed = CPU_MASK_ALL; top_cpuset.mems_allowed = NODE_MASK_ALL; - atomic_inc(&cpuset_mems_generation); - top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation); - - init_task.cpuset = &top_cpuset; + fmeter_init(&top_cpuset.fmeter); + top_cpuset.mems_generation = cpuset_mems_generation++; + set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); err = register_filesystem(&cpuset_fs_type); if (err < 0) - goto out; - cpuset_mount = kern_mount(&cpuset_fs_type); - if (IS_ERR(cpuset_mount)) { - printk(KERN_ERR "cpuset: could not mount!\n"); - err = PTR_ERR(cpuset_mount); - cpuset_mount = NULL; - goto out; + return err; + + number_of_cpusets = 1; + return 0; +} + +/* + * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs + * or memory nodes, we need to walk over the cpuset hierarchy, + * removing that CPU or node from all cpusets. If this removes the + * last CPU or node from a cpuset, then the guarantee_online_cpus() + * or guarantee_online_mems() code will use that emptied cpusets + * parent online CPUs or nodes. Cpusets that were already empty of + * CPUs or nodes are left empty. + * + * This routine is intentionally inefficient in a couple of regards. + * It will check all cpusets in a subtree even if the top cpuset of + * the subtree has no offline CPUs or nodes. It checks both CPUs and + * nodes, even though the caller could have been coded to know that + * only one of CPUs or nodes needed to be checked on a given call. + * This was done to minimize text size rather than cpu cycles. + * + * Call with both manage_mutex and callback_mutex held. + * + * Recursive, on depth of cpuset subtree. 
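+ *
+ * Worked example (illustration only): given cpusets top{0-3} ->
+ * A{2,3} -> B{3}, unplugging CPU 3 leaves B->cpus_allowed with no
+ * online CPU; the walk below calls guarantee_online_cpus(B, ...),
+ * which falls back to the nearest ancestor that still has one, so B
+ * ends up tracking {2}.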
+ */ + +static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) +{ + struct cgroup *cont; + struct cpuset *c; + + /* Each of our child cpusets mems must be online */ + list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { + c = cgroup_cs(cont); + guarantee_online_cpus_mems_in_subtree(c); + if (!cpus_empty(c->cpus_allowed)) + guarantee_online_cpus(c, &c->cpus_allowed); + if (!nodes_empty(c->mems_allowed)) + guarantee_online_mems(c, &c->mems_allowed); } - root = cpuset_mount->mnt_sb->s_root; - root->d_fsdata = &top_cpuset; - root->d_inode->i_nlink++; - top_cpuset.dentry = root; - root->d_inode->i_op = &cpuset_dir_inode_operations; - err = cpuset_populate_dir(root); -out: - return err; } -/** - * cpuset_init_smp - initialize cpus_allowed - * - * Description: Finish top cpuset after cpu, node maps are initialized - **/ +/* + * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track + * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to + * track what's online after any CPU or memory node hotplug or unplug + * event. + * + * To ensure that we don't remove a CPU or node from the top cpuset + * that is currently in use by a child cpuset (which would violate + * the rule that cpusets must be subsets of their parent), we first + * call the recursive routine guarantee_online_cpus_mems_in_subtree(). + * + * Since there are two callers of this routine, one for CPU hotplug + * events and one for memory node hotplug events, we could have coded + * two separate routines here. We code it as a single common routine + * in order to minimize text size. + */ -void __init cpuset_init_smp(void) +static void common_cpu_mem_hotplug_unplug(void) { + cgroup_lock(); + mutex_lock(&callback_mutex); + + guarantee_online_cpus_mems_in_subtree(&top_cpuset); top_cpuset.cpus_allowed = cpu_online_map; - top_cpuset.mems_allowed = node_online_map; + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; + + mutex_unlock(&callback_mutex); + cgroup_unlock(); } -/** - * cpuset_fork - attach newly forked task to its parents cpuset. - * @tsk: pointer to task_struct of forking parent process. - * - * Description: A task inherits its parent's cpuset at fork(). - * - * A pointer to the shared cpuset was automatically copied in fork.c - * by dup_task_struct(). However, we ignore that copy, since it was - * not made under the protection of task_lock(), so might no longer be - * a valid cpuset pointer. attach_task() might have already changed - * current->cpuset, allowing the previously referenced cpuset to - * be removed and freed. Instead, we task_lock(current) and copy - * its present value of current->cpuset for our freshly forked child. +/* + * The top_cpuset tracks what CPUs and Memory Nodes are online, + * period. This is necessary in order to make cpusets transparent + * (of no affect) on systems that are actively using CPU hotplug + * but making no active use of cpusets. * - * At the point that cpuset_fork() is called, 'current' is the parent - * task, and the passed argument 'child' points to the child task. - **/ + * This routine ensures that top_cpuset.cpus_allowed tracks + * cpu_online_map on each CPU hotplug (cpuhp) event. 
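+ *
+ * Illustration only: a CPU unplug raises a series of notifier calls,
+ * roughly CPU_DOWN_PREPARE -> CPU_DYING -> CPU_DEAD. CPU_DYING runs
+ * in stop_machine context, where sleeping locks such as cgroup_lock()
+ * are forbidden, which is why it is filtered out below; the masks are
+ * refreshed on the other phases via common_cpu_mem_hotplug_unplug().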
+ */ -void cpuset_fork(struct task_struct *child) +static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, + unsigned long phase, void *unused_cpu) { - task_lock(current); - child->cpuset = current->cpuset; - atomic_inc(&child->cpuset->count); - task_unlock(current); + if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) + return NOTIFY_DONE; + + common_cpu_mem_hotplug_unplug(); + return 0; +} + +#ifdef CONFIG_MEMORY_HOTPLUG +/* + * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. + * Call this routine anytime after you change + * node_states[N_HIGH_MEMORY]. + * See also the previous routine cpuset_handle_cpuhp(). + */ + +void cpuset_track_online_nodes(void) +{ + common_cpu_mem_hotplug_unplug(); } +#endif /** - * cpuset_exit - detach cpuset from exiting task - * @tsk: pointer to task_struct of exiting process - * - * Description: Detach cpuset from @tsk and release it. - * - * Note that cpusets marked notify_on_release force every task in - * them to take the global manage_sem semaphore when exiting. - * This could impact scaling on very large systems. Be reluctant to - * use notify_on_release cpusets where very high task exit scaling - * is required on large systems. - * - * Don't even think about derefencing 'cs' after the cpuset use count - * goes to zero, except inside a critical section guarded by manage_sem - * or callback_sem. Otherwise a zero cpuset use count is a license to - * any other task to nuke the cpuset immediately, via cpuset_rmdir(). - * - * This routine has to take manage_sem, not callback_sem, because - * it is holding that semaphore while calling check_for_release(), - * which calls kmalloc(), so can't be called holding callback__sem(). + * cpuset_init_smp - initialize cpus_allowed * - * We don't need to task_lock() this reference to tsk->cpuset, - * because tsk is already marked PF_EXITING, so attach_task() won't - * mess with it. + * Description: Finish top cpuset after cpu, node maps are initialized **/ -void cpuset_exit(struct task_struct *tsk) +void __init cpuset_init_smp(void) { - struct cpuset *cs; - - BUG_ON(!(tsk->flags & PF_EXITING)); - - cs = tsk->cpuset; - tsk->cpuset = NULL; - - if (notify_on_release(cs)) { - char *pathbuf = NULL; + top_cpuset.cpus_allowed = cpu_online_map; + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; - down(&manage_sem); - if (atomic_dec_and_test(&cs->count)) - check_for_release(cs, &pathbuf); - up(&manage_sem); - cpuset_release_agent(pathbuf); - } else { - atomic_dec(&cs->count); - } + hotcpu_notifier(cpuset_handle_cpuhp, 0); } /** + * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. * @@ -1663,15 +1813,28 @@ void cpuset_exit(struct task_struct *tsk) * tasks cpuset. **/ -cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk) +cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) +{ + cpumask_t mask; + + mutex_lock(&callback_mutex); + mask = cpuset_cpus_allowed_locked(tsk); + mutex_unlock(&callback_mutex); + + return mask; +} + +/** + * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. + * Must be called with callback_mutex held. 
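+ *
+ * Usage sketch (illustration only): a caller that already holds
+ * callback_mutex does
+ *
+ *	mutex_lock(&callback_mutex);
+ *	mask = cpuset_cpus_allowed_locked(tsk);
+ *	...
+ *	mutex_unlock(&callback_mutex);
+ *
+ * while everyone else uses cpuset_cpus_allowed(tsk), which wraps the
+ * same call in the mutex itself.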
+ **/ +cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk) { cpumask_t mask; - down(&callback_sem); - task_lock((struct task_struct *)tsk); - guarantee_online_cpus(tsk->cpuset, &mask); - task_unlock((struct task_struct *)tsk); - up(&callback_sem); + task_lock(tsk); + guarantee_online_cpus(task_cs(tsk), &mask); + task_unlock(tsk); return mask; } @@ -1682,43 +1845,26 @@ void cpuset_init_current_mems_allowed(void) } /** - * cpuset_update_current_mems_allowed - update mems parameters to new values - * - * If the current tasks cpusets mems_allowed changed behind our backs, - * update current->mems_allowed and mems_generation to the new value. - * Do not call this routine if in_interrupt(). + * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. + * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. * - * Call without callback_sem or task_lock() held. May be called - * with or without manage_sem held. Unless exiting, it will acquire - * task_lock(). Also might acquire callback_sem during call to - * refresh_mems(). - */ + * Description: Returns the nodemask_t mems_allowed of the cpuset + * attached to the specified @tsk. Guaranteed to return some non-empty + * subset of node_states[N_HIGH_MEMORY], even if this means going outside the + * tasks cpuset. + **/ -void cpuset_update_current_mems_allowed(void) +nodemask_t cpuset_mems_allowed(struct task_struct *tsk) { - struct cpuset *cs; - int need_to_refresh = 0; + nodemask_t mask; - task_lock(current); - cs = current->cpuset; - if (!cs) - goto done; - if (current->cpuset_mems_generation != cs->mems_generation) - need_to_refresh = 1; -done: - task_unlock(current); - if (need_to_refresh) - refresh_mems(); -} + mutex_lock(&callback_mutex); + task_lock(tsk); + guarantee_online_mems(task_cs(tsk), &mask); + task_unlock(tsk); + mutex_unlock(&callback_mutex); -/** - * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed - * @nodes: pointer to a node bitmap that is and-ed with mems_allowed - */ -void cpuset_restrict_to_mems_allowed(unsigned long *nodes) -{ - bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed), - MAX_NUMNODES); + return mask; } /** @@ -1732,7 +1878,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) int i; for (i = 0; zl->zones[i]; i++) { - int nid = zl->zones[i]->zone_pgdat->node_id; + int nid = zone_to_nid(zl->zones[i]); if (node_isset(nid, current->mems_allowed)) return 1; @@ -1742,7 +1888,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) /* * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive - * ancestor to the specified cpuset. Call holding callback_sem. + * ancestor to the specified cpuset. Call holding callback_mutex. * If no ancestor is mem_exclusive (an unusual configuration), then * returns the root cpuset. */ @@ -1754,165 +1900,323 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) } /** - * cpuset_zone_allowed - Can we allocate memory on zone z's memory node? + * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node? * @z: is this zone on an allowed node? - * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL) + * @gfp_mask: memory allocation flags * - * If we're in interrupt, yes, we can always allocate. If zone + * If we're in interrupt, yes, we can always allocate. If + * __GFP_THISNODE is set, yes, we can always allocate. If zone * z's node is in our tasks mems_allowed, yes. 
If it's not a * __GFP_HARDWALL request and this zone's nodes is in the nearest * mem_exclusive cpuset ancestor to this tasks cpuset, yes. + * If the task has been OOM killed and has access to memory reserves + * as specified by the TIF_MEMDIE flag, yes. * Otherwise, no. * + * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall() + * reduces to cpuset_zone_allowed_hardwall(). Otherwise, + * cpuset_zone_allowed_softwall() might sleep, and might allow a zone + * from an enclosing cpuset. + * + * cpuset_zone_allowed_hardwall() only handles the simpler case of + * hardwall cpusets, and never sleeps. + * + * The __GFP_THISNODE placement logic is really handled elsewhere, + * by forcibly using a zonelist starting at a specified node, and by + * (in get_page_from_freelist()) refusing to consider the zones for + * any node on the zonelist except the first. By the time any such + * calls get to this routine, we should just shut up and say 'yes'. + * * GFP_USER allocations are marked with the __GFP_HARDWALL bit, - * and do not allow allocations outside the current tasks cpuset. + * and do not allow allocations outside the current tasks cpuset + * unless the task has been OOM killed as is marked TIF_MEMDIE. * GFP_KERNEL allocations are not so marked, so can escape to the - * nearest mem_exclusive ancestor cpuset. - * - * Scanning up parent cpusets requires callback_sem. The __alloc_pages() - * routine only calls here with __GFP_HARDWALL bit _not_ set if - * it's a GFP_KERNEL allocation, and all nodes in the current tasks - * mems_allowed came up empty on the first pass over the zonelist. - * So only GFP_KERNEL allocations, if all nodes in the cpuset are - * short of memory, might require taking the callback_sem semaphore. - * - * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() - * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing - * hardwall cpusets - no allocation on a node outside the cpuset is - * allowed (unless in interrupt, of course). - * - * The second loop doesn't even call here for GFP_ATOMIC requests - * (if the __alloc_pages() local variable 'wait' is set). That check - * and the checks below have the combined affect in the second loop of - * the __alloc_pages() routine that: + * nearest enclosing mem_exclusive ancestor cpuset. + * + * Scanning up parent cpusets requires callback_mutex. The + * __alloc_pages() routine only calls here with __GFP_HARDWALL bit + * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the + * current tasks mems_allowed came up empty on the first pass over + * the zonelist. So only GFP_KERNEL allocations, if all nodes in the + * cpuset are short of memory, might require taking the callback_mutex + * mutex. + * + * The first call here from mm/page_alloc:get_page_from_freelist() + * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, + * so no allocation on a node outside the cpuset is allowed (unless + * in interrupt, of course). + * + * The second pass through get_page_from_freelist() doesn't even call + * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() + * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set + * in alloc_flags. That logic and the checks below have the combined + * affect that: * in_interrupt - any node ok (current task context irrelevant) * GFP_ATOMIC - any node ok + * TIF_MEMDIE - any node ok * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok * GFP_USER - only nodes in current tasks mems allowed ok. 
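+ *
+ * Worked example (illustration only): a task confined to node 0 whose
+ * nearest mem_exclusive ancestor allows nodes 0-1 asks about a zone
+ * on node 1:
+ *	GFP_USER   (__GFP_HARDWALL set)	-> no, stops at mems_allowed
+ *	GFP_KERNEL (no __GFP_HARDWALL)	-> yes, via the ancestor scan
+ *	any allocation in_interrupt()	-> yes, unconditionally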
- **/ + * + * Rule: + * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you + * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables + * the code that might scan up ancestor cpusets and sleep. + */ -int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) { int node; /* node that zone z is on */ const struct cpuset *cs; /* current cpuset ancestors */ - int allowed = 1; /* is allocation in zone z allowed? */ + int allowed; /* is allocation in zone z allowed? */ - if (in_interrupt()) + if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) return 1; - node = z->zone_pgdat->node_id; + node = zone_to_nid(z); + might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); if (node_isset(node, current->mems_allowed)) return 1; + /* + * Allow tasks that have access to memory reserves because they have + * been OOM killed to get memory anywhere. + */ + if (unlikely(test_thread_flag(TIF_MEMDIE))) + return 1; if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ return 0; - /* Not hardwall and node outside mems_allowed: scan up cpusets */ - down(&callback_sem); - if (current->flags & PF_EXITING) /* Let dying task have memory */ return 1; + + /* Not hardwall and node outside mems_allowed: scan up cpusets */ + mutex_lock(&callback_mutex); + task_lock(current); - cs = nearest_exclusive_ancestor(current->cpuset); + cs = nearest_exclusive_ancestor(task_cs(current)); task_unlock(current); allowed = node_isset(node, cs->mems_allowed); - up(&callback_sem); + mutex_unlock(&callback_mutex); return allowed; } +/* + * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node? + * @z: is this zone on an allowed node? + * @gfp_mask: memory allocation flags + * + * If we're in interrupt, yes, we can always allocate. + * If __GFP_THISNODE is set, yes, we can always allocate. If zone + * z's node is in our tasks mems_allowed, yes. If the task has been + * OOM killed and has access to memory reserves as specified by the + * TIF_MEMDIE flag, yes. Otherwise, no. + * + * The __GFP_THISNODE placement logic is really handled elsewhere, + * by forcibly using a zonelist starting at a specified node, and by + * (in get_page_from_freelist()) refusing to consider the zones for + * any node on the zonelist except the first. By the time any such + * calls get to this routine, we should just shut up and say 'yes'. + * + * Unlike the cpuset_zone_allowed_softwall() variant, above, + * this variant requires that the zone be in the current tasks + * mems_allowed or that we're in interrupt. It does not scan up the + * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. + * It never sleeps. + */ + +int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +{ + int node; /* node that zone z is on */ + + if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) + return 1; + node = zone_to_nid(z); + if (node_isset(node, current->mems_allowed)) + return 1; + /* + * Allow tasks that have access to memory reserves because they have + * been OOM killed to get memory anywhere. + */ + if (unlikely(test_thread_flag(TIF_MEMDIE))) + return 1; + return 0; +} + /** - * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors? - * @p: pointer to task_struct of some other task. - * - * Description: Return true if the nearest mem_exclusive ancestor - * cpusets of tasks @p and current overlap. Used by oom killer to - * determine if task @p's memory usage might impact the memory - * available to the current task. 
+ * cpuset_lock - lock out any changes to cpuset structures + * + * The out of memory (oom) code needs to mutex_lock cpusets + * from being changed while it scans the tasklist looking for a + * task in an overlapping cpuset. Expose callback_mutex via this + * cpuset_lock() routine, so the oom code can lock it, before + * locking the task list. The tasklist_lock is a spinlock, so + * must be taken inside callback_mutex. + */ + +void cpuset_lock(void) +{ + mutex_lock(&callback_mutex); +} + +/** + * cpuset_unlock - release lock on cpuset changes * - * Acquires callback_sem - not suitable for calling from a fast path. - **/ + * Undo the lock taken in a previous cpuset_lock() call. + */ -int cpuset_excl_nodes_overlap(const struct task_struct *p) +void cpuset_unlock(void) { - const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ - int overlap = 0; /* do cpusets overlap? */ + mutex_unlock(&callback_mutex); +} - down(&callback_sem); +/** + * cpuset_mem_spread_node() - On which node to begin search for a page + * + * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for + * tasks in a cpuset with is_spread_page or is_spread_slab set), + * and if the memory allocation used cpuset_mem_spread_node() + * to determine on which node to start looking, as it will for + * certain page cache or slab cache pages such as used for file + * system buffers and inode caches, then instead of starting on the + * local node to look for a free page, rather spread the starting + * node around the tasks mems_allowed nodes. + * + * We don't have to worry about the returned node being offline + * because "it can't happen", and even if it did, it would be ok. + * + * The routines calling guarantee_online_mems() are careful to + * only set nodes in task->mems_allowed that are online. So it + * should not be possible for the following code to return an + * offline node. But if it did, that would be ok, as this routine + * is not returning the node where the allocation must be, only + * the node where the search should start. The zonelist passed to + * __alloc_pages() will include all nodes. If the slab allocator + * is passed an offline node, it will fall back to the local node. + * See kmem_cache_alloc_node(). + */ - task_lock(current); - if (current->flags & PF_EXITING) { - task_unlock(current); - goto done; - } - cs1 = nearest_exclusive_ancestor(current->cpuset); - task_unlock(current); +int cpuset_mem_spread_node(void) +{ + int node; - task_lock((struct task_struct *)p); - if (p->flags & PF_EXITING) { - task_unlock((struct task_struct *)p); - goto done; - } - cs2 = nearest_exclusive_ancestor(p->cpuset); - task_unlock((struct task_struct *)p); + node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); + if (node == MAX_NUMNODES) + node = first_node(current->mems_allowed); + current->cpuset_mem_spread_rotor = node; + return node; +} +EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); - overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); -done: - up(&callback_sem); +/** + * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? + * @tsk1: pointer to task_struct of some task. + * @tsk2: pointer to task_struct of some other task. + * + * Description: Return true if @tsk1's mems_allowed intersects the + * mems_allowed of @tsk2. Used by the OOM killer to determine if + * one of the task's memory usage might impact the memory available + * to the other. 
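+ *
+ * Usage sketch (illustration only, based on mm/oom_kill.c of this
+ * era): the badness() scoring discounts candidates whose memory would
+ * not directly relieve current's cpuset, e.g.
+ *
+ *	if (!cpuset_mems_allowed_intersects(current, p))
+ *		points /= 8;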
+ **/ + +int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, + const struct task_struct *tsk2) +{ + return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); +} + +/* + * Collection of memory_pressure is suppressed unless + * this flag is enabled by writing "1" to the special + * cpuset file 'memory_pressure_enabled' in the root cpuset. + */ + +int cpuset_memory_pressure_enabled __read_mostly; + +/** + * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. + * + * Keep a running average of the rate of synchronous (direct) + * page reclaim efforts initiated by tasks in each cpuset. + * + * This represents the rate at which some task in the cpuset + * ran low on memory on all nodes it was allowed to use, and + * had to enter the kernels page reclaim code in an effort to + * create more free memory by tossing clean pages or swapping + * or writing dirty pages. + * + * Display to user space in the per-cpuset read-only file + * "memory_pressure". Value displayed is an integer + * representing the recent rate of entry into the synchronous + * (direct) page reclaim by any task attached to the cpuset. + **/ - return overlap; +void __cpuset_memory_pressure_bump(void) +{ + task_lock(current); + fmeter_markevent(&task_cs(current)->fmeter); + task_unlock(current); } +#ifdef CONFIG_PROC_PID_CPUSET /* * proc_cpuset_show() * - Print tasks cpuset path into seq_file. * - Used for /proc//cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take manage_sem, keeping attach_task() from changing it - * anyway. + * and we take manage_mutex, keeping attach_task() from changing it + * anyway. No need to check that tsk->cpuset != NULL, thanks to + * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks + * cpuset to top_cpuset. */ - -static int proc_cpuset_show(struct seq_file *m, void *v) +static int proc_cpuset_show(struct seq_file *m, void *unused_v) { - struct cpuset *cs; + struct pid *pid; struct task_struct *tsk; char *buf; - int retval = 0; + struct cgroup_subsys_state *css; + int retval; + retval = -ENOMEM; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) - return -ENOMEM; - - tsk = m->private; - down(&manage_sem); - cs = tsk->cpuset; - if (!cs) { - retval = -EINVAL; goto out; - } - retval = cpuset_path(cs, buf, PAGE_SIZE); + retval = -ESRCH; + pid = m->private; + tsk = get_pid_task(pid, PIDTYPE_PID); + if (!tsk) + goto out_free; + + retval = -EINVAL; + cgroup_lock(); + css = task_subsys_state(tsk, cpuset_subsys_id); + retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); if (retval < 0) - goto out; + goto out_unlock; seq_puts(m, buf); seq_putc(m, '\n'); -out: - up(&manage_sem); +out_unlock: + cgroup_unlock(); + put_task_struct(tsk); +out_free: kfree(buf); +out: return retval; } static int cpuset_open(struct inode *inode, struct file *file) { - struct task_struct *tsk = PROC_I(inode)->task; - return single_open(file, proc_cpuset_show, tsk); + struct pid *pid = PROC_I(inode)->pid; + return single_open(file, proc_cpuset_show, pid); } -struct file_operations proc_cpuset_operations = { +const struct file_operations proc_cpuset_operations = { .open = cpuset_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_PROC_PID_CPUSET */ /* Display task cpus_allowed, mems_allowed in /proc//status file. */ char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)