diff --git a/kernel/pid.c b/kernel/pid.c
index 7781d99..d3f722d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -18,31 +18,47 @@
  * allocation scenario when all but one out of 1 million PIDs possible are
  * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
  * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
+ *
+ * Pid namespaces:
+ *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
+ *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
+ *     Many thanks to Oleg Nesterov for comments and help
+ *
  */
 
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
+#include <linux/pid_namespace.h>
+#include <linux/init_task.h>
+#include <linux/syscalls.h>
 
-#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
-static struct hlist_head *pid_hash[PIDTYPE_MAX];
-static int pidhash_shift;
+#define pid_hashfn(nr, ns)	\
+	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
+static struct hlist_head *pid_hash;
+static unsigned int pidhash_shift = 4;
+struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
-int last_pid;
 
 #define RESERVED_PIDS		300
 
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 
-#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
 #define BITS_PER_PAGE		(PAGE_SIZE*8)
 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
-#define mk_pid(map, off)	(((map) - pidmap_array)*BITS_PER_PAGE + (off))
+
+static inline int mk_pid(struct pid_namespace *pid_ns,
+		struct pidmap *map, int off)
+{
+	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
+}
+
 #define find_next_offset(map, off)					\
 		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
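The pidmap coordinate math in this hunk is worth spelling out: a pid nr decomposes into a bitmap page index (nr / BITS_PER_PAGE) and a bit offset within that page (nr & BITS_PER_PAGE_MASK), and the new mk_pid() helper is the inverse, now computed relative to the namespace's own pidmap[] rather than the old global pidmap_array. A minimal userspace sketch of that round trip (not kernel code; assumes 4 KiB pages):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define BITS_PER_PAGE      (PAGE_SIZE * 8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)

int main(void)
{
	unsigned long nr  = 40000;                    /* arbitrary example pid   */
	unsigned long map = nr / BITS_PER_PAGE;       /* which pidmap page       */
	unsigned long off = nr & BITS_PER_PAGE_MASK;  /* bit inside that page    */

	assert(map * BITS_PER_PAGE + off == nr);      /* mk_pid() round-trip     */
	printf("pid %lu -> page %lu, bit %lu\n", nr, map, off);
	return 0;
}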
@@ -52,49 +68,84 @@ int pid_max_max = PID_MAX_LIMIT;
  * value does not cause lots of bitmaps to be allocated, but
  * the scheme scales to up to 4 million PIDs, runtime.
  */
-typedef struct pidmap {
-	atomic_t nr_free;
-	void *page;
-} pidmap_t;
+struct pid_namespace init_pid_ns = {
+	.kref = {
+		.refcount       = ATOMIC_INIT(2),
+	},
+	.pidmap = {
+		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
+	},
+	.last_pid = 0,
+	.level = 0,
+	.child_reaper = &init_task,
+};
+EXPORT_SYMBOL_GPL(init_pid_ns);
 
-static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
-	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
+int is_container_init(struct task_struct *tsk)
+{
+	int ret = 0;
+	struct pid *pid;
+
+	rcu_read_lock();
+	pid = task_pid(tsk);
+	if (pid != NULL && pid->numbers[pid->level].nr == 1)
+		ret = 1;
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(is_container_init);
+
+/*
+ * Note: disable interrupts while the pidmap_lock is held as an
+ * interrupt might come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does
+ * spin_lock(&pidmap_lock) followed by an interrupt routine that does
+ * read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no
+ * irq handlers that take it we can leave the interrupts enabled.
+ * For now it is easier to be safe than to prove it can't happen.
+ */
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-fastcall void free_pidmap(int pid)
+static void free_pidmap(struct upid *upid)
 {
-	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
-	int offset = pid & BITS_PER_PAGE_MASK;
+	int nr = upid->nr;
+	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
+	int offset = nr & BITS_PER_PAGE_MASK;
 
 	clear_bit(offset, map->page);
 	atomic_inc(&map->nr_free);
 }
 
-int alloc_pidmap(void)
+static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
-	int i, offset, max_scan, pid, last = last_pid;
-	pidmap_t *map;
+	int i, offset, max_scan, pid, last = pid_ns->last_pid;
+	struct pidmap *map;
 
 	pid = last + 1;
 	if (pid >= pid_max)
 		pid = RESERVED_PIDS;
 	offset = pid & BITS_PER_PAGE_MASK;
-	map = &pidmap_array[pid/BITS_PER_PAGE];
+	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
 	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
 	for (i = 0; i <= max_scan; ++i) {
 		if (unlikely(!map->page)) {
-			unsigned long page = get_zeroed_page(GFP_KERNEL);
+			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
 			/*
 			 * Free the page if someone raced with us
 			 * installing it:
 			 */
-			spin_lock(&pidmap_lock);
+			spin_lock_irq(&pidmap_lock);
 			if (map->page)
-				free_page(page);
+				kfree(page);
 			else
-				map->page = (void *)page;
-			spin_unlock(&pidmap_lock);
+				map->page = page;
+			spin_unlock_irq(&pidmap_lock);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -102,11 +153,11 @@ int alloc_pidmap(void)
 		do {
 			if (!test_and_set_bit(offset, map->page)) {
 				atomic_dec(&map->nr_free);
-				last_pid = pid;
+				pid_ns->last_pid = pid;
 				return pid;
 			}
 			offset = find_next_offset(map, offset);
-			pid = mk_pid(map, offset);
+			pid = mk_pid(pid_ns, map, offset);
 		/*
 		 * find_next_offset() found a bit, the pid from it
 		 * is in-bounds, and if we fell back to the last
@@ -117,105 +168,328 @@ int alloc_pidmap(void)
 			(i != max_scan || pid < last ||
 			    !((last+1) & BITS_PER_PAGE_MASK)));
 		}
-		if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
+		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
 			++map;
 			offset = 0;
 		} else {
-			map = &pidmap_array[0];
+			map = &pid_ns->pidmap[0];
 			offset = RESERVED_PIDS;
 			if (unlikely(last == offset))
 				break;
 		}
-		pid = mk_pid(map, offset);
+		pid = mk_pid(pid_ns, map, offset);
 	}
 	return -1;
 }
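alloc_pidmap() above is a circular scan: start one past last_pid, test-and-set the first clear bit, and wrap around to RESERVED_PIDS at the end of the pid space, allocating bitmap pages lazily on first touch. A stripped-down, single-threaded userspace sketch of the same scheme (the toy_ names are hypothetical; no locking, no lazy page allocation, a single namespace):

#include <stdio.h>

#define MAX_PID   4096                     /* toy pid space, one "page"      */
#define WORD_BITS (8 * (int)sizeof(unsigned long))

static unsigned long pidmap[MAX_PID / WORD_BITS];
static int last_pid;

/* Return the old value of bit nr and set it, like test_and_set_bit(). */
static int test_and_set(int nr)
{
	unsigned long mask = 1UL << (nr % WORD_BITS);
	int old = !!(pidmap[nr / WORD_BITS] & mask);

	pidmap[nr / WORD_BITS] |= mask;
	return old;
}

/* Circular scan starting just after last_pid, as alloc_pidmap() does. */
static int toy_alloc_pid(void)
{
	int pid = last_pid + 1, scanned;

	for (scanned = 0; scanned < MAX_PID; scanned++, pid++) {
		if (pid >= MAX_PID)
			pid = 1;           /* wrap, skipping reserved pid 0   */
		if (!test_and_set(pid)) {
			last_pid = pid;
			return pid;
		}
	}
	return -1;                         /* pid space exhausted             */
}

static void toy_free_pid(int pid)
{
	pidmap[pid / WORD_BITS] &= ~(1UL << (pid % WORD_BITS));
}

int main(void)
{
	int a = toy_alloc_pid(), b = toy_alloc_pid();

	printf("allocated %d and %d\n", a, b);       /* 1 and 2               */
	toy_free_pid(a);
	printf("reallocated %d\n", toy_alloc_pid()); /* 3, not 1: the scan    */
	return 0;                                    /* resumes past last_pid */
}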
-struct pid * fastcall find_pid(enum pid_type type, int nr)
+int next_pidmap(struct pid_namespace *pid_ns, int last)
 {
-	struct hlist_node *elem;
-	struct pid *pid;
-
-	hlist_for_each_entry_rcu(pid, elem,
-			&pid_hash[type][pid_hashfn(nr)], pid_chain) {
-		if (pid->nr == nr)
-			return pid;
+	int offset;
+	struct pidmap *map, *end;
+
+	offset = (last + 1) & BITS_PER_PAGE_MASK;
+	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
+	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
+	for (; map < end; map++, offset = 0) {
+		if (unlikely(!map->page))
+			continue;
+		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
+		if (offset < BITS_PER_PAGE)
+			return mk_pid(pid_ns, map, offset);
 	}
-	return NULL;
+	return -1;
 }
 
-int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
+void put_pid(struct pid *pid)
 {
-	struct pid *pid, *task_pid;
-
-	task_pid = &task->pids[type];
-	pid = find_pid(type, nr);
-	task_pid->nr = nr;
-	if (pid == NULL) {
-		INIT_LIST_HEAD(&task_pid->pid_list);
-		hlist_add_head_rcu(&task_pid->pid_chain,
-				   &pid_hash[type][pid_hashfn(nr)]);
-	} else {
-		INIT_HLIST_NODE(&task_pid->pid_chain);
-		list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
+	struct pid_namespace *ns;
+
+	if (!pid)
+		return;
+
+	ns = pid->numbers[pid->level].ns;
+	if ((atomic_read(&pid->count) == 1) ||
+	     atomic_dec_and_test(&pid->count)) {
+		kmem_cache_free(ns->pid_cachep, pid);
+		put_pid_ns(ns);
 	}
+}
+EXPORT_SYMBOL_GPL(put_pid);
 
-	return 0;
+static void delayed_put_pid(struct rcu_head *rhp)
+{
+	struct pid *pid = container_of(rhp, struct pid, rcu);
+	put_pid(pid);
 }
 
-static fastcall int __detach_pid(task_t *task, enum pid_type type)
+void free_pid(struct pid *pid)
 {
-	struct pid *pid, *pid_next;
-	int nr = 0;
+	/* We can be called with write_lock_irq(&tasklist_lock) held */
+	int i;
+	unsigned long flags;
 
-	pid = &task->pids[type];
-	if (!hlist_unhashed(&pid->pid_chain)) {
+	spin_lock_irqsave(&pidmap_lock, flags);
+	for (i = 0; i <= pid->level; i++)
+		hlist_del_rcu(&pid->numbers[i].pid_chain);
+	spin_unlock_irqrestore(&pidmap_lock, flags);
 
-		if (list_empty(&pid->pid_list)) {
-			nr = pid->nr;
-			hlist_del_rcu(&pid->pid_chain);
-		} else {
-			pid_next = list_entry(pid->pid_list.next,
-						struct pid, pid_list);
-			/* insert next pid from pid_list to hash */
-			hlist_replace_rcu(&pid->pid_chain,
-					  &pid_next->pid_chain);
-		}
+	for (i = 0; i <= pid->level; i++)
+		free_pidmap(pid->numbers + i);
+
+	call_rcu(&pid->rcu, delayed_put_pid);
+}
+
+struct pid *alloc_pid(struct pid_namespace *ns)
+{
+	struct pid *pid;
+	enum pid_type type;
+	int i, nr;
+	struct pid_namespace *tmp;
+	struct upid *upid;
+
+	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
+	if (!pid)
+		goto out;
+
+	tmp = ns;
+	for (i = ns->level; i >= 0; i--) {
+		nr = alloc_pidmap(tmp);
+		if (nr < 0)
+			goto out_free;
+
+		pid->numbers[i].nr = nr;
+		pid->numbers[i].ns = tmp;
+		tmp = tmp->parent;
+	}
+
+	get_pid_ns(ns);
+	pid->level = ns->level;
+	atomic_set(&pid->count, 1);
+	for (type = 0; type < PIDTYPE_MAX; ++type)
+		INIT_HLIST_HEAD(&pid->tasks[type]);
+
+	spin_lock_irq(&pidmap_lock);
+	for (i = ns->level; i >= 0; i--) {
+		upid = &pid->numbers[i];
+		hlist_add_head_rcu(&upid->pid_chain,
+				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
 	}
+	spin_unlock_irq(&pidmap_lock);
 
-	list_del_rcu(&pid->pid_list);
-	pid->nr = 0;
+out:
+	return pid;
 
-	return nr;
+out_free:
+	while (++i <= ns->level)
+		free_pidmap(pid->numbers + i);
+
+	kmem_cache_free(ns->pid_cachep, pid);
+	pid = NULL;
+	goto out;
 }
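alloc_pid() is where namespaces become visible in the data structure: one struct pid now carries an array of (nr, ns) pairs, numbers[0] for the init namespace down to numbers[level] for the task's own namespace, and each upid is hashed separately so the same object can be found under any of its numbers. A userspace model of that layout and of the pid_nr_ns() visibility rule that comes later in the patch (the toy_ names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct toy_ns { int level; struct toy_ns *parent; };

struct toy_upid { int nr; struct toy_ns *ns; };

struct toy_pid {
	int level;
	struct toy_upid numbers[];   /* one entry per namespace level */
};

/* Mirrors pid_nr_ns(): a pid is only visible from an ancestor ns. */
static int toy_pid_nr_ns(struct toy_pid *pid, struct toy_ns *ns)
{
	if (ns->level <= pid->level && pid->numbers[ns->level].ns == ns)
		return pid->numbers[ns->level].nr;
	return 0;
}

int main(void)
{
	struct toy_ns init_ns  = { 0, NULL };
	struct toy_ns child_ns = { 1, &init_ns };
	struct toy_pid *pid;

	/* One allocation covers the flexible numbers[] array, as in alloc_pid(). */
	pid = malloc(sizeof(*pid) + 2 * sizeof(struct toy_upid));
	pid->level = 1;
	pid->numbers[0].nr = 12345; pid->numbers[0].ns = &init_ns;  /* global view */
	pid->numbers[1].nr = 1;     pid->numbers[1].ns = &child_ns; /* in-container */

	printf("global nr %d, in-container nr %d\n",
	       toy_pid_nr_ns(pid, &init_ns), toy_pid_nr_ns(pid, &child_ns));
	free(pid);
	return 0;
}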
-void fastcall detach_pid(task_t *task, enum pid_type type)
+struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-	int tmp, nr;
+	struct hlist_node *elem;
+	struct upid *pnr;
 
-	nr = __detach_pid(task, type);
-	if (!nr)
-		return;
+	hlist_for_each_entry_rcu(pnr, elem,
+			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
+		if (pnr->nr == nr && pnr->ns == ns)
+			return container_of(pnr, struct pid,
+					numbers[ns->level]);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(find_pid_ns);
+
+struct pid *find_vpid(int nr)
+{
+	return find_pid_ns(nr, current->nsproxy->pid_ns);
+}
+EXPORT_SYMBOL_GPL(find_vpid);
+
+/*
+ * attach_pid() must be called with the tasklist_lock write-held.
+ */
+void attach_pid(struct task_struct *task, enum pid_type type,
+		struct pid *pid)
+{
+	struct pid_link *link;
+
+	link = &task->pids[type];
+	link->pid = pid;
+	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
+}
+
+static void __change_pid(struct task_struct *task, enum pid_type type,
+			struct pid *new)
+{
+	struct pid_link *link;
+	struct pid *pid;
+	int tmp;
+
+	link = &task->pids[type];
+	pid = link->pid;
+
+	hlist_del_rcu(&link->node);
+	link->pid = new;
 
 	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
-		if (tmp != type && find_pid(tmp, nr))
+		if (!hlist_empty(&pid->tasks[tmp]))
 			return;
 
-	free_pidmap(nr);
+	free_pid(pid);
 }
 
-task_t *find_task_by_pid_type(int type, int nr)
+void detach_pid(struct task_struct *task, enum pid_type type)
+{
+	__change_pid(task, type, NULL);
+}
+
+void change_pid(struct task_struct *task, enum pid_type type,
+		struct pid *pid)
+{
+	__change_pid(task, type, pid);
+	attach_pid(task, type, pid);
+}
+
+/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
+void transfer_pid(struct task_struct *old, struct task_struct *new,
+			   enum pid_type type)
+{
+	new->pids[type].pid = old->pids[type].pid;
+	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
+}
+
+struct task_struct *pid_task(struct pid *pid, enum pid_type type)
+{
+	struct task_struct *result = NULL;
+	if (pid) {
+		struct hlist_node *first;
+		first = rcu_dereference(pid->tasks[type].first);
+		if (first)
+			result = hlist_entry(first, struct task_struct, pids[(type)].node);
+	}
+	return result;
+}
+EXPORT_SYMBOL(pid_task);
+
+/*
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ */
+struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+{
+	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
+}
+
+struct task_struct *find_task_by_vpid(pid_t vnr)
+{
+	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+}
+
+struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid *pid;
+	rcu_read_lock();
+	if (type != PIDTYPE_PID)
+		task = task->group_leader;
+	pid = get_pid(task->pids[type].pid);
+	rcu_read_unlock();
+	return pid;
+}
 
-	pid = find_pid(type, nr);
-	if (!pid)
-		return NULL;
+struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
+{
+	struct task_struct *result;
+	rcu_read_lock();
+	result = pid_task(pid, type);
+	if (result)
+		get_task_struct(result);
+	rcu_read_unlock();
+	return result;
+}
 
-	return pid_task(&pid->pid_list, type);
+struct pid *find_get_pid(pid_t nr)
+{
+	struct pid *pid;
+
+	rcu_read_lock();
+	pid = get_pid(find_vpid(nr));
+	rcu_read_unlock();
+
+	return pid;
 }
+EXPORT_SYMBOL_GPL(find_get_pid);
 
-EXPORT_SYMBOL(find_task_by_pid_type);
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+{
+	struct upid *upid;
+	pid_t nr = 0;
+
+	if (pid && ns->level <= pid->level) {
+		upid = &pid->numbers[ns->level];
+		if (upid->ns == ns)
+			nr = upid->nr;
+	}
+	return nr;
+}
+
+pid_t pid_vnr(struct pid *pid)
+{
+	return pid_nr_ns(pid, current->nsproxy->pid_ns);
+}
+EXPORT_SYMBOL_GPL(pid_vnr);
+
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+			struct pid_namespace *ns)
+{
+	pid_t nr = 0;
+
+	rcu_read_lock();
+	if (!ns)
+		ns = current->nsproxy->pid_ns;
+	if (likely(pid_alive(task))) {
+		if (type != PIDTYPE_PID)
+			task = task->group_leader;
+		nr = pid_nr_ns(task->pids[type].pid, ns);
+	}
+	rcu_read_unlock();
+
+	return nr;
+}
+EXPORT_SYMBOL(__task_pid_nr_ns);
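The getters above compose into the usual lookup-and-pin pattern: resolve a number to a struct pid or task under rcu_read_lock(), take a counted reference, and only then operate with no lock held. A hypothetical caller might look like the following sketch (not part of this patch; it uses only functions defined above plus put_task_struct()):

/* Hypothetical user of the API above: pin the task named by a
 * namespace-relative pid, act on it, then drop both references. */
static int example_act_on_vpid(pid_t vnr)
{
	struct task_struct *tsk;
	struct pid *pid;

	pid = find_get_pid(vnr);		/* RCU lookup + get_pid()       */
	if (!pid)
		return -ESRCH;

	tsk = get_pid_task(pid, PIDTYPE_PID);	/* NULL if the task already died */
	put_pid(pid);				/* struct pid no longer needed  */
	if (!tsk)
		return -ESRCH;

	/* ... use tsk; the reference keeps it valid ... */

	put_task_struct(tsk);
	return 0;
}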
+pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+	return pid_nr_ns(task_tgid(tsk), ns);
+}
+EXPORT_SYMBOL(task_tgid_nr_ns);
+
+struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+{
+	return ns_of_pid(task_pid(tsk));
+}
+EXPORT_SYMBOL_GPL(task_active_pid_ns);
+
+/*
+ * Used by proc to find the first pid that is greater than or equal to nr.
+ *
+ * If there is a pid at nr this function is exactly the same as find_pid_ns.
+ */
+struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
+{
+	struct pid *pid;
+
+	do {
+		pid = find_pid_ns(nr, ns);
+		if (pid)
+			break;
+		nr = next_pidmap(ns, nr);
+	} while (nr > 0);
+
+	return pid;
+}
 
 /*
  * The pid hash table is scaled according to the amount of memory in the
@@ -224,39 +498,24 @@ EXPORT_SYMBOL(find_task_by_pid_type);
  */
 void __init pidhash_init(void)
 {
-	int i, j, pidhash_size;
-	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
+	int i, pidhash_size;
 
-	pidhash_shift = max(4, fls(megabytes * 4));
-	pidhash_shift = min(12, pidhash_shift);
+	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
+					   HASH_EARLY | HASH_SMALL,
+					   &pidhash_shift, NULL, 4096);
 	pidhash_size = 1 << pidhash_shift;
-	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
-		pidhash_size, pidhash_shift,
-		PIDTYPE_MAX * pidhash_size * sizeof(struct hlist_head));
-
-	for (i = 0; i < PIDTYPE_MAX; i++) {
-		pid_hash[i] = alloc_bootmem(pidhash_size *
-					sizeof(*(pid_hash[i])));
-		if (!pid_hash[i])
-			panic("Could not alloc pidhash!\n");
-		for (j = 0; j < pidhash_size; j++)
-			INIT_HLIST_HEAD(&pid_hash[i][j]);
-	}
+	for (i = 0; i < pidhash_size; i++)
+		INIT_HLIST_HEAD(&pid_hash[i]);
 }
 
 void __init pidmap_init(void)
 {
-	int i;
-
-	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
-	set_bit(0, pidmap_array->page);
-	atomic_dec(&pidmap_array->nr_free);
-
-	/*
-	 * Allocate PID 0, and hash it via all PID types:
-	 */
+	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	/* Reserve PID 0. We never call free_pidmap(0) */
+	set_bit(0, init_pid_ns.pidmap[0].page);
+	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
 
-	for (i = 0; i < PIDTYPE_MAX; i++)
-		attach_pid(current, i, 0);
+	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 }
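find_ge_pid() turns the pidmap into an iterable, ordered sequence, which is what /proc readdir needs in order to restart a listing at an arbitrary position. A hypothetical walker over a namespace, in the same style (a sketch, not part of this patch):

/* Hypothetical: print every live pid number in ns, in increasing order. */
static void example_walk_pids(struct pid_namespace *ns)
{
	struct pid *pid;
	int nr = 1;

	rcu_read_lock();
	while ((pid = find_ge_pid(nr, ns)) != NULL) {
		nr = pid_nr_ns(pid, ns);
		printk(KERN_DEBUG "pid %d\n", nr);
		nr++;			/* resume just past the pid we saw */
	}
	rcu_read_unlock();
}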