X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=f4d969e85612e16307c95bc93f7cb148e8144b5d;hb=228ebcbe634a30aec35132ea4375721bcc41bec0;hp=62ddddb49db37d4fca4f8cc89371e213636b30ae;hpb=5cef9eca3837a8dcf605a360e213c4179a07c41a;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62ddddb..f4d969e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,8 +1,6 @@
 #ifndef _LINUX_SCHED_H
 #define _LINUX_SCHED_H
 
-#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
-
 /*
  * cloning flags:
  */
@@ -27,6 +25,8 @@
 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
+#define CLONE_NEWPID		0x20000000	/* New pid namespace */
+#define CLONE_NEWNET		0x40000000	/* New network namespace */
 
 /*
  * Scheduling policies
 */
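/*
 * Sketch, not part of the patch: the two new clone flags give the child
 * its own pid and network namespaces.  A minimal user-space use, assuming
 * a kernel with the namespaces compiled in, libc headers that define the
 * flags, and CAP_SYS_ADMIN:
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char stack[16 * 1024];

static int child(void *arg)
{
	/* Inside the new pid namespace the child sees itself as pid 1. */
	printf("child: getpid() = %d\n", getpid());
	return 0;
}

int main(void)
{
	pid_t pid = clone(child, stack + sizeof(stack),
			  CLONE_NEWPID | CLONE_NEWNET | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	printf("parent: child is pid %d here\n", pid);
	return waitpid(pid, NULL, 0) < 0;
}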
@@ -57,12 +57,12 @@ struct sched_param {
 #include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/nodemask.h>
+#include <linux/mm_types.h>
 
 #include <asm/system.h>
 #include <asm/semaphore.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <asm/mmu.h>
 #include <asm/cputime.h>
 
 #include <linux/smp.h>
@@ -75,6 +75,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -86,6 +87,7 @@ struct sched_param {
 #include <linux/timer.h>
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
+#include <linux/kobject.h>
 
 #include <asm/processor.h>
@@ -113,7 +115,7 @@ extern unsigned long avenrun[];		/* Load averages */
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
-#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
+#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
 #define EXP_5		2014		/* 1/exp(5sec/5min) */
 #define EXP_15		2037		/* 1/exp(5sec/15min) */
@@ -174,8 +176,7 @@
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
-#define TASK_NONINTERACTIVE	64
-#define TASK_DEAD		128
+#define TASK_DEAD		64
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -260,6 +262,7 @@ extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -317,7 +320,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-typedef atomic_long_t mm_counter_t;
 
 #else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
 /*
@@ -329,7 +331,6 @@ typedef atomic_long_t mm_counter_t;
 #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
-typedef unsigned long mm_counter_t;
 
 #endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
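/*
 * Sketch, not part of the patch: the ## in the macros above pastes the
 * counter name into a field name, so inc_mm_counter(mm, file_rss) becomes
 * atomic_long_inc(&(mm)->_file_rss) in the split-ptlock case and
 * (mm)->_file_rss++ otherwise.  The same pattern in miniature ("counters"
 * and "inc_counter" are made-up names):
 */
#include <stdio.h>

struct counters { long _file_rss, _anon_rss; };

#define inc_counter(c, member) ((c)->_##member++)

int main(void)
{
	struct counters c = { 0, 0 };

	inc_counter(&c, file_rss);	/* expands to (&c)->_file_rss++ */
	inc_counter(&c, anon_rss);	/* expands to (&c)->_anon_rss++ */
	printf("%ld %ld\n", c._file_rss, c._anon_rss);
	return 0;
}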
@@ -359,86 +360,19 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_ANON_SHARED	3
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
+#define MMF_DUMP_ELF_HEADERS	6
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	4
+#define MMF_DUMP_FILTER_BITS	5
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
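/*
 * Sketch, not part of the patch: with MMF_DUMPABLE_BITS == 2 the filter
 * now spans bits 2..6 of mm->flags, so the macros above work out to
 *
 *	MMF_DUMP_FILTER_MASK    == ((1 << 5) - 1) << 2 == 0x7c
 *	MMF_DUMP_FILTER_DEFAULT == (1 << 2) | (1 << 3) == 0x0c
 *
 * User space toggles the same bits, shifted down, through
 * /proc/<pid>/coredump_filter; the new MMF_DUMP_ELF_HEADERS bit is bit 4
 * there:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");

	if (!f)
		return 1;
	/* dump anon private, anon shared and ELF headers */
	fprintf(f, "%x\n", (1 << 0) | (1 << 1) | (1 << 4));
	return fclose(f) != 0;
}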
-struct mm_struct {
-	struct vm_area_struct * mmap;		/* list of VMAs */
-	struct rb_root mm_rb;
-	struct vm_area_struct * mmap_cache;	/* last find_vma result */
-	unsigned long (*get_unmapped_area) (struct file *filp,
-				unsigned long addr, unsigned long len,
-				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
-	unsigned long mmap_base;		/* base of mmap area */
-	unsigned long task_size;		/* size of task vm space */
-	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
-	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
-	pgd_t * pgd;
-	atomic_t mm_users;			/* How many users with user space? */
-	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
-	int map_count;				/* number of VMAs */
-	struct rw_semaphore mmap_sem;
-	spinlock_t page_table_lock;		/* Protects page tables and some counters */
-
-	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
-						 * together off init_mm.mmlist, and are protected
-						 * by mmlist_lock
-						 */
-
-	/* Special counters, in some configurations protected by the
-	 * page_table_lock, in other configurations by being atomic.
-	 */
-	mm_counter_t _file_rss;
-	mm_counter_t _anon_rss;
-
-	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
-	unsigned long hiwater_vm;	/* High-water virtual memory usage */
-
-	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
-	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
-	unsigned long start_code, end_code, start_data, end_data;
-	unsigned long start_brk, brk, start_stack;
-	unsigned long arg_start, arg_end, env_start, env_end;
-
-	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
-
-	cpumask_t cpu_vm_mask;
-
-	/* Architecture-specific MM context */
-	mm_context_t context;
-
-	/* Swap token stuff */
-	/*
-	 * Last value of global fault stamp as seen by this process.
-	 * In other words, this value gives an indication of how long
-	 * it has been since this task got the token.
-	 * Look at mm/thrash.c
-	 */
-	unsigned int faultstamp;
-	unsigned int token_priority;
-	unsigned int last_interval;
-
-	unsigned long flags; /* Must use atomic bitops to access the bits */
-
-	/* coredumping support */
-	int core_waiters;
-	struct completion *core_startup_done, core_done;
-
-	/* aio bits */
-	rwlock_t		ioctx_list_lock;
-	struct kioctx		*ioctx_list;
-};
-
 struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
-	struct list_head	signalfd_list;
+	wait_queue_head_t	signalfd_wqh;
 };
 
 struct pacct_struct {
@@ -515,6 +449,8 @@ struct signal_struct {
 	 * in __exit_signal, except for the group leader.
 	 */
 	cputime_t utime, stime, cutime, cstime;
+	cputime_t gtime;
+	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -583,8 +519,10 @@ struct user_struct {
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+#endif
 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
 
 #ifdef CONFIG_KEYS
@@ -593,10 +531,25 @@ struct user_struct {
 #endif
 
 	/* Hash table maintenance information */
-	struct list_head uidhash_list;
+	struct hlist_node uidhash_node;
 	uid_t uid;
+
+#ifdef CONFIG_FAIR_USER_SCHED
+	struct task_group *tg;
+#ifdef CONFIG_SYSFS
+	struct kset kset;
+	struct subsys_attribute user_attr;
+	struct work_struct work;
+#endif
+#endif
 };
 
+#ifdef CONFIG_FAIR_USER_SCHED
+extern int uids_kobject_init(void);
+#else
+static inline int uids_kobject_init(void) { return 0; }
+#endif
+
 extern struct user_struct *find_user(uid_t);
 
 extern struct user_struct root_user;
@@ -608,13 +561,17 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long	pcnt;		/* # of times run on this cpu */
+	unsigned long pcount;		/* # of times run on this cpu */
 	unsigned long long cpu_time,	/* time spent on the cpu */
 			   run_delay;	/* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
+#ifdef CONFIG_SCHEDSTATS
+	/* BKL stats */
+	unsigned int bkl_count;
+#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
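/*
 * Sketch, not part of the patch: sched_info is what backs the three
 * numbers in /proc/<pid>/schedstat - time on the cpu, time spent runnable
 * but waiting, and the run count renamed to pcount above (the first two
 * in nanoseconds on CFS kernels):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cpu_time, run_delay, pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu %llu %llu", &cpu_time, &run_delay, &pcount) != 3) {
		fclose(f);
		return 1;
	}
	printf("on cpu %llu ns, runqueue wait %llu ns, %llu slices\n",
	       cpu_time, run_delay, pcount);
	fclose(f);
	return 0;
}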
@@ -681,7 +638,7 @@ enum cpu_idle_type {
 #define SCHED_LOAD_SHIFT	10
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
-#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 1)
+#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
@@ -749,40 +706,37 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_cnt;
-	unsigned long alb_failed;
-	unsigned long alb_pushed;
+	unsigned int alb_count;
+	unsigned int alb_failed;
+	unsigned int alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
-	unsigned long sbe_balanced;
-	unsigned long sbe_pushed;
+	unsigned int sbe_count;
+	unsigned int sbe_balanced;
+	unsigned int sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
-	unsigned long sbf_balanced;
-	unsigned long sbf_pushed;
+	unsigned int sbf_count;
+	unsigned int sbf_balanced;
+	unsigned int sbf_pushed;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_wake_remote;
-	unsigned long ttwu_move_affine;
-	unsigned long ttwu_move_balance;
+	unsigned int ttwu_wake_remote;
+	unsigned int ttwu_move_affine;
+	unsigned int ttwu_move_balance;
 #endif
 };
 
-extern int partition_sched_domains(cpumask_t *partition1,
-				    cpumask_t *partition2);
-
 #endif	/* CONFIG_SMP */
 
 /*
@@ -803,8 +757,6 @@ static inline int above_background_load(void)
 }
 
 struct io_context;			/* See blkdev.h */
-struct cpuset;
-
 #define NGROUPS_SMALL		32
 #define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
 struct group_info {
@@ -853,18 +805,16 @@ struct rq;
 struct sched_domain;
 
 struct sched_class {
-	struct sched_class *next;
+	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
-			      int wakeup, u64 now);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
-			      int sleep, u64 now);
-	void (*yield_task) (struct rq *rq, struct task_struct *p);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
-	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+	struct task_struct * (*pick_next_task) (struct rq *rq);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest,
@@ -874,7 +824,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
-	void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
 
 struct load_weight {
@@ -889,30 +839,22 @@ struct load_weight {
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
- *     4 se->sleep_start_fair
 *     6 se->load.weight
- *     7 se->delta_fair
- *    15 se->wait_runtime
 */
 struct sched_entity {
-	long			wait_runtime;
-	unsigned long		delta_fair_run;
-	unsigned long		delta_fair_sleep;
-	unsigned long		delta_exec;
-	s64			fair_key;
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
+	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
-	u64			wait_start_fair;
-	u64			sleep_start_fair;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
-	s64			sum_wait_runtime;
 
 	u64			sleep_start;
 	u64			sleep_max;
@@ -921,9 +863,25 @@ struct sched_entity {
 	u64			block_start;
 	u64			block_max;
 	u64			exec_max;
-
-	unsigned long		wait_runtime_overruns;
-	unsigned long		wait_runtime_underruns;
+	u64			slice_max;
+
+	u64			nr_migrations;
+	u64			nr_migrations_cold;
+	u64			nr_failed_migrations_affine;
+	u64			nr_failed_migrations_running;
+	u64			nr_failed_migrations_hot;
+	u64			nr_forced_migrations;
+	u64			nr_forced2_migrations;
+
+	u64			nr_wakeups;
+	u64			nr_wakeups_sync;
+	u64			nr_wakeups_migrate;
+	u64			nr_wakeups_local;
+	u64			nr_wakeups_remote;
+	u64			nr_wakeups_affine;
+	u64			nr_wakeups_affine_attempts;
+	u64			nr_wakeups_passive;
+	u64			nr_wakeups_idle;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
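/*
 * Sketch, not part of the patch: vruntime is the heart of this rework.
 * Instead of tracking a +/- wait_runtime balance, CFS now advances each
 * entity's virtual runtime in proportion to real runtime scaled by load
 * weight, roughly
 *
 *	vruntime += delta_exec * NICE_0_LOAD / se->load.weight;
 *
 * so lighter (lower-priority) entities age faster, and the runqueue's
 * rbtree, keyed on vruntime, always has the most entitled task leftmost.
 * prev_sum_exec_runtime lets the tick code see how long the current task
 * has run since it was picked.
 */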
@@ -952,7 +910,7 @@ struct task_struct {
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct sched_class *sched_class;
+	const struct sched_class *sched_class;
 	struct sched_entity se;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -961,6 +919,16 @@ struct task_struct {
 #endif
 	unsigned short ioprio;
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
+	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
@@ -1022,7 +990,8 @@ struct task_struct {
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
 	unsigned int rt_priority;
-	cputime_t utime, stime;
+	cputime_t utime, stime, utimescaled, stimescaled;
+	cputime_t gtime;
 	unsigned long nvcsw, nivcsw;	/* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
@@ -1045,16 +1014,6 @@ struct task_struct {
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
-	int oomkilladj; /* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1086,8 +1045,9 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-
+#ifdef CONFIG_SECURITY
 	void *security;
+#endif
 	struct audit_context *audit_context;
 	seccomp_t seccomp;
@@ -1149,13 +1109,6 @@ struct task_struct {
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
-/*
- * current io wait handle: wait queue entry to use for io waits
- * If this thread is processing aio, this points at the waitqueue
- * inside the currently handled kiocb. It may be NULL (i.e. default
- * to a stack based synchronous wait) if its doing sync IO.
- */
-	wait_queue_t *io_wait;
 #ifdef CONFIG_TASK_XACCT
 /* i/o counters(bytes read/written, #syscalls */
 	u64 rchar, wchar, syscr, syscw;
@@ -1171,18 +1124,24 @@ struct task_struct {
 	short il_next;
 #endif
 #ifdef CONFIG_CPUSETS
-	struct cpuset *cpuset;
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 	int cpuset_mem_spread_rotor;
 #endif
+#ifdef CONFIG_CGROUPS
+	/* Control Group info protected by css_set_lock */
+	struct css_set *cgroups;
+	/* cg_list protected by css_set_lock and tsk->alloc_lock */
+	struct list_head cg_list;
+#endif
+#ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
-
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -1196,6 +1155,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
+	struct prop_local_single dirties;
 };
 
 /*
@@ -1229,44 +1189,127 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline pid_t process_group(struct task_struct *tsk)
+static inline void set_task_session(struct task_struct *tsk, pid_t session)
 {
-	return tsk->signal->pgrp;
+	tsk->signal->__session = session;
 }
 
-static inline pid_t signal_session(struct signal_struct *sig)
+static inline struct pid *task_pid(struct task_struct *task)
 {
-	return sig->__session;
+	return task->pids[PIDTYPE_PID].pid;
 }
 
-static inline pid_t process_session(struct task_struct *tsk)
+static inline struct pid *task_tgid(struct task_struct *task)
 {
-	return signal_session(tsk->signal);
+	return task->group_leader->pids[PIDTYPE_PID].pid;
}
 
-static inline void set_signal_session(struct signal_struct *sig, pid_t session)
+static inline struct pid *task_pgrp(struct task_struct *task)
 {
-	sig->__session = session;
+	return task->group_leader->pids[PIDTYPE_PGID].pid;
 }
 
-static inline struct pid *task_pid(struct task_struct *task)
+static inline struct pid *task_session(struct task_struct *task)
 {
-	return task->pids[PIDTYPE_PID].pid;
+	return task->group_leader->pids[PIDTYPE_SID].pid;
 }
 
-static inline struct pid *task_tgid(struct task_struct *task)
+struct pid_namespace;
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr()    : virtual id, i.e. the id seen from the namespace the task
+ *                     belongs to. this only makes sence when called in the
+ *                     context of the task that belongs to the same namespace;
+ * task_xid_nr_ns()  : id seen from the ns specified;
+ *
+ * set_task_vxid()   : assigns a virtual id to a task;
+ *
+ * task_ppid_nr_ns() : the parent's id as seen from the namespace specified.
+ *                     the result depends on the namespace and whether the
+ *                     task in question is the namespace's init. e.g. for the
+ *                     namespace's init this will return 0 when called from
+ *                     the namespace of this init, or appropriate id otherwise.
+ *
+ *
+ * see also pid_nr() etc in include/linux/pid.h
+ */
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
-	return task->group_leader->pids[PIDTYPE_PID].pid;
+	return tsk->pid;
 }
 
-static inline struct pid *task_pgrp(struct task_struct *task)
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return task->group_leader->pids[PIDTYPE_PGID].pid;
+	return pid_nr_ns(task_pid(tsk), ns);
 }
 
-static inline struct pid *task_session(struct task_struct *task)
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return task->group_leader->pids[PIDTYPE_SID].pid;
+	return pid_vnr(task_pid(tsk));
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+	return tsk->tgid;
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return pid_nr_ns(task_tgid(tsk), ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+	return pid_vnr(task_tgid(tsk));
+}
+
+
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+	return tsk->signal->pgrp;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return pid_nr_ns(task_pgrp(tsk), ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+	return pid_vnr(task_pgrp(tsk));
+}
+
+
+static inline pid_t task_session_nr(struct task_struct *tsk)
+{
+	return tsk->signal->__session;
+}
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return pid_nr_ns(task_session(tsk), ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+	return pid_vnr(task_session(tsk));
+}
+
+
+static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return pid_nr_ns(task_pid(rcu_dereference(tsk->real_parent)), ns);
 }
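/*
 * Sketch, not part of the patch: how the accessors above are meant to be
 * used from kernel code ("show_task_ids" is a made-up example).  A status
 * handler that reports ids relative to a viewer's namespace rather than
 * the global one:
 */
#include <linux/sched.h>
#include <linux/seq_file.h>

static void show_task_ids(struct seq_file *m, struct task_struct *tsk,
			  struct pid_namespace *viewer_ns)
{
	seq_printf(m, "global %d, own %d, viewer %d\n",
		   task_pid_nr(tsk),		/* id in the init namespace */
		   task_pid_vnr(tsk),		/* id as the task sees itself */
		   task_pid_nr_ns(tsk, viewer_ns)); /* 0 if not visible there */
}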
 
 /**
@@ -1283,16 +1326,22 @@ static inline int pid_alive(struct task_struct *p)
 }
 
 /**
- * is_init - check if a task structure is init
+ * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
-static inline int is_init(struct task_struct *tsk)
+static inline int is_global_init(struct task_struct *tsk)
 {
 	return tsk->pid == 1;
 }
 
+/*
+ * is_container_init:
+ * check whether in the task is init in its own pid namespace.
+ */
+extern int is_container_init(struct task_struct *tsk);
+
 extern struct pid *cad_pid;
 
 extern void free_task(struct task_struct *tsk);
@@ -1314,6 +1363,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
+#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
@@ -1390,7 +1440,8 @@ extern void sched_exec(void);
 #define sched_exec()   {}
 #endif
 
-extern void sched_clock_unstable_event(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
@@ -1400,13 +1451,17 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
-extern unsigned int sysctl_sched_granularity;
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_nr_latency;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
-extern unsigned int sysctl_sched_stat_granularity;
-extern unsigned int sysctl_sched_runtime_limit;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
+extern unsigned int sysctl_sched_migration_cost;
+#endif
+
+extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
@@ -1458,8 +1513,32 @@ extern struct task_struct init_task;
 
 extern struct mm_struct init_mm;
 
-#define find_task_by_pid(nr)	find_task_by_pid_type(PIDTYPE_PID, nr)
-extern struct task_struct *find_task_by_pid_type(int type, int pid);
+extern struct pid_namespace init_pid_ns;
+
+/*
+ * find a task by one of its numerical ids
+ *
+ * find_task_by_pid_type_ns():
+ *      it is the most generic call - it finds a task by all id,
+ *      type and namespace specified
+ * find_task_by_pid_ns():
+ *      finds a task by its pid in the specified namespace
+ * find_task_by_vpid():
+ *      finds a task by its virtual pid
+ * find_task_by_pid():
+ *      finds a task by its global pid
+ *
+ * see also find_pid() etc in include/linux/pid.h
+ */
+
+extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
+		struct pid_namespace *ns);
+
+extern struct task_struct *find_task_by_pid(pid_t nr);
+extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+		struct pid_namespace *ns);
+
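/*
 * Sketch, not part of the patch: a typical namespace-aware lookup
 * ("grab_task_by_vpid" is a made-up name).  The task list is
 * RCU-protected, so the lookup must happen under rcu_read_lock() and a
 * reference must be taken before the pointer escapes the read side:
 */
#include <linux/sched.h>
#include <linux/rcupdate.h>

static struct task_struct *grab_task_by_vpid(pid_t vnr)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(vnr);	/* vnr: id in the caller's namespace */
	if (tsk)
		get_task_struct(tsk);	/* pair with put_task_struct() */
	rcu_read_unlock();

	return tsk;
}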
 extern void __set_special_pids(pid_t session, pid_t pgrp);
 
 /* per-UID process charging. */
@@ -1471,6 +1550,7 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 }
 extern void free_uid(struct user_struct *);
 extern void switch_uid(struct user_struct *);
+extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 
@@ -1662,7 +1742,8 @@ static inline int thread_group_empty(struct task_struct *p)
 /*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
- * pins the final release of task.io_context.  Also protects ->cpuset.
+ * pins the final release of task.io_context.  Also protects ->cpuset and
+ * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -1839,6 +1920,18 @@ extern int sched_mc_power_savings, sched_smt_power_savings;
 
 extern void normalize_rt_tasks(void);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+extern struct task_group init_task_group;
+
+extern struct task_group *sched_create_group(void);
+extern void sched_destroy_group(struct task_group *tg);
+extern void sched_move_task(struct task_struct *tsk);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+extern unsigned long sched_group_shares(struct task_group *tg);
+
+#endif
+
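/*
 * Sketch, not part of the patch: how the group-scheduling hooks above fit
 * together ("my_tg" and the two functions are made-up names).  Groups get
 * a CPU weight relative to the default share of 1024 (the nice-0 weight);
 * tasks are re-attached with sched_move_task() after their uid or control
 * group association changes:
 */
#include <linux/err.h>
#include <linux/sched.h>

static struct task_group *my_tg;

static int my_groups_init(void)
{
	my_tg = sched_create_group();
	if (IS_ERR(my_tg))
		return PTR_ERR(my_tg);

	/* give the group half the CPU weight of a default group */
	return sched_group_set_shares(my_tg, 512);
}

static void my_groups_exit(void)
{
	sched_destroy_group(my_tg);
}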
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {