#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
+#define CLONE_NEWIPC 0x08000000 /* New ipcs */
+#define CLONE_NEWUSER 0x10000000 /* New user namespace */
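Both new flags slot into the existing CLONE_NEW* namespace family. From user space they can be handed to clone(2) or unshare(2); a minimal sketch (error handling trimmed, and it assumes a kernel built with these patches plus matching headers):

    #define _GNU_SOURCE
    #include <sched.h>   /* unshare(), CLONE_NEWUTS, CLONE_NEWIPC */
    #include <stdio.h>

    int main(void)
    {
        /* Detach from the parent's UTS and IPC namespaces. */
        if (unshare(CLONE_NEWUTS | CLONE_NEWIPC) == -1) {
            perror("unshare");
            return 1;
        }
        /* sethostname() from here on affects only the new namespace. */
        return 0;
    }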
/*
* Scheduling policies
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE 5
#ifdef __KERNEL__
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
+#include <linux/task_io_accounting.h>
#include <asm/processor.h>
struct exec_domain;
struct futex_pi_state;
+struct bio;
/*
* List of flags we want to share for kernel threads,
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
/*
* Task state bitmask. NOTE! These bits are also
extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_t nohz_cpu_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+extern int select_nohz_load_balancer(int cpu);
+#else
+static inline int select_nohz_load_balancer(int cpu)
+{
+ return 0;
+}
+#endif
+
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (0 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+ show_state_filter(0);
+}
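The filter argument lets a caller dump only tasks whose state matches, while show_state() keeps the old dump-everything semantics by passing 0. A SysRq-style debugging hook that only cares about blocked tasks would do:

    /* dump only tasks in uninterruptible (D) state */
    show_state_filter(TASK_UNINTERRUPTIBLE);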
extern void show_regs(struct pt_regs *);
/*
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
+extern void touch_all_softlockup_watchdogs(void);
#else
static inline void softlockup_tick(void)
{
static inline void touch_softlockup_watchdog(void)
{
}
+static inline void touch_all_softlockup_watchdogs(void)
+{
+}
#endif
asmlinkage void schedule(void);
struct nsproxy;
+struct user_namespace;
/* Maximum number of active map areas. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT 65536
(mm)->hiwater_vm = (mm)->total_vm; \
} while (0)
+extern void set_dumpable(struct mm_struct *mm, int value);
+extern int get_dumpable(struct mm_struct *mm);
+
+/* mm flags */
+#define MMF_DUMPABLE 0 /* core dump is permitted */
+#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
+
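Moving the dumpable value out of the two-bit bitfield below and into mm->flags means it can be updated with atomic bitops rather than non-atomic bitfield stores. A sketch of what the set_dumpable() accessor can look like; the bit ordering and the smp_wmb() placement here are assumptions about how the two bits are kept consistent for lockless readers:

    void set_dumpable(struct mm_struct *mm, int value)
    {
        switch (value) {
        case 0:
            clear_bit(MMF_DUMPABLE, &mm->flags);
            smp_wmb();
            clear_bit(MMF_DUMP_SECURELY, &mm->flags);
            break;
        case 1:
            set_bit(MMF_DUMPABLE, &mm->flags);
            smp_wmb();
            clear_bit(MMF_DUMP_SECURELY, &mm->flags);
            break;
        case 2:
            set_bit(MMF_DUMP_SECURELY, &mm->flags);
            smp_wmb();
            set_bit(MMF_DUMPABLE, &mm->flags);
            break;
        }
    }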
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- unsigned dumpable:2;
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
mm_context_t context;
- /* Token based thrashing protection. */
- unsigned long swap_token_time;
- char recent_pagein;
+ /* Swap token stuff */
+ /*
+ * Last value of global fault stamp as seen by this process.
+ * In other words, this value gives an indication of how long
+ * it has been since this task got the token.
+ * See mm/thrash.c
+ */
+ unsigned int faultstamp;
+ unsigned int token_priority;
+ unsigned int last_interval;
+
+ unsigned long flags; /* Must use atomic bitops to access the bits */
/* coredumping support */
int core_waiters;
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ struct list_head signalfd_list;
};
struct pacct_struct {
/* job control IDs */
pid_t pgrp;
- pid_t tty_old_pgrp;
- pid_t session;
+ struct pid *tty_old_pgrp;
+
+ union {
+ pid_t session __deprecated;
+ pid_t __session;
+ };
+
/* boolean value for session group leader */
int leader;
cputime_t utime, stime, cutime, cstime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
/*
* Cumulative ns of scheduled CPU time for dead threads in the
* from jiffies_to_ns(utime + stime) if sched_clock uses something
* other than jiffies.)
*/
- unsigned long long sched_time;
+ unsigned long long sum_sched_runtime;
/*
* We don't bother to synchronize most readers of this at all,
struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
- spinlock_t stats_lock;
struct taskstats *stats;
#endif
+#ifdef CONFIG_AUDIT
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+#endif
};
/* Context switch must be unlocked if interrupts are to be enabled */
#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-#define MAX_PRIO (MAX_RT_PRIO + 40)
-
-#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p) rt_prio((p)->prio)
-#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
-
/*
* Some day this will be a full-fledged user tracking system.
*/
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
/* cumulative counters */
- unsigned long cpu_time, /* time spent on the cpu */
- run_delay, /* time spent waiting on a runqueue */
- pcnt; /* # of timeslices run on this cpu */
+ unsigned long pcnt; /* # of times run on this cpu */
+ unsigned long long cpu_time, /* time spent on the cpu */
+ run_delay; /* time spent waiting on a runqueue */
/* timestamps */
- unsigned long last_arrival, /* when we last ran on a cpu */
- last_queued; /* when we were last queued to run */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
#ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_TASK_DELAY_ACCT
#endif
}
-enum idle_type
-{
- SCHED_IDLE,
- NOT_IDLE,
- NEWLY_IDLE,
- MAX_IDLE_TYPES
+enum cpu_idle_type {
+ CPU_IDLE,
+ CPU_NOT_IDLE,
+ CPU_NEWLY_IDLE,
+ CPU_MAX_IDLE_TYPES
};
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT 10
+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
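Worked out: SCHED_LOAD_SCALE is now 1L << 10 = 1024 (one CPU's worth of load, up from the old 128), and SCHED_LOAD_SCALE_FUZZ is 1024 >> 5 = 32.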
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
+#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
+
-#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \
- ? SD_POWERSAVINGS_BALANCE : 0)
+#define BALANCE_FOR_MC_POWER \
+ (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+
+#define BALANCE_FOR_PKG_POWER \
+ ((sched_mc_power_savings || sched_smt_power_savings) ? \
+ SD_POWERSAVINGS_BALANCE : 0)
+
+#define test_sd_parent(sd, flag) ((sd->parent && \
+ (sd->parent->flags & flag)) ? 1 : 0)
struct sched_group {
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU. This is read only (except for setup, hotplug CPU).
+ * Note: never change cpu_power without recomputing its reciprocal.
+ */
+ unsigned int __cpu_power;
+ /*
+ * reciprocal value of cpu_power to avoid expensive divides
+ * (see include/linux/reciprocal_div.h)
*/
- unsigned long cpu_power;
+ u32 reciprocal_cpu_power;
};
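Because readers divide through the cached reciprocal, any writer must update both fields together, which is what the "never change cpu_power without recomputing its reciprocal" warning is about. A hedged sketch of such a helper (the name is illustrative; reciprocal_value() is the conversion routine from linux/reciprocal_div.h):

    static inline void sg_set_power(struct sched_group *sg, unsigned int power)
    {
        sg->__cpu_power = power;
        /* keep the cached reciprocal in step with cpu_power */
        sg->reciprocal_cpu_power = reciprocal_value(power);
    }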
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
+ struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */
unsigned int busy_idx;
unsigned int idle_idx;
unsigned int newidle_idx;
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
- unsigned long lb_cnt[MAX_IDLE_TYPES];
- unsigned long lb_failed[MAX_IDLE_TYPES];
- unsigned long lb_balanced[MAX_IDLE_TYPES];
- unsigned long lb_imbalance[MAX_IDLE_TYPES];
- unsigned long lb_gained[MAX_IDLE_TYPES];
- unsigned long lb_hot_gained[MAX_IDLE_TYPES];
- unsigned long lb_nobusyg[MAX_IDLE_TYPES];
- unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+ unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
/* Active load balancing */
unsigned long alb_cnt;
extern int partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
#endif /* CONFIG_SMP */
struct pipe_inode_info;
struct uts_namespace;
-enum sleep_type {
- SLEEP_NORMAL,
- SLEEP_NONINTERACTIVE,
- SLEEP_INTERACTIVE,
- SLEEP_INTERRUPTED,
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+ struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+ int wakeup, u64 now);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+ int sleep, u64 now);
+ void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+ int (*load_balance) (struct rq *this_rq, int this_cpu,
+ struct rq *busiest,
+ unsigned long max_nr_move, unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, unsigned long *total_load_moved);
+
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
};
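struct sched_class turns the scheduler core into a dispatcher over per-policy methods, so the core no longer needs to special-case fair versus RT tasks. Conceptually, core call sites reduce to indirections of this shape (a sketch, not the literal core code):

    static inline void enqueue_task(struct rq *rq, struct task_struct *p,
                                    int wakeup, u64 now)
    {
        /* defer to whichever policy class owns this task */
        p->sched_class->enqueue_task(rq, p, wakeup, now);
    }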
-struct prio_array;
+struct load_weight {
+ unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group, etc.)
+ *
+ * Current field usage histogram:
+ *
+ * 4 se->block_start
+ * 4 se->run_node
+ * 4 se->sleep_start
+ * 4 se->sleep_start_fair
+ * 6 se->load.weight
+ * 7 se->delta_fair
+ * 15 se->wait_runtime
+ */
+struct sched_entity {
+ long wait_runtime;
+ unsigned long delta_fair_run;
+ unsigned long delta_fair_sleep;
+ unsigned long delta_exec;
+ s64 fair_key;
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ unsigned int on_rq;
+
+ u64 wait_start_fair;
+ u64 wait_start;
+ u64 exec_start;
+ u64 sleep_start;
+ u64 sleep_start_fair;
+ u64 block_start;
+ u64 sleep_max;
+ u64 block_max;
+ u64 exec_max;
+ u64 wait_max;
+ u64 last_ran;
+
+ u64 sum_exec_runtime;
+ s64 sum_wait_runtime;
+ s64 sum_sleep_runtime;
+ unsigned long wait_runtime_overruns;
+ unsigned long wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct cfs_rq *cfs_rq;
+ /* rq "owned" by this entity/group: */
+ struct cfs_rq *my_q;
+#endif
+};
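run_node links the entity into an rbtree ordered by fair_key, so the next entity to run is simply the leftmost node. A sketch using the generic rbtree helpers from linux/rbtree.h (the real pick-next path lives in the fair-class code):

    static struct sched_entity *pick_leftmost(struct rb_root *root)
    {
        struct rb_node *left = rb_first(root);  /* smallest fair_key */

        if (!left)
            return NULL;
        return rb_entry(left, struct sched_entity, run_node);
    }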
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- struct thread_info *thread_info;
+ void *stack;
atomic_t usage;
- unsigned long flags; /* per process flags, defined below */
- unsigned long ptrace;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
int lock_depth; /* BKL lock depth */
int oncpu;
#endif
#endif
- int load_weight; /* for niceness load balancing purposes */
+
int prio, static_prio, normal_prio;
struct list_head run_list;
- struct prio_array *array;
+ struct sched_class *sched_class;
+ struct sched_entity se;
unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
- unsigned long sleep_avg;
- unsigned long long timestamp, last_ran;
- unsigned long long sched_time; /* sched_clock time spent running */
- enum sleep_type sleep_type;
- unsigned long policy;
+ unsigned int policy;
cpumask_t cpus_allowed;
- unsigned int time_slice, first_time_slice;
+ unsigned int time_slice;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
/* task state */
struct linux_binfmt *binfmt;
- long exit_state;
+ int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
- unsigned long personality;
+ unsigned int personality;
unsigned did_exec:1;
pid_t pid;
pid_t tgid;
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned long rt_priority;
+ unsigned int rt_priority;
cputime_t utime, stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
- struct timespec start_time;
+ struct timespec start_time; /* monotonic time */
+ struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
/* journalling filesystem info */
void *journal_info;
+/* stacked block device info */
+ struct bio *bio_list, **bio_tail;
+
/* VM state */
struct reclaim_state *reclaim_state;
* to a stack-based synchronous wait) if it's doing sync IO.
*/
wait_queue_t *io_wait;
+#ifdef CONFIG_TASK_XACCT
/* i/o counters (bytes read/written, #syscalls) */
u64 rchar, wchar, syscr, syscw;
+#endif
+ struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
+#ifdef CONFIG_FAULT_INJECTION
+ int make_it_fail;
+#endif
};
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+ if (unlikely(prio < MAX_RT_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+ return rt_prio(p->prio);
+}
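Nice levels therefore occupy the 40 slots above the RT range: nice -20 maps to priority 100, nice 0 to DEFAULT_PRIO (120), and nice +19 to 139. The scheduler core's conversion macros follow directly from these definitions:

    /* nice [-20, 19] <-> prio [100, 139]; nice 0 == DEFAULT_PRIO */
    #define NICE_TO_PRIO(nice)  (MAX_RT_PRIO + (nice) + 20)
    #define PRIO_TO_NICE(prio)  ((prio) - MAX_RT_PRIO - 20)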
+
static inline pid_t process_group(struct task_struct *tsk)
{
return tsk->signal->pgrp;
}
+static inline pid_t signal_session(struct signal_struct *sig)
+{
+ return sig->__session;
+}
+
+static inline pid_t process_session(struct task_struct *tsk)
+{
+ return signal_session(tsk->signal);
+}
+
+static inline void set_signal_session(struct signal_struct *sig, pid_t session)
+{
+ sig->__session = session;
+}
+
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
}
/**
- * is_init - check if a task structure is the first user space
- * task the kernel created.
- * @p: Task structure to be checked.
+ * is_init - check if a task structure is init
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
*/
static inline int is_init(struct task_struct *tsk)
{
return tsk->pid == 1;
}
+extern struct pid *cad_pid;
+
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
/* Not implemented yet, only for 486 */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
-#define PF_FREEZE 0x00004000 /* this task is being frozen for suspend now */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
/*
* Only the _current_ task can read/write to tsk->flags, but other
extern unsigned long long sched_clock(void);
extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
#define sched_exec() {}
#endif
+extern void sched_clock_unstable_event(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
extern void sched_idle_next(void);
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
#define find_task_by_pid(nr) find_task_by_pid_type(PIDTYPE_PID, nr)
extern struct task_struct *find_task_by_pid_type(int type, int pid);
-extern void set_special_pids(pid_t session, pid_t pgrp);
extern void __set_special_pids(pid_t session, pid_t pgrp);
/* per-UID process charging. */
-extern struct user_struct * alloc_uid(uid_t);
+extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
atomic_inc(&u->__count);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
-extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
-extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
-extern int kill_pg(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
+static inline int kill_cad_pid(int sig, int priv)
+{
+ return kill_pid(cad_pid, sig, priv);
+}
+
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
- if (atomic_dec_and_test(&mm->mm_count))
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern struct task_struct *child_reaper;
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
#ifndef __HAVE_THREAD_FUNCTIONS
-#define task_thread_info(task) (task)->thread_info
-#define task_stack_page(task) ((void*)((task)->thread_info))
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
static inline unsigned long *end_of_stack(struct task_struct *p)
{
- return (unsigned long *)(p->thread_info + 1);
+ return (unsigned long *)(task_thread_info(p) + 1);
}
#endif
return 0;
}
-/* Reevaluate whether the task has signals pending delivery.
- This is required every time the blocked sigset_t changes.
- callers must hold sighand->siglock. */
-
-extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
+/*
+ * Reevaluate whether the task has signals pending delivery.
+ * Wake the task if so.
+ * This is required every time the blocked sigset_t changes.
+ * callers must hold sighand->siglock.
+ */
+extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up(struct task_struct *t, int resume_stopped);
return task_thread_info(p)->cpu;
}
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-#include <linux/sysdev.h>
extern int sched_mc_power_savings, sched_smt_power_savings;
-extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
+#ifdef CONFIG_TASK_XACCT
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
- return p->flags & PF_FROZEN;
+ tsk->rchar += amt;
}
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
- return p->flags & PF_FREEZE;
+ tsk->wchar += amt;
}
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
+static inline void inc_syscr(struct task_struct *tsk)
{
- p->flags |= PF_FREEZE;
+ tsk->syscr++;
}
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
+static inline void inc_syscw(struct task_struct *tsk)
{
- p->flags &= ~PF_FREEZE;
+ tsk->syscw++;
}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
+#else
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
- if (frozen(p)) {
- p->flags &= ~PF_FROZEN;
- wake_up_process(p);
- return 1;
- }
- return 0;
}
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
- p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
}
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
+static inline void inc_syscr(struct task_struct *tsk)
{
- if (freezing(current)) {
- refrigerator();
- return 1;
- } else
- return 0;
}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
+static inline void inc_syscw(struct task_struct *tsk)
+{
+}
+#endif
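With the stubs above, callers can account unconditionally and the calls compile away when CONFIG_TASK_XACCT is off. Typical usage in the syscall read path looks roughly like:

    ssize_t ret = vfs_read(file, buf, count, &pos);

    if (ret > 0)
        add_rchar(current, ret);  /* bytes actually transferred */
    inc_syscr(current);           /* one read syscall, success or not */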
-#endif /* CONFIG_PM */
#endif /* __KERNEL__ */
#endif