[PATCH] refactor capable() to one implementation, add __capable() helper
diff --git a/kernel/sys.c b/kernel/sys.c
index c43b3e2..421009c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/workqueue.h>
+#include <linux/capability.h>
 #include <linux/device.h>
 #include <linux/key.h>
 #include <linux/times.h>
@@ -32,6 +33,7 @@
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -168,7 +170,7 @@ EXPORT_SYMBOL(notifier_chain_unregister);
  *     of the last notifier function called.
  */
  
-int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
 {
        int ret=NOTIFY_DONE;
        struct notifier_block *nb = *n;
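notifier_call_chain() is on the notifier path that the kprobes trap handlers themselves walk (via notify_die()), so it must not be probed; __kprobes simply moves the function into a section the kprobes core refuses to probe. A minimal sketch of the mechanism, where the helper name is hypothetical and the macro is as defined in <linux/kprobes.h> of this era:

#include <linux/kprobes.h>

/* Roughly what the annotation expands to when CONFIG_KPROBES=y:
 *   #define __kprobes  __attribute__((__section__(".kprobes.text")))
 * so the function is linked into .kprobes.text and register_kprobe() on
 * any address inside it is rejected. */
static int __kprobes on_exception_path(void)    /* hypothetical helper */
{
        return 0;       /* safe to call from the int3/debug notifier path */
}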
@@ -414,7 +416,7 @@ void kernel_kexec(void)
 {
 #ifdef CONFIG_KEXEC
        struct kimage *image;
-       image = xchg(&kexec_image, 0);
+       image = xchg(&kexec_image, NULL);
        if (!image) {
                return;
        }
@@ -426,23 +428,25 @@ void kernel_kexec(void)
 }
 EXPORT_SYMBOL_GPL(kernel_kexec);
 
+void kernel_shutdown_prepare(enum system_states state)
+{
+       notifier_call_chain(&reboot_notifier_list,
+               (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
+       system_state = state;
+       device_shutdown();
+}
 /**
  *     kernel_halt - halt the system
  *
  *     Shutdown everything and perform a clean system halt.
  */
-void kernel_halt_prepare(void)
-{
-       notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
-       system_state = SYSTEM_HALT;
-       device_shutdown();
-}
 void kernel_halt(void)
 {
-       kernel_halt_prepare();
+       kernel_shutdown_prepare(SYSTEM_HALT);
        printk(KERN_EMERG "System halted.\n");
        machine_halt();
 }
+
 EXPORT_SYMBOL_GPL(kernel_halt);
 
 /**
@@ -450,20 +454,13 @@ EXPORT_SYMBOL_GPL(kernel_halt);
  *
  *     Shutdown everything and perform a clean system power_off.
  */
-void kernel_power_off_prepare(void)
-{
-       notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
-       system_state = SYSTEM_POWER_OFF;
-       device_shutdown();
-}
 void kernel_power_off(void)
 {
-       kernel_power_off_prepare();
+       kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        printk(KERN_EMERG "Power down.\n");
        machine_power_off();
 }
 EXPORT_SYMBOL_GPL(kernel_power_off);
-
 /*
  * Reboot system call: for obvious reasons only root may call it,
  * and even root needs to set up some magic numbers in the registers
@@ -488,6 +485,12 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
                        magic2 != LINUX_REBOOT_MAGIC2C))
                return -EINVAL;
 
+       /* Instead of trying to make the power_off code look like
+        * halt when pm_power_off is not set, do it the easy way.
+        */
+       if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+               cmd = LINUX_REBOOT_CMD_HALT;
+
        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
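The new fallback is visible from user space: on a machine where no pm_power_off handler was ever registered, a power-off request now simply behaves like a halt instead of being special-cased further down. A sketch of the call that reaches this switch (requires CAP_SYS_BOOT):

#include <sys/reboot.h>
#include <linux/reboot.h>

int main(void)
{
        /* With pm_power_off unset, the kernel rewrites this command to
         * LINUX_REBOOT_CMD_HALT before entering the switch above. */
        return reboot(LINUX_REBOOT_CMD_POWER_OFF);
}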
@@ -1083,10 +1086,11 @@ asmlinkage long sys_times(struct tms __user * tbuf)
 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 {
        struct task_struct *p;
+       struct task_struct *group_leader = current->group_leader;
        int err = -EINVAL;
 
        if (!pid)
-               pid = current->pid;
+               pid = group_leader->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
@@ -1106,16 +1110,16 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
        if (!thread_group_leader(p))
                goto out;
 
-       if (p->parent == current || p->real_parent == current) {
+       if (p->real_parent == group_leader) {
                err = -EPERM;
-               if (p->signal->session != current->signal->session)
+               if (p->signal->session != group_leader->signal->session)
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                err = -ESRCH;
-               if (p != current)
+               if (p != group_leader)
                        goto out;
        }
 
@@ -1127,7 +1131,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
                struct task_struct *p;
 
                do_each_task_pid(pgid, PIDTYPE_PGID, p) {
-                       if (p->signal->session == current->signal->session)
+                       if (p->signal->session == group_leader->signal->session)
                                goto ok_pgid;
                } while_each_task_pid(pgid, PIDTYPE_PGID, p);
                goto out;
@@ -1207,27 +1211,25 @@ asmlinkage long sys_getsid(pid_t pid)
 
 asmlinkage long sys_setsid(void)
 {
+       struct task_struct *group_leader = current->group_leader;
        struct pid *pid;
        int err = -EPERM;
 
-       if (!thread_group_leader(current))
-               return -EINVAL;
-
-       down(&tty_sem);
+       mutex_lock(&tty_mutex);
        write_lock_irq(&tasklist_lock);
 
-       pid = find_pid(PIDTYPE_PGID, current->pid);
+       pid = find_pid(PIDTYPE_PGID, group_leader->pid);
        if (pid)
                goto out;
 
-       current->signal->leader = 1;
-       __set_special_pids(current->pid, current->pid);
-       current->signal->tty = NULL;
-       current->signal->tty_old_pgrp = 0;
-       err = process_group(current);
+       group_leader->signal->leader = 1;
+       __set_special_pids(group_leader->pid, group_leader->pid);
+       group_leader->signal->tty = NULL;
+       group_leader->signal->tty_old_pgrp = 0;
+       err = process_group(group_leader);
 out:
        write_unlock_irq(&tasklist_lock);
-       up(&tty_sem);
+       mutex_unlock(&tty_mutex);
        return err;
 }
 
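From user space the visible rule is unchanged by the group_leader rework: setsid() still fails with -EPERM if the caller is already a process-group leader, which is why daemons traditionally fork() first so the child is guaranteed not to be one. The classic sketch:

#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        /* The child cannot be a process-group leader, so the find_pid()
         * lookup above finds no PIDTYPE_PGID entry and setsid() succeeds. */
        switch (fork()) {
        case -1:
                return 1;
        case 0:
                if (setsid() < 0)
                        return 1;
                /* child: session and group leader, no controlling tty */
                return 0;
        default:
                return 0;       /* parent exits */
        }
}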
@@ -1497,6 +1499,8 @@ EXPORT_SYMBOL(in_egroup_p);
 
 DECLARE_RWSEM(uts_sem);
 
+EXPORT_SYMBOL(uts_sem);
+
 asmlinkage long sys_newuname(struct new_utsname __user * name)
 {
        int errno = 0;
@@ -1614,20 +1618,21 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
        struct rlimit new_rlim, *old_rlim;
+       unsigned long it_prof_secs;
        int retval;
 
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
-       if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+       if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
-       if (new_rlim.rlim_cur > new_rlim.rlim_max)
-               return -EINVAL;
+       if (new_rlim.rlim_cur > new_rlim.rlim_max)
+               return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
-                       return -EPERM;
+               return -EPERM;
 
        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
@@ -1637,19 +1642,40 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
        *old_rlim = new_rlim;
        task_unlock(current->group_leader);
 
-       if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
-           (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-            new_rlim.rlim_cur <= cputime_to_secs(
-                    current->signal->it_prof_expires))) {
-               cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
+       if (resource != RLIMIT_CPU)
+               goto out;
+
+       /*
+        * RLIMIT_CPU handling.  Note that the kernel fails to return an error
+        * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
+        * very long-standing error, and fixing it now risks breakage of
+        * applications, so we live with it.
+        */
+       if (new_rlim.rlim_cur == RLIM_INFINITY)
+               goto out;
+
+       it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
+       if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
+               unsigned long rlim_cur = new_rlim.rlim_cur;
+               cputime_t cputime;
+
+               if (rlim_cur == 0) {
+                       /*
+                        * The caller is asking for an immediate RLIMIT_CPU
+                        * expiry.  But we use the zero value to mean "it was
+                        * never set".  So let's cheat and make it one second
+                        * instead
+                        */
+                       rlim_cur = 1;
+               }
+               cputime = secs_to_cputime(rlim_cur);
                read_lock(&tasklist_lock);
                spin_lock_irq(&current->sighand->siglock);
-               set_process_cpu_timer(current, CPUCLOCK_PROF,
-                                     &cputime, NULL);
+               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
                read_unlock(&tasklist_lock);
        }
-
+out:
        return 0;
 }
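Both quirks called out in the comments above are observable from user space: setrlimit() still returns 0 even when the RLIMIT_CPU update is effectively ignored, and a zero soft limit arms the CPU timer for about one second rather than firing immediately. A small sketch:

#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
        struct rlimit rl = { .rlim_cur = 0, .rlim_max = RLIM_INFINITY };

        /* Succeeds; internally rlim_cur == 0 is bumped to one second because
         * zero is reserved to mean "the timer was never set". */
        if (setrlimit(RLIMIT_CPU, &rl) == 0)
                puts("soft RLIMIT_CPU set to 0 (SIGXCPU after roughly 1s of CPU)");
        return 0;
}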
 
@@ -1661,9 +1687,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
  * a lot simpler!  (Which we're not doing right now because we're not
  * measuring them yet).
  *
- * This expects to be called with tasklist_lock read-locked or better,
- * and the siglock not locked.  It may momentarily take the siglock.
- *
  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
  * races with threads incrementing their own counters.  But since word
  * reads are atomic, we either get new values or old values and we don't
@@ -1671,6 +1694,25 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
  * the c* fields from p->signal from races with exit.c updating those
  * fields when reaping, so a sample either gets all the additions of a
  * given child after it's reaped, or none so this sample is before reaping.
+ *
+ * tasklist_lock locking optimisation:
+ * If we are current and single threaded, we do not need to take the tasklist
+ * lock or the siglock.  No one else can take our signal_struct away,
+ * no one else can reap the children to update signal->c* counters, and
+ * no one else can race with the signal-> fields.
+ * If we do not take the tasklist_lock, the signal-> fields could be read
+ * out of order while another thread was just exiting. So we place a
+ * read memory barrier when we avoid the lock.  On the writer side,
+ * a write memory barrier is implied in __exit_signal, as __exit_signal releases
+ * the siglock spinlock after updating the signal-> fields.
+ *
+ * We don't really need the siglock when we access the non c* fields
+ * of the signal_struct (for RUSAGE_SELF) even in the multithreaded
+ * case, since we take the tasklist lock for read and the non c* signal->
+ * fields are updated only in __exit_signal, which is called with
+ * tasklist_lock taken for write, hence these two threads cannot execute
+ * concurrently.
+ *
  */
 
 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
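The main beneficiary of the lockless path described above is the overwhelmingly common case of a single-threaded process asking about itself, which after this change touches neither tasklist_lock nor the siglock. A user-space view of that fast path:

#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
        struct rusage ru;

        /* Single-threaded caller, RUSAGE_SELF: in the hunk below this is the
         * need_lock == 0 branch, which relies on smp_rmb() instead of locks. */
        if (getrusage(RUSAGE_SELF, &ru) == 0)
                printf("utime %ld.%06lds  stime %ld.%06lds\n",
                       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
                       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
        return 0;
}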
@@ -1678,13 +1720,26 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;
+       int need_lock = 0;
 
        memset((char *) r, 0, sizeof *r);
+       utime = stime = cputime_zero;
 
-       if (unlikely(!p->signal))
-               return;
+       if (p != current || !thread_group_empty(p))
+               need_lock = 1;
+
+       if (need_lock) {
+               read_lock(&tasklist_lock);
+               if (unlikely(!p->signal)) {
+                       read_unlock(&tasklist_lock);
+                       return;
+               }
+       } else
+               /* See locking comments above */
+               smp_rmb();
 
        switch (who) {
+               case RUSAGE_BOTH:
                case RUSAGE_CHILDREN:
                        spin_lock_irqsave(&p->sighand->siglock, flags);
                        utime = p->signal->cutime;
@@ -1694,22 +1749,11 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                        r->ru_minflt = p->signal->cmin_flt;
                        r->ru_majflt = p->signal->cmaj_flt;
                        spin_unlock_irqrestore(&p->sighand->siglock, flags);
-                       cputime_to_timeval(utime, &r->ru_utime);
-                       cputime_to_timeval(stime, &r->ru_stime);
-                       break;
+
+                       if (who == RUSAGE_CHILDREN)
+                               break;
+
                case RUSAGE_SELF:
-                       spin_lock_irqsave(&p->sighand->siglock, flags);
-                       utime = stime = cputime_zero;
-                       goto sum_group;
-               case RUSAGE_BOTH:
-                       spin_lock_irqsave(&p->sighand->siglock, flags);
-                       utime = p->signal->cutime;
-                       stime = p->signal->cstime;
-                       r->ru_nvcsw = p->signal->cnvcsw;
-                       r->ru_nivcsw = p->signal->cnivcsw;
-                       r->ru_minflt = p->signal->cmin_flt;
-                       r->ru_majflt = p->signal->cmaj_flt;
-               sum_group:
                        utime = cputime_add(utime, p->signal->utime);
                        stime = cputime_add(stime, p->signal->stime);
                        r->ru_nvcsw += p->signal->nvcsw;
@@ -1726,21 +1770,22 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                                r->ru_majflt += t->maj_flt;
                                t = next_thread(t);
                        } while (t != p);
-                       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-                       cputime_to_timeval(utime, &r->ru_utime);
-                       cputime_to_timeval(stime, &r->ru_stime);
                        break;
+
                default:
                        BUG();
        }
+
+       if (need_lock)
+               read_unlock(&tasklist_lock);
+       cputime_to_timeval(utime, &r->ru_utime);
+       cputime_to_timeval(stime, &r->ru_stime);
 }
 
 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
 {
        struct rusage r;
-       read_lock(&tasklist_lock);
        k_getrusage(p, who, &r);
-       read_unlock(&tasklist_lock);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
 }