proc: switch fs/proc/array.c to struct cred accessors and add ptrace permission checks
[safe/jmp/linux-2.6] / fs / proc / array.c
index c135cbd..725a650 100644 (file)
@@ -40,7 +40,7 @@
  *
  *
  * Alan Cox         :  security fixes.
- *                     <Alan.Cox@linux.org>
+ *                     <alan@lxorguk.ukuu.org.uk>
  *
  * Al Viro           :  safe handling of mm_struct
  *
 #include <linux/signal.h>
 #include <linux/highmem.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/times.h>
 #include <linux/cpuset.h>
 #include <linux/rcupdate.h>
 #include <linux/delayacct.h>
 #include <linux/seq_file.h>
 #include <linux/pid_namespace.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include "internal.h"
 
-/* Gcc optimizes away "strlen(x)" for constant x */
-#define ADDBUF(buffer, string) \
-do { memcpy(buffer, string, strlen(string)); \
-     buffer += strlen(string); } while (0)
-
 static inline void task_name(struct seq_file *m, struct task_struct *p)
 {
        int i;
@@ -162,13 +160,19 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
        struct group_info *group_info;
        int g;
        struct fdtable *fdt = NULL;
+       const struct cred *cred;
        pid_t ppid, tpid;
 
        rcu_read_lock();
        ppid = pid_alive(p) ?
                task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
-       tpid = pid_alive(p) && p->ptrace ?
-               task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0;
+       tpid = 0;
+       if (pid_alive(p)) {
+               struct task_struct *tracer = tracehook_tracer_task(p);
+               if (tracer)
+                       tpid = task_pid_nr_ns(tracer, ns);
+       }
+       cred = get_cred((struct cred *) __task_cred(p));
        seq_printf(m,
                "State:\t%s\n"
                "Tgid:\t%d\n"
@@ -181,8 +185,8 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                task_tgid_nr_ns(p, ns),
                pid_nr_ns(pid, ns),
                ppid, tpid,
-               p->uid, p->euid, p->suid, p->fsuid,
-               p->gid, p->egid, p->sgid, p->fsgid);
+               cred->uid, cred->euid, cred->suid, cred->fsuid,
+               cred->gid, cred->egid, cred->sgid, cred->fsgid);
 
        task_lock(p);
        if (p->files)
@@ -193,13 +197,12 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                fdt ? fdt->max_fds : 0);
        rcu_read_unlock();
 
-       group_info = p->group_info;
-       get_group_info(group_info);
+       group_info = cred->group_info;
        task_unlock(p);
 
        for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
                seq_printf(m, "%d ", GROUP_AT(group_info, g));
-       put_group_info(group_info);
+       put_cred(cred);
 
        seq_printf(m, "\n");
 }
@@ -255,18 +258,16 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
        sigemptyset(&ignored);
        sigemptyset(&caught);
 
-       rcu_read_lock();
        if (lock_task_sighand(p, &flags)) {
                pending = p->pending.signal;
                shpending = p->signal->shared_pending.signal;
                blocked = p->blocked;
                collect_sigign_sigcatch(p, &ignored, &caught);
                num_threads = atomic_read(&p->signal->count);
-               qsize = atomic_read(&p->user->sigpending);
+               qsize = atomic_read(&__task_cred(p)->user->sigpending);
                qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
                unlock_task_sighand(p, &flags);
        }
-       rcu_read_unlock();
 
        seq_printf(m, "Threads:\t%d\n", num_threads);
        seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
@@ -287,16 +288,28 @@ static void render_cap_t(struct seq_file *m, const char *header,
        seq_printf(m, "%s", header);
        CAP_FOR_EACH_U32(__capi) {
                seq_printf(m, "%08x",
-                          a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
+                          a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
        }
        seq_printf(m, "\n");
 }
 
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
 {
-       render_cap_t(m, "CapInh:\t", &p->cap_inheritable);
-       render_cap_t(m, "CapPrm:\t", &p->cap_permitted);
-       render_cap_t(m, "CapEff:\t", &p->cap_effective);
+       const struct cred *cred;
+       kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;
+
+       rcu_read_lock();
+       cred = __task_cred(p);
+       cap_inheritable = cred->cap_inheritable;
+       cap_permitted   = cred->cap_permitted;
+       cap_effective   = cred->cap_effective;
+       cap_bset        = cred->cap_bset;
+       rcu_read_unlock();
+
+       render_cap_t(m, "CapInh:\t", &cap_inheritable);
+       render_cap_t(m, "CapPrm:\t", &cap_permitted);
+       render_cap_t(m, "CapEff:\t", &cap_effective);
+       render_cap_t(m, "CapBnd:\t", &cap_bset);
 }
 
 static inline void task_context_switch_counts(struct seq_file *m,
@@ -330,65 +343,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 
-/*
- * Use precise platform statistics if available:
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-static cputime_t task_utime(struct task_struct *p)
-{
-       return p->utime;
-}
-
-static cputime_t task_stime(struct task_struct *p)
-{
-       return p->stime;
-}
-#else
-static cputime_t task_utime(struct task_struct *p)
-{
-       clock_t utime = cputime_to_clock_t(p->utime),
-               total = utime + cputime_to_clock_t(p->stime);
-       u64 temp;
-
-       /*
-        * Use CFS's precise accounting:
-        */
-       temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
-
-       if (total) {
-               temp *= utime;
-               do_div(temp, total);
-       }
-       utime = (clock_t)temp;
-
-       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-       return p->prev_utime;
-}
-
-static cputime_t task_stime(struct task_struct *p)
-{
-       clock_t stime;
-
-       /*
-        * Use CFS's precise accounting. (we subtract utime from
-        * the total, to make sure the total observed by userspace
-        * grows monotonically - apps rely on that):
-        */
-       stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-                       cputime_to_clock_t(task_utime(p));
-
-       if (stime >= 0)
-               p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
-
-       return p->prev_stime;
-}
-#endif
-
-static cputime_t task_gtime(struct task_struct *p)
-{
-       return p->gtime;
-}
-
 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
 {
@@ -399,6 +353,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        char state;
        pid_t ppid = 0, pgid = -1, sid = -1;
        int num_threads = 0;
+       int permitted;
        struct mm_struct *mm;
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
@@ -411,11 +366,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
        state = *get_task_state(task);
        vsize = eip = esp = 0;
+       permitted = ptrace_may_access(task, PTRACE_MODE_READ);
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
-               eip = KSTK_EIP(task);
-               esp = KSTK_ESP(task);
+               if (permitted) {
+                       eip = KSTK_EIP(task);
+                       esp = KSTK_ESP(task);
+               }
        }
 
        get_task_comm(tcomm, task);
@@ -447,20 +405,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                /* add up live thread stats at the group level */
                if (whole) {
+                       struct task_cputime cputime;
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
-                               utime = cputime_add(utime, task_utime(t));
-                               stime = cputime_add(stime, task_stime(t));
                                gtime = cputime_add(gtime, task_gtime(t));
                                t = next_thread(t);
                        } while (t != task);
 
                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
-                       utime = cputime_add(utime, sig->utime);
-                       stime = cputime_add(stime, sig->stime);
+                       thread_group_cputime(task, &cputime);
+                       utime = cputime.utime;
+                       stime = cputime.stime;
                        gtime = cputime_add(gtime, sig->gtime);
                }
 
@@ -471,7 +429,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                unlock_task_sighand(task, &flags);
        }
 
-       if (!whole || num_threads < 2)
+       if (permitted && (!whole || num_threads < 2))
                wchan = get_wchan(task);
        if (!whole) {
                min_flt = task->min_flt;
@@ -523,7 +481,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                rsslim,
                mm ? mm->start_code : 0,
                mm ? mm->end_code : 0,
-               mm ? mm->start_stack : 0,
+               (permitted && mm) ? mm->start_stack : 0,
                esp,
                eip,
                /* The signal information here is obsolete.