diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f68827..64a72e2 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -1,37 +1,47 @@
 
 #include <linux/mm.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/fs_struct.h>
 #include <linux/mount.h>
+#include <linux/ptrace.h>
 #include <linux/seq_file.h>
 #include "internal.h"
 
 /*
  * Logic: we've got two memory sums for each process, "shared", and
- * "non-shared". Shared memory may get counted more then once, for
+ * "non-shared". Shared memory may get counted more than once, for
  * each process that owns it. Non-shared memory is counted
  * accurately.
  */
-char *task_mem(struct mm_struct *mm, char *buffer)
+void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-       struct vm_list_struct *vml;
-       unsigned long bytes = 0, sbytes = 0, slack = 0;
+       struct vm_area_struct *vma;
+       struct vm_region *region;
+       struct rb_node *p;
+       unsigned long bytes = 0, sbytes = 0, slack = 0, size;
         
        down_read(&mm->mmap_sem);
-       for (vml = mm->context.vmlist; vml; vml = vml->next) {
-               if (!vml->vma)
-                       continue;
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+
+               bytes += kobjsize(vma);
+
+               region = vma->vm_region;
+               if (region) {
+                       size = kobjsize(region);
+                       size += region->vm_end - region->vm_start;
+               } else {
+                       size = vma->vm_end - vma->vm_start;
+               }
 
-               bytes += kobjsize(vml);
                if (atomic_read(&mm->mm_count) > 1 ||
-                   atomic_read(&vml->vma->vm_usage) > 1
-                   ) {
-                       sbytes += kobjsize((void *) vml->vma->vm_start);
-                       sbytes += kobjsize(vml->vma);
+                   vma->vm_flags & VM_MAYSHARE) {
+                       sbytes += size;
                } else {
-                       bytes += kobjsize((void *) vml->vma->vm_start);
-                       bytes += kobjsize(vml->vma);
-                       slack += kobjsize((void *) vml->vma->vm_start) -
-                               (vml->vma->vm_end - vml->vma->vm_start);
+                       bytes += size;
+                       if (region)
+                               slack = region->vm_end - vma->vm_end;
                }
        }
 
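For reference: the old singly-linked vm_list_struct chain hung off mm->context, whereas VMAs now live in the per-mm rbtree mm->mm_rb, so every walk in this file becomes an rb_first()/rb_next() loop. Note also that "slack" changes meaning slightly: it is now the unused tail of the shared vm_region beyond the VMA's end, rather than allocator rounding measured with kobjsize(). A minimal sketch of the traversal idiom, as a hypothetical helper that is not part of this diff:

    #include <linux/mm.h>
    #include <linux/rbtree.h>

    /* Hypothetical helper: count the shareable VMAs in an mm by walking
     * mm->mm_rb, the same traversal task_mem(), task_vsize() and
     * task_statm() now use.  Caller must hold mm->mmap_sem for reading. */
    static unsigned long count_shared_vmas(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;
            struct rb_node *p;
            unsigned long n = 0;

            for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                    vma = rb_entry(p, struct vm_area_struct, vm_rb);
                    if (vma->vm_flags & VM_MAYSHARE)
                            n++;
            }
            return n;
    }
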
@@ -40,7 +50,7 @@ char *task_mem(struct mm_struct *mm, char *buffer)
        else
                bytes += kobjsize(mm);
        
-       if (current->fs && atomic_read(&current->fs->count) > 1)
+       if (current->fs && current->fs->users > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);
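
The fs_struct test changes because the reworked struct fs_struct replaced the atomic_t count with a plain int users (guarded by fs->lock); an unlocked read is acceptable for an advisory statistic like this. The same test as a hypothetical inline helper, assuming the post-rework layout:

    #include <linux/fs_struct.h>

    /* Hypothetical predicate mirroring the check above: fs->users is a
     * plain int in the reworked fs_struct, so a racy unlocked read is
     * fine for accounting output. */
    static inline int fs_is_shared(struct fs_struct *fs)
    {
            return fs && fs->users > 1;
    }
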
@@ -57,25 +67,25 @@ char *task_mem(struct mm_struct *mm, char *buffer)
 
        bytes += kobjsize(current); /* includes kernel stack */
 
-       buffer += sprintf(buffer,
+       seq_printf(m,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);
 
        up_read(&mm->mmap_sem);
-       return buffer;
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-       struct vm_list_struct *tbp;
+       struct vm_area_struct *vma;
+       struct rb_node *p;
        unsigned long vsize = 0;
 
        down_read(&mm->mmap_sem);
-       for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-               if (tbp->vma)
-                       vsize += kobjsize((void *) tbp->vma->vm_start);
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+               vsize += vma->vm_end - vma->vm_start;
        }
        up_read(&mm->mmap_sem);
        return vsize;
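
task_vsize() previously summed kobjsize() of each mapping's backing allocation, so the reported VSZ included allocator rounding; it now sums the exact VMA extents. A hypothetical userspace cross-check (not part of this diff): totalling the address ranges printed by the new show_map() below should reproduce the same figure.

    #include <stdio.h>

    /* Sum the extents of all lines in /proc/self/maps; with this change
     * the total matches what task_vsize() reports for the process. */
    int main(void)
    {
            unsigned long start, end, total = 0;
            char line[256];
            FILE *f = fopen("/proc/self/maps", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "%lx-%lx", &start, &end) == 2)
                            total += end - start;
            fclose(f);
            printf("vsize: %lu bytes\n", total);
            return 0;
    }
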
@@ -84,15 +94,19 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
 {
-       struct vm_list_struct *tbp;
+       struct vm_area_struct *vma;
+       struct vm_region *region;
+       struct rb_node *p;
        int size = kobjsize(mm);
 
        down_read(&mm->mmap_sem);
-       for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-               size += kobjsize(tbp);
-               if (tbp->vma) {
-                       size += kobjsize(tbp->vma);
-                       size += kobjsize((void *) tbp->vma->vm_start);
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+               size += kobjsize(vma);
+               region = vma->vm_region;
+               if (region) {
+                       size += kobjsize(region);
+                       size += region->vm_end - region->vm_start;
                }
        }
 
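task_statm() charges the kernel-side overhead of each mapping: kobjsize() (the NOMMU helper that reports the size of the slab or page allocation behind a pointer) covers the VMA descriptor, and where a vm_region is attached the region object and the real memory it spans are added too. The per-VMA computation, pulled out as a hypothetical helper for clarity:

    #include <linux/mm.h>

    /* Hypothetical helper mirroring the accounting above: the VMA
     * object itself, plus the backing vm_region and its memory when
     * one is attached. */
    static unsigned long nommu_map_size(struct vm_area_struct *vma)
    {
            struct vm_region *region = vma->vm_region;
            unsigned long size = kobjsize(vma);

            if (region) {
                    size += kobjsize(region);
                    size += region->vm_end - region->vm_start;
            }
            return size;
    }
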
@@ -103,62 +117,136 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
        return size;
 }
 
-int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+/*
+ * display a single VMA to a sequenced file
+ */
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 {
-       struct vm_list_struct *vml;
-       struct vm_area_struct *vma;
-       struct task_struct *task = proc_task(inode);
-       struct mm_struct *mm = get_task_mm(task);
-       int result = -ENOENT;
-
-       if (!mm)
-               goto out;
-       down_read(&mm->mmap_sem);
-
-       vml = mm->context.vmlist;
-       vma = NULL;
-       while (vml) {
-               if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
-                       vma = vml->vma;
-                       break;
-               }
-               vml = vml->next;
+       unsigned long ino = 0;
+       struct file *file;
+       dev_t dev = 0;
+       int flags, len;
+       unsigned long long pgoff = 0;
+
+       flags = vma->vm_flags;
+       file = vma->vm_file;
+
+       if (file) {
+               struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+               dev = inode->i_sb->s_dev;
+               ino = inode->i_ino;
+               pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        }
 
-       if (vma) {
-               *mnt = mntget(vma->vm_file->f_vfsmnt);
-               *dentry = dget(vma->vm_file->f_dentry);
-               result = 0;
+       seq_printf(m,
+                  "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+                  vma->vm_start,
+                  vma->vm_end,
+                  flags & VM_READ ? 'r' : '-',
+                  flags & VM_WRITE ? 'w' : '-',
+                  flags & VM_EXEC ? 'x' : '-',
+                  flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+                  pgoff,
+                  MAJOR(dev), MINOR(dev), ino, &len);
+
+       if (file) {
+               len = 25 + sizeof(void *) * 6 - len;
+               if (len < 1)
+                       len = 1;
+               seq_printf(m, "%*c", len, ' ');
+               seq_path(m, &file->f_path, "");
        }
 
-       up_read(&mm->mmap_sem);
-       mmput(mm);
-out:
-       return result;
+       seq_putc(m, '\n');
+       return 0;
 }
 
 /*
- * Albert D. Cahalan suggested to fake entries for the traditional
- * sections here.  This might be worth investigating.
+ * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *v)
+static int show_map(struct seq_file *m, void *_p)
 {
-       return 0;
+       struct rb_node *p = _p;
+
+       return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
 }
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
+       struct proc_maps_private *priv = m->private;
+       struct mm_struct *mm;
+       struct rb_node *p;
+       loff_t n = *pos;
+
+       /* pin the task and mm whilst we play with them */
+       priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+       if (!priv->task)
+               return NULL;
+
+       mm = mm_for_maps(priv->task);
+       if (!mm) {
+               put_task_struct(priv->task);
+               priv->task = NULL;
+               return NULL;
+       }
+
+       /* start from the Nth VMA */
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
+               if (n-- == 0)
+                       return p;
        return NULL;
 }
-static void m_stop(struct seq_file *m, void *v)
+
+static void m_stop(struct seq_file *m, void *_vml)
 {
+       struct proc_maps_private *priv = m->private;
+
+       if (priv->task) {
+               struct mm_struct *mm = priv->task->mm;
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+               put_task_struct(priv->task);
+       }
 }
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+
+static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-       return NULL;
+       struct rb_node *p = _p;
+
+       (*pos)++;
+       return p ? rb_next(p) : NULL;
 }
-struct seq_operations proc_pid_maps_op = {
+
+static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
 };
+
+static int maps_open(struct inode *inode, struct file *file)
+{
+       struct proc_maps_private *priv;
+       int ret = -ENOMEM;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (priv) {
+               priv->pid = proc_pid(inode);
+               ret = seq_open(file, &proc_pid_maps_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = priv;
+               } else {
+                       kfree(priv);
+               }
+       }
+       return ret;
+}
+
+const struct file_operations proc_maps_operations = {
+       .open           = maps_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
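
The new read side is the standard seq_file pattern: maps_open() allocates per-reader state (struct proc_maps_private) and parks it in seq_file->private; m_start() pins the task, takes the mm via mm_for_maps() (which returns with mm->mmap_sem already held for reading, pairing with the up_read() in m_stop()) and seeks to the *pos'th VMA; m_next() advances; and seq_release_private() frees the private state, which is why maps_open() needs no kfree() on the success path. For reference, a minimal self-contained instance of the same pattern (hypothetical demo module, not from this diff):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* Three fake records, iterated by position. */
    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            return *pos < 3 ? pos : NULL;
    }

    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
            (*pos)++;
            return *pos < 3 ? pos : NULL;
    }

    static void demo_stop(struct seq_file *m, void *v)
    {
            /* nothing to unlock in this toy example */
    }

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "record %lld\n", *(loff_t *)v);
            return 0;
    }

    static const struct seq_operations demo_seq_ops = {
            .start  = demo_start,
            .next   = demo_next,
            .stop   = demo_stop,
            .show   = demo_show,
    };

    static int demo_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &demo_seq_ops);
    }

    static const struct file_operations demo_proc_fops = {
            .open           = demo_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = seq_release,
    };

    static int __init demo_init(void)
    {
            proc_create("seqfile_demo", 0444, NULL, &demo_proc_fops);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            remove_proc_entry("seqfile_demo", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");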