x86, bts: memory accounting
author Markus Metzger <markus.t.metzger@intel.com>
Fri, 19 Dec 2008 14:17:02 +0000 (15:17 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sat, 20 Dec 2008 08:15:47 +0000 (09:15 +0100)
Impact: move the BTS buffer accounting to the mlock bucket

Add alloc_locked_buffer() and free_locked_buffer() functions to mm/mlock.c
to kzalloc a buffer and account the locked memory to current.

Account the memory for the BTS buffer to the tracer.

Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
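
For reference, a minimal sketch (not part of the patch) of the intended call
pattern; the demo_* names are illustrative only, while alloc_locked_buffer()
and free_locked_buffer() are the helpers added below. The allocation is charged
against the caller's RLIMIT_AS and RLIMIT_MEMLOCK, so the same size must be
passed back on free to undo the accounting.

	/* Illustrative caller, mirroring the ptrace.c changes below. */
	static int demo_alloc(size_t size, void **buffer)
	{
		/* Checks RLIMIT_AS and RLIMIT_MEMLOCK; on success the pages
		 * are charged to current->mm->total_vm and locked_vm. */
		*buffer = alloc_locked_buffer(size);
		if (!*buffer)
			return -ENOMEM;
		return 0;
	}

	static void demo_free(void *buffer, size_t size)
	{
		/* Undoes the vm accounting for current and kfree()s the
		 * buffer; size must match the allocation size. */
		free_locked_buffer(buffer, size);
	}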
arch/x86/kernel/ptrace.c
include/linux/mm.h
mm/mlock.c

diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 6ad2bb6..0a5df5f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -650,6 +650,24 @@ static int ptrace_bts_drain(struct task_struct *child,
        return drained;
 }
 
+static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
+{
+       child->bts_buffer = alloc_locked_buffer(size);
+       if (!child->bts_buffer)
+               return -ENOMEM;
+
+       child->bts_size = size;
+
+       return 0;
+}
+
+static void ptrace_bts_free_buffer(struct task_struct *child)
+{
+       free_locked_buffer(child->bts_buffer, child->bts_size);
+       child->bts_buffer = NULL;
+       child->bts_size = 0;
+}
+
 static int ptrace_bts_config(struct task_struct *child,
                             long cfg_size,
                             const struct ptrace_bts_config __user *ucfg)
@@ -679,14 +697,13 @@ static int ptrace_bts_config(struct task_struct *child,
 
        if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
            (cfg.size != child->bts_size)) {
-               kfree(child->bts_buffer);
+               int error;
 
-               child->bts_size = cfg.size;
-               child->bts_buffer = kzalloc(cfg.size, GFP_KERNEL);
-               if (!child->bts_buffer) {
-                       child->bts_size = 0;
-                       return -ENOMEM;
-               }
+               ptrace_bts_free_buffer(child);
+
+               error = ptrace_bts_allocate_buffer(child, cfg.size);
+               if (error < 0)
+                       return error;
        }
 
        if (cfg.flags & PTRACE_BTS_O_TRACE)
@@ -701,10 +718,8 @@ static int ptrace_bts_config(struct task_struct *child,
        if (IS_ERR(child->bts)) {
                int error = PTR_ERR(child->bts);
 
-               kfree(child->bts_buffer);
+               ptrace_bts_free_buffer(child);
                child->bts = NULL;
-               child->bts_buffer = NULL;
-               child->bts_size = 0;
 
                return error;
        }
@@ -784,6 +799,9 @@ static void ptrace_bts_untrace(struct task_struct *child)
                ds_release_bts(child->bts);
                child->bts = NULL;
 
+               /* We cannot update total_vm and locked_vm since
+                  child's mm is already gone. But we can reclaim the
+                  memory. */
                kfree(child->bts_buffer);
                child->bts_buffer = NULL;
                child->bts_size = 0;
@@ -792,7 +810,12 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-       ptrace_bts_untrace(child);
+       if (unlikely(child->bts)) {
+               ds_release_bts(child->bts);
+               child->bts = NULL;
+
+               ptrace_bts_free_buffer(child);
+       }
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ffee2f7..9979d3f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1286,5 +1286,7 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
+extern void *alloc_locked_buffer(size_t size);
+extern void free_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/mm/mlock.c b/mm/mlock.c
index 1ada366..3035a56 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -667,3 +667,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+       unsigned long rlim, vm, pgsz;
+       void *buffer = NULL;
+
+       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->total_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->locked_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       buffer = kzalloc(size, GFP_KERNEL);
+       if (!buffer)
+               goto out;
+
+       current->mm->total_vm  += pgsz;
+       current->mm->locked_vm += pgsz;
+
+ out:
+       up_write(&current->mm->mmap_sem);
+       return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+       unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       current->mm->total_vm  -= pgsz;
+       current->mm->locked_vm -= pgsz;
+
+       up_write(&current->mm->mmap_sem);
+
+       kfree(buffer);
+}