tunnels: fix netns vs proto registration ordering
diff --git a/kernel/profile.c b/kernel/profile.c
index d18e2d2..a55d3a3 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -111,20 +111,18 @@ int __ref profile_init(void)
        /* only text is profiled */
        prof_len = (_etext - _stext) >> prof_shift;
        buffer_bytes = prof_len*sizeof(atomic_t);
-       if (!slab_is_available()) {
-               prof_buffer = alloc_bootmem(buffer_bytes);
-               alloc_bootmem_cpumask_var(&prof_cpu_mask);
-               return 0;
-       }
 
        if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
+       cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
+       prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
        if (prof_buffer)
                return 0;
 
-       prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
+       prof_buffer = alloc_pages_exact(buffer_bytes,
+                                       GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
        if (prof_buffer)
                return 0;
 
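/*
 * Editor's note on the hunk above: the early-boot bootmem path is gone,
 * prof_cpu_mask now gets a defined initial value (all possible CPUs), and
 * the first two buffer allocations pass __GFP_NOWARN so that an expected
 * failure of a large contiguous request does not spam the log; the function
 * still has a further fallback below the visible context.  A minimal sketch
 * of that "try cheap, fall back quietly" pattern follows; the helper name
 * alloc_big_zeroed() is hypothetical, this is not the kernel's profile_init().
 */
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

static void *alloc_big_zeroed(size_t bytes)
{
	void *buf;

	/* physically contiguous slab allocation; may fail for large sizes */
	buf = kzalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
	if (buf)
		return buf;

	/* whole pages, still contiguous, without kmalloc's power-of-two rounding */
	buf = alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (buf)
		return buf;

	/* last resort: only virtually contiguous, but rarely fails */
	return vzalloc(bytes);
}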
@@ -368,7 +366,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-                       page = alloc_pages_node(node,
+                       page = alloc_pages_exact_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
@@ -376,7 +374,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-                       page = alloc_pages_node(node,
+                       page = alloc_pages_exact_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
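/*
 * Editor's note on the two hunks above: alloc_pages_exact_node() is the
 * variant of alloc_pages_node() that trusts the caller to pass a valid node
 * id and skips the "negative nid means local node" fixup; here the id always
 * comes from cpu_to_node(), so the check is unnecessary.  A small sketch of
 * the allocation pattern; the helper name alloc_cpu_local_hits_page() is
 * hypothetical.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

static void *alloc_cpu_local_hits_page(int cpu)
{
	int node = cpu_to_node(cpu);	/* known-valid node id */
	struct page *page;

	/* one zeroed, node-local, order-0 page for this CPU's hit buffer */
	page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	/* GFP_KERNEL pages come from lowmem and are therefore directly mapped */
	return page_address(page);
}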
@@ -444,49 +442,51 @@ void profile_tick(int type)
 
 #ifdef CONFIG_PROC_FS
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <asm/uaccess.h>
-#include <asm/ptrace.h>
 
-static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
-                       int count, int *eof, void *data)
+static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
+{
+       seq_cpumask(m, prof_cpu_mask);
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
 {
-       int len = cpumask_scnprintf(page, count, data);
-       if (count - len < 2)
-               return -EINVAL;
-       len += sprintf(page + len, "\n");
-       return len;
+       return single_open(file, prof_cpu_mask_proc_show, NULL);
 }
 
-static int prof_cpu_mask_write_proc(struct file *file,
-       const char __user *buffer,  unsigned long count, void *data)
+static ssize_t prof_cpu_mask_proc_write(struct file *file,
+       const char __user *buffer, size_t count, loff_t *pos)
 {
-       struct cpumask *mask = data;
-       unsigned long full_count = count, err;
        cpumask_var_t new_value;
+       int err;
 
        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;
 
        err = cpumask_parse_user(buffer, count, new_value);
        if (!err) {
-               cpumask_copy(mask, new_value);
-               err = full_count;
+               cpumask_copy(prof_cpu_mask, new_value);
+               err = count;
        }
        free_cpumask_var(new_value);
        return err;
 }
 
+static const struct file_operations prof_cpu_mask_proc_fops = {
+       .open           = prof_cpu_mask_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .write          = prof_cpu_mask_proc_write,
+};
+
 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
 {
-       struct proc_dir_entry *entry;
-
        /* create /proc/irq/prof_cpu_mask */
-       entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
-       if (!entry)
-               return;
-       entry->data = prof_cpu_mask;
-       entry->read_proc = prof_cpu_mask_read_proc;
-       entry->write_proc = prof_cpu_mask_write_proc;
+       proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops);
 }
 
 /*
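/*
 * Editor's note on the proc conversion above: the old read_proc/write_proc
 * hooks made every file manage its own page buffer, offset and eof flag;
 * with seq_file plus single_open() only ->show (and, optionally, ->write)
 * stays file-specific, while seq_read, seq_lseek and single_release are
 * library code.  A generic sketch of the same shape; the "example_" names
 * are hypothetical, not the prof_cpu_mask handlers above.
 */
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", 42);	/* emit the whole file in one go */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* NULL: no private data handed to example_show() */
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.open		= example_open,
	.read		= seq_read,		/* provided by seq_file */
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registered once, e.g. from an initcall:
 *	proc_create("example", 0444, NULL, &example_proc_fops);
 */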
@@ -568,14 +568,14 @@ static int create_hash_tables(void)
                int node = cpu_to_node(cpu);
                struct page *page;
 
-               page = alloc_pages_node(node,
+               page = alloc_pages_exact_node(node,
                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[1]
                                = (struct profile_hit *)page_address(page);
-               page = alloc_pages_node(node,
+               page = alloc_pages_exact_node(node,
                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                0);
                if (!page)
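/*
 * Editor's note on the hunk above: besides the alloc_pages_exact_node()
 * switch already discussed, these allocations also pass GFP_THISNODE, which
 * makes the request fail rather than fall back to a remote node; the intent
 * is that a remote page would defeat the purpose of node-local hash tables,
 * so the caller unwinds via out_cleanup instead.  A tiny illustrative
 * sketch; the helper name try_node_local_page() is hypothetical.
 */
#include <linux/gfp.h>

static struct page *try_node_local_page(int node)
{
	/* a zeroed order-0 page from this node, or nothing at all */
	return alloc_pages_exact_node(node,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      0);
}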