early_res: Add free_early_partial()
author		Yinghai Lu <yinghai@kernel.org>
		Thu, 25 Feb 2010 02:36:53 +0000 (18:36 -0800)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 26 Feb 2010 07:25:35 +0000 (08:25 +0100)
Add free_early_partial() so that partial areas can be freed during per-cpu first-chunk setup when CONFIG_NO_BOOTMEM is enabled.

pcpu_fc_free() now passes the freed physical range to free_early_partial() instead of free_bootmem(), and the CONFIG_NO_BOOTMEM guard around the partial free in pcpu_embed_first_chunk() is no longer needed.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
LKML-Reference: <4B85E245.5030001@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/setup_percpu.c
include/linux/early_res.h
kernel/early_res.c
mm/percpu.c

arch/x86/kernel/setup_percpu.c
index 35abcb8..ef6370b 100644
@@ -137,7 +137,13 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
+#ifdef CONFIG_NO_BOOTMEM
+       u64 start = __pa(ptr);
+       u64 end = start + size;
+       free_early_partial(start, end);
+#else
        free_bootmem(__pa(ptr), size);
+#endif
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
include/linux/early_res.h
index 50f7663..29c09f5 100644
@@ -5,6 +5,7 @@
 extern void reserve_early(u64 start, u64 end, char *name);
 extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
 extern void free_early(u64 start, u64 end);
+void free_early_partial(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 
 void reserve_early_without_check(u64 start, u64 end, char *name);
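
A minimal sketch of how the new declaration is intended to be used, mirroring the pcpu_fc_free() hunk above: reserve a generously sized early region, then hand back the unused tail once the real requirement is known. This is illustrative only and not part of the patch; the helper name example_trim_tail and the "example" label are hypothetical.

	#include <linux/types.h>
	#include <linux/early_res.h>

	/* Illustrative only: return the unused tail of an early reservation. */
	static void __init example_trim_tail(u64 base, u64 reserved, u64 used)
	{
		reserve_early(base, base + reserved, "example");
		/* ... consume only [base, base + used) ... */
		free_early_partial(base + used, base + reserved);
	}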
kernel/early_res.c
index aa5494a..9ab11cd 100644
@@ -61,6 +61,40 @@ static void __init drop_range(int i)
        early_res_count--;
 }
 
+static void __init drop_range_partial(int i, u64 start, u64 end)
+{
+       u64 common_start, common_end;
+       u64 old_start, old_end;
+
+       old_start = early_res[i].start;
+       old_end = early_res[i].end;
+       common_start = max(old_start, start);
+       common_end = min(old_end, end);
+
+       /* no overlap ? */
+       if (common_start >= common_end)
+               return;
+
+       if (old_start < common_start) {
+               /* make head segment */
+               early_res[i].end = common_start;
+               if (old_end > common_end) {
+                       /* add another for left over on tail */
+                       reserve_early_without_check(common_end, old_end,
+                                        early_res[i].name);
+               }
+               return;
+       } else {
+               if (old_end > common_end) {
+                       /* reuse the entry for tail left */
+                       early_res[i].start = common_end;
+                       return;
+               }
+               /* all covered */
+               drop_range(i);
+       }
+}
+
 /*
  * Split any existing ranges that:
  *  1) are marked 'overlap_ok', and
@@ -284,6 +318,27 @@ void __init free_early(u64 start, u64 end)
        drop_range(i);
 }
 
+void __init free_early_partial(u64 start, u64 end)
+{
+       struct early_res *r;
+       int i;
+
+try_next:
+       i = find_overlapped_early(start, end);
+       if (i >= max_early_res)
+               return;
+
+       r = &early_res[i];
+       /* hole ? */
+       if (r->end >= end && r->start <= start) {
+               drop_range_partial(i, start, end);
+               return;
+       }
+
+       drop_range_partial(i, start, end);
+       goto try_next;
+}
+
 #ifdef CONFIG_NO_BOOTMEM
 static void __init subtract_early_res(struct range *range, int az)
 {
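
To see the effect of drop_range_partial() in isolation, here is a standalone userspace sketch that mirrors its head/tail splitting of a single early_res[] slot. It is illustrative only; subtract_range(), struct seg and the sample addresses are hypothetical and not part of the patch.

	#include <stdio.h>
	#include <stdint.h>

	struct seg { uint64_t start, end; };

	/*
	 * Subtract [start, end) from reservation r, as drop_range_partial()
	 * does: keep a head segment, a tail segment, both, or nothing.
	 * Returns how many segments survive (0, 1 or 2) via out[].
	 */
	static int subtract_range(struct seg r, uint64_t start, uint64_t end,
				  struct seg out[2])
	{
		uint64_t cs = r.start > start ? r.start : start;  /* common_start */
		uint64_t ce = r.end < end ? r.end : end;          /* common_end   */
		int n = 0;

		if (cs >= ce) {                 /* no overlap: keep as-is */
			out[n++] = r;
			return n;
		}
		if (r.start < cs)               /* head survives */
			out[n++] = (struct seg){ r.start, cs };
		if (r.end > ce)                 /* tail survives */
			out[n++] = (struct seg){ ce, r.end };
		return n;                       /* 0 means fully covered */
	}

	int main(void)
	{
		struct seg out[2];
		/* reservation 0x1000-0x5000, free the middle 0x2000-0x3000 */
		int n = subtract_range((struct seg){ 0x1000, 0x5000 },
				       0x2000, 0x3000, out);
		for (int i = 0; i < n; i++)
			printf("kept: %#llx-%#llx\n",
			       (unsigned long long)out[i].start,
			       (unsigned long long)out[i].end);
		return 0;
	}

Freeing the middle of a reservation leaves both a head and a tail; that is the case where drop_range_partial() calls reserve_early_without_check() to create the extra tail entry, while free_early_partial() keeps looping until find_overlapped_early() reports no more overlapping entries.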
mm/percpu.c
index 841defe..083e7c9 100644
@@ -1929,10 +1929,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
                        }
                        /* copy and return the unused part */
                        memcpy(ptr, __per_cpu_load, ai->static_size);
-#ifndef CONFIG_NO_BOOTMEM
-                       /* fix partial free ! */
                        free_fn(ptr + size_sum, ai->unit_size - size_sum);
-#endif
                }
        }