powerpc/mm: Make clear_fixmap() actually work
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39328da..7393bd7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -39,7 +39,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
 EXPORT_SYMBOL(numa_cpumask_lookup_table);
 EXPORT_SYMBOL(node_data);
 
-static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
 static int min_common_depth;
 static int n_mem_addr_cells, n_mem_size_cells;
 
@@ -90,6 +89,48 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
        return 0;
 }
 
+/*
+ * get_active_region_work_fn - A helper function for get_node_active_region
+ * @start_pfn: start page (inclusive) of region to check
+ * @end_pfn: end page (exclusive) of region to check
+ * @datax: comes in with ->start_pfn set to the value to search for and
+ *	goes out filled with the active range if that range contains it
+ *
+ * Returns 1 if the search value falls within [start_pfn, end_pfn), else 0.
+ */
+static int __init get_active_region_work_fn(unsigned long start_pfn,
+                                       unsigned long end_pfn, void *datax)
+{
+       struct node_active_region *data;
+       data = (struct node_active_region *)datax;
+
+       if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
+               data->start_pfn = start_pfn;
+               data->end_pfn = end_pfn;
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * get_node_active_region - Return active region containing start_pfn
+ * @start_pfn: The page to return the region for
+ * @node_ar: Filled with the active region containing start_pfn;
+ *	returned empty (start_pfn == end_pfn) if none is found
+ */
+static void __init get_node_active_region(unsigned long start_pfn,
+                      struct node_active_region *node_ar)
+{
+       int nid = early_pfn_to_nid(start_pfn);
+
+       node_ar->nid = nid;
+       node_ar->start_pfn = start_pfn;
+       node_ar->end_pfn = start_pfn;
+       work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+}
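
The pair of helpers above is a callback-driven lookup: get_node_active_region() seeds a node_active_region with an empty range, and work_with_active_regions() invokes the work function on each active range of the node until it returns nonzero. A minimal userspace sketch of the same pattern follows; the region table, names, and values are invented for illustration, not kernel API:

#include <stdio.h>

struct active_region { unsigned long start_pfn, end_pfn; };

/* Walk a table of [start, end) ranges and stop at the first one that
 * contains the probe pfn -- the role work_with_active_regions() plays. */
static int find_region(const struct active_region *tbl, int n,
		       unsigned long pfn, struct active_region *out)
{
	int i;

	out->start_pfn = pfn;	/* empty range, as in get_node_active_region() */
	out->end_pfn = pfn;
	for (i = 0; i < n; i++) {
		if (tbl[i].start_pfn <= pfn && tbl[i].end_pfn > pfn) {
			*out = tbl[i];
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	const struct active_region regions[] = {
		{ 0x000, 0x100 }, { 0x200, 0x280 },
	};
	struct active_region ar;

	find_region(regions, 2, 0x240, &ar);
	printf("pfn 0x240 -> [%#lx, %#lx)\n", ar.start_pfn, ar.end_pfn);
	find_region(regions, 2, 0x180, &ar);	/* miss: range stays empty */
	printf("pfn 0x180 -> [%#lx, %#lx)\n", ar.start_pfn, ar.end_pfn);
	return 0;
}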
+
 static void __cpuinit map_cpu_to_node(int cpu, int node)
 {
        numa_cpu_lookup_table[cpu] = node;
@@ -151,6 +192,21 @@ static const int *of_get_associativity(struct device_node *dev)
        return of_get_property(dev, "ibm,associativity", NULL);
 }
 
+/*
+ * Returns the property linux,drconf-usable-memory if
+ * it exists (the property exists only in kexec/kdump kernels,
+ * added by kexec-tools)
+ */
+static const u32 *of_get_usable_memory(struct device_node *memory)
+{
+       const u32 *prop;
+       u32 len;
+       prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
+       if (!prop || len < sizeof(unsigned int))
+               return NULL;
+       return prop;
+}
+
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
@@ -472,12 +528,10 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
-        * having memory holes below the limit.
+        * having memory holes below the limit.  Also, in the case of
+        * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */
 
-       if (! memory_limit)
-               return size;
-
        if (start + size <= lmb_end_of_DRAM())
                return size;
 
@@ -488,14 +542,29 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 }
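
numa_enforce_memory_limit() now clamps against lmb_end_of_DRAM() unconditionally, since both memory_limit and the iommu_is_off case are already folded into end-of-DRAM. The tail of the function lies outside this hunk, so the straddle and fully-above cases in the sketch below are assumptions based on the visible checks; end_of_dram is a stand-in for lmb_end_of_DRAM():

#include <stdio.h>

/* Trim a [start, start+size) range so it never extends past end_of_dram.
 * Values in main() are illustrative. */
static unsigned long enforce_limit(unsigned long start, unsigned long size,
				   unsigned long end_of_dram)
{
	if (start + size <= end_of_dram)	/* fully below the limit */
		return size;
	if (start >= end_of_dram)		/* fully above: nothing usable */
		return 0;
	return end_of_dram - start;		/* straddles: keep the low part */
}

int main(void)
{
	printf("%lu\n", enforce_limit(0x1000, 0x1000, 0x4000));	/* 4096 */
	printf("%lu\n", enforce_limit(0x3000, 0x2000, 0x4000));	/* 4096 */
	printf("%lu\n", enforce_limit(0x5000, 0x1000, 0x4000));	/* 0    */
	return 0;
}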
 
 /*
+ * Reads the counter for a given entry in
+ * linux,drconf-usable-memory property
+ */
+static inline int __init read_usm_ranges(const u32 **usm)
+{
+       /*
+        * For each lmb in ibm,dynamic-memory, a corresponding
+        * entry in the linux,drconf-usable-memory property contains
+        * a counter followed by that many (base, size) pairs.
+        * Read the counter from linux,drconf-usable-memory.
+        */
+       return read_n_cells(n_mem_size_cells, usm);
+}
+
+/*
  * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
  * node.  This assumes n_mem_{addr,size}_cells have been set.
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *dm;
-       unsigned int n, rc;
-       unsigned long lmb_size, size;
+       const u32 *dm, *usm;
+       unsigned int n, rc, ranges, is_kexec_kdump = 0;
+       unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;
 
@@ -511,6 +580,11 @@ static void __init parse_drconf_memory(struct device_node *memory)
        if (rc)
                return;
 
+       /* check if this is a kexec/kdump kernel */
+       usm = of_get_usable_memory(memory);
+       if (usm != NULL)
+               is_kexec_kdump = 1;
+
        for (; n != 0; --n) {
                struct of_drconf_cell drmem;
 
@@ -522,21 +596,31 @@ static void __init parse_drconf_memory(struct device_node *memory)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;
 
-               nid = of_drconf_to_nid_single(&drmem, &aa);
+               base = drmem.base_addr;
+               size = lmb_size;
+               ranges = 1;
 
-               fake_numa_create_new_node(
-                               ((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
+               if (is_kexec_kdump) {
+                       ranges = read_usm_ranges(&usm);
+                       if (!ranges) /* there are no (base, size) pairs */
+                               continue;
+               }
+               do {
+                       if (is_kexec_kdump) {
+                               base = read_n_cells(n_mem_addr_cells, &usm);
+                               size = read_n_cells(n_mem_size_cells, &usm);
+                       }
+                       nid = of_drconf_to_nid_single(&drmem, &aa);
+                       fake_numa_create_new_node(
+                               ((base + size) >> PAGE_SHIFT),
                                           &nid);
-
-               node_set_online(nid);
-
-               size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
-               if (!size)
-                       continue;
-
-               add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
-                                (drmem.base_addr >> PAGE_SHIFT)
-                                + (size >> PAGE_SHIFT));
+                       node_set_online(nid);
+                       sz = numa_enforce_memory_limit(base, size);
+                       if (sz)
+                               add_active_range(nid, base >> PAGE_SHIFT,
+                                                (base >> PAGE_SHIFT)
+                                                + (sz >> PAGE_SHIFT));
+               } while (--ranges);
        }
 }
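
In a kexec/kdump kernel, each LMB's entry in linux,drconf-usable-memory is a counter followed by that many (base, size) pairs, and the do/while above consumes them in order. Below is a standalone sketch of how such a counter-plus-pairs stream decodes; the cell reader and the sample data are simplified stand-ins, since the real code pulls big-endian cells with read_n_cells():

#include <stdio.h>

static unsigned long read_cell(const unsigned long **p)
{
	return *(*p)++;		/* the kernel uses read_n_cells() here */
}

int main(void)
{
	/* Two LMB records: one with 1 usable range, one with 2. */
	const unsigned long stream[] = {
		1, 0x10000000, 0x1000000,
		2, 0x20000000, 0x800000, 0x20A00000, 0x400000,
	};
	const unsigned long *usm = stream;
	int lmb;

	for (lmb = 0; lmb < 2; lmb++) {
		unsigned long ranges = read_cell(&usm);

		if (!ranges)	/* no usable memory in this LMB */
			continue;
		do {
			unsigned long base = read_cell(&usm);
			unsigned long size = read_cell(&usm);

			printf("lmb %d: base=%#lx size=%#lx\n",
			       lmb, base, size);
		} while (--ranges);
	}
	return 0;
}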
 
@@ -738,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
  */
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
 {
+       void *ret;
        int new_nid;
-       unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+       unsigned long ret_paddr;
+
+       ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
        /* retry over all memory */
-       if (!ret)
-               ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+       if (!ret_paddr)
+               ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
-       if (!ret)
-               panic("numa.c: cannot allocate %lu bytes on node %d",
+       if (!ret_paddr)
+               panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);
 
+       ret = __va(ret_paddr);
+
        /*
-        * If the memory came from a previously allocated node, we must
-        * retry with the bootmem allocator.
+        * We initialize the nodes in numeric order: 0, 1, 2...
+        * and hand over control from the LMB allocator to the
+        * bootmem allocator.  If this function is called for
+        * node 5, then we know that all nodes <5 are using the
+        * bootmem allocator instead of the LMB allocator.
+        *
+        * So, check the nid from which this allocation came
+        * and double check to see if we need to use bootmem
+        * instead of the LMB.  We don't free the LMB memory
+        * since it would be useless.
         */
-       new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+       new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
-               ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+               ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);
 
-               if (!ret)
-                       panic("numa.c: cannot allocate %lu bytes on node %d",
-                             size, new_nid);
-
-               ret = __pa(ret);
-
-               dbg("alloc_bootmem %lx %lx\n", ret, size);
+               dbg("alloc_bootmem %p %lx\n", ret, size);
        }
 
-       return (void *)ret;
+       memset(ret, 0, size);
+       return ret;
 }
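
careful_zallocation() thus tries node-local LMB memory, retries anywhere in DRAM, switches to bootmem when the allocation landed on an already-initialized node, and always returns zeroed virtual memory. A hedged userspace restatement of that fallback chain, with malloc-based stubs standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for __lmb_alloc_base(): reports which node the memory landed on. */
static void *alloc_near(int nid, size_t size, int *got_nid)
{
	*got_nid = nid ? nid - 1 : nid;	/* pretend we sometimes miss the node */
	return malloc(size);
}

/* Stand-in for __alloc_bootmem_node(). */
static void *alloc_bootmem_stub(int nid, size_t size)
{
	return malloc(size);
}

static void *careful_zalloc(int nid, size_t size)
{
	int new_nid;
	void *ret = alloc_near(nid, size, &new_nid);

	if (!ret) {
		fprintf(stderr, "cannot allocate %zu bytes for node %d\n",
			size, nid);
		exit(1);
	}
	/* Nodes below nid already run on bootmem, so reallocate there. */
	if (new_nid < nid) {
		void *boot = alloc_bootmem_stub(new_nid, size);
		free(ret);	/* userspace only; the kernel leaks the LMB */
		ret = boot;
	}
	memset(ret, 0, size);	/* the 'z' in zallocation */
	return ret;
}

int main(void)
{
	char *p = careful_zalloc(2, 64);

	printf("first byte: %d\n", p[0]);	/* always 0 */
	free(p);
	return 0;
}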
 
 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -781,10 +873,77 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+       struct pglist_data *node = NODE_DATA(nid);
+       int i;
+
+       for (i = 0; i < lmb.reserved.cnt; i++) {
+               unsigned long physbase = lmb.reserved.region[i].base;
+               unsigned long size = lmb.reserved.region[i].size;
+               unsigned long start_pfn = physbase >> PAGE_SHIFT;
+               unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+               struct node_active_region node_ar;
+               unsigned long node_end_pfn = node->node_start_pfn +
+                                            node->node_spanned_pages;
+
+               /*
+                * Check to make sure that this lmb.reserved area is
+                * within the bounds of the node that we care about.
+                * Checking the nid of the start and end points is not
+                * sufficient because the reserved area could span the
+                * entire node.
+                */
+               if (end_pfn <= node->node_start_pfn ||
+                   start_pfn >= node_end_pfn)
+                       continue;
+
+               get_node_active_region(start_pfn, &node_ar);
+               while (start_pfn < end_pfn &&
+                       node_ar.start_pfn < node_ar.end_pfn) {
+                       unsigned long reserve_size = size;
+                       /*
+                        * if reserved region extends past active region
+                        * then trim size to active region
+                        */
+                       if (end_pfn > node_ar.end_pfn)
+                               reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+                                       - (start_pfn << PAGE_SHIFT);
+                       /*
+                        * Only worry about *this* node, others may not
+                        * yet have valid NODE_DATA().
+                        */
+                       if (node_ar.nid == nid) {
+                               dbg("reserve_bootmem %lx %lx nid=%d\n",
+                                       physbase, reserve_size, node_ar.nid);
+                               reserve_bootmem_node(NODE_DATA(node_ar.nid),
+                                               physbase, reserve_size,
+                                               BOOTMEM_DEFAULT);
+                       }
+                       /*
+                        * if reserved region is contained in the active region
+                        * then done.
+                        */
+                       if (end_pfn <= node_ar.end_pfn)
+                               break;
+
+                       /*
+                        * reserved region extends past the active region
+                        *   get next active region that contains this
+                        *   reserved region
+                        */
+                       start_pfn = node_ar.end_pfn;
+                       physbase = start_pfn << PAGE_SHIFT;
+                       size = size - reserve_size;
+                       get_node_active_region(start_pfn, &node_ar);
+               }
+       }
+}
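
A reserved LMB range can straddle several active regions (and even nodes), so the loop above trims off one region-sized piece at a time and re-resolves the active region for the remainder. The standalone sketch below replays that splitting on an invented region table, working in pfn units:

#include <stdio.h>

struct region { unsigned long start, end; };

static struct region find(const struct region *tbl, int n, unsigned long pfn)
{
	struct region r = { pfn, pfn };	/* empty range on a miss */
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].start <= pfn && tbl[i].end > pfn)
			return tbl[i];
	return r;
}

int main(void)
{
	const struct region active[] = { { 0x00, 0x10 }, { 0x10, 0x30 } };
	unsigned long start = 0x08, end = 0x20;	/* spans both regions */
	struct region ar = find(active, 2, start);

	while (start < end && ar.start < ar.end) {
		unsigned long piece_end = end < ar.end ? end : ar.end;

		printf("reserve [%#lx, %#lx)\n", start, piece_end);
		if (end <= ar.end)
			break;		/* fully consumed by this region */
		start = ar.end;		/* continue in the next region */
		ar = find(active, 2, start);
	}
	return 0;
}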
+
 void __init do_init_bootmem(void)
 {
        int nid;
-       unsigned int i;
 
        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -801,22 +960,26 @@ void __init do_init_bootmem(void)
 
        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
-               unsigned long bootmem_paddr;
+               void *bootmem_vaddr;
                unsigned long bootmap_pages;
 
                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-               /* Allocate the node structure node local if possible */
-               NODE_DATA(nid) = careful_allocation(nid,
+               /*
+                * Allocate the node structure node local if possible
+                *
+                * Be careful moving this around, as it relies on all
+                * previous nodes' bootmem to be initialized and have
+                * all reserved areas marked.
+                */
+               NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
-               NODE_DATA(nid) = __va(NODE_DATA(nid));
-               memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
 
-               NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
+               NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 
@@ -827,47 +990,23 @@ void __init do_init_bootmem(void)
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-               bootmem_paddr = (unsigned long)careful_allocation(nid,
+               bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);
-               memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-               dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+               dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
-               init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+               init_bootmem_node(NODE_DATA(nid),
+                                 __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);
 
                free_bootmem_with_active_regions(nid, end_pfn);
-
-               /* Mark reserved regions on this node */
-               for (i = 0; i < lmb.reserved.cnt; i++) {
-                       unsigned long physbase = lmb.reserved.region[i].base;
-                       unsigned long size = lmb.reserved.region[i].size;
-                       unsigned long start_paddr = start_pfn << PAGE_SHIFT;
-                       unsigned long end_paddr = end_pfn << PAGE_SHIFT;
-
-                       if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
-                           early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
-                               continue;
-
-                       if (physbase < end_paddr &&
-                           (physbase+size) > start_paddr) {
-                               /* overlaps */
-                               if (physbase < start_paddr) {
-                                       size -= start_paddr - physbase;
-                                       physbase = start_paddr;
-                               }
-
-                               if (size > end_paddr - physbase)
-                                       size = end_paddr - physbase;
-
-                               dbg("reserve_bootmem %lx %lx\n", physbase,
-                                   size);
-                               reserve_bootmem_node(NODE_DATA(nid), physbase,
-                                                    size, BOOTMEM_DEFAULT);
-                       }
-               }
-
+               /*
+                * Be very careful about moving this around.  Future
+                * calls to careful_zallocation() depend on this getting
+                * done correctly.
+                */
+               mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }
 }
@@ -901,6 +1040,79 @@ early_param("numa", early_numa);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
+ * Validate the node associated with the memory section we are
+ * trying to add.
+ */
+int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
+                     unsigned long scn_addr)
+{
+       nodemask_t nodes;
+
+       if (*nid < 0 || !node_online(*nid))
+               *nid = any_online_node(NODE_MASK_ALL);
+
+       if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
+               nodes_setall(nodes);
+               while (NODE_DATA(*nid)->node_spanned_pages == 0) {
+                       node_clear(*nid, nodes);
+                       *nid = any_online_node(nodes);
+               }
+
+               return 1;
+       }
+
+       return 0;
+}
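
valid_hot_add_scn() both range-checks the section and guarantees the returned node actually has memory: a memoryless node is cleared from the candidate mask and another online node is chosen. A toy model of that fallback loop, where the node table, mask representation, and sizes are invented:

#include <stdio.h>

#define MAX_NODES 4

int main(void)
{
	/* spanned_pages per node: nodes 0 and 1 are memoryless here */
	unsigned long spanned[MAX_NODES] = { 0, 0, 512, 256 };
	int candidates[MAX_NODES] = { 1, 1, 1, 1 };	/* nodes_setall() */
	int nid = 0;

	while (spanned[nid] == 0) {
		int i;

		candidates[nid] = 0;			/* node_clear() */
		for (i = 0; i < MAX_NODES; i++)		/* any_online_node() */
			if (candidates[i]) {
				nid = i;
				break;
			}
	}
	printf("section assigned to node %d\n", nid);	/* node 2 */
	return 0;
}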
+
+/*
+ * Find the node associated with a hot added memory section represented
+ * by the ibm,dynamic-reconfiguration-memory node.
+ */
+static int hot_add_drconf_scn_to_nid(struct device_node *memory,
+                                    unsigned long scn_addr)
+{
+       const u32 *dm;
+       unsigned int n, rc;
+       unsigned long lmb_size;
+       int default_nid = any_online_node(NODE_MASK_ALL);
+       int nid;
+       struct assoc_arrays aa;
+
+       n = of_get_drconf_memory(memory, &dm);
+       if (!n)
+               return default_nid;
+
+       lmb_size = of_get_lmb_size(memory);
+       if (!lmb_size)
+               return default_nid;
+
+       rc = of_get_assoc_arrays(memory, &aa);
+       if (rc)
+               return default_nid;
+
+       for (; n != 0; --n) {
+               struct of_drconf_cell drmem;
+
+               read_drconf_cell(&drmem, &dm);
+
+               /* skip this block if it is reserved or not assigned to
+                * this partition */
+               if ((drmem.flags & DRCONF_MEM_RESERVED)
+                   || !(drmem.flags & DRCONF_MEM_ASSIGNED))
+                       continue;
+
+               nid = of_drconf_to_nid_single(&drmem, &aa);
+
+               if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
+                                     scn_addr))
+                       return nid;
+       }
+
+       BUG();  /* section address should be found above */
+       return 0;
+}
+
+/*
  * Find the node associated with a hot added memory section.  Section
  * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
  * sections are fully contained within a single LMB.
@@ -908,12 +1120,17 @@ early_param("numa", early_numa);
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
        struct device_node *memory = NULL;
-       nodemask_t nodes;
-       int default_nid = any_online_node(NODE_MASK_ALL);
        int nid;
 
        if (!numa_enabled || (min_common_depth < 0))
-               return default_nid;
+               return any_online_node(NODE_MASK_ALL);
+
+       memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+       if (memory) {
+               nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
+               of_node_put(memory);
+               return nid;
+       }
 
        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start, size;
@@ -932,13 +1149,9 @@ ha_new_range:
                size = read_n_cells(n_mem_size_cells, &memcell_buf);
                nid = of_node_to_nid_single(memory);
 
-               /* Domains not present at boot default to 0 */
-               if (nid < 0 || !node_online(nid))
-                       nid = default_nid;
-
-               if ((scn_addr >= start) && (scn_addr < (start + size))) {
+               if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
                        of_node_put(memory);
-                       goto got_nid;
+                       return nid;
                }
 
                if (--ranges)           /* process all ranges in cell */
@@ -946,14 +1159,5 @@ ha_new_range:
        }
        BUG();  /* section address should be found above */
        return 0;
-
-       /* Temporary code to ensure that returned node is not empty */
-got_nid:
-       nodes_setall(nodes);
-       while (NODE_DATA(nid)->node_spanned_pages == 0) {
-               node_clear(nid, nodes);
-               nid = any_online_node(nodes);
-       }
-       return nid;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */