[PATCH] mm: account for holes, memmap and DMA reserve when sizing zones
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 26c9939..4f59d90 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -104,6 +104,7 @@ int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
+static unsigned long __initdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
@@ -130,6 +131,10 @@ unsigned long __meminitdata nr_all_pages;
   int __initdata nr_nodemap_entries;
   unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
   unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
+  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #ifdef CONFIG_DEBUG_VM
@@ -937,7 +942,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
         */
        do {
                zone = *z;
-               if (unlikely((gfp_mask & __GFP_THISNODE) &&
+               if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
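
For reference, NUMA_BUILD is a compile-time constant (defined in include/linux/kernel.h in this era), so on !CONFIG_NUMA kernels the condition folds to zero and the compiler drops the __GFP_THISNODE check entirely, while the code is still type-checked on every build. A minimal sketch of the definition:

	#ifdef CONFIG_NUMA
	#define NUMA_BUILD 1
	#else
	#define NUMA_BUILD 0
	#endif

The same constant replaces the #ifdef/#else around show_node() in the next hunk.
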
@@ -1251,14 +1256,12 @@ unsigned int nr_free_pagecache_pages(void)
 {
        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
 }
-#ifdef CONFIG_NUMA
-static void show_node(struct zone *zone)
+
+static inline void show_node(struct zone *zone)
 {
-       printk("Node %ld ", zone_to_nid(zone));
+       if (NUMA_BUILD)
+               printk("Node %ld ", zone_to_nid(zone));
 }
-#else
-#define show_node(zone)        do { } while (0)
-#endif
 
 void si_meminfo(struct sysinfo *val)
 {
@@ -1300,34 +1303,30 @@ void si_meminfo_node(struct sysinfo *val, int nid)
  */
 void show_free_areas(void)
 {
-       int cpu, temperature;
+       int cpu;
        unsigned long active;
        unsigned long inactive;
        unsigned long free;
        struct zone *zone;
 
        for_each_zone(zone) {
-               show_node(zone);
-               printk("%s per-cpu:", zone->name);
-
-               if (!populated_zone(zone)) {
-                       printk(" empty\n");
+               if (!populated_zone(zone))
                        continue;
-               } else
-                       printk("\n");
+
+               show_node(zone);
+               printk("%s per-cpu:\n", zone->name);
 
                for_each_online_cpu(cpu) {
                        struct per_cpu_pageset *pageset;
 
                        pageset = zone_pcp(zone, cpu);
 
-                       for (temperature = 0; temperature < 2; temperature++)
-                               printk("cpu %d %s: high %d, batch %d used:%d\n",
-                                       cpu,
-                                       temperature ? "cold" : "hot",
-                                       pageset->pcp[temperature].high,
-                                       pageset->pcp[temperature].batch,
-                                       pageset->pcp[temperature].count);
+                       printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
+                              "Cold: hi:%5d, btch:%4d usd:%4d\n",
+                              cpu, pageset->pcp[0].high,
+                              pageset->pcp[0].batch, pageset->pcp[0].count,
+                              pageset->pcp[1].high, pageset->pcp[1].batch,
+                              pageset->pcp[1].count);
                }
        }
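
With this change each CPU's hot and cold pcp state prints on a single line; illustrative output (values made up):

	CPU    2: Hot: hi:  186, btch:  31 usd:  24   Cold: hi:   62, btch:  15 usd:   9
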
 
@@ -1349,6 +1348,9 @@ void show_free_areas(void)
        for_each_zone(zone) {
                int i;
 
+               if (!populated_zone(zone))
+                       continue;
+
                show_node(zone);
                printk("%s"
                        " free:%lukB"
@@ -1381,12 +1383,11 @@ void show_free_areas(void)
        for_each_zone(zone) {
                unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
+               if (!populated_zone(zone))
+                       continue;
+
                show_node(zone);
                printk("%s: ", zone->name);
-               if (!populated_zone(zone)) {
-                       printk("empty\n");
-                       continue;
-               }
 
                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
@@ -1590,7 +1591,7 @@ static int __meminit __build_all_zonelists(void *dummy)
 void __meminit build_all_zonelists(void)
 {
        if (system_state == SYSTEM_BOOTING) {
-               __build_all_zonelists(0);
+               __build_all_zonelists(NULL);
                cpuset_init_current_mems_allowed();
        } else {
                /* we have to stop all cpus to guarantee there is no user
@@ -1828,6 +1829,9 @@ static int __cpuinit process_zones(int cpu)
 
        for_each_zone(zone) {
 
+               if (!populated_zone(zone))
+                       continue;
+
                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                         GFP_KERNEL, cpu_to_node(cpu));
                if (!zone_pcp(zone, cpu))
@@ -2094,6 +2098,62 @@ void __init sparse_memory_present_with_active_regions(int nid)
 }
 
 /**
+ * push_node_boundaries - Push node boundaries to at least the requested boundary
+ * @nid: The nid of the node to push the boundary for
+ * @start_pfn: The start pfn of the node
+ * @end_pfn: The end pfn of the node
+ *
+ * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
+ * time. Specifically, on x86_64, SRAT will report ranges that can potentially
+ * be hotplugged even though no physical memory exists. This function allows
+ * an arch to push out the node boundaries so mem_map is allocated that can
+ * be used later.
+ */
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+void __init push_node_boundaries(unsigned int nid,
+               unsigned long start_pfn, unsigned long end_pfn)
+{
+       printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+                       nid, start_pfn, end_pfn);
+
+       /* Initialise the boundary for this node if necessary */
+       if (node_boundary_end_pfn[nid] == 0)
+               node_boundary_start_pfn[nid] = -1UL;
+
+       /* Update the boundaries */
+       if (node_boundary_start_pfn[nid] > start_pfn)
+               node_boundary_start_pfn[nid] = start_pfn;
+       if (node_boundary_end_pfn[nid] < end_pfn)
+               node_boundary_end_pfn[nid] = end_pfn;
+}
+
+/* If necessary, push the node boundary out for reserve hotadd */
+static void __init account_node_boundary(unsigned int nid,
+               unsigned long *start_pfn, unsigned long *end_pfn)
+{
+       printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+                       nid, *start_pfn, *end_pfn);
+
+       /* Return if boundary information has not been provided */
+       if (node_boundary_end_pfn[nid] == 0)
+               return;
+
+       /* Check the boundaries and update if necessary */
+       if (node_boundary_start_pfn[nid] < *start_pfn)
+               *start_pfn = node_boundary_start_pfn[nid];
+       if (node_boundary_end_pfn[nid] > *end_pfn)
+               *end_pfn = node_boundary_end_pfn[nid];
+}
+#else
+void __init push_node_boundaries(unsigned int nid,
+               unsigned long start_pfn, unsigned long end_pfn) {}
+
+static void __init account_node_boundary(unsigned int nid,
+               unsigned long *start_pfn, unsigned long *end_pfn) {}
+#endif
+
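
An architecture opts in by calling push_node_boundaries() while parsing its affinity tables, before the zones are sized. A hypothetical caller (the function name and its pfn parameters are illustrative, not part of this patch):

	/* e.g. from x86_64 SRAT parsing of a hotpluggable range */
	static void __init mark_hotplug_range(int nid,
			unsigned long start_pfn, unsigned long end_pfn)
	{
		/* No memory present yet; grow the node so mem_map covers it */
		push_node_boundaries(nid, start_pfn, end_pfn);
	}

get_pfn_range_for_nid() then applies the pushed boundary via account_node_boundary(), as wired up below.
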
+
+/**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
  * @start_pfn: Passed by reference. On return, it will have the node start_pfn
@@ -2120,6 +2180,9 @@ void __init get_pfn_range_for_nid(unsigned int nid,
                printk(KERN_WARNING "Node %u active with no memory\n", nid);
                *start_pfn = 0;
        }
+
+       /* Push the node boundaries out if requested */
+       account_node_boundary(nid, start_pfn, end_pfn);
 }
 
 /*
@@ -2167,6 +2230,10 @@ unsigned long __init __absent_pages_in_range(int nid,
        if (i == -1)
                return 0;
 
+       /* Account for ranges before physical memory on this node */
+       if (early_node_map[i].start_pfn > range_start_pfn)
+               hole_pages = early_node_map[i].start_pfn - range_start_pfn;
+
        prev_end_pfn = early_node_map[i].start_pfn;
 
        /* Find all holes for the zone within the node */
@@ -2188,6 +2255,11 @@ unsigned long __init __absent_pages_in_range(int nid,
                prev_end_pfn = early_node_map[i].end_pfn;
        }
 
+       /* Account for ranges past physical memory on this node */
+       if (range_end_pfn > prev_end_pfn)
+               hole_pages += range_end_pfn -
+                               max(range_start_pfn, prev_end_pfn);
+
        return hole_pages;
 }
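
A worked example of the two new adjustments: suppose a node's early_node_map holds PFN ranges 1000-2000 and 3000-4000 and the queried range is 0-5000. The pre-existing loop only counts the 2000-3000 gap; the additions also count 1000 pages before the first range and 1000 past the last, so __absent_pages_in_range() now returns 3000 absent pages instead of 1000.
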
 
@@ -2209,9 +2281,30 @@ unsigned long __init zone_absent_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long *ignored)
 {
-       return __absent_pages_in_range(nid,
-                               arch_zone_lowest_possible_pfn[zone_type],
-                               arch_zone_highest_possible_pfn[zone_type]);
+       unsigned long node_start_pfn, node_end_pfn;
+       unsigned long zone_start_pfn, zone_end_pfn;
+
+       get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
+       zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
+                                                       node_start_pfn);
+       zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
+                                                       node_end_pfn);
+
+       return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
+}
+
+/* Return the zone index a PFN is in */
+int memmap_zone_idx(struct page *lmem_map)
+{
+       int i;
+       unsigned long phys_addr = virt_to_phys(lmem_map);
+       unsigned long pfn = phys_addr >> PAGE_SHIFT;
+
+       for (i = 0; i < MAX_NR_ZONES; i++)
+               if (pfn < arch_zone_highest_possible_pfn[i])
+                       break;
+
+       return i;
 }
 #else
 static inline unsigned long zone_spanned_pages_in_node(int nid,
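
memmap_zone_idx() classifies where a mem_map chunk itself resides by walking the zone ceilings upwards. For illustration, with hypothetical ceilings arch_zone_highest_possible_pfn = { 4096, 229376, ... }, an lmem_map array sitting at PFN 2000 falls under the first ceiling and yields index 0 (ZONE_DMA), while one at PFN 100000 yields index 1.
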
@@ -2230,6 +2323,11 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 
        return zholes_size[zone_type];
 }
+
+static inline int memmap_zone_idx(struct page *lmem_map)
+{
+       return MAX_NR_ZONES;
+}
 #endif
 
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
@@ -2274,12 +2372,35 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
        
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
-               unsigned long size, realsize;
+               unsigned long size, realsize, memmap_pages;
 
                size = zone_spanned_pages_in_node(nid, j, zones_size);
                realsize = size - zone_absent_pages_in_node(nid, j,
                                                                zholes_size);
 
+               /*
+                * Adjust realsize so that it accounts for how much memory
+                * is used by this zone for memmap. This affects the watermark
+                * and per-cpu initialisations
+                */
+               memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+               if (realsize >= memmap_pages) {
+                       realsize -= memmap_pages;
+                       printk(KERN_DEBUG
+                               "  %s zone: %lu pages used for memmap\n",
+                               zone_names[j], memmap_pages);
+               } else
+                       printk(KERN_WARNING
+                               "  %s zone: %lu pages exceeds realsize %lu\n",
+                               zone_names[j], memmap_pages, realsize);
+
+               /* Account for reserved DMA pages */
+               if (j == ZONE_DMA && realsize > dma_reserve) {
+                       realsize -= dma_reserve;
+                       printk(KERN_DEBUG "  DMA zone: %lu pages reserved\n",
+                                                               dma_reserve);
+               }
+
                if (!is_highmem_idx(j))
                        nr_kernel_pages += realsize;
                nr_all_pages += realsize;
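
For a sense of scale, assuming 4K pages and a (hypothetical) 32-byte struct page, a zone spanning 262144 pages (1GB) pays

	memmap_pages = (262144 * 32) >> 12 = 2048 pages (8MB)

for its memmap, which is now subtracted from realsize so that watermarks and per-cpu batch sizes reflect usable memory only.
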
@@ -2287,6 +2408,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                zone->spanned_pages = size;
                zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
+               zone->node = nid;
                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
                                                / 100;
                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
@@ -2468,6 +2590,10 @@ void __init remove_all_active_ranges()
 {
        memset(early_node_map, 0, sizeof(early_node_map));
        nr_nodemap_entries = 0;
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+       memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
+       memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 }
 
 /* Compare two active node_active_regions */
@@ -2596,6 +2722,21 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 }
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
+/**
+ * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
+ * @new_dma_reserve - The number of pages to mark reserved
+ *
+ * The per-cpu batchsize and zone watermarks are determined by present_pages.
+ * In the DMA zone, a significant percentage may be consumed by kernel image
+ * and other unfreeable allocations which can skew the watermarks badly. This
+ * function may optionally be used to account for unfreeable pages in
+ * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize.
+ */
+void __init set_dma_reserve(unsigned long new_dma_reserve)
+{
+       dma_reserve = new_dma_reserve;
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 static bootmem_data_t contig_bootmem_data;
 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
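
A hypothetical arch-side caller of set_dma_reserve() (the variable below is illustrative; the arch is expected to know how many ZONE_DMA pages it consumed via bootmem reservations, the kernel image and so on):

	/* arch setup code, before free_area_init_nodes() runs */
	unsigned long dma_pages_reserved = /* arch-specific count */ 1024;
	set_dma_reserve(dma_pages_reserved);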