trivial: small cleanups
mm/memory_hotplug.c
index 7d25cc1..7469c50 100644
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/pagevec.h>
+#include <linux/writeback.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
+#include <linux/cpuset.h>
+#include <linux/delay.h>
+#include <linux/migrate.h>
+#include <linux/page-isolation.h>
 
 #include <asm/tlbflush.h>
 
-extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-                         unsigned long size);
+/* add this memory to iomem resource */
+static struct resource *register_memory_resource(u64 start, u64 size)
+{
+       struct resource *res;
+       res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+       BUG_ON(!res);
+
+       res->name = "System RAM";
+       res->start = start;
+       res->end = start + size - 1;
+       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       if (request_resource(&iomem_resource, res) < 0) {
+               printk("System RAM resource %llx - %llx cannot be added\n",
+               (unsigned long long)res->start, (unsigned long long)res->end);
+               kfree(res);
+               res = NULL;
+       }
+       return res;
+}
+
+static void release_memory_resource(struct resource *res)
+{
+       if (!res)
+               return;
+       release_resource(res);
+       kfree(res);
+       return;
+}
+
+
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
@@ -34,24 +68,26 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        int zone_type;
 
        zone_type = zone - pgdat->node_zones;
-       if (!populated_zone(zone)) {
+       if (!zone->wait_table) {
                int ret = 0;
-               ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+               ret = init_currently_empty_zone(zone, phys_start_pfn,
+                                               nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
-       memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
-       zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+       memmap_init_zone(nr_pages, nid, zone_type,
+                        phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
 }
 
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-                                 int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
        int nr_pages = PAGES_PER_SECTION;
        int ret;
 
+       if (pfn_valid(phys_start_pfn))
+               return -EEXIST;
+
        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
        if (ret < 0)
@@ -85,7 +121,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
                /*
-                * EEXIST is finally dealed with by ioresource collision
+                * EEXIST is finally dealt with by ioresource collision
                 * check. see add_memory() => register_memory_resource()
                 * Warning will be printed if there is collision.
                 */
@@ -128,17 +164,47 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
                                        pgdat->node_start_pfn;
 }
 
-int online_pages(unsigned long pfn, unsigned long nr_pages)
+static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
+                       void *arg)
 {
        unsigned long i;
+       unsigned long onlined_pages = *(unsigned long *)arg;
+       struct page *page;
+       if (PageReserved(pfn_to_page(start_pfn)))
+               for (i = 0; i < nr_pages; i++) {
+                       page = pfn_to_page(start_pfn + i);
+                       online_page(page);
+                       onlined_pages++;
+               }
+       *(unsigned long *)arg = onlined_pages;
+       return 0;
+}
+
+
+int online_pages(unsigned long pfn, unsigned long nr_pages)
+{
        unsigned long flags;
        unsigned long onlined_pages = 0;
-       struct resource res;
-       u64 section_end;
-       unsigned long start_pfn;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
+       int nid;
+       int ret;
+       struct memory_notify arg;
+
+       arg.start_pfn = pfn;
+       arg.nr_pages = nr_pages;
+       arg.status_change_nid = -1;
 
+       nid = page_to_nid(pfn_to_page(pfn));
+       if (node_present_pages(nid) == 0)
+               arg.status_change_nid = nid;
+
+       ret = memory_notify(MEM_GOING_ONLINE, &arg);
+       ret = notifier_to_errno(ret);
+       if (ret) {
+               memory_notify(MEM_CANCEL_ONLINE, &arg);
+               return ret;
+       }
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
@@ -158,38 +224,28 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;
 
-       res.start = (u64)pfn << PAGE_SHIFT;
-       res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
-       res.flags = IORESOURCE_MEM; /* we just need system ram */
-       section_end = res.end;
-
-       while (find_next_system_ram(&res) >= 0) {
-               start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
-               nr_pages = (unsigned long)
-                           ((res.end + 1 - res.start) >> PAGE_SHIFT);
-
-               if (PageReserved(pfn_to_page(start_pfn))) {
-                       /* this region's page is not onlined now */
-                       for (i = 0; i < nr_pages; i++) {
-                               struct page *page = pfn_to_page(start_pfn + i);
-                               online_page(page);
-                               onlined_pages++;
-                       }
-               }
-
-               res.start = res.end + 1;
-               res.end = section_end;
-       }
+       walk_memory_resource(pfn, nr_pages, &onlined_pages,
+               online_pages_range);
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
 
        setup_per_zone_pages_min();
+       if (onlined_pages) {
+               kswapd_run(zone_to_nid(zone));
+               node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+       }
 
        if (need_zonelists_rebuild)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
+       writeback_set_ratelimit();
+
+       if (onlined_pages)
+               memory_notify(MEM_ONLINE, &arg);
+
        return 0;
 }
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
 static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
 {
@@ -219,43 +275,23 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
        return;
 }
 
-/* add this memory to iomem resource */
-static int register_memory_resource(u64 start, u64 size)
-{
-       struct resource *res;
-       int ret = 0;
-       res = kzalloc(sizeof(struct resource), GFP_KERNEL);
-       BUG_ON(!res);
-
-       res->name = "System RAM";
-       res->start = start;
-       res->end = start + size - 1;
-       res->flags = IORESOURCE_MEM;
-       if (request_resource(&iomem_resource, res) < 0) {
-               printk("System RAM resource %llx - %llx cannot be added\n",
-               (unsigned long long)res->start, (unsigned long long)res->end);
-               kfree(res);
-               ret = -EEXIST;
-       }
-       return ret;
-}
-
-
 
 int add_memory(int nid, u64 start, u64 size)
 {
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
+       struct resource *res;
        int ret;
 
+       res = register_memory_resource(start, size);
+       if (!res)
+               return -EEXIST;
+
        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat)
                        return -ENOMEM;
                new_pgdat = 1;
-               ret = kswapd_run(nid);
-               if (ret)
-                       goto error;
        }
 
        /* call arch's memory hotadd */
@@ -267,6 +303,8 @@ int add_memory(int nid, u64 start, u64 size)
        /* we online node here. we can't roll back from here. */
        node_set_online(nid);
 
+       cpuset_track_online_nodes();
+
        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
@@ -277,15 +315,290 @@ int add_memory(int nid, u64 start, u64 size)
                BUG_ON(ret);
        }
 
-       /* register this memory as resource */
-       ret = register_memory_resource(start, size);
-
        return ret;
 error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
+       if (res)
+               release_memory_resource(res);
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+/*
+ * Confirm that all pages in a range [start, end) belong to the same zone.
+ */
+static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+{
+       unsigned long pfn;
+       struct zone *zone = NULL;
+       struct page *page;
+       int i;
+       for (pfn = start_pfn;
+            pfn < end_pfn;
+            pfn += MAX_ORDER_NR_PAGES) {
+               i = 0;
+               /* This is just a CONFIG_HOLES_IN_ZONE check. */
+               while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
+                       i++;
+               if (i == MAX_ORDER_NR_PAGES)
+                       continue;
+               page = pfn_to_page(pfn + i);
+               if (zone && page_zone(page) != zone)
+                       return 0;
+               zone = page_zone(page);
+       }
+       return 1;
+}
+
+/*
+ * Scanning pfns is much easier than scanning the LRU list.
+ * Scan pfns from start to end and return the first pfn of an LRU page.
+ */
+int scan_lru_pages(unsigned long start, unsigned long end)
+{
+       unsigned long pfn;
+       struct page *page;
+       for (pfn = start; pfn < end; pfn++) {
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+                       if (PageLRU(page))
+                               return pfn;
+               }
+       }
+       return 0;
+}
+
+static struct page *
+hotremove_migrate_alloc(struct page *page,
+                       unsigned long private,
+                       int **x)
+{
+       /* This should be improoooooved!! */
+       return alloc_page(GFP_HIGHUSER_PAGECACHE);
+}
+
+
+#define NR_OFFLINE_AT_ONCE_PAGES       (256)
+static int
+do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+       unsigned long pfn;
+       struct page *page;
+       int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
+       int not_managed = 0;
+       int ret = 0;
+       LIST_HEAD(source);
+
+       for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
+               if (!pfn_valid(pfn))
+                       continue;
+               page = pfn_to_page(pfn);
+               if (!page_count(page))
+                       continue;
+               /*
+                * We can skip free pages. And we can only deal with pages on
+                * LRU.
+                */
+               ret = isolate_lru_page(page, &source);
+               if (!ret) { /* Success */
+                       move_pages--;
+               } else {
+                       /* Because we don't have the big zone->lock, we should
+                          check this again here. */
+                       if (page_count(page))
+                               not_managed++;
+#ifdef CONFIG_DEBUG_VM
+                       printk(KERN_INFO "removing from LRU failed"
+                                        " %lx/%d/%lx\n",
+                               pfn, page_count(page), page->flags);
+#endif
+               }
+       }
+       ret = -EBUSY;
+       if (not_managed) {
+               if (!list_empty(&source))
+                       putback_lru_pages(&source);
+               goto out;
+       }
+       ret = 0;
+       if (list_empty(&source))
+               goto out;
+       /* this function returns # of failed pages */
+       ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+
+out:
+       return ret;
+}
+
+/*
+ * remove from free_area[] and mark all as Reserved.
+ */
+static int
+offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
+                       void *data)
+{
+       __offline_isolated_pages(start, start + nr_pages);
+       return 0;
+}
+
+static void
+offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
+{
+       walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
+                               offline_isolated_pages_cb);
+}
+
+/*
+ * Check that all pages in the range, recorded as a memory resource, are isolated.
+ */
+static int
+check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
+                       void *data)
+{
+       int ret;
+       long offlined = *(long *)data;
+       ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
+       offlined = nr_pages;
+       if (!ret)
+               *(long *)data += offlined;
+       return ret;
+}
+
+static long
+check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
+{
+       long offlined = 0;
+       int ret;
+
+       ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
+                       check_pages_isolated_cb);
+       if (ret < 0)
+               offlined = (long)ret;
+       return offlined;
+}
+
+int offline_pages(unsigned long start_pfn,
+                 unsigned long end_pfn, unsigned long timeout)
+{
+       unsigned long pfn, nr_pages, expire;
+       long offlined_pages;
+       int ret, drain, retry_max, node;
+       struct zone *zone;
+       struct memory_notify arg;
+
+       BUG_ON(start_pfn >= end_pfn);
+       /* at least, alignment against pageblock is necessary */
+       if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
+               return -EINVAL;
+       if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
+               return -EINVAL;
+       /* This makes hotplug much easier... and readable.
+          We assume this for now. */
+       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+               return -EINVAL;
+
+       zone = page_zone(pfn_to_page(start_pfn));
+       node = zone_to_nid(zone);
+       nr_pages = end_pfn - start_pfn;
+
+       /* set above range as isolated */
+       ret = start_isolate_page_range(start_pfn, end_pfn);
+       if (ret)
+               return ret;
+
+       arg.start_pfn = start_pfn;
+       arg.nr_pages = nr_pages;
+       arg.status_change_nid = -1;
+       if (nr_pages >= node_present_pages(node))
+               arg.status_change_nid = node;
+
+       ret = memory_notify(MEM_GOING_OFFLINE, &arg);
+       ret = notifier_to_errno(ret);
+       if (ret)
+               goto failed_removal;
+
+       pfn = start_pfn;
+       expire = jiffies + timeout;
+       drain = 0;
+       retry_max = 5;
+repeat:
+       /* start memory hot removal */
+       ret = -EAGAIN;
+       if (time_after(jiffies, expire))
+               goto failed_removal;
+       ret = -EINTR;
+       if (signal_pending(current))
+               goto failed_removal;
+       ret = 0;
+       if (drain) {
+               lru_add_drain_all();
+               flush_scheduled_work();
+               cond_resched();
+               drain_all_pages();
+       }
+
+       pfn = scan_lru_pages(start_pfn, end_pfn);
+       if (pfn) { /* We have page on LRU */
+               ret = do_migrate_range(pfn, end_pfn);
+               if (!ret) {
+                       drain = 1;
+                       goto repeat;
+               } else {
+                       if (ret < 0)
+                               if (--retry_max == 0)
+                                       goto failed_removal;
+                       yield();
+                       drain = 1;
+                       goto repeat;
+               }
+       }
+       /* drain all zones' LRU pagevecs; this is asynchronous... */
+       lru_add_drain_all();
+       flush_scheduled_work();
+       yield();
+       /* drain pcp pages; this is synchronous */
+       drain_all_pages();
+       /* check again */
+       offlined_pages = check_pages_isolated(start_pfn, end_pfn);
+       if (offlined_pages < 0) {
+               ret = -EBUSY;
+               goto failed_removal;
+       }
+       printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
+       /* Ok, all of our target is isolated.
+          We cannot do rollback at this point. */
+       offline_isolated_pages(start_pfn, end_pfn);
+       /* reset pagetype flags and make the migrate type MOVABLE */
+       undo_isolate_page_range(start_pfn, end_pfn);
+       /* removal success */
+       zone->present_pages -= offlined_pages;
+       zone->zone_pgdat->node_present_pages -= offlined_pages;
+       totalram_pages -= offlined_pages;
+       num_physpages -= offlined_pages;
+
+       vm_total_pages = nr_free_pagecache_pages();
+       writeback_set_ratelimit();
+
+       memory_notify(MEM_OFFLINE, &arg);
+       return 0;
+
+failed_removal:
+       printk(KERN_INFO "memory offlining %lx to %lx failed\n",
+               start_pfn, end_pfn);
+       memory_notify(MEM_CANCEL_OFFLINE, &arg);
+       /* pushback to free area */
+       undo_isolate_page_range(start_pfn, end_pfn);
+
+       return ret;
+}
+#else
+int remove_memory(u64 start, u64 size)
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
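
Not part of the patch itself, but for context: the MEM_GOING_ONLINE/MEM_ONLINE and
MEM_GOING_OFFLINE/MEM_OFFLINE notifications raised by online_pages() and offline_pages()
above are delivered through the memory hotplug notifier chain. The minimal sketch below
shows how a subsystem might consume them; it assumes the hotplug_memory_notifier()
registration helper from <linux/memory.h>, and the function names are illustrative only.

#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int example_mem_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;	/* filled in by memory_notify() */

	switch (action) {
	case MEM_GOING_ONLINE:
		/* Returning NOTIFY_BAD here makes online_pages() bail out
		   via notifier_to_errno() before any page is onlined. */
		printk(KERN_INFO "about to online %lu pages at pfn %#lx\n",
		       mn->nr_pages, mn->start_pfn);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/* mn->status_change_nid is >= 0 when a node just gained its
		   first, or is losing its last, present pages; -1 otherwise. */
		break;
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	hotplug_memory_notifier(example_mem_callback, 0);
	return 0;
}

The MEM_CANCEL_ONLINE and MEM_CANCEL_OFFLINE events, sent on the failure paths shown in
the patch, let such a client undo whatever it prepared for the corresponding GOING_* event.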