diff --git a/mm/vmscan.c b/mm/vmscan.c
index bc0f8db..79c8098 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -262,27 +262,6 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 	return ret;
 }
 
-/* Called without lock on whether page is mapped, so answer is unstable */
-static inline int page_mapping_inuse(struct page *page)
-{
-	struct address_space *mapping;
-
-	/* Page is in somebody's page tables. */
-	if (page_mapped(page))
-		return 1;
-
-	/* Be more reluctant to reclaim swapcache than pagecache */
-	if (PageSwapCache(page))
-		return 1;
-
-	mapping = page_mapping(page);
-	if (!mapping)
-		return 0;
-
-	/* File is mmap'd by somebody? */
-	return mapping_mapped(mapping);
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
@@ -579,6 +558,65 @@ redo:
 		put_page(page);		/* drop ref from isolate */
 }
 
+enum page_references {
+	PAGEREF_RECLAIM,
+	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_KEEP,
+	PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+						  struct scan_control *sc)
+{
+	int referenced_ptes, referenced_page;
+	unsigned long vm_flags;
+
+	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	referenced_page = TestClearPageReferenced(page);
+
+	/* Lumpy reclaim - ignore references */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		return PAGEREF_RECLAIM;
+
+	/*
+	 * Mlock lost the isolation race with us. Let try_to_unmap()
+	 * move the page to the unevictable list.
+	 */
+	if (vm_flags & VM_LOCKED)
+		return PAGEREF_RECLAIM;
+
+	if (referenced_ptes) {
+		if (PageAnon(page))
+			return PAGEREF_ACTIVATE;
+		/*
+		 * All mapped pages start out with page table
+		 * references from the instantiating fault, so we need
+		 * to look twice if a mapped file page is used more
+		 * than once.
+		 *
+		 * Mark it and spare it for another trip around the
+		 * inactive list. Another page table reference will
+		 * lead to its activation.
+		 *
+		 * Note: the mark is set for activated pages as well
+		 * so that recently deactivated but used pages are
+		 * quickly recovered.
+		 */
+		SetPageReferenced(page);
+
+		if (referenced_page)
+			return PAGEREF_ACTIVATE;
+
+		return PAGEREF_KEEP;
+	}
+
+	/* Reclaim if clean, defer dirty pages to writeback */
+	if (referenced_page)
+		return PAGEREF_RECLAIM_CLEAN;
+
+	return PAGEREF_RECLAIM;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -590,16 +628,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long vm_flags;
 
 	cond_resched();
 
 	pagevec_init(&freed_pvec, 1);
 	while (!list_empty(page_list)) {
+		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		int referenced;
 
 		cond_resched();
 
@@ -641,17 +678,16 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1,
-						sc->mem_cgroup, &vm_flags);
-		/*
-		 * In active use or really unfreeable? Activate it.
- * If page which have PG_mlocked lost isoltation race, - * try_to_unmap moves it to unevictable list - */ - if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && - referenced && page_mapping_inuse(page) - && !(vm_flags & VM_LOCKED)) + references = page_check_references(page, sc); + switch (references) { + case PAGEREF_ACTIVATE: goto activate_locked; + case PAGEREF_KEEP: + goto keep_locked; + case PAGEREF_RECLAIM: + case PAGEREF_RECLAIM_CLEAN: + ; /* try to reclaim the page below */ + } /* * Anonymous process memory has backing store? @@ -685,7 +721,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, } if (PageDirty(page)) { - if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) + if (references == PAGEREF_RECLAIM_CLEAN) goto keep_locked; if (!may_enter_fs) goto keep_locked; @@ -1350,9 +1386,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, continue; } - /* page_referenced clears PageReferenced */ - if (page_mapping_inuse(page) && - page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { + if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { nr_rotated++; /* * Identify referenced, file-backed active pages and @@ -1699,8 +1733,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist, continue; note_zone_scanning_priority(zone, priority); - if (zone_is_all_unreclaimable(zone) && - priority != DEF_PRIORITY) + if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ sc->all_unreclaimable = 0; } else { @@ -1927,7 +1960,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) if (!populated_zone(zone)) continue; - if (zone_is_all_unreclaimable(zone)) + if (zone->all_unreclaimable) continue; if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), @@ -2017,8 +2050,7 @@ loop_again: if (!populated_zone(zone)) continue; - if (zone_is_all_unreclaimable(zone) && - priority != DEF_PRIORITY) + if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* @@ -2061,8 +2093,7 @@ loop_again: if (!populated_zone(zone)) continue; - if (zone_is_all_unreclaimable(zone) && - priority != DEF_PRIORITY) + if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; temp_priority[i] = priority; @@ -2089,12 +2120,11 @@ loop_again: lru_pages); sc.nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; - if (zone_is_all_unreclaimable(zone)) + if (zone->all_unreclaimable) continue; - if (nr_slab == 0 && zone->pages_scanned >= - (zone_reclaimable_pages(zone) * 6)) - zone_set_flag(zone, - ZONE_ALL_UNRECLAIMABLE); + if (nr_slab == 0 && + zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6)) + zone->all_unreclaimable = 1; /* * If we've done a decent amount of scanning and * the reclaim ratio is low, start doing writepage @@ -2624,7 +2654,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return ZONE_RECLAIM_FULL; - if (zone_is_all_unreclaimable(zone)) + if (zone->all_unreclaimable) return ZONE_RECLAIM_FULL; /*
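The core of the change above is the used-once heuristic in page_check_references(): a mapped file page always carries one page-table reference from its instantiating fault, so it must be seen referenced on two successive inactive-list scans before it is activated. The standalone sketch below illustrates just that decision table; demo_page, demo_check_references() and the DEMO_* names are hypothetical stand-ins rather than kernel code, and the lumpy-reclaim and VM_LOCKED early exits of the real function are omitted for brevity.

/*
 * Sketch of the used-once decision table from page_check_references().
 * All names are hypothetical stand-ins; this is not the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_references {
	DEMO_RECLAIM,		/* reclaim, writing back if dirty */
	DEMO_RECLAIM_CLEAN,	/* reclaim only if no writeback is needed */
	DEMO_KEEP,		/* leave on the inactive list */
	DEMO_ACTIVATE,		/* move to the active list */
};

struct demo_page {
	bool anon;		/* anonymous vs. file-backed */
	bool referenced;	/* software mark: PageReferenced() analogue */
	int  referenced_ptes;	/* young PTEs found during this scan */
};

static enum demo_references demo_check_references(struct demo_page *page)
{
	/* TestClearPageReferenced() analogue: read and clear the mark */
	bool was_referenced = page->referenced;
	page->referenced = false;

	if (page->referenced_ptes) {
		/* page_referenced() clears the young bits as it counts */
		page->referenced_ptes = 0;
		if (page->anon)
			return DEMO_ACTIVATE;
		/*
		 * A mapped file page always shows one reference from the
		 * instantiating fault, so require a second sighting: mark
		 * it now, activate only if it was already marked.
		 */
		page->referenced = true;
		return was_referenced ? DEMO_ACTIVATE : DEMO_KEEP;
	}
	return was_referenced ? DEMO_RECLAIM_CLEAN : DEMO_RECLAIM;
}

int main(void)
{
	struct demo_page file_page = { .anon = false, .referenced_ptes = 1 };

	/* First scan: one PTE reference, so the page is marked and kept. */
	printf("scan 1: %d (expect DEMO_KEEP = %d)\n",
	       demo_check_references(&file_page), DEMO_KEEP);

	/* The page is touched again between scans. */
	file_page.referenced_ptes = 1;

	/* Second scan: referenced again while marked, so it activates. */
	printf("scan 2: %d (expect DEMO_ACTIVATE = %d)\n",
	       demo_check_references(&file_page), DEMO_ACTIVATE);
	return 0;
}

Under this scheme, references are derived entirely from the PTEs found by page_referenced() plus the software mark, which is what lets the patch delete the old page_mapping_inuse() heuristic in the first hunk.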