x86: don't special-case pmd allocations as much
arch/x86/mm/pgtable_32.c
index 3a6c920..5ca3552 100644
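
In short, as read from the hunks below: pgd_alloc() stops hand-building the
PAE pgd one slot at a time, pulling user pmds from a dedicated zero-filled
kmem_cache and kernel pmds from the page allocator. Prepopulation is
delegated to pgd_prepopulate_pmd() and teardown to pgd_mop_up_pmds(), which
lets the pmd_ctor() slab constructor and the pmd_cache_alloc()/
pmd_cache_free() helpers go away.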
@@ -195,11 +195,6 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
-void pmd_ctor(struct kmem_cache *cache, void *pmd)
-{
-       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-}
-
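
With the pmd kmem_cache on its way out, its zeroing constructor goes with
it. The assumption behind dropping it (not shown in this patch) is that
pmds now arrive pre-zeroed from the page allocator, along the lines of the
era's pmd_alloc_one():

/* Illustrative only: a zeroed page needs no slab constructor. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
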
 /*
  * List of all pgd's needed for non-PAE so it can invalidate entries
  * in both cached and uncached pgd's; not needed for PAE since the
@@ -285,7 +280,6 @@ static void pgd_dtor(void *pgd)
        if (SHARED_KERNEL_PMD)
                return;
 
-       paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
@@ -367,84 +361,22 @@ static void pgd_mop_up_pmds(pgd_t *pgd)
 }
 #endif /* CONFIG_X86_PAE */
 
-/* If we allocate a pmd for part of the kernel address space, then
-   make sure it's initialized with the appropriate kernel mappings.
-   Otherwise use a cached zeroed pmd.  */
-static pmd_t *pmd_cache_alloc(int idx)
-{
-       pmd_t *pmd;
-
-       if (idx >= USER_PTRS_PER_PGD) {
-               pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
-
-               if (pmd)
-                       memcpy(pmd,
-                              (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
-                              sizeof(pmd_t) * PTRS_PER_PMD);
-       } else
-               pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-
-       return pmd;
-}
-
-static void pmd_cache_free(pmd_t *pmd, int idx)
-{
-       if (idx >= USER_PTRS_PER_PGD)
-               free_page((unsigned long)pmd);
-       else
-               kmem_cache_free(pmd_cache, pmd);
-}
-
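
The two helpers removed above are subsumed by pgd_prepopulate_pmd(), called
from pgd_alloc() below. Its body is outside this hunk; the following is
only a sketch inferred from the removed allocation loop (error handling and
the exact entry encoding are assumptions, not the patch's actual code):

/*
 * Sketch, not this patch's code: prepopulate a PAE pgd with pmds
 * the way the removed pgd_alloc() loop did.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	int i;

	if (PTRS_PER_PMD == 1)		/* non-PAE: nothing to prepopulate */
		return 1;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pmd_t *pmd = pmd_alloc_one(mm, i * PGDIR_SIZE);

		if (!pmd) {
			pgd_mop_up_pmds(pgd);	/* assumed: undo partial work */
			return 0;
		}

		/* kernel part of the address space: copy kernel mappings */
		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd,
			       (void *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));	/* 1 == _PAGE_PRESENT */
	}
	return 1;
}
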
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       int i;
        pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 
-       if (PTRS_PER_PMD == 1 || !pgd)
-               return pgd;
-
        mm->pgd = pgd;          /* so that alloc_pd can use it */
 
-       for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-               pmd_t *pmd = pmd_cache_alloc(i);
-
-               if (!pmd)
-                       goto out_oom;
-
-               paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
-               set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-       }
        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                quicklist_free(0, pgd_dtor, pgd);
                pgd = NULL;
        }
 
        return pgd;
-
-out_oom:
-       for (i--; i >= 0; i--) {
-               pgd_t pgdent = pgd[i];
-               void* pmd = (void *)__va(pgd_val(pgdent)-1);
-               paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-               pmd_cache_free(pmd, i);
-       }
-       quicklist_free(0, pgd_dtor, pgd);
-       return NULL;
 }
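
For reference, pgd_alloc() as it reads after this hunk, assembled from the
context and surviving lines above:

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	mm->pgd = pgd;		/* so that alloc_pd can use it */

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		quicklist_free(0, pgd_dtor, pgd);
		pgd = NULL;
	}

	return pgd;
}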
 
 void pgd_free(pgd_t *pgd)
 {
-       int i;
-
-       /* in the PAE case user pgd entries are overwritten before usage */
-       if (PTRS_PER_PMD > 1)
-               for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-                       pgd_t pgdent = pgd[i];
-                       void* pmd = (void *)__va(pgd_val(pgdent)-1);
-                       paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-                       pmd_cache_free(pmd, i);
-               }
-       /* in the non-PAE case, free_pgtables() clears user pgd entries */
        pgd_mop_up_pmds(pgd);
        quicklist_free(0, pgd_dtor, pgd);
 }
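
pgd_free() now defers all pmd teardown to pgd_mop_up_pmds(), whose body is
not shown in this diff (it sits under CONFIG_X86_PAE, per the #endif in the
third hunk, so a non-PAE no-op stub is presumably also present). A sketch
of the expected behaviour, mirroring the loop pgd_free() used to run inline
(names and details assumed):

/*
 * Sketch only: release the prepopulated pmds, as the removed
 * pgd_free() loop did.
 */
static void pgd_mop_up_pmds(pgd_t *pgd)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgdent = pgd[i];

		if (pgd_val(pgdent)) {	/* slot was prepopulated */
			pmd_t *pmd = (pmd_t *)__va(pgd_val(pgdent) - 1);

			pgd[i] = __pgd(0);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			pmd_free(pmd);
		}
	}
}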