X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fx86%2Fmm%2Fpageattr.c;h=8493c855582bf56a5a36d2e6e64266332c9d26c0;hb=9b7131542178f5f948e4bb6bea6e1c545e697b06;hp=d18c41d752f349cfb37463973559833ed1073ab1;hpb=8192206df093e8fc607b5072ce71a930d44f8638;p=safe%2Fjmp%2Flinux-2.6 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index d18c41d..8493c85 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -9,14 +9,7 @@ #include #include -void clflush_cache_range(void *addr, int size) -{ - int i; - - for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) - clflush(addr+i); -} - +#include #include #include #include @@ -24,26 +17,180 @@ void clflush_cache_range(void *addr, int size) #include /* - * We allow the BIOS range to be executable: + * The current flushing context - we pass it instead of 5 arguments: + */ +struct cpa_data { + unsigned long vaddr; + pgprot_t mask_set; + pgprot_t mask_clr; + int numpages; + int flushtlb; +}; + +static inline int +within(unsigned long addr, unsigned long start, unsigned long end) +{ + return addr >= start && addr < end; +} + +/* + * Flushing functions */ -#define BIOS_BEGIN 0x000a0000 -#define BIOS_END 0x00100000 -static inline pgprot_t check_exec(pgprot_t prot, unsigned long address) +/** + * clflush_cache_range - flush a cache range with clflush + * @addr: virtual start address + * @size: number of bytes to flush + * + * clflush is an unordered instruction which needs fencing with mfence + * to avoid ordering issues. + */ +void clflush_cache_range(void *vaddr, unsigned int size) +{ + void *vend = vaddr + size - 1; + + mb(); + + for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size) + clflush(vaddr); + /* + * Flush any possible final partial cacheline: + */ + clflush(vend); + + mb(); +} + +static void __cpa_flush_all(void *arg) +{ + unsigned long cache = (unsigned long)arg; + + /* + * Flush all to work around Errata in early athlons regarding + * large page flushing. + */ + __flush_tlb_all(); + + if (cache && boot_cpu_data.x86_model >= 4) + wbinvd(); +} + +static void cpa_flush_all(unsigned long cache) +{ + BUG_ON(irqs_disabled()); + + on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); +} + +static void __cpa_flush_range(void *arg) +{ + /* + * We could optimize that further and do individual per page + * tlb invalidates for a low number of pages. Caveat: we must + * flush the high aliases on 64bit as well. 
+ */ + __flush_tlb_all(); +} + +static void cpa_flush_range(unsigned long start, int numpages, int cache) +{ + unsigned int i, level; + unsigned long addr; + + BUG_ON(irqs_disabled()); + WARN_ON(PAGE_ALIGN(start) != start); + + on_each_cpu(__cpa_flush_range, NULL, 1, 1); + + if (!cache) + return; + + /* + * We only need to flush on one CPU, + * clflush is a MESI-coherent instruction that + * will cause all other CPUs to flush the same + * cachelines: + */ + for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) { + pte_t *pte = lookup_address(addr, &level); + + /* + * Only flush present addresses: + */ + if (pte && (pte_val(*pte) & _PAGE_PRESENT)) + clflush_cache_range((void *) addr, PAGE_SIZE); + } +} + +#define HIGH_MAP_START __START_KERNEL_map +#define HIGH_MAP_END (__START_KERNEL_map + KERNEL_TEXT_SIZE) + + +/* + * Converts a virtual address to a X86-64 highmap address + */ +static unsigned long virt_to_highmap(void *address) +{ +#ifdef CONFIG_X86_64 + return __pa((unsigned long)address) + HIGH_MAP_START - phys_base; +#else + return (unsigned long)address; +#endif +} + +/* + * Certain areas of memory on x86 require very specific protection flags, + * for example the BIOS area or kernel text. Callers don't always get this + * right (again, ioremap() on BIOS memory is not uncommon) so this function + * checks and fixes these known static required protection bits. + */ +static inline pgprot_t static_protections(pgprot_t prot, unsigned long address) { - if (__pa(address) >= BIOS_BEGIN && __pa(address) < BIOS_END) - pgprot_val(prot) &= ~_PAGE_NX; + pgprot_t forbidden = __pgprot(0); + + /* + * The BIOS area between 640k and 1Mb needs to be executable for + * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. + */ + if (within(__pa(address), BIOS_BEGIN, BIOS_END)) + pgprot_val(forbidden) |= _PAGE_NX; + + /* + * The kernel text needs to be executable for obvious reasons + * Does not cover __inittext since that is gone later on + */ + if (within(address, (unsigned long)_text, (unsigned long)_etext)) + pgprot_val(forbidden) |= _PAGE_NX; + /* + * Do the same for the x86-64 high kernel mapping + */ + if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext))) + pgprot_val(forbidden) |= _PAGE_NX; + + /* The .rodata section needs to be read-only */ + if (within(address, (unsigned long)__start_rodata, + (unsigned long)__end_rodata)) + pgprot_val(forbidden) |= _PAGE_RW; /* - * Better fail early if someone sets the kernel text to NX. - * Does not cover __inittext + * Do the same for the x86-64 high kernel mapping */ - BUG_ON(address >= (unsigned long)&_text && - address < (unsigned long)&_etext && - (pgprot_val(prot) & _PAGE_NX)); + if (within(address, virt_to_highmap(__start_rodata), + virt_to_highmap(__end_rodata))) + pgprot_val(forbidden) |= _PAGE_RW; + + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); return prot; } +/* + * Lookup the page table entry for a virtual address. Return a pointer + * to the entry and the level of the mapping. + * + * Note: We return pud and pmd either when the entry is marked large + * or when the present bit is not set. Otherwise we would return a + * pointer to a nonexisting mapping. 
+ */ pte_t *lookup_address(unsigned long address, int *level) { pgd_t *pgd = pgd_offset_k(address); @@ -54,21 +201,31 @@ pte_t *lookup_address(unsigned long address, int *level) if (pgd_none(*pgd)) return NULL; + pud = pud_offset(pgd, address); if (pud_none(*pud)) return NULL; + + *level = PG_LEVEL_1G; + if (pud_large(*pud) || !pud_present(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) return NULL; *level = PG_LEVEL_2M; - if (pmd_large(*pmd)) + if (pmd_large(*pmd) || !pmd_present(*pmd)) return (pte_t *)pmd; *level = PG_LEVEL_4K; + return pte_offset_kernel(pmd, address); } +/* + * Set the new pmd in all the pgds we know about: + */ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) { /* change init_mm */ @@ -77,7 +234,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) if (!SHARED_KERNEL_PMD) { struct page *page; - for (page = pgd_list; page; page = (struct page *)page->index) { + list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -91,18 +248,104 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) #endif } +static int +try_preserve_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) +{ + unsigned long nextpage_addr, numpages, pmask, psize, flags; + pte_t new_pte, old_pte, *tmp; + pgprot_t old_prot, new_prot; + int level, do_split = 1; + + spin_lock_irqsave(&pgd_lock, flags); + /* + * Check for races, another CPU might have split this page + * up already: + */ + tmp = lookup_address(address, &level); + if (tmp != kpte) + goto out_unlock; + + switch (level) { + case PG_LEVEL_2M: + psize = PMD_PAGE_SIZE; + pmask = PMD_PAGE_MASK; + break; +#ifdef CONFIG_X86_64 + case PG_LEVEL_1G: + psize = PMD_PAGE_SIZE; + pmask = PMD_PAGE_MASK; + break; +#endif + default: + do_split = -EINVAL; + goto out_unlock; + } + + /* + * Calculate the number of pages, which fit into this large + * page starting at address: + */ + nextpage_addr = (address + psize) & pmask; + numpages = (nextpage_addr - address) >> PAGE_SHIFT; + if (numpages < cpa->numpages) + cpa->numpages = numpages; + + /* + * We are safe now. Check whether the new pgprot is the same: + */ + old_pte = *kpte; + old_prot = new_prot = pte_pgprot(old_pte); + + pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); + pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); + new_prot = static_protections(new_prot, address); + + /* + * If there are no changes, return. maxpages has been updated + * above: + */ + if (pgprot_val(new_prot) == pgprot_val(old_prot)) { + do_split = 0; + goto out_unlock; + } + + /* + * We need to change the attributes. Check, whether we can + * change the large page in one go. We request a split, when + * the address is not aligned and the number of pages is + * smaller than the number of pages in the large page. Note + * that we limited the number of possible pages already to + * the number of pages in the large page. + */ + if (address == (nextpage_addr - psize) && cpa->numpages == numpages) { + /* + * The address is aligned and the number of pages + * covers the full page. 
+ */ + new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); + __set_pmd_pte(kpte, address, new_pte); + cpa->flushtlb = 1; + do_split = 0; + } + +out_unlock: + spin_unlock_irqrestore(&pgd_lock, flags); + + return do_split; +} + static int split_large_page(pte_t *kpte, unsigned long address) { - pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte)); + unsigned long flags, pfn, pfninc = 1; gfp_t gfp_flags = GFP_KERNEL; - unsigned long flags; - unsigned long addr; + unsigned int i, level; pte_t *pbase, *tmp; + pgprot_t ref_prot; struct page *base; - int i, level; #ifdef CONFIG_DEBUG_PAGEALLOC - gfp_flags = GFP_ATOMIC; + gfp_flags = GFP_ATOMIC | __GFP_NOWARN; #endif base = alloc_pages(gfp_flags, 0); if (!base) @@ -114,24 +357,41 @@ static int split_large_page(pte_t *kpte, unsigned long address) * up for us already: */ tmp = lookup_address(address, &level); - if (tmp != kpte) { - WARN_ON_ONCE(1); + if (tmp != kpte) goto out_unlock; - } - address = __pa(address); - addr = address & LARGE_PAGE_MASK; pbase = (pte_t *)page_address(base); #ifdef CONFIG_X86_32 paravirt_alloc_pt(&init_mm, page_to_pfn(base)); #endif + ref_prot = pte_pgprot(pte_clrhuge(*kpte)); - for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot)); +#ifdef CONFIG_X86_64 + if (level == PG_LEVEL_1G) { + pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; + pgprot_val(ref_prot) |= _PAGE_PSE; + } +#endif + + /* + * Get the target pfn from the original entry: + */ + pfn = pte_pfn(*kpte); + for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) + set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); /* - * Install the new, split up pagetable: + * Install the new, split up pagetable. Important details here: + * + * On Intel the NX bit of all levels must be cleared to make a + * page executable. See section 4.13.2 of Intel 64 and IA-32 + * Architectures Software Developer's Manual). + * + * Mark the entry present. The current mapping might be + * set to not present, which we preserved above. */ + ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte))); + pgprot_val(ref_prot) |= _PAGE_PRESENT; __set_pmd_pte(kpte, address, mk_pte(base, ref_prot)); base = NULL; @@ -144,17 +404,12 @@ out_unlock: return 0; } -static int -__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot) +static int __change_page_attr(unsigned long address, struct cpa_data *cpa) { + int level, do_split, err; struct page *kpte_page; - int level, err = 0; pte_t *kpte; -#ifdef CONFIG_X86_32 - BUG_ON(pfn > max_low_pfn); -#endif - repeat: kpte = lookup_address(address, &level); if (!kpte) @@ -164,22 +419,69 @@ repeat: BUG_ON(PageLRU(kpte_page)); BUG_ON(PageCompound(kpte_page)); - prot = check_exec(prot, address); - if (level == PG_LEVEL_4K) { - set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot))); - } else { - err = split_large_page(kpte, address); - if (!err) - goto repeat; + pte_t new_pte, old_pte = *kpte; + pgprot_t new_prot = pte_pgprot(old_pte); + + if(!pte_val(old_pte)) { + printk(KERN_WARNING "CPA: called for zero pte. 
" + "vaddr = %lx cpa->vaddr = %lx\n", address, + cpa->vaddr); + WARN_ON(1); + return -EINVAL; + } + + pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); + pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); + + new_prot = static_protections(new_prot, address); + + /* + * We need to keep the pfn from the existing PTE, + * after all we're only going to change it's attributes + * not the memory it points to + */ + new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); + + /* + * Do we really change anything ? + */ + if (pte_val(old_pte) != pte_val(new_pte)) { + set_pte_atomic(kpte, new_pte); + cpa->flushtlb = 1; + } + cpa->numpages = 1; + return 0; } + + /* + * Check, whether we can keep the large page intact + * and just change the pte: + */ + do_split = try_preserve_large_page(kpte, address, cpa); + /* + * When the range fits into the existing large page, + * return. cp->numpages and cpa->tlbflush have been updated in + * try_large_page: + */ + if (do_split <= 0) + return do_split; + + /* + * We have to split the large page: + */ + err = split_large_page(kpte, address); + if (!err) { + cpa->flushtlb = 1; + goto repeat; + } + return err; } /** * change_page_attr_addr - Change page table attributes in linear mapping * @address: Virtual address in linear mapping. - * @numpages: Number of pages to change * @prot: New page table attribute (PAGE_*) * * Change page attributes of a page in the direct mapping. This is a variant @@ -187,98 +489,258 @@ repeat: * mem_map entry (pfn_valid() is false). * * See change_page_attr() documentation for more details. + * + * Modules and drivers should use the set_memory_* APIs instead. */ - -int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) +static int change_page_attr_addr(struct cpa_data *cpa) { - int err = 0, kernel_map = 0, i; + int err; + unsigned long address = cpa->vaddr; #ifdef CONFIG_X86_64 - if (address >= __START_KERNEL_map && - address < __START_KERNEL_map + KERNEL_TEXT_SIZE) { + unsigned long phys_addr = __pa(address); - address = (unsigned long)__va(__pa(address)); - kernel_map = 1; - } + /* + * If we are inside the high mapped kernel range, then we + * fixup the low mapping first. __va() returns the virtual + * address in the linear mapping: + */ + if (within(address, HIGH_MAP_START, HIGH_MAP_END)) + address = (unsigned long) __va(phys_addr); #endif - for (i = 0; i < numpages; i++, address += PAGE_SIZE) { - unsigned long pfn = __pa(address) >> PAGE_SHIFT; + err = __change_page_attr(address, cpa); + if (err) + return err; - if (!kernel_map || pte_present(pfn_pte(0, prot))) { - err = __change_page_attr(address, pfn, prot); - if (err) - break; - } #ifdef CONFIG_X86_64 + /* + * If the physical address is inside the kernel map, we need + * to touch the high mapped kernel as well: + */ + if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) { /* - * Handle kernel mapping too which aliases part of - * lowmem: + * Calc the high mapping address. See __phys_addr() + * for the non obvious details. + * + * Note that NX and other required permissions are + * checked in static_protections(). 
*/ - if (__pa(address) < KERNEL_TEXT_SIZE) { - unsigned long addr2; - pgprot_t prot2; - - addr2 = __START_KERNEL_map + __pa(address); - /* Make sure the kernel mappings stay executable */ - prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); - err = __change_page_attr(addr2, pfn, prot2); - } -#endif - } + address = phys_addr + HIGH_MAP_START - phys_base; + /* + * Our high aliases are imprecise, because we check + * everything between 0 and KERNEL_TEXT_SIZE, so do + * not propagate lookup failures back to users: + */ + __change_page_attr(address, cpa); + } +#endif return err; } -/** - * change_page_attr - Change page table attributes in the linear mapping. - * @page: First page to change - * @numpages: Number of pages to change - * @prot: New protection/caching type (PAGE_*) - * - * Returns 0 on success, otherwise a negated errno. - * - * This should be used when a page is mapped with a different caching policy - * than write-back somewhere - some CPUs do not like it when mappings with - * different caching policies exist. This changes the page attributes of the - * in kernel linear mapping too. - * - * Caller must call global_flush_tlb() later to make the changes active. - * - * The caller needs to ensure that there are no conflicting mappings elsewhere - * (e.g. in user space) * This function only deals with the kernel linear map. - * - * For MMIO areas without mem_map use change_page_attr_addr() instead. - */ -int change_page_attr(struct page *page, int numpages, pgprot_t prot) +static int __change_page_attr_set_clr(struct cpa_data *cpa) { - unsigned long addr = (unsigned long)page_address(page); + int ret, numpages = cpa->numpages; + + while (numpages) { + /* + * Store the remaining nr of pages for the large page + * preservation check. + */ + cpa->numpages = numpages; + ret = change_page_attr_addr(cpa); + if (ret) + return ret; + + /* + * Adjust the number of pages with the result of the + * CPA operation. Either a large page has been + * preserved or a single page update happened. + */ + BUG_ON(cpa->numpages > numpages); + numpages -= cpa->numpages; + cpa->vaddr += cpa->numpages * PAGE_SIZE; + } + return 0; +} - return change_page_attr_addr(addr, numpages, prot); +static inline int cache_attr(pgprot_t attr) +{ + return pgprot_val(attr) & + (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD); } -EXPORT_SYMBOL(change_page_attr); -static void flush_kernel_map(void *arg) +static int change_page_attr_set_clr(unsigned long addr, int numpages, + pgprot_t mask_set, pgprot_t mask_clr) { + struct cpa_data cpa; + int ret, cache; + /* - * Flush all to work around Errata in early athlons regarding - * large page flushing. + * Check, if we are requested to change a not supported + * feature: */ - __flush_tlb_all(); + mask_set = canon_pgprot(mask_set); + mask_clr = canon_pgprot(mask_clr); + if (!pgprot_val(mask_set) && !pgprot_val(mask_clr)) + return 0; - if (boot_cpu_data.x86_model >= 4) - wbinvd(); + cpa.vaddr = addr; + cpa.numpages = numpages; + cpa.mask_set = mask_set; + cpa.mask_clr = mask_clr; + cpa.flushtlb = 0; + + ret = __change_page_attr_set_clr(&cpa); + + /* + * Check whether we really changed something: + */ + if (!cpa.flushtlb) + return ret; + + /* + * No need to flush, when we did not set any of the caching + * attributes: + */ + cache = cache_attr(mask_set); + + /* + * On success we use clflush, when the CPU supports it to + * avoid the wbindv. 
If the CPU does not support it and in the + * error case we fall back to cpa_flush_all (which uses + * wbindv): + */ + if (!ret && cpu_has_clflush) + cpa_flush_range(addr, numpages, cache); + else + cpa_flush_all(cache); + + return ret; } -void global_flush_tlb(void) +static inline int change_page_attr_set(unsigned long addr, int numpages, + pgprot_t mask) { - BUG_ON(irqs_disabled()); + return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0)); +} + +static inline int change_page_attr_clear(unsigned long addr, int numpages, + pgprot_t mask) +{ + return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask); +} + +int set_memory_uc(unsigned long addr, int numpages) +{ + return change_page_attr_set(addr, numpages, + __pgprot(_PAGE_PCD | _PAGE_PWT)); +} +EXPORT_SYMBOL(set_memory_uc); + +int set_memory_wb(unsigned long addr, int numpages) +{ + return change_page_attr_clear(addr, numpages, + __pgprot(_PAGE_PCD | _PAGE_PWT)); +} +EXPORT_SYMBOL(set_memory_wb); + +int set_memory_x(unsigned long addr, int numpages) +{ + return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX)); +} +EXPORT_SYMBOL(set_memory_x); + +int set_memory_nx(unsigned long addr, int numpages) +{ + return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX)); +} +EXPORT_SYMBOL(set_memory_nx); + +int set_memory_ro(unsigned long addr, int numpages) +{ + return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); +} + +int set_memory_np(unsigned long addr, int numpages) +{ + return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT)); +} + +int set_pages_uc(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_uc(addr, numpages); +} +EXPORT_SYMBOL(set_pages_uc); + +int set_pages_wb(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_wb(addr, numpages); +} +EXPORT_SYMBOL(set_pages_wb); + +int set_pages_x(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_x(addr, numpages); +} +EXPORT_SYMBOL(set_pages_x); + +int set_pages_nx(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_nx(addr, numpages); +} +EXPORT_SYMBOL(set_pages_nx); + +int set_pages_ro(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_ro(addr, numpages); +} - on_each_cpu(flush_kernel_map, NULL, 1, 1); +int set_pages_rw(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_rw(addr, numpages); } -EXPORT_SYMBOL(global_flush_tlb); #ifdef CONFIG_DEBUG_PAGEALLOC + +static int __set_pages_p(struct page *page, int numpages) +{ + struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), + .numpages = numpages, + .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), + .mask_clr = __pgprot(0)}; + + return __change_page_attr_set_clr(&cpa); +} + +static int __set_pages_np(struct page *page, int numpages) +{ + struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), + .numpages = numpages, + .mask_set = __pgprot(0), + .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)}; + + return __change_page_attr_set_clr(&cpa); +} + void kernel_map_pages(struct page *page, int numpages, int enable) { if (PageHighMem(page)) @@ -298,7 +760,10 
@@ void kernel_map_pages(struct page *page, int numpages, int enable) * The return value is ignored - the calls cannot fail, * large pages are disabled at boot time: */ - change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0)); + if (enable) + __set_pages_p(page, numpages); + else + __set_pages_np(page, numpages); /* * We should perform an IPI and flush all tlbs, @@ -307,3 +772,11 @@ void kernel_map_pages(struct page *page, int numpages, int enable) __flush_tlb_all(); } #endif + +/* + * The testcases use internal knowledge of the implementation that shouldn't + * be exposed to the rest of the kernel. Include these directly here. + */ +#ifdef CONFIG_CPA_DEBUG +#include "pageattr-test.c" +#endif
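
The patch above retires the change_page_attr()/global_flush_tlb() pair in favour of the exported set_memory_*() and set_pages_*() helpers, which do the required TLB and cache flushing internally (clflush per page when the CPU supports it, wbinvd otherwise). Below is a minimal usage sketch, not part of the patch: example_make_uncached() is a hypothetical driver helper, and it assumes the set_memory_*() prototypes are picked up via <asm/cacheflush.h> and that the buffer lies in the kernel linear mapping.

/*
 * Illustrative sketch only -- not part of the patch above.
 * set_memory_uc()/set_memory_wb() are assumed to be declared in
 * <asm/cacheflush.h>; the address passed in must be page aligned
 * and must belong to the kernel direct (linear) mapping.
 */
#include <asm/cacheflush.h>

static int example_make_uncached(void *buf, int numpages)
{
	unsigned long addr = (unsigned long)buf;
	int ret;

	/* Switch the linear-mapping pages to uncached (_PAGE_PCD | _PAGE_PWT). */
	ret = set_memory_uc(addr, numpages);
	if (ret)
		return ret;

	/* ... access the buffer while it is mapped uncached ... */

	/* Restore the default write-back caching attribute. */
	return set_memory_wb(addr, numpages);
}

Note that, unlike the old change_page_attr() interface, no separate global_flush_tlb() call is needed afterwards: change_page_attr_set_clr() flushes before returning, and only when something actually changed.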