/*
* Flushing functions
*/
-void clflush_cache_range(void *addr, int size)
+
+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @addr: virtual start address
+ * @size: number of bytes to flush
+ *
+ * clflush is an unordered instruction which needs fencing with mfence
+ * to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
{
- int i;
+ void *vend = vaddr + size - 1;
+
+ mb();
- for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
- clflush(addr+i);
+ for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+ clflush(vaddr);
+ /*
+ * Flush any possible final partial cacheline:
+ */
+ clflush(vend);
+
+ mb();
}
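/*
 * Usage sketch (illustrative, not part of the patch): a caller that
 * fills a buffer and then hands it to an agent reading memory
 * directly relies on the mb() pair above:
 *
 *	memset(buf, 0, len);		/. cached stores ./
 *	clflush_cache_range(buf, len);	/. write back + invalidate ./
 *
 * The first mb() orders the stores before the unordered clflushes;
 * the second keeps later accesses from passing the flush.
 */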
-static void flush_kernel_map(void *arg)
+static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page faulting.
	 */
	wbinvd();
}
-static void global_flush_tlb(void)
+static void cpa_flush_all(void)
{
BUG_ON(irqs_disabled());
- on_each_cpu(flush_kernel_map, NULL, 1, 1);
+ on_each_cpu(__cpa_flush_all, NULL, 1, 1);
+}
+
+static void __cpa_flush_range(void *arg)
+{
+ /*
+ * We could optimize this further and do individual per-page
+ * TLB invalidates for a low number of pages. Caveat: we must
+ * flush the high aliases on 64-bit as well.
+ */
+ __flush_tlb_all();
+}
+
+static void cpa_flush_range(unsigned long start, int numpages)
+{
+ unsigned int i, level;
+ unsigned long addr;
+
+ BUG_ON(irqs_disabled());
+ WARN_ON(PAGE_ALIGN(start) != start);
+
+ on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+
+ /*
+ * We only need to flush on one CPU:
+ * clflush is a MESI-coherent instruction that
+ * will cause all other CPUs to flush the same
+ * cachelines:
+ */
+ for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+ pte_t *pte = lookup_address(addr, &level);
+
+ /*
+ * Only flush present addresses:
+ */
+ if (pte && pte_present(*pte))
+ clflush_cache_range((void *) addr, PAGE_SIZE);
+ }
+}
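/*
 * Worked example (illustrative): for numpages == 2 the loop above
 * calls clflush_cache_range(start, PAGE_SIZE) and
 * clflush_cache_range(start + PAGE_SIZE, PAGE_SIZE). Cache
 * coherency propagates the line flushes to all CPUs, so only the
 * TLB invalidation (__cpa_flush_range) needs on_each_cpu().
 */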
+
+#define HIGH_MAP_START __START_KERNEL_map
+#define HIGH_MAP_END (__START_KERNEL_map + KERNEL_TEXT_SIZE)
+
+/*
+ * Converts a virtual address to an X86-64 highmap address
+ */
+static unsigned long virt_to_highmap(void *address)
+{
+#ifdef CONFIG_X86_64
+ return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
+#else
+ return (unsigned long)address;
+#endif
}
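/*
 * Illustration (not part of the patch): the high map is a second
 * virtual alias of the kernel image. For any kernel symbol:
 *
 *	unsigned long pa   = __pa(_text);		/. physical ./
 *	unsigned long low  = (unsigned long)__va(pa);	/. linear map ./
 *	unsigned long high = virt_to_highmap(_text);	/. high map ./
 *
 * 'low' and 'high' reference the same physical page, which is why
 * the protection checks below are applied to both ranges.
 */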
/*
 * The kernel text needs to be executable for obvious
 * reasons.
 */
if (within(address, (unsigned long)_text, (unsigned long)_etext))
pgprot_val(forbidden) |= _PAGE_NX;
+ /*
+ * Do the same for the x86-64 high kernel mapping
+ */
+ if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
+ pgprot_val(forbidden) |= _PAGE_NX;
+
#ifdef CONFIG_DEBUG_RODATA
/* The .rodata section needs to be read-only */
if (within(address, (unsigned long)__start_rodata,
(unsigned long)__end_rodata))
pgprot_val(forbidden) |= _PAGE_RW;
+ /*
+ * Do the same for the x86-64 high kernel mapping
+ */
+ if (within(address, virt_to_highmap(__start_rodata),
+ virt_to_highmap(__end_rodata)))
+ pgprot_val(forbidden) |= _PAGE_RW;
#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
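/*
 * Example (illustrative): a request to make a .rodata page writable
 * ends up with _PAGE_RW in 'forbidden', so the masking above
 * silently drops the RW bit and the page stays read-only. The same
 * mechanism filters _PAGE_NX out of requests against kernel text,
 * keeping it executable.
 */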
if (!SHARED_KERNEL_PMD) {
struct page *page;
- for (page = pgd_list; page; page = (struct page *)page->index) {
+ list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
{
pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
gfp_t gfp_flags = GFP_KERNEL;
- unsigned long flags;
- unsigned long addr;
+ unsigned long flags, addr, pfn;
pte_t *pbase, *tmp;
struct page *base;
- int i, level;
+ unsigned int i, level;
#ifdef CONFIG_DEBUG_PAGEALLOC
- gfp_flags = GFP_ATOMIC;
+	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;
paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
- for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
- set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
+ /*
+ * Get the target pfn from the original entry:
+ */
+ pfn = pte_pfn(*kpte);
+ for (i = 0; i < PTRS_PER_PTE; i++, pfn++)
+ set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
}
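/*
 * Worked example (illustrative, assuming a 2MB large page on
 * x86-64): a large PTE with target pfn N is replaced by
 * PTRS_PER_PTE == 512 small PTEs covering pfns N .. N+511, all
 * inheriting ref_prot with the PSE bit cleared, so the split
 * changes nothing except the protection granularity.
 */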
static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
+__change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr)
{
struct page *kpte_page;
int level, err = 0;
pte_t *kpte;
-#ifdef CONFIG_X86_32
- BUG_ON(pfn > max_low_pfn);
-#endif
-
repeat:
kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
BUG_ON(PageLRU(kpte_page));
BUG_ON(PageCompound(kpte_page));
- prot = static_protections(prot, address);
-
if (level == PG_LEVEL_4K) {
- WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
- set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
- } else {
- /* Clear the PSE bit for the 4k level pages ! */
- pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
+ pte_t new_pte, old_pte = *kpte;
+ pgprot_t new_prot = pte_pgprot(old_pte);
+
+	if (!pte_val(old_pte)) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
+ pgprot_val(new_prot) |= pgprot_val(mask_set);
+
+ new_prot = static_protections(new_prot, address);
+
+ /*
+ * We need to keep the pfn from the existing PTE,
+	 * after all we're only going to change its attributes,
+	 * not the memory it points to.
+ */
+ new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+ set_pte_atomic(kpte, new_pte);
+ } else {
err = split_large_page(kpte, address);
if (!err)
goto repeat;
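/*
 * Effect of the two masks (illustrative): for set_memory_ro() below,
 * mask_set is 0 and mask_clr is _PAGE_RW, so the 4k branch computes
 *
 *	new_prot = (old_prot & ~_PAGE_RW) | 0
 *
 * i.e. exactly one bit changes; the pfn and all other attributes of
 * the PTE are preserved.
 */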
* Modules and drivers should use the set_memory_* APIs instead.
*/
-static int change_page_attr_addr(unsigned long address, pgprot_t prot)
+
+static int
+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
+ pgprot_t mask_clr)
{
- int err = 0, kernel_map = 0;
- unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+ int err;
#ifdef CONFIG_X86_64
- if (address >= __START_KERNEL_map &&
- address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+ unsigned long phys_addr = __pa(address);
- address = (unsigned long)__va(__pa(address));
- kernel_map = 1;
- }
+ /*
+ * If we are inside the high mapped kernel range, then we
+	 * fix up the low mapping first. __va() returns the virtual
+ * address in the linear mapping:
+ */
+ if (within(address, HIGH_MAP_START, HIGH_MAP_END))
+ address = (unsigned long) __va(phys_addr);
#endif
- if (!kernel_map || pte_present(pfn_pte(0, prot))) {
- err = __change_page_attr(address, pfn, prot);
- if (err)
- return err;
- }
+ err = __change_page_attr(address, mask_set, mask_clr);
+ if (err)
+ return err;
#ifdef CONFIG_X86_64
/*
- * Handle kernel mapping too which aliases part of
- * lowmem:
+ * If the physical address is inside the kernel map, we need
+ * to touch the high mapped kernel as well:
*/
- if (__pa(address) < KERNEL_TEXT_SIZE) {
- unsigned long addr2;
- pgprot_t prot2;
-
- addr2 = __START_KERNEL_map + __pa(address);
- /* Make sure the kernel mappings stay executable */
- prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
- err = __change_page_attr(addr2, pfn, prot2);
+ if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
+ /*
+	 * Calculate the high mapping address. See __phys_addr()
+	 * for the non-obvious details.
+ *
+ * Note that NX and other required permissions are
+ * checked in static_protections().
+ */
+ address = phys_addr + HIGH_MAP_START - phys_base;
+
+ /*
+ * Our high aliases are imprecise, because we check
+ * everything between 0 and KERNEL_TEXT_SIZE, so do
+ * not propagate lookup failures back to users:
+ */
+ __change_page_attr(address, mask_set, mask_clr);
}
#endif
-
return err;
}
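/*
 * Flow sketch (illustrative): changing attributes of a high mapped
 * address first rewrites the linear-map alias obtained via
 * __va(phys_addr), then recomputes the high alias from phys_addr and
 * phys_base and touches that as well, keeping both mappings of the
 * same physical page consistent.
 */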
-/**
- * change_page_attr_set - Change page table attributes in the linear mapping.
- * @addr: Virtual address in linear mapping.
- * @numpages: Number of pages to change
- * @prot: Protection/caching type bits to set (PAGE_*)
- *
- * Returns 0 on success, otherwise a negated errno.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere
- * (e.g. in user space) * This function only deals with the kernel linear map.
- *
- * This function is different from change_page_attr() in that only selected bits
- * are impacted, all other bits remain as is.
- */
-static int change_page_attr_set(unsigned long addr, int numpages,
- pgprot_t prot)
+static int __change_page_attr_set_clr(unsigned long addr, int numpages,
+ pgprot_t mask_set, pgprot_t mask_clr)
{
- pgprot_t current_prot, new_prot;
- int level;
- pte_t *pte;
- int i, ret;
-
- for (i = 0; i < numpages ; i++) {
-
- pte = lookup_address(addr, &level);
- if (!pte)
- return -EINVAL;
-
- current_prot = pte_pgprot(*pte);
-
- pgprot_val(new_prot) =
- pgprot_val(current_prot) | pgprot_val(prot);
+ unsigned int i;
+ int ret;
- ret = change_page_attr_addr(addr, new_prot);
+	for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
+ ret = change_page_attr_addr(addr, mask_set, mask_clr);
if (ret)
return ret;
- addr += PAGE_SIZE;
}
+
return 0;
}
-/**
- * change_page_attr_clear - Change page table attributes in the linear mapping.
- * @addr: Virtual address in linear mapping.
- * @numpages: Number of pages to change
- * @prot: Protection/caching type bits to clear (PAGE_*)
- *
- * Returns 0 on success, otherwise a negated errno.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere
- * (e.g. in user space) * This function only deals with the kernel linear map.
- *
- * This function is different from change_page_attr() in that only selected bits
- * are impacted, all other bits remain as is.
- */
-static int change_page_attr_clear(unsigned long addr, int numpages,
- pgprot_t prot)
+static int change_page_attr_set_clr(unsigned long addr, int numpages,
+ pgprot_t mask_set, pgprot_t mask_clr)
{
- pgprot_t current_prot, new_prot;
- int level;
- pte_t *pte;
- int i, ret;
+ int ret;
- for (i = 0; i < numpages; i++) {
+ /*
+	 * Check if we are requested to change a feature that is
+	 * not supported:
+ */
+ mask_set = canon_pgprot(mask_set);
+ mask_clr = canon_pgprot(mask_clr);
+ if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
+ return 0;
- pte = lookup_address(addr, &level);
- if (!pte)
- return -EINVAL;
+ ret = __change_page_attr_set_clr(addr, numpages, mask_set, mask_clr);
- current_prot = pte_pgprot(*pte);
+ /*
+	 * On success we use clflush, when the CPU supports it, to
+	 * avoid the wbinvd. If the CPU does not support clflush, or
+	 * in the error case, we fall back to cpa_flush_all (which
+	 * uses wbinvd):
+ */
+ if (!ret && cpu_has_clflush)
+ cpa_flush_range(addr, numpages);
+ else
+ cpa_flush_all();
- pgprot_val(new_prot) =
- pgprot_val(current_prot) & ~pgprot_val(prot);
+ return ret;
+}
- ret = change_page_attr_addr(addr, new_prot);
- if (ret)
- return ret;
- addr += PAGE_SIZE;
- }
- return 0;
+static inline int change_page_attr_set(unsigned long addr, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}
-int set_memory_uc(unsigned long addr, int numpages)
+static inline int change_page_attr_clear(unsigned long addr, int numpages,
+ pgprot_t mask)
{
- int err;
+ return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
+}
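/*
 * Usage sketch (illustrative): the wrappers reduce every
 * set_memory_*() helper below to a single call, e.g.
 *
 *	set_memory_uc(addr, n)
 *	  -> change_page_attr_set(addr, n, __pgprot(_PAGE_PCD | _PAGE_PWT))
 *	  -> change_page_attr_set_clr(addr, n, mask, __pgprot(0))
 *
 * so the cache/TLB flushing above is inherited for free and the
 * per-helper global_flush_tlb() calls can be dropped.
 */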
- err = change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_PCD | _PAGE_PWT));
- global_flush_tlb();
- return err;
+int set_memory_uc(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(addr, numpages,
+ __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);
int set_memory_wb(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_clear(addr, numpages,
- __pgprot(_PAGE_PCD | _PAGE_PWT));
- global_flush_tlb();
- return err;
+ return change_page_attr_clear(addr, numpages,
+ __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
int set_memory_x(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_clear(addr, numpages,
- __pgprot(_PAGE_NX));
- global_flush_tlb();
- return err;
+ return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);
int set_memory_nx(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_NX));
- global_flush_tlb();
- return err;
+ return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);
int set_memory_ro(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_clear(addr, numpages,
- __pgprot(_PAGE_RW));
- global_flush_tlb();
- return err;
+ return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}
int set_memory_rw(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_RW));
- global_flush_tlb();
- return err;
+ return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
int set_memory_np(unsigned long addr, int numpages)
{
- int err;
-
- err = change_page_attr_clear(addr, numpages,
- __pgprot(_PAGE_PRESENT));
- global_flush_tlb();
- return err;
+ return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
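/*
 * Driver-style example (illustrative; 'buf' and 'npages' are
 * hypothetical): map a buffer uncached for device access, restore
 * write-back afterwards:
 *
 *	if (set_memory_uc((unsigned long)buf, npages))
 *		goto error;
 *	...
 *	set_memory_wb((unsigned long)buf, npages);
 *
 * No explicit global_flush_tlb() is needed anymore; the flush
 * happens inside change_page_attr_set_clr().
 */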
int set_pages_uc(struct page *page, int numpages)
}
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
+static inline int __change_page_attr_set(unsigned long addr, int numpages,
+ pgprot_t mask)
+{
+ return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
+}
+
+static inline int __change_page_attr_clear(unsigned long addr, int numpages,
+ pgprot_t mask)
+{
+ return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
+}
+#endif
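/*
 * Why a non-flushing variant (assumption based on the checks above):
 * cpa_flush_all() and cpa_flush_range() both BUG_ON(irqs_disabled()),
 * while kernel_map_pages() below is called from the page allocator
 * where interrupts may be disabled, so the DEBUG_PAGEALLOC helpers
 * must bypass the IPI based flush.
 */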
+
#ifdef CONFIG_DEBUG_PAGEALLOC
static int __set_pages_p(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
- return change_page_attr_set(addr, numpages,
- __pgprot(_PAGE_PRESENT | _PAGE_RW));
+
+ return __change_page_attr_set(addr, numpages,
+ __pgprot(_PAGE_PRESENT | _PAGE_RW));
}
static int __set_pages_np(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
- return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+
+ return __change_page_attr_clear(addr, numpages,
+ __pgprot(_PAGE_PRESENT));
}
void kernel_map_pages(struct page *page, int numpages, int enable)