/*
- * This file contains the routines setting up the linux page tables.
- * -- paulus
+ * This file contains common routines for dealing with the freeing of
+ * page tables, along with common page table handling code.
*
- * Derived from arch/ppc/mm/init.c:
+ * Derived from arch/powerpc/mm/tlb_64.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
*/
-#include <linux/config.h>
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
+#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/highmem.h>
-
-#include <asm/pgtable.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/pgalloc.h>
-#include <asm/io.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
#include "mmu_decl.h"
-unsigned long ioremap_base;
-unsigned long ioremap_bot;
-int io_bat_index;
-
-#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
-#define HAVE_BATS 1
-#endif
-
-#if defined(CONFIG_FSL_BOOKE)
-#define HAVE_TLBCAM 1
-#endif
-
-extern char etext[], _stext[];
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#ifdef CONFIG_SMP
-extern void hash_page_sync(void);
-#endif
-
-#ifdef HAVE_BATS
-extern unsigned long v_mapped_by_bats(unsigned long va);
-extern unsigned long p_mapped_by_bats(unsigned long pa);
-void setbat(int index, unsigned long virt, unsigned long phys,
- unsigned int size, int flags);
-
-#else /* !HAVE_BATS */
-#define v_mapped_by_bats(x) (0UL)
-#define p_mapped_by_bats(x) (0UL)
-#endif /* HAVE_BATS */
-
-#ifdef HAVE_TLBCAM
-extern unsigned int tlbcam_index;
-extern unsigned long v_mapped_by_tlbcam(unsigned long va);
-extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
-#else /* !HAVE_TLBCAM */
-#define v_mapped_by_tlbcam(x) (0UL)
-#define p_mapped_by_tlbcam(x) (0UL)
-#endif /* HAVE_TLBCAM */
-
-#ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
-#define PGDIR_ORDER 1
-#else
-#define PGDIR_ORDER 0
-#endif
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *ret;
+/*
+ * Handle batching of page table freeing on SMP. Page tables are
+ * queued up and sent to be freed later by RCU in order to avoid
+ * freeing a page table page that is being walked without locks.
+ */
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
- return ret;
-}
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
-void pgd_free(pgd_t *pgd)
+struct pte_freelist_batch
{
- free_pages((unsigned long)pgd, PGDIR_ORDER);
-}
+ struct rcu_head rcu;
+ unsigned int index;
+ unsigned long tables[0];
+};
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
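+/* Number of page table pointers that fit in one page after the batch header */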
+#define PTE_FREELIST_SIZE \
+ ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+ / sizeof(unsigned long))
+
+static void pte_free_smp_sync(void *arg)
{
- pte_t *pte;
- extern int mem_init_done;
- extern void *early_get_page(void);
-
- if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
- } else {
- pte = (pte_t *)early_get_page();
- if (pte)
- clear_page(pte);
- }
- return pte;
+ /* Do nothing, just ensure we sync with all CPUs */
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(void *table, unsigned shift)
{
- struct page *ptepage;
+ pte_freelist_forced_free++;
-#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
-#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
-#endif
+ smp_call_function(pte_free_smp_sync, NULL, 1);
- ptepage = alloc_pages(flags, 0);
- if (ptepage)
- clear_highpage(ptepage);
- return ptepage;
+ pgtable_free(table, shift);
}
-void pte_free_kernel(pte_t *pte)
+static void pte_free_rcu_callback(struct rcu_head *head)
{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- free_page((unsigned long)pte);
-}
+ struct pte_freelist_batch *batch =
+ container_of(head, struct pte_freelist_batch, rcu);
+ unsigned int i;
-void pte_free(struct page *ptepage)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- __free_page(ptepage);
-}
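+ /* Each entry packs a table address with its index-size shift in
+ * the low bits; MAX_PGTABLE_INDEX_SIZE masks the two apart again.
+ */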
+ for (i = 0; i < batch->index; i++) {
+ void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
-#ifndef CONFIG_PHYS_64BIT
-void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE);
+ pgtable_free(table, shift);
+ }
+
+ free_page((unsigned long)batch);
}
-void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+static void pte_free_submit(struct pte_freelist_batch *batch)
{
- phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
- return ioremap64(addr64, size);
+ INIT_RCU_HEAD(&batch->rcu);
+ call_rcu(&batch->rcu, pte_free_rcu_callback);
}
-#endif /* CONFIG_PHYS_64BIT */
-void __iomem *
-__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
- unsigned long v, i;
- phys_addr_t p;
- int err;
-
- /*
- * Choose an address to map it to.
- * Once the vmalloc system is running, we use it.
- * Before then, we use space going down from ioremap_base
- * (ioremap_bot records where we're up to).
- */
- p = addr & PAGE_MASK;
- size = PAGE_ALIGN(addr + size) - p;
-
- /*
- * If the address lies within the first 16 MB, assume it's in ISA
- * memory space
- */
- if (p < 16*1024*1024)
- p += _ISA_MEM_BASE;
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+ unsigned long pgf;
- /*
- * Don't allow anybody to remap normal RAM that we're using.
- * mem_init() sets high_memory so only do the check after that.
- */
- if (mem_init_done && (p < virt_to_phys(high_memory))) {
- printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
- __builtin_return_address(0));
- return NULL;
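+ /* If we are the only user of this mm, or it is only active on
+ * this CPU, no other CPU can be walking these page tables and
+ * the page can be freed immediately.
+ */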
+ if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
+ pgtable_free(table, shift);
+ return;
}
- if (size == 0)
- return NULL;
-
- /*
- * Is it already mapped? Perhaps overlapped by a previous
- * BAT mapping. If the whole area is mapped then we're done,
- * otherwise remap it since we want to keep the virt addrs for
- * each request contiguous.
- *
- * We make the assumption here that if the bottom and top
- * of the range we want are mapped then it's mapped to the
- * same virt address (and this is contiguous).
- * -- Cort
- */
- if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
- goto out;
-
- if ((v = p_mapped_by_tlbcam(p)))
- goto out;
-
- if (mem_init_done) {
- struct vm_struct *area;
- area = get_vm_area(size, VM_IOREMAP);
- if (area == 0)
- return NULL;
- v = (unsigned long) area->addr;
- } else {
- v = (ioremap_bot -= size);
+ if (*batchp == NULL) {
+ *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+ if (*batchp == NULL) {
+ pgtable_free_now(table, shift);
+ return;
+ }
+ (*batchp)->index = 0;
}
-
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= _PAGE_KERNEL;
- if (flags & _PAGE_NO_CACHE)
- flags |= _PAGE_GUARDED;
-
- /*
- * Should check if it is a candidate for a BAT mapping
- */
-
- err = 0;
- for (i = 0; i < size && err == 0; i += PAGE_SIZE)
- err = map_page(v+i, p+i, flags);
- if (err) {
- if (mem_init_done)
- vunmap((void *)v);
- return NULL;
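+ /* Pack the index-size shift into the low bits of the table
+ * address; the RCU callback masks it back out with
+ * MAX_PGTABLE_INDEX_SIZE.
+ */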
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf = (unsigned long)table | shift;
+ (*batchp)->tables[(*batchp)->index++] = pgf;
+ if ((*batchp)->index == PTE_FREELIST_SIZE) {
+ pte_free_submit(*batchp);
+ *batchp = NULL;
}
-
-out:
- return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
-void iounmap(volatile void __iomem *addr)
+void pte_free_finish(void)
{
- /*
- * If mapped by BATs then there is nothing to do.
- * Calling vfree() generates a benign warning.
- */
- if (v_mapped_by_bats((unsigned long)addr)) return;
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
- if (addr > high_memory && (unsigned long) addr < ioremap_bot)
- vunmap((void *) (PAGE_MASK & (unsigned long)addr));
+ if (*batchp == NULL)
+ return;
+ pte_free_submit(*batchp);
+ *batchp = NULL;
}
-void __iomem *ioport_map(unsigned long port, unsigned int len)
+#endif /* CONFIG_SMP */
+
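+/* True when the current trap came in through the instruction storage
+ * fault vector (0x400), i.e. the access that faulted was an instruction
+ * fetch.
+ */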
+static inline int is_exec_fault(void)
{
- return (void __iomem *) (port + _IO_BASE);
+ return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}
-void ioport_unmap(void __iomem *addr)
+/* We only try to do i/d cache coherency on stuff that looks like
+ * reasonably "normal" PTEs. We currently require a PTE to be present
+ * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that
+ * on userspace PTEs
+ */
+static inline int pte_looks_normal(pte_t pte)
{
- /* Nothing to do */
+ return (pte_val(pte) &
+ (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
+ (_PAGE_PRESENT | _PAGE_USER);
}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
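+/* Translate a PTE to its struct page, or return NULL if the pfn is
+ * invalid or the page is reserved (i.e. not ordinary RAM).
+ */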
+struct page *maybe_pte_to_page(pte_t pte)
{
- pmd_t *pd;
- pte_t *pg;
- int err = -ENOMEM;
-
- spin_lock(&init_mm.page_table_lock);
- /* Use upper 10 bits of VA to index the first level map */
- pd = pmd_offset(pgd_offset_k(va), va);
- /* Use middle 10 bits of VA to index the second-level map */
- pg = pte_alloc_kernel(&init_mm, pd, va);
- if (pg != 0) {
- err = 0;
- set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
- if (mem_init_done)
- flush_HPTE(0, va, pmd_val(*pd));
- }
- spin_unlock(&init_mm.page_table_lock);
- return err;
+ unsigned long pfn = pte_pfn(pte);
+ struct page *page;
+
+ if (unlikely(!pfn_valid(pfn)))
+ return NULL;
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ return NULL;
+ return page;
}
-/*
- * Map in all of physical memory starting at KERNELBASE.
+#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
+
+/* The server-style MMU handles coherency when hashing if HW exec permission
+ * is supported per page (currently 64-bit only). If not, then we always
+ * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
+ * support fall into the same category.
*/
-void __init mapin_ram(void)
+
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
- unsigned long v, p, s, f;
-
- s = mmu_mapin_ram();
- v = KERNELBASE + s;
- p = PPC_MEMSTART + s;
- for (; s < total_lowmem; s += PAGE_SIZE) {
- if ((char *) v >= _stext && (char *) v < etext)
- f = _PAGE_RAM_TEXT;
- else
- f = _PAGE_RAM;
- map_page(v, p, f);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
+ pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+ if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
+ cpu_has_feature(CPU_FTR_NOEXECUTE))) {
+ struct page *pg = maybe_pte_to_page(pte);
+ if (!pg)
+ return pte;
+ if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+ /* On 8xx, cache control instructions (particularly
+ * "dcbst" from flush_dcache_icache) fault as write
+ * operation if there is an unpopulated TLB entry
+ * for the address in question. To workaround that,
+ * we invalidate the TLB here, thus avoiding dcbst
+ * misbehaviour.
+ */
+ /* 8xx doesn't care about PID, size or ind args */
+ _tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
+ flush_dcache_icache_page(pg);
+ set_bit(PG_arch_1, &pg->flags);
+ }
}
+ return pte;
}
-/* is x a power of 2? */
-#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
+ int dirty)
+{
+ return pte;
+}
-/* is x a power of 4? */
-#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
+#else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */
-/*
- * Set up a mapping for a block of I/O.
- * virt, phys, size must all be page-aligned.
- * This should only be called before ioremap is called.
+/* Embedded type MMU with HW exec support. This is a bit more complicated
+ * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
+ * instead we "filter out" the exec permission for non-clean pages.
*/
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
- int i;
+ struct page *pg;
+
+ /* No exec permission in the first place, move on */
+ if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
+ return pte;
+
+ /* If you set _PAGE_EXEC on weird pages you're on your own */
+ pg = maybe_pte_to_page(pte);
+ if (unlikely(!pg))
+ return pte;
+
+ /* If the page is clean, we move on */
+ if (test_bit(PG_arch_1, &pg->flags))
+ return pte;
+
+ /* If it's an exec fault, we flush the cache and make it clean */
+ if (is_exec_fault()) {
+ flush_dcache_icache_page(pg);
+ set_bit(PG_arch_1, &pg->flags);
+ return pte;
+ }
+
+ /* Else, we filter out _PAGE_EXEC */
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
+}
- if (virt > KERNELBASE && virt < ioremap_bot)
- ioremap_bot = ioremap_base = virt;
+static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
+ int dirty)
+{
+ struct page *pg;
-#ifdef HAVE_BATS
- /*
- * Use a BAT for this if possible...
+ /* So here, we only care about exec faults, as we use them
+ * to recover lost _PAGE_EXEC and perform I$/D$ coherency
+ * if necessary. Also if _PAGE_EXEC is already set, same deal,
+ * we just bail out
*/
- if (io_bat_index < 2 && is_power_of_2(size)
- && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
- setbat(io_bat_index, virt, phys, size, flags);
- ++io_bat_index;
- return;
- }
-#endif /* HAVE_BATS */
+ if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
+ return pte;
-#ifdef HAVE_TLBCAM
- /*
- * Use a CAM for this if possible...
+#ifdef CONFIG_DEBUG_VM
+ /* So this is an exec fault, _PAGE_EXEC is not set. If it was
+ * an error we would have bailed out earlier in do_page_fault()
+ * but let's make sure of it
*/
- if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
- && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
- settlbcam(tlbcam_index, virt, phys, size, flags, 0);
- ++tlbcam_index;
- return;
- }
-#endif /* HAVE_TLBCAM */
+ if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
+ return pte;
+#endif /* CONFIG_DEBUG_VM */
- /* No BATs available, put it in the page tables. */
- for (i = 0; i < size; i += PAGE_SIZE)
- map_page(virt + i, phys + i, flags);
-}
+ /* If you set _PAGE_EXEC on weird pages you're on your own */
+ pg = maybe_pte_to_page(pte);
+ if (unlikely(!pg))
+ goto bail;
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int retval = 0;
-
- pgd = pgd_offset(mm, addr & PAGE_MASK);
- if (pgd) {
- pmd = pmd_offset(pgd, addr & PAGE_MASK);
- if (pmd_present(*pmd)) {
- pte = pte_offset_map(pmd, addr & PAGE_MASK);
- if (pte) {
- retval = 1;
- *ptep = pte;
- /* XXX caller needs to do pte_unmap, yuck */
- }
- }
- }
- return(retval);
-}
+ /* If the page is already clean, we move on */
+ if (test_bit(PG_arch_1, &pg->flags))
+ goto bail;
-/* Find physical address for this virtual address. Normally used by
- * I/O functions, but anyone can call it.
- */
-unsigned long iopa(unsigned long addr)
-{
- unsigned long pa;
+ /* Clean the page and set PG_arch_1 */
+ flush_dcache_icache_page(pg);
+ set_bit(PG_arch_1, &pg->flags);
- /* I don't know why this won't work on PMacs or CHRP. It
- * appears there is some bug, or there is some implicit
- * mapping done not properly represented by BATs or in page
- * tables.......I am actively working on resolving this, but
- * can't hold up other stuff. -- Dan
- */
- pte_t *pte;
- struct mm_struct *mm;
+ bail:
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
- /* Check the BATs */
- pa = v_mapped_by_bats(addr);
- if (pa)
- return pa;
+#endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */
- /* Allow mapping of user addresses (within the thread)
- * for DMA if necessary.
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+#ifdef CONFIG_DEBUG_VM
+ WARN_ON(pte_present(*ptep));
+#endif
+ /* Note: mm->context.id might not yet have been assigned as
+ * this context might not have been activated yet when this
+ * is called.
*/
- if (addr < TASK_SIZE)
- mm = current->mm;
- else
- mm = &init_mm;
-
- pa = 0;
- if (get_pteptr(mm, addr, &pte)) {
- pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
- pte_unmap(pte);
- }
+ pte = set_pte_filter(pte, addr);
- return(pa);
+ /* Perform the setting of the PTE */
+ __set_pte_at(mm, addr, ptep, pte, 0);
}
-/* This is will find the virtual address for a physical one....
- * Swiped from APUS, could be dangerous :-).
- * This is only a placeholder until I really find a way to make this
- * work. -- Dan
+/*
+ * This is called when relaxing access to a PTE. It's also called in the page
+ * fault path when we don't hit any of the major fault cases, i.e. a minor
+ * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
+ * handled those two for us; we additionally deal with missing execute
+ * permission here on some processors.
*/
-unsigned long
-mm_ptov (unsigned long paddr)
+int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty)
{
- unsigned long ret;
-#if 0
- if (paddr < 16*1024*1024)
- ret = ZTWO_VADDR(paddr);
- else {
- int i;
-
- for (i = 0; i < kmap_chunk_count;){
- unsigned long phys = kmap_chunks[i++];
- unsigned long size = kmap_chunks[i++];
- unsigned long virt = kmap_chunks[i++];
- if (paddr >= phys
- && paddr < (phys + size)){
- ret = virt + paddr - phys;
- goto exit;
- }
- }
-
- ret = (unsigned long) __va(paddr);
+ int changed;
+ entry = set_access_flags_filter(entry, vma, dirty);
+ changed = !pte_same(*(ptep), entry);
+ if (changed) {
+ if (!(vma->vm_flags & VM_HUGETLB))
+ assert_pte_locked(vma->vm_mm, address);
+ __ptep_set_access_flags(ptep, entry);
+ flush_tlb_page_nohash(vma, address);
}
-exit:
-#ifdef DEBUGPV
- printk ("PTOV(%lx)=%lx\n", paddr, ret);
-#endif
-#else
- ret = (unsigned long)paddr + KERNELBASE;
-#endif
- return ret;
+ return changed;
+}
+
+#ifdef CONFIG_DEBUG_VM
+void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (mm == &init_mm)
+ return;
+ pgd = mm->pgd + pgd_index(addr);
+ BUG_ON(pgd_none(*pgd));
+ pud = pud_offset(pgd, addr);
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, addr);
+ BUG_ON(!pmd_present(*pmd));
+ assert_spin_locked(pte_lockptr(mm, pmd));
}
+#endif /* CONFIG_DEBUG_VM */