#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/hardware.h>
-#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/sizes.h>
-static inline void
-remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t pgprot)
+#include <asm/mach/map.h>
+#include "mm.h"
+
+/*
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in the vm_struct->flags field.
+ */
+#define VM_ARM_SECTION_MAPPING 0x80000000
+
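+/*
+ * Populate the PTEs covering [addr, end) within a single PMD entry,
+ * allocating the PTE table first if necessary.  Each page is mapped to
+ * the given physical address using the protection bits of the memory
+ * type.  The range must not already be mapped.
+ */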
+static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+ unsigned long phys_addr, const struct mem_type *type)
{
- unsigned long end;
+ pgprot_t prot = __pgprot(type->prot_pte);
+ pte_t *pte;
+
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- BUG_ON(address >= end);
do {
if (!pte_none(*pte))
goto bad;
- set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
- address += PAGE_SIZE;
+ set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
phys_addr += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
- return;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ return 0;
bad:
- printk("remap_area_pte: page already exists\n");
+ printk(KERN_CRIT "remap_area_pte: page already exists\n");
BUG();
}
-static inline int
-remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
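+/*
+ * Walk the PMD entries covering [addr, end) beneath one PGD entry,
+ * allocating the PMD if necessary, and fill in the page mappings for
+ * each sub-range via remap_area_pte().
+ */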
+static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys_addr,
+ const struct mem_type *type)
{
- unsigned long end;
- pgprot_t pgprot;
+ unsigned long next;
+ pmd_t *pmd;
+ int ret = 0;
+
+ pmd = pmd_alloc(&init_mm, pgd, addr);
+ if (!pmd)
+ return -ENOMEM;
+
+ do {
+ next = pmd_addr_end(addr, end);
+ ret = remap_area_pte(pmd, addr, next, phys_addr, type);
+ if (ret)
+ return ret;
+ phys_addr += next - addr;
+ } while (pmd++, addr = next, addr != end);
+ return ret;
+}
+
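+/*
+ * Create a page-granular mapping of 'size' bytes at virtual address
+ * 'start', beginning at page frame 'pfn', by walking the kernel page
+ * tables one PGD entry at a time.
+ */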
+static int remap_area_pages(unsigned long start, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = start;
+ unsigned long next, end = start + size;
+ unsigned long phys_addr = __pfn_to_phys(pfn);
+ pgd_t *pgd;
+ int err = 0;
- address &= ~PGDIR_MASK;
- end = address + size;
+ BUG_ON(addr >= end);
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ err = remap_area_pmd(pgd, addr, next, phys_addr, type);
+ if (err)
+ break;
+ phys_addr += next - addr;
+ } while (pgd++, addr = next, addr != end);
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
+ return err;
+}
- phys_addr -= address;
- BUG_ON(address >= end);
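+/*
+ * Map a single page of I/O memory at the given kernel virtual address
+ * using the supplied memory type.
+ */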
+int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+{
+ return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+}
+EXPORT_SYMBOL(ioremap_page);
+
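+/*
+ * Bring a task's view of the kernel (vmalloc/ioremap) mappings up to
+ * date by copying the init_mm PGD entries for the VMALLOC region into
+ * this mm.  Repeat until the kernel mapping sequence number is stable,
+ * in case another section mapping changes while we copy.
+ */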
+void __check_kvm_seq(struct mm_struct *mm)
+{
+ unsigned int seq;
- pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
do {
- pte_t * pte = pte_alloc_kernel(pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
+ seq = init_mm.context.kvm_seq;
+ memcpy(pgd_offset(mm, VMALLOC_START),
+ pgd_offset_k(VMALLOC_START),
+ sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+ pgd_index(VMALLOC_START)));
+ mm->context.kvm_seq = seq;
+ } while (seq != init_mm.context.kvm_seq);
+}
+
+#ifndef CONFIG_SMP
+/*
+ * Section support is unsafe on SMP - if you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, if (for example) an interrupt comes in on one of those other
+ * CPUs which requires the new ioremap'd region to be referenced, the CPU
+ * will reference the _old_ region.
+ *
+ * Note that get_vm_area() allocates a 4K guard page, so we need to mask
+ * the size back to 1MB aligned or we will overflow in the loop below.
+ */
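+/*
+ * Tear down any section mappings covering [virt, virt + size) and free
+ * the second-level page tables attached to those entries.
+ */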
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+ unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
+ pgd_t *pgd;
+
+ flush_cache_vunmap(addr, end);
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+
+ pmd = *pmdp;
+ if (!pmd_none(pmd)) {
+ /*
+ * Clear the PMD from the page table, and
+ * increment the kvm sequence so others
+ * notice this change.
+ *
+ * Note: this is still racy on SMP machines.
+ */
+ pmd_clear(pmdp);
+ init_mm.context.kvm_seq++;
+
+ /*
+ * Free the page table, if there was one.
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
+ }
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
+ /*
+ * Ensure that the active_mm is up to date - we want to
+ * catch any use-after-iounmap cases.
+ */
+ if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
+ __check_kvm_seq(current->active_mm);
+
+ flush_tlb_kernel_range(virt, end);
+}
+
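+/*
+ * Map [virt, virt + size) with 1MB section entries.  Each ARM PGD slot
+ * covers 2MB, so the two section entries in each slot are written as a
+ * pair before the PMD is flushed.
+ */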
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = virt, end = virt + size;
+ pgd_t *pgd;
+
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t *pmd = pmd_offset(pgd, addr);
+
+ pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
+ pfn += SZ_1M >> PAGE_SHIFT;
+ pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
+ pfn += SZ_1M >> PAGE_SHIFT;
+ flush_pmd_entry(pmd);
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
return 0;
}
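+/*
+ * Map [virt, virt + size) with 16MB supersection entries.  A
+ * supersection is described by 16 identical 1MB descriptors, so each
+ * iteration writes the same value into eight consecutive PGD slots
+ * (16 section entries) before advancing to the next 16MB frame.
+ */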
static int
-remap_area_pages(unsigned long start, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
+remap_area_supersections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
{
- unsigned long address = start;
- unsigned long end = start + size;
- int err = 0;
- pgd_t * dir;
+ unsigned long addr = virt, end = virt + size;
+ pgd_t *pgd;
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
- BUG_ON(address >= end);
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+
+ pgd = pgd_offset_k(virt);
do {
- pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
- if (!pmd) {
- err = -ENOMEM;
- break;
- }
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags)) {
- err = -ENOMEM;
- break;
+ unsigned long super_pmd_val, i;
+
+ super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
+ PMD_SECT_SUPER;
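+		/*
+		 * Bits [35:32] of the physical address are placed in
+		 * bits [23:20] of the supersection descriptor.
+		 */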
+ super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
+
+ for (i = 0; i < 8; i++) {
+ pmd_t *pmd = pmd_offset(pgd, addr);
+
+ pmd[0] = __pmd(super_pmd_val);
+ pmd[1] = __pmd(super_pmd_val);
+ flush_pmd_entry(pmd);
+
+ addr += PGDIR_SIZE;
+ pgd++;
}
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
+ pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
+ } while (addr < end);
- flush_cache_vmap(start, end);
- return err;
+ return 0;
}
+#endif
+
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
*
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
+ * 'mtype' is the memory type index (MT_*) describing the page
+ * protections to use for this mapping; see <asm/mach/map.h>.
*/
void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
- unsigned long align)
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
{
- void * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+ const struct mem_type *type;
+ int err;
+ unsigned long addr;
+ struct vm_struct * area;
/*
- * Ok, go for it..
+ * High mappings must be supersection aligned
*/
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
+ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vfree(addr);
+
+ type = get_mem_type(mtype);
+ if (!type)
return NULL;
- }
- return (void __iomem *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-void __iounmap(void __iomem *addr)
-{
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
-}
-EXPORT_SYMBOL(__iounmap);
+ /*
+ * Page align the mapping size, taking account of any offset.
+ */
+ size = PAGE_ALIGN(offset + size);
-#ifdef __io
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
- return __io(port);
-}
-EXPORT_SYMBOL(ioport_map);
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
+#ifndef CONFIG_SMP
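+	/*
+	 * Supersection descriptors carry no domain field (they implicitly
+	 * use domain 0) and are needed to reach physical addresses above
+	 * 4GB, so use them only when DOMAIN_IO is 0 and the CPU supports
+	 * them (ARMv6 with the extended page table format, or XScale3).
+	 * Otherwise use plain sections when the addresses and size are
+	 * suitably aligned, and fall back to page mappings.
+	 */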
+ if (DOMAIN_IO == 0 &&
+ (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+ cpu_is_xsc3()) && pfn >= 0x100000 &&
+ !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_supersections(addr, pfn, size, type);
+ } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_sections(addr, pfn, size, type);
+ } else
#endif
+ err = remap_area_pages(addr, pfn, size, type);
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#include <linux/ioport.h>
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+ flush_cache_vmap(addr, addr + size);
+ return (void __iomem *) (offset + addr);
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
- if (!len || !start)
+ /*
+ * Don't allow wraparound or zero size
+ */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
return NULL;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return ioport_map(start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
- return NULL;
+
+ return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
-EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(__arm_ioremap);
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+void __iounmap(volatile void __iomem *io_addr)
{
- if ((unsigned long)addr >= VMALLOC_START &&
- (unsigned long)addr < VMALLOC_END)
- iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+#ifndef CONFIG_SMP
+ struct vm_struct **p, *tmp;
+
+ /*
+ * If this is a section based mapping we need to handle it
+ * specially as the VM subsystem does not know how to handle
+ * such a beast.  We need the lock here because we need to clear
+ * all the mappings before the area can be reclaimed
+ * by someone else.
+ */
+ write_lock(&vmlist_lock);
+ for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+ if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+ if (tmp->flags & VM_ARM_SECTION_MAPPING) {
+ unmap_area_sections((unsigned long)tmp->addr,
+ tmp->size);
+ }
+ break;
+ }
+ }
+ write_unlock(&vmlist_lock);
#endif
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(__iounmap);