#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
if (!pte_none(*pte))
goto bad;
- set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
- type->prot_pte_ext);
+ set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
phys_addr += PAGE_SIZE;
} while (pte++, addr += PAGE_SIZE, addr != end);
return 0;
return err;
}
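Aside (an inference, not stated in the patch): replacing type->prot_pte_ext with a literal 0 in the set_pte_ext() call suggests the memory-type bits now travel inside prot itself. In this file, prot is typically built straight from the mem_type table; a minimal sketch, assuming the get_mem_type() API:

    /* hedged sketch, not from this patch */
    const struct mem_type *type = get_mem_type(MT_DEVICE);
    pgprot_t prot = __pgprot(type->prot_pte);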
+int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+{
+ return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+}
+EXPORT_SYMBOL(ioremap_page);
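Since ioremap_page() is newly added and exported, a usage sketch may help. MY_UART_VIRT and MY_UART_PHYS are hypothetical constants, not part of this patch; get_mem_type() and MT_DEVICE come from <asm/mach/map.h>:

    #include <asm/mach/map.h>

    /* map exactly one page of device registers at a fixed virtual address */
    static void __init board_map_debug_uart(void)
    {
        ioremap_page(MY_UART_VIRT, MY_UART_PHYS, get_mem_type(MT_DEVICE));
    }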
void __check_kvm_seq(struct mm_struct *mm)
{
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
- unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+ unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
pgd_t *pgd;
flush_cache_vunmap(addr, end);
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
- pte_free_kernel(pmd_page_vaddr(pmd));
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
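The mask fix above deserves a worked example: ~SZ_1M clears only bit 20, whereas ~(SZ_1M - 1) rounds down to a 1MiB section boundary.

    /*
     * SZ_1M        = 0x00100000
     * ~SZ_1M       = 0xffefffff   (clears only bit 20)
     * ~(SZ_1M - 1) = 0xfff00000   (rounds down to 1MiB)
     *
     * For size = 0x00300000 (three sections):
     *   size & ~SZ_1M       == 0x00200000   -> last section never unmapped
     *   size & ~(SZ_1M - 1) == 0x00300000   -> correct end address
     */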
* caller shouldn't need to know that small detail.
*
* 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
+ * mapping. See <asm/pgtable.h> for more information.
*/
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
unsigned int mtype)
if (!type)
return NULL;
- size = PAGE_ALIGN(size);
+ /*
+ * Page align the mapping size, taking account of any offset.
+ */
+ size = PAGE_ALIGN(offset + size);
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
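Why the offset must be included in the alignment: a request that is small but straddles a page boundary needs two pages, not one. A worked example, assuming 4KiB pages (the numbers are made up):

    /*
     * phys = 0x40000f80  ->  pfn = 0x40000, offset = 0xf80
     * size = 0x100, so the region ends 0x1080 bytes into the mapping
     *
     *   PAGE_ALIGN(size)          == 0x1000  -> one page, too small
     *   PAGE_ALIGN(offset + size) == 0x2000  -> two pages, correct
     */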
if (!size || last_addr < phys_addr)
return NULL;
- /*
- * Page align the mapping size
- */
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
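For reference, __arm_ioremap() derives the pfn/offset pair it passes down using the standard ARM helpers (sketch):

    unsigned long offset = phys_addr & ~PAGE_MASK;   /* byte offset within the page */
    unsigned long pfn    = __phys_to_pfn(phys_addr); /* phys_addr >> PAGE_SHIFT */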
-void __iounmap(volatile void __iomem *addr)
+void __iounmap(volatile void __iomem *io_addr)
{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
-#endif
- unsigned int section_mapping = 0;
- addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
-
-#ifndef CONFIG_SMP
/*
* If this is a section based mapping we need to handle it
- * specially as the VM subysystem does not know how to handle
+ * specially as the VM subsystem does not know how to handle
* such a beast. We need the lock here b/c we need to clear
* all the mappings before the area can be reclaimed
* by someone else.
*/
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
- if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+ if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_ARM_SECTION_MAPPING) {
- *p = tmp->next;
unmap_area_sections((unsigned long)tmp->addr,
tmp->size);
- kfree(tmp);
- section_mapping = 1;
}
break;
}
}
write_unlock(&vmlist_lock);
#endif
- if (!section_mapping)
- vunmap((void __force *)addr);
+ vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
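A hypothetical driver-side view of the whole lifecycle; DEV_PHYS and DEV_SIZE are made-up constants, and on ARM iounmap() ends up in the __iounmap() shown above:

    #include <linux/errno.h>
    #include <linux/io.h>

    static int example_probe(void)
    {
        void __iomem *regs = ioremap(DEV_PHYS, DEV_SIZE);

        if (!regs)
            return -ENOMEM;

        writel(1, regs + 0x4);  /* hypothetical control register */
        iounmap(regs);          /* tears the mapping down again */
        return 0;
    }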