/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);
	if (!err)
		global_flush_tlb();

	return err;
}
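/*
 * Illustration (hypothetical address, sketch only): mapping one page
 * of RAM at 0x1000000 uncached, e.g. via
 *
 *	__ioremap(0x1000000UL, PAGE_SIZE, _PAGE_PCD);
 *
 * would create a second, uncached alias of a page that the kernel's
 * identity mapping still has cached.  ioremap_change_attr() rewrites
 * the identity-map pte to the same pgprot so both aliases agree, and
 * __ioremap() backs the new mapping out again if that fails.
 */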
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
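/*
 * Offset handling sketch (hypothetical addresses, illustrative only):
 * a request that is not page-aligned is rounded down to a page
 * boundary, and the sub-page offset is added back to the returned
 * pointer, so the caller never sees the alignment fixup:
 *
 *	void __iomem *p = __ioremap(0xfebc1004UL, 8, 0);
 *	if (p) {
 *		u32 lo = readl(p);
 *		u32 hi = readl(p + 4);
 *		iounmap(p);
 *	}
 */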
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
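/*
 * Usage sketch (hypothetical PCI device; REG_CTRL is a made-up
 * register offset): map BAR 0 uncached, access a control register
 * through the mmio helpers, and free the mapping with iounmap():
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	readl(regs + REG_CTRL);
 *	iounmap(regs);
 */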
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller
	 * ensures there isn't another iounmap for the same address
	 * in parallel. Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 * cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
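/*
 * Note (illustrative): pointers into the low PCI/ISA hole never get a
 * vm_area, since __ioremap() just returns phys_to_virt() for them, so
 * the ISA-range early return above makes this pairing a no-op:
 *
 *	void __iomem *v = ioremap(0xa0000, 0x1000);
 *	iounmap(v);
 */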
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
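/*
 * Index arithmetic sketch (two-level i386 paging assumed): with 4k
 * pages a pgd slot covers 4MB, so bits 22-31 of the address select
 * the pgd entry and bits 12-21 select one of the 1024 ptes in bm_pte.
 * E.g. for a hypothetical addr of 0xfffd6000:
 *
 *	(0xfffd6000 >> 22) & 1023		== 1023	(pgd slot)
 *	(0xfffd6000 >> PAGE_SHIFT) & 1023	==  982	(pte slot)
 */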
void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));

	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* carry over boot-time mappings that are still present */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
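/*
 * Boot-time usage sketch (illustrative; table_phys is a hypothetical
 * firmware table address): before the real ioremap() works, callers
 * peek at firmware data with strictly nested map/copy/unmap pairs:
 *
 *	char buf[64];
 *	void *p = early_ioremap(table_phys, sizeof(buf));
 *	if (p) {
 *		memcpy(buf, p, sizeof(buf));
 *		early_iounmap(p, sizeof(buf));
 *	}
 */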
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
/* Referenced by fix_to_virt() for out-of-range constant indexes. */
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}