x86: cleanup ioremap includes
arch/x86/mm/ioremap_32.c
1 /*
2  * Re-map IO memory to kernel address space so that we can access it.
3  * This is needed for high PCI addresses that aren't mapped in the
4  * 640k-1MB IO memory area on PC's
5  *
6  * (C) Copyright 1995 1996 Linus Torvalds
7  */
8
9 #include <linux/init.h>
10 #include <linux/io.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
14
15 #include <asm/cacheflush.h>
16 #include <asm/e820.h>
17 #include <asm/fixmap.h>
18 #include <asm/pgtable.h>
19 #include <asm/tlbflush.h>
20
21 /*
22  * Remap an arbitrary physical address space into the kernel virtual
23  * address space. Needed when the kernel wants to access high addresses
24  * directly.
25  *
26  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
27  * have to convert them into an offset in a page-aligned mapping, but the
28  * caller shouldn't need to know that small detail.
29  */
30 void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
31                         unsigned long flags)
32 {
33         void __iomem *addr;
34         struct vm_struct *area;
35         unsigned long offset, last_addr;
36         pgprot_t prot;
37
38         /* Don't allow wraparound or zero size */
39         last_addr = phys_addr + size - 1;
40         if (!size || last_addr < phys_addr)
41                 return NULL;
42
43         /*
44          * Don't remap the low PCI/ISA area, it's always mapped..
45          */
46         if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
47                 return (void __iomem *) phys_to_virt(phys_addr);
48
49         /*
50          * Don't allow anybody to remap normal RAM that we're using..
51          */
52         if (phys_addr <= virt_to_phys(high_memory - 1)) {
53                 char *t_addr, *t_end;
54                 struct page *page;
55
56                 t_addr = __va(phys_addr);
57                 t_end = t_addr + (size - 1);
58
59                 for (page = virt_to_page(t_addr);
60                      page <= virt_to_page(t_end); page++)
61                         if (!PageReserved(page))
62                                 return NULL;
63         }
64
65         prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
66
67         /*
68          * Mappings have to be page-aligned
69          */
70         offset = phys_addr & ~PAGE_MASK;
71         phys_addr &= PAGE_MASK;
72         size = PAGE_ALIGN(last_addr+1) - phys_addr;
73
74         /*
75          * Ok, go for it..
76          */
77         area = get_vm_area(size, VM_IOREMAP | (flags << 20));
78         if (!area)
79                 return NULL;
80         area->phys_addr = phys_addr;
81         addr = (void __iomem *) area->addr;
82         if (ioremap_page_range((unsigned long) addr,
83                                (unsigned long) addr + size, phys_addr, prot)) {
84                 vunmap((void __force *) addr);
85                 return NULL;
86         }
87         return (void __iomem *) (offset + (char __iomem *)addr);
88 }
89 EXPORT_SYMBOL(__ioremap);
90
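/*
 * Worked example of the page-alignment bookkeeping above, assuming
 * PAGE_SIZE == 4096 (the physical address and length are made up):
 *
 *      __ioremap(0xfebf0123, 0x10, 0)
 *              last_addr = 0xfebf0123 + 0x10 - 1              = 0xfebf0132
 *              offset    = 0xfebf0123 & ~PAGE_MASK            = 0x123
 *              phys_addr = 0xfebf0123 &  PAGE_MASK            = 0xfebf0000
 *              size      = PAGE_ALIGN(0xfebf0133) - phys_addr = 0x1000
 *
 * so a single page is mapped and the caller gets area->addr + 0x123:
 * the returned pointer refers to the exact byte that was requested even
 * though the underlying mapping is page-granular.
 */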
91 /**
92  * ioremap_nocache     -   map bus memory into CPU space
93  * @offset:    bus address of the memory
94  * @size:      size of the resource to map
95  *
96  * ioremap_nocache performs a platform specific sequence of operations to
97  * make bus memory CPU accessible via the readb/readw/readl/writeb/
98  * writew/writel functions and the other mmio helpers. The returned
99  * address is not guaranteed to be usable directly as a virtual
100  * address.
101  *
102  * This version of ioremap ensures that the memory is marked uncachable
103  * on the CPU as well as honouring existing caching rules from things like
104  * the PCI bus. Note that there are other caches and buffers on many
105  * busses. In particular, driver authors should read up on PCI writes.
106  *
107  * It's useful if some control registers are in such an area and
108  * write combining or read caching is not desirable.
109  *
110  * Must be freed with iounmap.
111  */
112 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
113 {
114         unsigned long last_addr;
115         void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
116
117         if (!p)
118                 return p;
119
120         /* Guaranteed to be > phys_addr, as per __ioremap() */
121         last_addr = phys_addr + size - 1;
122
123         if (last_addr < virt_to_phys(high_memory) - 1) {
124                 struct page *ppage = virt_to_page(__va(phys_addr));
125                 unsigned long npages;
126
127                 phys_addr &= PAGE_MASK;
128
129                 /* This might overflow and become zero.. */
130                 last_addr = PAGE_ALIGN(last_addr);
131
132                 /* .. but that's ok, because modulo-2**n arithmetic will make
133                  * the page-aligned "last - first" come out right.
134                  */
135                 npages = (last_addr - phys_addr) >> PAGE_SHIFT;
136
137                 if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
138                         iounmap(p);
139                         p = NULL;
140                 }
141                 global_flush_tlb();
142         }
143
144         return p;
145 }
146 EXPORT_SYMBOL(ioremap_nocache);
147
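/*
 * Usage sketch: a hypothetical driver probe routine mapping a device
 * register window with ioremap_nocache() and touching it through the
 * mmio helpers described above.  The physical address, window size and
 * register offset are made up.
 *
 *      static int example_probe(void)
 *      {
 *              void __iomem *regs;
 *              u32 status;
 *
 *              regs = ioremap_nocache(0xfebf0000UL, 0x1000);
 *              if (!regs)
 *                      return -ENOMEM;
 *
 *              status = readl(regs + 0x10);
 *              writel(status | 0x1, regs + 0x10);
 *
 *              iounmap(regs);
 *              return 0;
 *      }
 */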
148 /**
149  * iounmap - Free an IO remapping
150  * @addr: virtual address from ioremap_*
151  *
152  * Caller must ensure there is only one unmapping for the same pointer.
153  */
154 void iounmap(volatile void __iomem *addr)
155 {
156         struct vm_struct *p, *o;
157
158         if ((void __force *)addr <= high_memory)
159                 return;
160
161         /*
162          * __ioremap special-cases the PCI/ISA range by not instantiating a
163          * vm_area and by simply returning an address into the kernel mapping
164          * of ISA space.   So handle that here.
165          */
166         if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
167             addr < phys_to_virt(ISA_END_ADDRESS))
168                 return;
169
170         addr = (volatile void __iomem *)
171                 (PAGE_MASK & (unsigned long __force)addr);
172
173         /* Use the vm area unlocked, assuming the caller
174            ensures there isn't another iounmap for the same address
175            in parallel. Reuse of the virtual address is prevented by
176            leaving it in the global lists until we're done with it.
177            cpa takes care of the direct mappings. */
178         read_lock(&vmlist_lock);
179         for (p = vmlist; p; p = p->next) {
180                 if (p->addr == addr)
181                         break;
182         }
183         read_unlock(&vmlist_lock);
184
185         if (!p) {
186                 printk(KERN_ERR "iounmap: bad address %p\n", addr);
187                 dump_stack();
188                 return;
189         }
190
191         /* Reset the direct mapping. Can block */
192         if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
193                 change_page_attr(virt_to_page(__va(p->phys_addr)),
194                                  get_vm_area_size(p) >> PAGE_SHIFT,
195                                  PAGE_KERNEL);
196                 global_flush_tlb();
197         }
198
199         /* Finally remove it */
200         o = remove_vm_area((void *)addr);
201         BUG_ON(p != o || o == NULL);
202         kfree(p);
203 }
204 EXPORT_SYMBOL(iounmap);
205
206
207 int __initdata early_ioremap_debug;
208
209 static int __init early_ioremap_debug_setup(char *str)
210 {
211         early_ioremap_debug = 1;
212
213         return 0;
214 }
215 early_param("early_ioremap_debug", early_ioremap_debug_setup);
216
217 static __initdata int after_paging_init;
218 static __initdata unsigned long bm_pte[1024]
219                                 __attribute__((aligned(PAGE_SIZE)));
220
221 static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
222 {
223         return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
224 }
225
226 static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
227 {
228         return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
229 }
230
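/*
 * The two helpers above treat the boot page tables as a plain two-level
 * i386 layout: bits 31..22 of a virtual address select the page-directory
 * slot and bits 21..12 select the entry inside bm_pte[].  For an arbitrary
 * example address 0xfffb4000:
 *
 *      pgd index = 0xfffb4000 >> 22          = 1023
 *      pte index = (0xfffb4000 >> 12) & 1023 = 0x3b4
 *
 * All boot-time fixmap slots are meant to land in the single page table
 * bm_pte[], which early_ioremap_init() below installs into the
 * page-directory entry covering the FIX_BTMAP range.
 */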
231 void __init early_ioremap_init(void)
232 {
233         unsigned long *pgd;
234
235         if (early_ioremap_debug)
236                 printk(KERN_DEBUG "early_ioremap_init()\n");
237
238         pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
239         *pgd = __pa(bm_pte) | _PAGE_TABLE;
240         memset(bm_pte, 0, sizeof(bm_pte));
241         /*
242          * The boot-ioremap range spans multiple pgds, for which
243          * we are not prepared:
244          */
245         if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
246                 WARN_ON(1);
247                 printk(KERN_WARNING "pgd %p != %p\n",
248                        pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
249                 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
250                        fix_to_virt(FIX_BTMAP_BEGIN));
251                 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
252                        fix_to_virt(FIX_BTMAP_END));
253
254                 printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
255                 printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
256                        FIX_BTMAP_BEGIN);
257         }
258 }
259
260 void __init early_ioremap_clear(void)
261 {
262         unsigned long *pgd;
263
264         if (early_ioremap_debug)
265                 printk(KERN_DEBUG "early_ioremap_clear()\n");
266
267         pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
268         *pgd = 0;
269         __flush_tlb_all();
270 }
271
272 void __init early_ioremap_reset(void)
273 {
274         enum fixed_addresses idx;
275         unsigned long *pte, phys, addr;
276
277         after_paging_init = 1;
278         for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
279                 addr = fix_to_virt(idx);
280                 pte = early_ioremap_pte(addr);
281         if (*pte & _PAGE_PRESENT) {
282                         phys = *pte & PAGE_MASK;
283                         set_fixmap(idx, phys);
284                 }
285         }
286 }
287
288 static void __init __early_set_fixmap(enum fixed_addresses idx,
289                                    unsigned long phys, pgprot_t flags)
290 {
291         unsigned long *pte, addr = __fix_to_virt(idx);
292
293         if (idx >= __end_of_fixed_addresses) {
294                 BUG();
295                 return;
296         }
297         pte = early_ioremap_pte(addr);
298         if (pgprot_val(flags))
299                 *pte = (phys & PAGE_MASK) | pgprot_val(flags);
300         else
301                 *pte = 0;
302         __flush_tlb_one(addr);
303 }
304
305 static inline void __init early_set_fixmap(enum fixed_addresses idx,
306                                         unsigned long phys)
307 {
308         if (after_paging_init)
309                 set_fixmap(idx, phys);
310         else
311                 __early_set_fixmap(idx, phys, PAGE_KERNEL);
312 }
313
314 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
315 {
316         if (after_paging_init)
317                 clear_fixmap(idx);
318         else
319                 __early_set_fixmap(idx, 0, __pgprot(0));
320 }
321
322
323 int __initdata early_ioremap_nested;
324
325 static int __init check_early_ioremap_leak(void)
326 {
327         if (!early_ioremap_nested)
328                 return 0;
329
330         printk(KERN_WARNING
331                "Debug warning: early ioremap leak of %d areas detected.\n",
332                early_ioremap_nested);
333         printk(KERN_WARNING
334                "please boot with early_ioremap_debug and report the dmesg.\n");
335         WARN_ON(1);
336
337         return 1;
338 }
339 late_initcall(check_early_ioremap_leak);
340
341 void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
342 {
343         unsigned long offset, last_addr;
344         unsigned int nrpages, nesting;
345         enum fixed_addresses idx0, idx;
346
347         WARN_ON(system_state != SYSTEM_BOOTING);
348
349         nesting = early_ioremap_nested;
350         if (early_ioremap_debug) {
351                 printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
352                        phys_addr, size, nesting);
353                 dump_stack();
354         }
355
356         /* Don't allow wraparound or zero size */
357         last_addr = phys_addr + size - 1;
358         if (!size || last_addr < phys_addr) {
359                 WARN_ON(1);
360                 return NULL;
361         }
362
363         if (nesting >= FIX_BTMAPS_NESTING) {
364                 WARN_ON(1);
365                 return NULL;
366         }
367         early_ioremap_nested++;
368         /*
369          * Mappings have to be page-aligned
370          */
371         offset = phys_addr & ~PAGE_MASK;
372         phys_addr &= PAGE_MASK;
373         size = PAGE_ALIGN(last_addr + 1) - phys_addr;
374
375         /*
376          * Mappings have to fit in the FIX_BTMAP area.
377          */
378         nrpages = size >> PAGE_SHIFT;
379         if (nrpages > NR_FIX_BTMAPS) {
380                 WARN_ON(1);
381                 return NULL;
382         }
383
384         /*
385          * Ok, go for it..
386          */
387         idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
388         idx = idx0;
389         while (nrpages > 0) {
390                 early_set_fixmap(idx, phys_addr);
391                 phys_addr += PAGE_SIZE;
392                 --idx;
393                 --nrpages;
394         }
395         if (early_ioremap_debug)
396                 printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
397
398         return (void *) (offset + fix_to_virt(idx0));
399 }
400
401 void __init early_iounmap(void *addr, unsigned long size)
402 {
403         unsigned long virt_addr;
404         unsigned long offset;
405         unsigned int nrpages;
406         enum fixed_addresses idx;
407         unsigned int nesting;
408
409         nesting = --early_ioremap_nested;
410         WARN_ON(early_ioremap_nested < 0);
411
412         if (early_ioremap_debug) {
413                 printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
414                        size, nesting);
415                 dump_stack();
416         }
417
418         virt_addr = (unsigned long)addr;
419         if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
420                 WARN_ON(1);
421                 return;
422         }
423         offset = virt_addr & ~PAGE_MASK;
424         nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
425
426         idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
427         while (nrpages > 0) {
428                 early_clear_fixmap(idx);
429                 --idx;
430                 --nrpages;
431         }
432 }
433
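/*
 * Usage sketch: a hypothetical boot-time helper peeking at a small
 * physical range before the regular ioremap() machinery is available
 * (the address and length are made up).  Mappings come out of the
 * FIX_BTMAP fixmap window, may nest at most FIX_BTMAPS_NESTING levels
 * deep, and every early_ioremap() must be paired with an early_iounmap()
 * or the late_initcall leak check above will warn.
 *
 *      static int __init example_peek(void)
 *      {
 *              char buf[64];
 *              void *map;
 *
 *              map = early_ioremap(0x000f5a20UL, sizeof(buf));
 *              if (!map)
 *                      return -ENOMEM;
 *              memcpy(buf, map, sizeof(buf));
 *              early_iounmap(map, sizeof(buf));
 *              return 0;
 *      }
 */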
434 void __this_fixmap_does_not_exist(void)
435 {
436         WARN_ON(1);
437 }