/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <mangle-port.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  On the other hand, values
 * that raw operations are working on may or may not have been swapped by the
 * bus hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(x)	(x)
# define __raw_ioswabw(x)	(x)
# define __raw_ioswabl(x)	(x)
# define __raw_ioswabq(x)	(x)
# define ____raw_ioswabq(x)	(x)

/*
 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
 * less sane hardware forces software to fiddle with this...
 *
 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
 * you can't have the numerical value of data and byte addresses within
 * multibyte quantities both preserved at the same time.  Hence two
 * variations of functions: non-prefixed ones that preserve the value
 * and prefixed ones that preserve byte addresses.  The latter are
 * typically used for moving raw data between a peripheral and memory (cf.
 * string I/O functions), hence the "mem_" prefix.
 */
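
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * on a big-endian host with byte-swapping bus hardware, readl() returns the
 * numerical value of a little-endian device register, while the __raw_ and
 * mem_-prefixed accessors preserve byte addresses so buffers keep their
 * layout.  The pointers and offsets below are made up.
 *
 *	u32 id  = readl(regs + 0x00);		value-preserving access
 *	u32 raw = __raw_readl(flash + 0x00);	byte-address-preserving access
 */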
#if defined(CONFIG_SWAP_IO_SPACE)

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16-bit values in hardware, but
 * not 32-bit ones.  Go figure... Can't tell without documentation.
 */
#  define ioswabw(x)		(x)
#  define mem_ioswabw(x)	le16_to_cpu(x)
# else
#  define ioswabw(x)		le16_to_cpu(x)
#  define mem_ioswabw(x)	(x)
# endif
# define ioswabl(x)		le32_to_cpu(x)
# define mem_ioswabl(x)		(x)
# define ioswabq(x)		le64_to_cpu(x)
# define mem_ioswabq(x)		(x)

#else

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# define ioswabw(x)		(x)
# define mem_ioswabw(x)		cpu_to_le16(x)
# define ioswabl(x)		(x)
# define mem_ioswabl(x)		cpu_to_le32(x)
# define ioswabq(x)		(x)
# define mem_ioswabq(x)		cpu_to_le64(x)

#endif

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on any
 * sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base) \
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
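
/*
 * Illustrative sketch (not part of the original header): platform setup code
 * normally points the port base at an uncached mapping of its I/O window
 * early during boot.  The bus address below is made up.
 *
 *	set_io_port_base(CKSEG1ADDR(0x1fd00000));
 */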

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}
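
/*
 * Illustrative sketch (not part of the original header): the mapping is a
 * simple linear offset, so the two helpers are inverses for directly mapped
 * memory such as kmalloc() allocations.
 *
 *	void *buf = kmalloc(256, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(pa) != buf);
 */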

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these busses.
 */
extern unsigned long isa_slot_offset;

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
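
/*
 * Illustrative sketch (not part of the original header): mapping a device's
 * register window and accessing it through the mmio helpers.  The bus
 * address, size and register offsets below are made up.
 *
 *	void __iomem *regs = ioremap(0x1f010000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */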

/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(volatile void __iomem *addr)
{
#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
 \
static inline void pfx##write##bwlq(type val, \
				    volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
		type __tmp; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set mips3" "\t\t# __writeq""\n\t" \
			"dsll32 %L0, %L0, 0" "\n\t" \
			"dsrl32 %L0, %L0, 0" "\n\t" \
			"dsll32 %M0, %M0, 0" "\n\t" \
			"or %L0, %L0, %M0" "\n\t" \
			"sd %L0, %2" "\n\t" \
			".set mips0" "\n" \
			: "=r" (__tmp) \
			: "0" (__val), "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else \
		BUG(); \
} \
 \
static inline type pfx##read##bwlq(volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set mips3" "\t\t# __readq" "\n\t" \
			"ld %L0, %1" "\n\t" \
			"dsra32 %M0, %L0, 0" "\n\t" \
			"sll %L0, %L0, 0" "\n\t" \
			".set mips0" "\n" \
			: "=r" (__val) \
			: "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else { \
		__val = 0; \
		BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__val); \
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
 \
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	if (sizeof(type) != sizeof(u64)) { \
		*__addr = __val; \
		slow; \
	} else \
		BUG(); \
} \
 \
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	if (sizeof(type) != sizeof(u64)) { \
		__val = *__addr; \
		slow; \
	} else { \
		__val = 0; \
		BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__val); \
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
 \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(mem_, bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
__BUILD_IOPORT_PFX(mem_, bwlq, type)

#define __BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

BUILDIO(b, u8)
BUILDIO(w, u16)
BUILDIO(l, u32)
BUILDIO(q, u64)

__BUILDIO(q, u64)
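
/*
 * Summary added for clarity: each BUILDIO(bwlq, type) invocation above
 * expands into the full set of accessors for that width, e.g. BUILDIO(l, u32)
 * generates readl/writel, __raw_readl/__raw_writel, mem_readl/mem_writel,
 * inl/outl and the inl_p/outl_p slowed-down variants.
 */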

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type) \
 \
static inline void writes##bwlq(volatile void __iomem *mem, void *addr, \
				unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_write##bwlq(*__addr, mem); \
		__addr++; \
	} \
} \
 \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
			       unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_read##bwlq(mem); \
		__addr++; \
	} \
}

#define __BUILD_IOPORT_STRING(bwlq, type) \
 \
static inline void outs##bwlq(unsigned long port, const void *addr, \
			      unsigned int count) \
{ \
	const volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_out##bwlq(*__addr, port); \
		__addr++; \
	} \
} \
 \
static inline void ins##bwlq(unsigned long port, void *addr, \
			     unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_in##bwlq(port); \
		__addr++; \
	} \
}

#define BUILDSTRING(bwlq, type) \
 \
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
BUILDSTRING(q, u64)
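
/*
 * Illustrative sketch (not part of the original header): the string accessors
 * move whole buffers to or from a single register while preserving byte
 * order.  The FIFO offset below is made up.
 *
 *	u16 buf[32];
 *	readsw(regs + 0x10, buf, ARRAY_SIZE(buf));
 *	writesw(regs + 0x10, buf, ARRAY_SIZE(buf));
 */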

/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
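
/*
 * Illustrative sketch (not part of the original header): mmiowb() orders an
 * MMIO write against a following spin_unlock() so that another CPU acquiring
 * the lock sees the device write first.  The lock and register are made up.
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	writel(cmd, dev->regs + 0x00);
 *	mmiowb();
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */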

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
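
/*
 * Illustrative sketch (not part of the original header): copying a firmware
 * image into on-card memory and clearing a mailbox area.  The offsets and
 * sizes below are made up.
 *
 *	memcpy_toio(card_mem, fw_data, fw_len);
 *	memset_io(card_mem + 0x1000, 0, 0x100);
 */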

#define ioread8(addr)		readb(addr)
#define ioread16(addr)		readw(addr)
#define ioread32(addr)		readl(addr)

#define iowrite8(b,addr)	writeb(b,addr)
#define iowrite16(w,addr)	writew(w,addr)
#define iowrite32(l,addr)	writel(l,addr)

#define ioread8_rep(a,b,c)	readsb(a,b,c)
#define ioread16_rep(a,b,c)	readsw(a,b,c)
#define ioread32_rep(a,b,c)	readsl(a,b,c)

#define iowrite8_rep(a,b,c)	writesb(a,b,c)
#define iowrite16_rep(a,b,c)	writesw(a,b,c)
#define iowrite32_rep(a,b,c)	writesl(a,b,c)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
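
/*
 * Illustrative sketch (not part of the original header): mapping BAR 0 of a
 * PCI device during probe and reading a register through the returned
 * cookie.  The register offset is made up.
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = ioread32(regs + 0x10);
 *	pci_iounmap(pdev, regs);
 */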

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a)		readb(__ISA_IO_base + (a))
#define isa_readw(a)		readw(__ISA_IO_base + (a))
#define isa_readl(a)		readl(__ISA_IO_base + (a))
#define isa_readq(a)		readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)		writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)		writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)		writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)		writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

/*
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr.  This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
static inline int check_signature(char __iomem *io_addr,
	const unsigned char *signature, int length)
{
	while (length--) {
		if (readb(io_addr) != *signature)
			return 0;
		io_addr++;
		signature++;
	}
	return 1;
}
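
/*
 * Illustrative sketch (not part of the original header): probing a mapped
 * legacy BIOS region for a signature string.  The address and signature are
 * made up.
 *
 *	char __iomem *p = ioremap(0x000f0000, 0x10000);
 *	if (p && check_signature(p, (const unsigned char *)"$SIG", 4))
 *		found = 1;
 *	iounmap(p);
 */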

/*
 * The caches on some architectures aren't dma-coherent, so this has to be
 * handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) writes the content of the caches back
 *   to memory, if necessary, before DMA transfers from memory to a device.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches.  Dirty lines of the caches may be written back or simply
 *   be discarded.  This operation is necessary before dma operations
 *   to memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start,size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
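
/*
 * Illustrative sketch (not part of the original header): on a non-coherent
 * platform a driver writes back a buffer before the device reads it via DMA
 * and invalidates a buffer before the device writes into it.  The buffers
 * and lengths below are made up.
 *
 *	dma_cache_wback((unsigned long)tx_buf, tx_len);	before device reads tx_buf
 *	dma_cache_inv((unsigned long)rx_buf, rx_len);	before device writes rx_buf
 */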

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)    (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
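
/*
 * Illustrative sketch (not part of the original header): accessing a 32-bit
 * status register that lives in a 64-bit wide CSR.  The base and offsets are
 * made up.
 *
 *	u32 status = csr_in32(csr_base + 0x08);
 *	csr_out32(status | 0x1, csr_base + 0x10);
 */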

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */