/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
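/*
 * A sanity check of the arithmetic above (assuming the usual sparc32
 * 4 KB PAGE_SIZE and a 4-byte iopte_t): 256 MB / 4 KB = 65536 PTEs,
 * 65536 * 4 bytes = 256 KB of table, i.e. 64 pages, i.e. order 6.
 */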
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
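/*
 * Editorial note: BTFIXUP is the sparc32 boot-time fixup mechanism, so the
 * flush_page_for_dma() wrapper above resolves at boot to whichever cache
 * flush routine matches the detected CPU; ld_mmu_iommu() below inspects
 * that choice to decide which mapping helpers to install.
 */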
extern int flush_page_for_dma_global;
static int viking_flush;

extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
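/*
 * Illustrative example (values invented, 4 KB pages assumed): a page at
 * physical address 0x1234000 has pfn 0x1234, so MKIOPTE(0x1234, IOPERM)
 * shifts the pfn left by 8 into the IOPTE_PAGE field, ORs in the valid,
 * write and cacheable permission bits, and clears the write-as-zero bits
 * covered by IOPTE_WAZ.
 */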
static void __init sbus_iommu_init(struct of_device *op)
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
		prom_printf("Unable to allocate iommu structure\n");

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
		prom_printf("Cannot map IOMMU registers\n");

	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));

	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
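	/*
	 * Worked example (numbers assumed, not from this file): a HyperSparc
	 * with a 256 KB virtually indexed cache and 4 KB pages gets
	 * 256 KB >> 12 = 64 colors, so the allocator below will only hand
	 * out DVMA addresses whose color matches the backing physical page.
	 */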
	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
static int __init iommu_init(void)
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct of_device *op = of_find_device_by_node(dp);

		of_propagate_archdata(op);

subsys_initcall(iommu_init);
/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	if (viking_mxcc_present) {
		viking_mxcc_flush_page(start);
	} else if (viking_flush) {
		viking_flush_page(start);
	} else {
		__flush_page_to_ram(start);
	}
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
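	/*
	 * The pfn doubles as the allocation "color": with num_colors > 1
	 * (HyperSparc), the slot returned shares its cache color with the
	 * physical page; with num_colors == 1 the hint has no effect.
	 */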
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);

	iommu_flush_iotlb(iopte0, npages);
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
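	/*
	 * Hypothetical example (values invented, 4 KB pages assumed): a vaddr
	 * ending in 0xf00 with len 0x300 straddles two pages, and indeed
	 * (0xf00 + 0x300 + 0xfff) >> 12 == 2.
	 */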
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
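/*
 * Three flavours of the SCSI mapping entry points follow; ld_mmu_iommu()
 * installs one set at boot: "noflush" for I/O-coherent CPUs, "gflush" when
 * flush_page_for_dma() flushes the whole cache anyway, and "pflush" when
 * every page must be flushed individually before the IOMMU maps it.
 */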
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
	return iommu_get_scsi_one(dev, vaddr, len);

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
	n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
	sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
	sg->dvma_length = (__u32) sg->length;

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
	flush_page_for_dma(0);

	n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
	sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
	sg->dvma_length = (__u32) sg->length;
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
	unsigned long page, oldpage = 0;

	n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
	/*
	 * We expect unmapped highmem pages not to be in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
		for (i = 0; i < n; i++) {
			if (page != oldpage) {	/* Already flushed? */
				flush_page_for_dma(page);

	sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
	sg->dvma_length = (__u32) sg->length;
static void iommu_release_one(struct device *dev, u32 busa, int npages)
	struct iommu_struct *iommu = dev->archdata.iommu;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);

	bit_map_clear(&iommu->usemap, ioptex, npages);
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
	n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
	sg->dvma_address = 0x21212121;
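	/*
	 * 0x21212121 is "!!!!" in ASCII; it reads like a deliberate poison
	 * value, so a stale use of the released mapping shows up as an
	 * obviously bogus bus address instead of silently reusing the old one.
	 */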
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);
	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		if (viking_mxcc_present)
			viking_mxcc_flush_page(page);
		else if (viking_flush)
			viking_flush_page(page);
		else
			__flush_page_to_ram(page);
		pgdp = pgd_offset(&init_mm, addr);
		pmdp = pmd_offset(pgdp, addr);
		ptep = pte_offset_map(pmdp, addr);

		set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface. Some cpus are
	 *        not I/O DMA coherent at all, and some have
	 *        virtually indexed caches. The driver DMA flushing
	 *        methods handle the former case, but here, during
	 *        IOMMU page table modifications and while using non-cacheable
	 *        cpu mappings of pages potentially still in the cpu caches,
	 *        we have to handle the latter case as well.
	 */
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);

	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

		iopte_val(*iopte++) = 0;

	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
static char *iommu_lockarea(char *vaddr, unsigned long len)

static void iommu_unlockarea(char *vaddr, unsigned long len)
void __init ld_mmu_iommu(void)
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma() flushes everything, no matter which page it is given */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
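	/*
	 * The branch below picks the consistent-mapping attributes: cacheable
	 * page and IOPTE flags for Viking/MXCC and HyperSparc (where the DVMA
	 * coloring above keeps the cached views consistent), uncached flags
	 * for everything else.
	 */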
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}