/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
static void pmb_unmap_entry(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
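
/*
 * Worked example (assuming the usual SH-4A register layout, where
 * PMB_ADDR = 0xf6100000, PMB_DATA = 0xf7100000 and PMB_E_SHIFT = 8):
 * entry 5 yields mk_pmb_addr(5) == 0xf6100500 and
 * mk_pmb_data(5) == 0xf7100500, i.e. each of the PMB entries is a
 * pair of memory-mapped registers at matching 0x100-byte strides in
 * the address and data arrays.
 */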
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);

	/*
	 * find_first_zero_bit() returns NR_PMB_ENTRIES (not greater)
	 * when the map is full, so test with >= here.
	 */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, pmb_map))
		goto repeat;

	return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_and_set_bit(entry, pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;
	pmbe->link	= NULL;	/* don't leave a stale chain for unmap to walk */

	return pmbe;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, pmb_map);
}
/*
 * Must be run uncached.
 */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();

	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(pmbe->flags & PMB_C))
		pmbe->flags |= PMB_WT;
#endif

	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));

	back_to_cached();
}
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	/* Loop again while at least the smallest (16MB) mapping remains */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp);

	return err;
}
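
/*
 * Usage sketch (illustrative only; the addresses and sizes below are
 * invented for the example): a 192MB request is satisfied greedily with
 * one 128MB entry followed by one 64MB entry, which pmb_remap() links
 * together so that a later pmb_unmap() on the starting virtual address
 * tears both down.
 */
#if 0
static void __init pmb_remap_example(void)
{
	long mapped;

	/* Map 192MB of physical 0x40000000 at virtual 0xa4000000 */
	mapped = pmb_remap(0xa4000000, 0x40000000, SZ_128M + SZ_64M,
			   PAGE_KERNEL);
	if (mapped > 0)		/* mapped == SZ_128M + SZ_64M on success */
		pmb_unmap(0xa4000000);	/* clears the whole linked chain */
}
#endif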
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr) {
				pmb_unmap_entry(pmbe);
				break;
			}
		}
	}
}
static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
	if (unlikely(!pmbe))
		return;

	if (!test_bit(pmbe->entry, pmb_map)) {
		WARN_ON(1);
		return;
	}

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
static void
pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
{
	unsigned int size;
	const char *sz_str;

	size = data_val & PMB_SZ_MASK;

	sz_str = (size == PMB_SZ_16M)  ? " 16MB":
		 (size == PMB_SZ_64M)  ? " 64MB":
		 (size == PMB_SZ_128M) ? "128MB":
					 "512MB";

	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
		(data_val & PMB_C) ? "" : "un");
}
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
static int pmb_synchronize_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data_val &= ~PMB_WT;
#else
			data_val &= ~(PMB_C | PMB_WT);
#endif
			__raw_writel(data_val, data);
		}

		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		pmb_log_mapping(data_val, vpn, ppn);

		applied++;
	}

	return (applied == 0);
}
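
/*
 * Worked example (the concrete entry is invented for illustration): an
 * address array word of 0x80000000 | PMB_V decodes to VPN 0x80000000,
 * and a data array word of 0x08000000 | PMB_SZ_128M | PMB_C | PMB_V
 * decodes to PPN 0x08000000 with a cached 128MB page. Provided that
 * 0x08000000 falls inside [__pa(memory_start), __pa(memory_end)), the
 * mapping is preserved and mirrored into pmb_entry_list via
 * pmb_alloc(); anything out of range simply has PMB_V cleared in both
 * array words so the slot can be reused.
 */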
int pmb_init(void)
{
	int ret;

	jump_to_uncached();

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {
		back_to_cached();
		return 0;
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	back_to_cached();

	return 0;
}
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
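
/*
 * With debugfs mounted (mount -t debugfs none /sys/kernel/debug), the
 * PMB table can then be inspected from userspace; assuming the arch's
 * sh_debugfs_root is the "sh" directory, that means:
 *
 *	# cat /sys/kernel/debug/sh/pmb
 *
 * which emits one line per hardware entry in the format shown in the
 * pmb_seq_show() comment above.
 */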
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif