/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif
static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
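/*
 * Note: dprintk() output is enabled at boot by passing "debugpat" on the
 * kernel command line, which sets debug_enable via pat_debug_setup() above.
 */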
static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
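/*
 * Worked example (illustrative): PAT(2, UC_MINUS) == (u64)7 << 16, i.e. it
 * places the UC- encoding (7) into PAT entry 2, bits 23:16 of the MSR.
 */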
void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}
	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
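	/*
	 * Illustrative note: with the layout above, the value written below
	 * is 0x0007010600070106 (entries 0-3 mirrored into entries 4-7).
	 */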
	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */
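/*
 * Each tracked region is a struct memtype: the range [start, end) and its
 * cache attribute 'type', linked on memtype_list via 'nd' and into
 * memtype_rbroot via 'rb' (the field set matches the uses below).
 */
struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
	struct rb_node		rb;
};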
static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (data->start < start) {
			last_lower = data;
			node = node->rb_right;
		} else if (data->start > start) {
			node = node->rb_left;
		} else
			return data;
	}

	/* Will return NULL if there is no entry with its start <= start */
	return last_lower;
}
static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct memtype *this = container_of(*new, struct memtype, rb);

		parent = *new;
		if (data->start <= this->start)
			new = &((*new)->rb_left);
		else if (data->start > this->start)
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->rb, parent, new);
	rb_insert_color(&data->rb, root);
}
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
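/*
 * Worked example (illustrative): a _PAGE_CACHE_WB request over a range an
 * MTRR marks as UC resolves to _PAGE_CACHE_UC_MINUS here; any non-WB
 * request is passed through unchanged.
 */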
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}
	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;
	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}
	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	memtype_rb_insert(&memtype_rbroot, new);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
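/*
 * Illustrative usage sketch (hypothetical 'paddr'/'size'): callers pair
 * reserve_memtype() with free_memtype():
 *
 *	unsigned long new_type;
 *
 *	if (!reserve_memtype(paddr, paddr + size, _PAGE_CACHE_UC_MINUS,
 *			     &new_type)) {
 *		... set up the mapping using new_type ...
 *		free_memtype(paddr, paddr + size);
 *	}
 */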
int free_memtype(u64 start, u64 end)
{
	struct memtype *entry, *saved_entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = free_ram_pages_type(start, end);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}
	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, start);
	if (unlikely(entry == NULL))
		goto unlock_ret;

	/*
	 * Saved entry points to an entry with start same or less than what
	 * we searched for. Now go through the list in both directions to look
	 * for the entry that matches with both start and end, with list stored
	 * in sorted start address
	 */
	saved_entry = entry;
	list_for_each_entry_from(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start > start) {
			break;
		}
	}

	if (!err)
		goto unlock_ret;

	entry = saved_entry;
	list_for_each_entry_reverse(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start < start) {
			break;
		}
	}
unlock_ret:
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		spin_lock(&memtype_lock);
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		spin_unlock(&memtype_lock);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
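/*
 * Illustrative usage sketch (hypothetical 'base'/'len'): a caller wanting a
 * write-combining mapping over an I/O region might do:
 *
 *	unsigned long type = _PAGE_CACHE_WC;
 *
 *	if (!io_reserve_memtype(base, base + len, &type)) {
 *		... map and use the region with the returned 'type' ...
 *		io_free_memtype(base, base + len);
 *	}
 *
 * On success 'type' may come back as a different but compatible attribute
 * (e.g. UC- instead of WC).
 */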
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;
	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}
	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_vma_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
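/*
 * Illustrative usage sketch (not from this file): a driver mmap handler
 * would typically apply this before remapping:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * With PAT disabled the request degrades to an uncached mapping.
 */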
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
			    NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
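/*
 * Example output (illustrative, hypothetical addresses) when reading
 * /sys/kernel/debug/x86/pat_memtype_list:
 *
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd1000000
 */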
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */