/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(const char *reason)
        printk(KERN_INFO "%s\n", reason);

static int __init nopat(char *str)
        pat_disable("PAT support disabled.");
early_param("nopat", nopat);

static inline void pat_disable(const char *reason)

static int debug_enable;

static int __init pat_debug_setup(char *str)
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
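        /*
         * Illustrative note (not part of the original file): each PAT(x, y)
         * places the type code y into byte x of the 64-bit MSR value, e.g.
         * PAT(1, WC) == (u64)1 << 8 == 0x100. With the assignment above the
         * eight entries become WB, WC, UC-, UC, WB, WC, UC-, UC, so the value
         * written to MSR_IA32_CR_PAT below is 0x0007010600070106.
         */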
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);

static char *cattr_name(unsigned long flags)
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
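/*
 * Illustrative sketch (not part of the original file): what a lookup on the
 * sorted memtype list might look like. It relies only on the struct memtype
 * fields used elsewhere in this file (start, end, type, nd) and on
 * memtype_lock; the function name is made up for the example and the file
 * itself does not use it.
 */
static inline int memtype_lookup_example(u64 addr, unsigned long *type)
{
        struct memtype *entry;
        int found = 0;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start > addr)
                        break;          /* list is sorted by start address */
                if (addr < entry->end) {
                        *type = entry->type;
                        found = 1;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        return found;
}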
 * Does the intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type values in PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the IA-32
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
         * Look for MTRR hint to get the effective type in case where PAT
        if (req_type == _PAGE_CACHE_WB) {
                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
        if (new->type != entry->type) {
                        new->type = entry->type;

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                else if (new->type != entry->type)

        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));

static struct memtype *cached_entry;
static u64 cached_start;
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
                /*
                 * For legacy reasons, physical address range in the legacy ISA
                 * region is tracked as non-RAM. This will allow users of
                 * /dev/mem to map portions of legacy ISA region, even when
                 * some of those portions are listed (or not even listed) with
                 * different e820 types (RAM/reserved/..)
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))

                if (ram_page == not_rampage)
/*
 * For RAM pages, mark the pages as non-WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is OK because only one driver will own the page and do
 * set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non-WB. In the future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * set_memory_uc() and set_memory_wc().
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))

        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);

static int free_ram_pages_type(u64 start, u64 end)
        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))

                ClearPageNonWB(page);

        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function will return
 * the available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 *
 * (See the illustrative usage sketch after free_memtype() below.)
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;

        BUG_ON(start >= end); /* end is exclusive */

                /* This is identical to page table setting without PAT */
                                *new_type = _PAGE_CACHE_WB;
                                *new_type = req_type & _PAGE_CACHE_MASK;

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                        *new_type = _PAGE_CACHE_WB;

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                        actual_type = _PAGE_CACHE_UC_MINUS;
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type,
        else if (is_range_ram < 0)

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        new->type = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;

                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                        "track %s, req %s\n",
                        start, end, cattr_name(new->type), cattr_name(req_type));
                spin_unlock(&memtype_lock);

        cached_start = start;

                list_add(&new->nd, where);
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");
int free_memtype(u64 start, u64 end)
        struct memtype *entry;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return free_ram_pages_type(start, end);
        else if (is_range_ram < 0)

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                        list_del(&entry->nd);
        spin_unlock(&memtype_lock);

                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                        current->comm, current->pid, start, end);

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
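/*
 * Illustrative usage sketch (not part of the original file): a typical
 * reserve/free pairing around an MMIO range, as an ioremap-style caller
 * might do it. The base address and size are made up for the example and
 * nothing in this file calls this helper.
 */
static inline int pat_reserve_example(void)
{
        u64 base = 0xfd000000ULL;       /* hypothetical MMIO base */
        u64 size = 0x100000ULL;         /* hypothetical 1 MB aperture */
        unsigned long new_type;
        int ret;

        /* Ask for UC-; new_type reports what was actually granted. */
        ret = reserve_memtype(base, base + size, _PAGE_CACHE_UC_MINUS,
                              &new_type);
        if (ret)
                return ret;

        /* ... set up and use the mapping with new_type ... */

        free_memtype(base, base + size);
        return 0;
}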
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)

/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);

#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;

        if (!range_is_allowed(pfn, size))

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;

        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        /*
         * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
                retval = reserve_memtype(offset, offset + size, flags, NULL);
                retval = reserve_memtype(offset, offset + size, -1, &flags);

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        offset, (unsigned long long)(offset + size));

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        u64 addr = (u64)pfn << PAGE_SHIFT;

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, (unsigned long long)(addr + size),

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space if that range is part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
        if (!pat_enabled || base >= __pa(high_memory))

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                        "%s:%d ioremap_change_attr failed %s "
                        current->comm, current->pid,
                        base, (unsigned long long)(base + size));
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() doesn't support RAM pages.
         */

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);

        if (flags != want_flags) {
                if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                 * We allow returning different type than the one requested in
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
                free_memtype(paddr, paddr + size);
/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call. Otherwise, we reserve the entire vma range by
 * going through the PTEs page by page to get the physical address and
 * protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);

        /* reserve entire vma page by page, using pfn and prot from pte */
        for (i = 0; i < vma_size; i += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                pgprot = __pgprot(prot);
                retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);

        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                free_pfn_range(paddr, PAGE_SIZE);
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, we reserve the entire vma range
 * with a single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified
 * range page by page.
 *
 * Note that this function can be called with the caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
        resource_size_t base_paddr;
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);

        /* reserve page by page using pfn and size */
        base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
        for (i = 0; i < size; i += PAGE_SIZE) {
                paddr = base_paddr + i;
                retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);

        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                paddr = base_paddr + j;
                free_pfn_range(paddr, PAGE_SIZE);
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);

        if (size != 0 && size != vma_size) {
                /* free page by page, using pfn and size */
                paddr = (resource_size_t)pfn << PAGE_SHIFT;
                for (i = 0; i < size; i += PAGE_SIZE) {
                        free_pfn_range(paddr, PAGE_SIZE);
                /* free entire vma, page by page, using the pfn from pte */
                for (i = 0; i < vma_size; i += PAGE_SIZE) {
                        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                        free_pfn_range(paddr, PAGE_SIZE);

pgprot_t pgprot_writecombine(pgprot_t prot)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
                return pgprot_noncached(prot);
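/*
 * Illustrative usage sketch (not part of the original file): a driver
 * mapping a write-combining aperture (e.g. a framebuffer) into userspace
 * would typically apply this to the vma protection before remapping:
 *
 *      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *      io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                         vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */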
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
        struct memtype *list_node, *print_entry;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
        spin_unlock(&memtype_lock);

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
                seq_printf(seq, "PAT memtype list:\n");
        return memtype_get_idx(*pos);

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        return memtype_get_idx(*pos);

static void memtype_seq_stop(struct seq_file *seq, void *v)

static int memtype_seq_show(struct seq_file *seq, void *v)
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,

static int memtype_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &memtype_seq_ops);

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .release = seq_release,

static int __init pat_memtype_list_init(void)
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                                NULL, &memtype_fops);

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */