X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fasm-ppc%2Fpgtable.h;h=92f30b28b252e1ab38c78d3727d99e2317dcefd5;hb=6f1062330499cee10396bf3fc66a03eb228c5fad;hp=e135b4aba9fc9d60ea32960c3d2b1acb4a490260;hpb=b464fce5edc08a825907e9d48a2d2f1af0393fef;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index e135b4a..92f30b2 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -96,7 +96,7 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
 
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
+#define FIRST_USER_ADDRESS 0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -202,18 +202,64 @@ extern unsigned long ioremap_bot, ioremap_base;
  *
  * Note that these bits preclude future use of a page size
  * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ *   TLB1:
+ *   0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   RPN................................. - - - - - - ERPN.......
+ *
+ *   TLB2:
+ *   0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
+ *
+ * There are some constrains and options, to decide mapping software bits
+ * into TLB entry.
+ *
+ *   - PRESENT *must* be in the bottom three bits because swap cache
+ *     entries use the top 29 bits for TLB2.
+ *
+ *   - FILE *must* be in the bottom three bits because swap cache
+ *     entries use the top 29 bits for TLB2.
+ *
+ *   - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
+ *     doesn't support SMP. So we can use this as software bit, like
+ *     DIRTY.
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
+ * above bits. Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still
+ * hold protection values. That means the three protection bits are
+ * reserved for both PTE and SWAP entry at the most significant three
+ * LSBs.
+ *
+ * There are three protection bits available for SWAP entry:
+ *	_PAGE_PRESENT
+ *	_PAGE_FILE
+ *	_PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+ *
  */
+
 #define _PAGE_PRESENT	0x00000001	/* S: PTE valid */
 #define _PAGE_RW	0x00000002	/* S: Write permission */
-#define _PAGE_DIRTY	0x00000004	/* S: Page dirty */
+#define _PAGE_FILE	0x00000004	/* S: nonlinear file mapping */
 #define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
 #define _PAGE_HWWRITE	0x00000010	/* H: Dirty & RW */
 #define _PAGE_HWEXEC	0x00000020	/* H: Execute permission */
 #define _PAGE_USER	0x00000040	/* S: User page */
 #define _PAGE_ENDIAN	0x00000080	/* H: E bit */
 #define _PAGE_GUARDED	0x00000100	/* H: G bit */
-#define _PAGE_COHERENT	0x00000200	/* H: M bit */
-#define _PAGE_FILE	0x00000400	/* S: nonlinear file mapping */
+#define _PAGE_DIRTY	0x00000200	/* S: Page dirty */
 #define _PAGE_NO_CACHE	0x00000400	/* H: I bit */
 #define _PAGE_WRITETHRU	0x00000800	/* H: W bit */
 
@@ -225,8 +271,7 @@ extern unsigned long ioremap_bot, ioremap_base;
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffff00000000ULL
 
-#elif defined(CONFIG_E500)
-
+#elif defined(CONFIG_FSL_BOOKE)
 /*
    MMU Assist Register 3:
 
@@ -240,28 +285,34 @@
     entries use the top 29 bits.
 */
 
-/* Definitions for e500 core */
-#define _PAGE_PRESENT	0x001	/* S: PTE contains a translation */
-#define _PAGE_USER	0x002	/* S: User page (maps to UR) */
-#define _PAGE_FILE	0x002	/* S: when !present: nonlinear file mapping */
-#define _PAGE_ACCESSED	0x004	/* S: Page referenced */
-#define _PAGE_HWWRITE	0x008	/* H: Dirty & RW, set in exception */
-#define _PAGE_RW	0x010	/* S: Write permission */
-#define _PAGE_HWEXEC	0x020	/* H: UX permission */
-
-#define _PAGE_ENDIAN	0x040	/* H: E bit */
-#define _PAGE_GUARDED	0x080	/* H: G bit */
-#define _PAGE_COHERENT	0x100	/* H: M bit */
-#define _PAGE_NO_CACHE	0x200	/* H: I bit */
-#define _PAGE_WRITETHRU	0x400	/* H: W bit */
-#define _PAGE_DIRTY	0x800	/* S: Page dirty */
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT	0x00001	/* S: PTE contains a translation */
+#define _PAGE_USER	0x00002	/* S: User page (maps to UR) */
+#define _PAGE_FILE	0x00002	/* S: when !present: nonlinear file mapping */
+#define _PAGE_ACCESSED	0x00004	/* S: Page referenced */
+#define _PAGE_HWWRITE	0x00008	/* H: Dirty & RW, set in exception */
+#define _PAGE_RW	0x00010	/* S: Write permission */
+#define _PAGE_HWEXEC	0x00020	/* H: UX permission */
+
+#define _PAGE_ENDIAN	0x00040	/* H: E bit */
+#define _PAGE_GUARDED	0x00080	/* H: G bit */
+#define _PAGE_COHERENT	0x00100	/* H: M bit */
+#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
+#define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+
+#ifdef CONFIG_PTE_64BIT
+#define _PAGE_DIRTY	0x08000	/* S: Page dirty */
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK	0xffffffffffff0000ULL
+#else
+#define _PAGE_DIRTY	0x00800	/* S: Page dirty */
+#endif
 
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
 
-#define NUM_TLBCAMS	(16)
-
 #elif defined(CONFIG_8xx)
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
@@ -433,7 +484,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
 /* in some case we want to additionaly adjust where the pfn is in the pte to
  * allow room for more flags */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
+#else
 #define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
+#endif
 
 #define pte_pfn(x)	(pte_val(x) >> PFN_SHIFT_OFFSET)
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
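
A short standalone C sketch (not part of the patch) may help make the PFN_SHIFT_OFFSET hunk concrete: on FSL Book-E with CONFIG_PTE_64BIT the pfn is pushed up by 8 extra bits so the software/hardware flag bits fit underneath it, while _PAGE_PRESENT and _PAGE_FILE stay in the low bits that the swap encoding must leave untouched. The helpers below are loosely modeled on the kernel's pte_pfn() from the diff and its pfn_pte() counterpart, reimplemented as plain functions; PAGE_SHIFT = 12 and the values used in main() are assumptions made purely for illustration.

/* Standalone sketch, not kernel code: models the CONFIG_PTE_64BIT layout
 * described in the diff above.  PAGE_SHIFT and the example pfn are assumed;
 * the flag values mirror the FSL Book-E #defines from the patch. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;                      /* 64-bit PTE (CONFIG_PTE_64BIT) */

#define PAGE_SHIFT        12
#define PFN_SHIFT_OFFSET  (PAGE_SHIFT + 8)   /* 8 extra flag bits below the pfn */

#define _PAGE_PRESENT     0x00001            /* must stay in the low bits ...        */
#define _PAGE_FILE        0x00002            /* ... so swap entries can reuse the rest */
#define _PAGE_ACCESSED    0x00004
#define _PAGE_RW          0x00010
#define _PAGE_DIRTY       0x08000

/* counterparts of the kernel's pte_pfn()/pfn_pte(), as plain functions */
static uint64_t pte_pfn(pte_t pte)
{
	return pte >> PFN_SHIFT_OFFSET;          /* the flag bits drop out of the shift */
}

static pte_t pfn_pte(uint64_t pfn, uint64_t prot)
{
	return (pfn << PFN_SHIFT_OFFSET) | prot; /* pfn above, flags below */
}

int main(void)
{
	uint64_t pfn = 0xF12345;                 /* a 36-bit physical page: addr 0xF12345000 */
	pte_t pte = pfn_pte(pfn, _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY);

	printf("pte      = 0x%016llx\n", (unsigned long long)pte);
	printf("pte_pfn  = 0x%llx (round-trips: %s)\n",
	       (unsigned long long)pte_pfn(pte), pte_pfn(pte) == pfn ? "yes" : "no");
	printf("low bits = 0x%llx (protection bits shared with the swap encoding)\n",
	       (unsigned long long)(pte & (_PAGE_PRESENT | _PAGE_FILE)));
	return 0;
}

Note how every flag value sits below bit 16, which lines up with the 64-bit _PTE_NONE_MASK of 0xffffffffffff0000ULL in the diff: the bits holding the pfn (and the ERPN that "never gets cleared") are exactly the ones the mask tells the rest of the code to ignore.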