Version: 0.6-2.3.46
Architecture: um
(Note: this page was rendered by LXR; the cross-reference database could not be opened, so identifier hyperlinks are unavailable.)
1 #ifndef __UM_PGTABLE_H
2 #define __UM_PGTABLE_H
3
4 #include "asm/processor.h"
5 #include "asm/page.h"
6 #include "asm/fixmap.h"
7
8 extern pgd_t swapper_pg_dir[1024];
9
10 #define flush_cache_all() do ; while (0)
11 #define flush_cache_mm(mm) do ; while (0)
12 #define flush_cache_range(mm, start, end) do ; while (0)
13 #define flush_cache_page(vma, vmaddr) do ; while (0)
14 #define flush_page_to_ram(page) do ; while (0)
15 #define flush_icache_range(from, to) do ; while (0)
16 #define flush_icache_page(vma,pg) do ; while (0)
17
18 extern void set_pte(pte_t *pteptr, pte_t pteval);
19
20 extern pte_t * pte_alloc(pmd_t * pmd, unsigned long address);
21 extern void pte_free(pte_t *pte);
22
23 extern pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address);
24
25 extern void pgd_free(pgd_t *pgd);
26
27 extern int do_check_pgt_cache(int, int);
28
29 /* zero page used for uninitialized stuff */
30 extern unsigned long *empty_zero_page;
31
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically -- PTRS_PER_PMD
 * is 1 and the PMD level is folded into the pgd entry.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)

/* Diagnostics printed when a bogus table entry is encountered. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * pgd entries used up by user/kernel:
 */
#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
64
65 #ifndef __ASSEMBLY__
66 /* Just any arbitrary offset to the start of the vmalloc VM area: the
67 * current 8MB value just means that there will be a 8MB "hole" after the
68 * physical memory until the kernel virtual memory starts. That means that
69 * any out-of-bounds memory accesses will hopefully be caught.
70 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
71 * area for the same reason. ;)
72 */
73 #define VMALLOC_OFFSET (__va_space)
74 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
75 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
76 #define VMALLOC_END (FIXADDR_START)
77
/*
 * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_4M	0x080	/* 4 MB page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */

/* Shares bit 0x080 with _PAGE_4M; only meaningful when not present. */
#define _PAGE_PROTNONE	0x080	/* If not present */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers it the
 * same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
129
130 /*
131 * Define this if things work differently on an i386 and an i486:
132 * it will (on an i486) warn about kernel memory accesses that are
133 * done without a 'verify_area(VERIFY_WRITE,..)'
134 */
135 #undef TEST_VERIFY_AREA
136
137 /* page table for 0-4MB for everybody */
138 extern unsigned long pg0[1024];
139
140 /*
141 * BAD_PAGETABLE is used when we need a bogus page-table, while
142 * BAD_PAGE is used for a bogus page.
143 *
144 * ZERO_PAGE is a global shared page that is always zero: used
145 * for zero-mapped memory areas etc..
146 */
147 extern pte_t __bad_page(void);
148 extern pte_t * __bad_pagetable(void);
149
150 #define BAD_PAGETABLE __bad_pagetable()
151 #define BAD_PAGE __bad_page()
152 #define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
153
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 -- 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2	2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* Tests on pte values; _PAGE_PROTNONE also counts as "present". */
#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { pte_val(*(xp)) = 0; } while (0)
#define pte_pagenr(x)	((unsigned long)((__pa(pte_val(x)) >> PAGE_SHIFT)))

/* Tests on pmd values; "bad" means not a plain kernel page table. */
#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)
177
178 /*
179 * The "pgd_xxx()" functions here are trivial for a folded two-level
180 * setup: the pgd is never bad, and a pmd always exists (as it's folded
181 * into the pgd entry)
182 */
183 extern inline int pgd_none(pgd_t pgd) { return 0; }
184 extern inline int pgd_bad(pgd_t pgd) { return 0; }
185 extern inline int pgd_present(pgd_t pgd) { return 1; }
186 extern inline void pgd_clear(pgd_t * pgdp) { }
187
188
189 /*
190 * Permanent address of a page. Obviously must never be
191 * called on a highmem page.
192 */
193 #define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
194 #define __page_address(page) ({ PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
195 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
196 #define pte_page(x) (mem_map+pte_pagenr(x))
197
198 /*
199 * The following only work if pte_present() is true.
200 * Undefined behaviour if not..
201 */
202 extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
203 extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
204 extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
205 extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
206 extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
207
208 extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
209 extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
210 extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
211 extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
212 extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; }
213 extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
214 extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
215 extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
216 extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
217 extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
218
219 /*
220 * Conversion functions: convert a page and protection to a page entry,
221 * and a page entry and page directory to the page they refer to.
222 */
223
224 #define mk_pte(page, pgprot) \
225 ({ \
226 pte_t __pte; \
227 \
228 pte_val(__pte) = ((unsigned long) __va((page-mem_map)*(unsigned long)PAGE_SIZE + pgprot_val(pgprot))); \
229 __pte; \
230 })
231
232 /* This takes a physical page address that is used by the remapping functions */
233 #define mk_pte_phys(physpage, pgprot) \
234 ({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
235
236 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
237 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
238
239 #define pmd_page(pmd) \
240 (pmd_val(pmd) & PAGE_MASK)
241
242 /* to find an entry in a page-table-directory. */
243 #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
244
245 /* to find an entry in a page-table-directory */
246 #define pgd_offset(mm, address) \
247 ((mm)->pgd + ((address) >> PGDIR_SHIFT))
248
249 /* to find an entry in a kernel page-table-directory */
250 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
251
252 /* Find an entry in the second-level page table.. */
253 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
254 {
255 return (pmd_t *) dir;
256 }
257
258 /* Find an entry in the third-level page table.. */
259 #define pte_offset(pmd, address) \
260 ((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
261
262 #define update_mmu_cache(vma,address,pte) do ; while (0)
263
264 /* Encode and de-code a swap entry */
265 #define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
266 #define SWP_OFFSET(x) ((x).val >> 8)
267 #define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
268 #define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
269 #define swp_entry_to_pte(x) ((pte_t) { (x).val })
270
271 #define PageSkip(x) (0)
272
273 #endif
274
275 #endif
276
This page was automatically generated by the LXR engine. Visit the LXR main site for more information.