Version: 0.6-2.3.46
Architecture: um
#ifndef __UM_PGALLOC_H
#define __UM_PGALLOC_H

#include "linux/mm.h"

#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

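/*
 * The quicklists are per-CPU caches of recently freed page-table
 * pages, kept in current_cpu_data so they can be reused without a
 * trip to the page allocator. A free page's first word links it to
 * the next page on the list. There is no pmd quicklist because the
 * pmd is folded into the pgd on this two-level architecture.
 */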
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

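/*
 * Slow path: take a fresh page from the page allocator, clear the
 * user entries, and copy the kernel mappings from swapper_pg_dir.
 */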
extern __inline__ pgd_t *get_pgd_slow(void)
{
        pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (ret) {
                memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(ret + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return ret;
}

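/*
 * Fast path: pop a pgd off the quicklist if one is cached, clearing
 * the word that held the list link; otherwise fall back to the slow
 * path above.
 */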
extern __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        if ((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        } else
                ret = (unsigned long *)get_pgd_slow();
        return (pgd_t *)ret;
}

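/*
 * Push a pgd back onto the quicklist: its first word becomes the
 * link to the previous list head.
 */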
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
}

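/* Return a pgd page directly to the page allocator. */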
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

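/*
 * Slow-path pte allocators, defined out of line. As the parameter
 * name indicates, the address has already been reduced to an offset
 * within the pte page by the callers below.
 */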
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

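/*
 * Pop a pte page off the quicklist. Pages on the list are clear
 * except for the first word, which holds the link; copying the
 * (known-zero) second word over it restores a fully cleared page
 * without a memset.
 */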
extern __inline__ pte_t *get_pte_fast(void)
{
        unsigned long *ret;

        if((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

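/*
 * Push a pte page onto the quicklist (free_pte_fast) or hand it
 * straight back to the page allocator (free_pte_slow).
 */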
extern __inline__ void free_pte_fast(pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

/* We don't use a pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
        return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

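/* Out-of-line handlers for corrupt pmd entries. */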
extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);

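/*
 * The entry points used by generic code. Note that frees go through
 * the slow path straight back to the page allocator; only allocation
 * consults the quicklists here.
 */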
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc() get_pgd_fast()

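/*
 * Map a kernel address to its pte, instantiating the pte page if the
 * pmd is empty: try the quicklist first, then the slow path. Here
 * address is reduced to a pte index and applied as pointer
 * arithmetic on a pte_t *.
 */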
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_pte_fast();

                if (!page)
                        return get_pte_kernel_slow(pmd, address);
                pmd_val(*pmd) = _KERNPG_TABLE + (unsigned long) (page);
                return page + address;
        }
        if (pmd_bad(*pmd)) {
                __bad_pte_kernel(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

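/*
 * The user-space counterpart. Unlike pte_alloc_kernel() above, the
 * shift by (PAGE_SHIFT-2) and the mask of 4*(PTRS_PER_PTE - 1)
 * compute a byte offset (the pte index times sizeof(pte_t), here 4),
 * which is why it is added to the table address before the cast to
 * pte_t *.
 */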
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);

        if (pmd_none(*pmd))
                goto getnew;
        if (pmd_bad(*pmd))
                goto fix;
        return (pte_t *) (pmd_page(*pmd) + address);
getnew:
{
        unsigned long page = (unsigned long) get_pte_fast();

        if (!page)
                return get_pte_slow(pmd, address);
        pmd_val(*pmd) = _PAGE_TABLE + (unsigned long) (page);
        return (pte_t *) (page + address);
}
fix:
        __bad_pte(pmd);
        return NULL;
}

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

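/*
 * There is no hardware page-table base register to load here;
 * presumably page-directory switches are handled elsewhere in the
 * UML arch code, so SET_PAGE_DIR is a no-op.
 */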
#define SET_PAGE_DIR(task, pgdir) do ; while(0)

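/*
 * On some architectures set_pgdir() propagates a kernel pgd entry
 * into every process's page directory; nothing to do here.
 */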
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
}

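/*
 * TLB flushing, defined out of line. On UML these presumably update
 * the host's view of the address space rather than any hardware TLB;
 * flush_tlb_kernel_vm() is a UML-specific addition to the usual set.
 */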
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_kernel_vm(void);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
                            unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
}

#endif