Version: 0.6-2.3.46
Architecture: um
#include "asm/pgtable.h"
#include "asm/pgalloc.h"
#include "asm/a.out.h"
#include "asm/processor.h"
#include "linux/sched.h"
#include "linux/malloc.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"

/* kernel_maps collects the permanent kernel vmas (text, init_task, data,
 * bss, physical memory, the virtual memory area, and the stack) which are
 * registered below by add_perm_vma().
 */
struct mm_struct kernel_maps = EMPTY_MM;
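
/* fix_range brings the host process's address space into line with the page
 * tables between start_addr and end_addr: for each page it compares the pte
 * that should be in effect (from proc_mm, or from init_mm inside the kernel
 * vm area) against the one recorded in current->thread.real_mm, and issues
 * map(), unmap(), or protect() calls for any page whose presence, backing
 * page, or protection differs.
 */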
static void fix_range(struct mm_struct *proc_mm, unsigned long start_addr,
		      unsigned long end_addr)
{
	struct mm_struct *mm;
	pgd_t *npgd, *opgd;
	pmd_t *npmd, *opmd;
	pte_t *npte, *opte, save_npte;
	unsigned long addr;
	int r, w, x;

	if(current->thread.extern_pid != getpid())
		panic("fix_range fixing wrong address space");
	if(current->thread.real_mm->pgd == NULL){
		current->thread.real_mm->pgd = (pgd_t *) get_zeroed_page(GFP_KERNEL);
		if(current->thread.real_mm->pgd == NULL)
			panic("fix_range couldn't allocate pgd");
	}
	mm = proc_mm;
	for(addr = start_addr; addr < end_addr;){
		/* These two tests depend on start_vm and end_vm being on
		 * pgdir (4M) boundaries.
		 */
		if(addr == start_vm) mm = &init_mm;
		else if(addr == end_vm) mm = proc_mm;

		/* The 'n' entries describe the wanted mapping, the 'o'
		 * entries what the host address space currently has.
		 */
		npgd = pgd_offset(mm, addr);
		npmd = pmd_offset(npgd, addr);
		opgd = pgd_offset(current->thread.real_mm, addr);
		opmd = pmd_offset(opgd, addr);
		if(pmd_present(*npmd)){
			/* pte_alloc may allocate, so snapshot the wanted pte
			 * first and check that its writability didn't change
			 * underneath us.
			 */
			save_npte = *pte_offset(npmd, addr);
			opte = pte_alloc(opmd, addr);
			if(opte == NULL) panic("pte_alloc failed in fix_range");
			npte = pte_offset(npmd, addr);
			if(pte_write(*npte) != pte_write(save_npte))
				panic("pte writeability changed in fix_range");
			if(pte_present(*npte)){
				r = pte_read(*npte);
				w = pte_write(*npte);
				x = pte_exec(*npte);
				if(mm == &init_mm){
					if((pte_val(*npte) & ~PAGE_MASK) == pgprot_val(PAGE_KERNEL)){
						r = 1;
						w = 1;
						x = 1;
					}
					else if((pte_val(*npte) & ~PAGE_MASK) == pgprot_val(PAGE_KERNEL_RO)){
						r = 1;
						w = 0;
						x = 1;
					}
				}
				if(opte->pte != npte->pte){
					if(!pte_present(*opte))
						map(addr, page_address(pte_page(*npte)), PAGE_SIZE, r, w, x);
					else if(pte_page(*opte) != pte_page(*npte)){
						unmap(addr, PAGE_SIZE);
						map(addr, page_address(pte_page(*npte)), PAGE_SIZE, r, w, x);
					}
					else if(pte_val(*opte) != pte_val(*npte)){
						protect(addr, PAGE_SIZE, r, w, x);
					}
					else panic("Don't know why ptes are different");
				}
			}
			else if(pte_present(*opte)) unmap(addr, PAGE_SIZE);
			*opte = *npte;
			addr += PAGE_SIZE;
		}
		else {
			if(pmd_present(*opmd)){
				/* The wanted pmd is gone - unmap the whole
				 * chunk it covered and free the old pte page.
				 */
				unmap(addr, PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE);
				pte_free((pte_t *) pmd_page(*opmd));
				*opgd = *npgd;
			}
			addr += PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE;
		}
	}
}
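
/* UML has no hardware TLB to flush - the flush_tlb_* entry points below all
 * come down to calling fix_range over the affected range, which brings the
 * host mappings back in sync with the page tables.
 */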
void flush_tlb_range(struct mm_struct *mm, unsigned long start,
		     unsigned long end)
{
	if(mm != current->mm) return;
	fix_range(mm, start, end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if((mm != current->mm) || (mm->mmap == NULL)) return;
	fix_range(mm, 0, STACK_TOP);
}

void flush_tlb_kernel_vm(void)
{
	/* fix_range(NULL, start_vm, end_vm);*/
	if(current->mm != NULL) fix_range(current->mm, 0, STACK_TOP);
	else fix_range(NULL, start_vm, end_vm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	if(vma->vm_mm != current->mm) return;
	address &= PAGE_MASK;
	fix_range(current->mm, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	fix_range(current->mm, 0, STACK_TOP);
}
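
/* The helpers below build up kernel_maps: vm_prot and vm_flags translate the
 * four permission characters of a /proc/<pid>/maps-style entry (presumably
 * the format the callers parse them from) into a pgprot_t and into VM_*
 * flags, and add_perm_vma registers a permanent vma with them.
 */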
static pgprot_t vm_prot(char r, char w, char x, char p)
{
	if((r == '-') && (w == '-') && (x == '-')) return(PAGE_NONE);
	else if(w == '-') return(PAGE_READONLY);
	else if(p == 'p') return(PAGE_COPY);
	else return(PAGE_SHARED);
}

static unsigned short vm_flags(char r, char w, char x, char p)
{
	unsigned short flags;

	flags = 0;
	if(r == 'r') flags |= VM_READ;
	if(w == 'w') flags |= VM_WRITE;
	if(x == 'x') flags |= VM_EXEC;
	if(p == '-') flags |= VM_SHARED;
	return(flags);
}
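
/* For example, an "r-xp" segment (read + execute, private) gives
 *	vm_prot('r', '-', 'x', 'p') == PAGE_READONLY
 *	vm_flags('r', '-', 'x', 'p') == (VM_READ | VM_EXEC)
 */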

/* text, init_task, data, bss, physical memory in three chunks,
 * virtual memory area, stack
 */
static struct vm_area_struct process_vmas[9];
static int num_process_vmas = 0;

void add_perm_vma(unsigned long start, unsigned long end, char rperm,
		  char wperm, char xperm, char private, unsigned long offset)
{
	struct vm_area_struct *vma;

	if(num_process_vmas == sizeof(process_vmas)/sizeof(process_vmas[0]))
		panic("Too many process vmas");
	vma = &process_vmas[num_process_vmas++];
	*vma = ((struct vm_area_struct) {
		&kernel_maps, start, end, NULL, vm_prot(rperm, wperm, xperm, private),
		vm_flags(rperm, wperm, xperm, private), 0, NULL, NULL, NULL, NULL,
		NULL, offset, NULL, 0
	});
	insert_vm_struct(&kernel_maps, vma);
}
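
/* A typical (hypothetical) call, registering a read-only, executable,
 * private segment such as the kernel text:
 *
 *	add_perm_vma(text_start, text_end, 'r', '-', 'x', 'p', 0);
 *
 * where text_start and text_end stand in for the segment's bounds.
 */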

/* Out-of-line wrappers for the page table walking macros, presumably so
 * that they can be called from code which doesn't see the kernel header
 * definitions.
 */
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pmd_t *pmd_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pmd_offset(pgd, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset(pmd, address));
}