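/*
 * Kernel-side process and thread handling for the user-mode (um)
 * architecture.  Each kernel task is backed by a process on the host:
 * thread.extern_pid records the host pid, and operations that cannot be
 * done from inside the task's own host process (context switch, fork,
 * kernel thread creation, exec, console input) are queued in
 * thread.request and raised with usr1_pid() (presumably SIGUSR1) so
 * that they can be serviced in do_proc_op() below.
 */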
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/mm.h"
#include "linux/malloc.h"
#include "linux/utsname.h"
#include "linux/fs.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/stat.h"
#include "asm/pgtable.h"
#include "asm/pgalloc.h"
#include "asm/spinlock.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"

struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

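/*
 * Look up the task_struct with the given kernel pid by walking the task
 * list under tasklist_lock.  If 'require' is set, failing to find the
 * task is fatal.
 */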
struct task_struct *get_task(int pid, int require)
{
        struct task_struct *task, *ret;

        ret = NULL;
        read_lock(&tasklist_lock);
        for_each_task(task){
                if(task->pid == pid){
                        ret = task;
                        break;
                }
        }
        read_unlock(&tasklist_lock);
        if(require && (ret == NULL)) panic("get_task couldn't find a task\n");
        return(ret);
}

int external_pid(struct task_struct *task)
{
        if(task == NULL) task = current;
        return(task->thread.extern_pid);
}

int current_external_pid(void)
{
        return(external_pid(NULL));
}

void free_stack(unsigned long stack)
{
        free_page(stack);
}

void set_extern_pid(int task_pid, int pid)
{
        struct task_struct *task;

        if(task_pid == 0) task = &init_task;
        else task = get_task(task_pid, 1);
        task->thread.extern_pid = pid;
}

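/*
 * Turn tracing of the given task (or of current, when t is NULL) on or
 * off, returning the previous setting.  Signals are blocked around the
 * update; the trap_pid()/getpid() calls presumably give the tracing
 * thread a chance to pick up the new state.
 */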
int set_user_thread(void *t, int on)
{
        struct task_struct *task;
        int ret, sigs;

        if(t == NULL) task = current;
        else task = t;
        if(on == task->thread.tracing) return(on);
        sigs = set_signals(task, 1);
        ret = task->thread.tracing;
        task->thread.want_tracing = on;
        if(on) trap_pid(getpid());
        else getpid();
        set_signals(task, sigs);
        return(ret);
}

void set_tracing(void *task, int tracing)
{
        ((struct task_struct *) task)->thread.tracing = tracing;
        ((struct task_struct *) task)->thread.want_tracing = tracing;
}

int is_tracing(void *task)
{
        return(((struct task_struct *) task)->thread.tracing);
}

int get_want_tracing(void *task)
{
        return(((struct task_struct *) task)->thread.want_tracing);
}

extern void schedule_tail(struct task_struct *prev);

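/*
 * Entry point of the host process that backs a new kernel thread: it
 * registers itself for tracing, sets up the SIGSEGV stack, records its
 * host pid in the task, stops until it is continued, and then calls the
 * requested thread function.
 */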
static int new_thread_proc(void *t)
{
        struct task_struct *task;
        int (*fn)(void *), pid;
        void *arg;

        task = t;
        trace_myself();
        set_sigstack(t, SIGSEGV, kern_segv_handler, 1, 0, SIGVTALRM);
        pid = getpid();
        fn = task->thread.request.u.thread.proc;
        arg = task->thread.request.u.thread.arg;
        task->thread.extern_pid = pid;
        stop_pid(pid);
        set_cmdline("(kernel thread)");
        if(current->thread.request.u.cswitch.from != NULL)
                schedule_tail(current->thread.request.u.cswitch.from);
        return((*fn)(arg));
}

unsigned long alloc_stack(void)
{
        unsigned long page;

        if((page = __get_free_page(GFP_KERNEL)) == 0)
                panic("Couldn't allocate new stack");
        stack_protections(page, PAGE_SIZE);
        return(page);
}

extern int inited_cpus;

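/*
 * Create the host process for a new kernel thread: the task's request
 * block carries the function and argument, clone_and_wait() starts
 * new_thread_proc() on the top of the task's stack, and the new thread
 * inherits init_mm.  On SMP the thread can also be bound to a cpu slot.
 */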
static int start_kernel_thread(struct task_struct *task, int (*fn)(void *),
                               void *arg, unsigned long flags, int cpu)
{
        int extern_pid;
        unsigned long sp;
        int clone_flags;

        sp = ((unsigned long) task) + 2 * PAGE_SIZE - sizeof(void *);
        clone_flags = flags | CLONE_FILES | SIGCHLD;
        task->thread.request.u.thread.proc = fn;
        task->thread.request.u.thread.arg = arg;
        task->thread.extern_pid = -1;
        extern_pid = clone_and_wait(new_thread_proc, task, (void *) sp,
                                    clone_flags);
        if(task->thread.extern_pid == -1) panic("task didn't set its pid");
        atomic_inc(&init_mm.mm_count);
        task->mm = &init_mm;
        task->active_mm = &init_mm;
#ifdef __SMP__
        if(cpu != -1){
                cpu_tasks[cpu].pid = extern_pid;
                cpu_tasks[cpu].task = task;
                inited_cpus++;
                init_tasks[cpu] = task;
                cpu_number_map[cpu] = cpu;
                task->processor = cpu;
                cont_pid(extern_pid);
        }
#endif
        return(extern_pid);
}

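/*
 * Arch implementation of kernel_thread(): fork a new task with
 * do_fork(), look it up with get_task(), then queue an OP_THREAD
 * request (serviced by do_proc_op()) so that start_kernel_thread()
 * creates the host process for it; the host pid comes back through
 * thread.request.u.thread.new_pid.
 */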
int kernel_thread1(int (*fn)(void *), void * arg, unsigned long flags,
                   int cpu, int *extern_pid_out)
{
        struct task_struct *new_task;
        int pid;

        pid = do_fork(CLONE_VM | flags, 0, NULL);
        if(pid < 0) panic("do_fork failed in kernel_thread");
        new_task = get_task(pid, 1);
        current->thread.request.op = OP_THREAD;
        current->thread.request.u.thread.proc = fn;
        current->thread.request.u.thread.arg = arg;
        current->thread.request.u.thread.flags = flags;
        current->thread.request.u.thread.new_task = new_task;
        current->thread.request.u.thread.cpu = cpu;
        usr1_pid(getpid());
        if(extern_pid_out != NULL)
                *extern_pid_out = current->thread.request.u.thread.new_pid;
        current->thread.request.u.cswitch.from = NULL;
        return(pid);
}

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        return(kernel_thread1(fn, arg, flags, -1, NULL));
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk, unsigned cpu)
{
        if (prev != next)
                clear_bit(cpu, &prev->cpu_vm_mask);
        set_bit(cpu, &next->cpu_vm_mask);
}

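/*
 * Context switch: queue an OP_SWITCH request naming the task to switch
 * to and raise it with usr1_pid(); the actual switch between host
 * processes happens in do_proc_op().  On return, cswitch.from tells us
 * which task we were switched away from.
 */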
void *_switch_to(void *prev, void *next)
{
        struct task_struct *from, *to;

        from = prev;
        to = next;
        current->thread.request.op = OP_SWITCH;
        current->thread.request.u.cswitch.to = next;
        usr1_pid(getpid());
        flush_tlb_kernel_vm();
        return(current->thread.request.u.cswitch.from);
}

void do_bh(void)
{
#ifndef __SMP__
        if (softirq_state[0].active & softirq_state[0].mask)
                do_softirq();
#else
#error Need to update do_bh
#endif
}

int ret_from_sys_call(void *t)
{
        struct task_struct *task;

        task = t;
        if(task == NULL) task = current;
        do_bh();
        if(task->sigpending != 0) do_signal(task, NULL, NULL);
        return(task->need_resched);
}

void release_thread(struct task_struct *task)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int i;

        pgd = (pgd_t *) task->thread.real_mm->pgd;
        for(i = 0; i < USER_PGD_PTRS; i++){
                pmd = pmd_offset(&pgd[i], 0);
                if(pmd_present(*pmd)) free_page(pmd_page(*pmd));
        }
        free_page((unsigned long) task->thread.real_mm->pgd);
        kfree(task->thread.real_mm);
        kill_pid(task->thread.extern_pid);
}

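/*
 * Arch part of fork: give the child a fresh thread_struct and its own
 * real_mm bookkeeping, and, when this really is a fork (rather than a
 * kernel thread), queue an OP_FORK request so the child's host process
 * gets set up through do_proc_op().
 */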
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
                struct task_struct * p, struct pt_regs * regs)
{
        p->thread = (struct thread_struct) INIT_THREAD;
        p->thread.kernel_stack = (unsigned long) p;
        p->thread.tracing = current->thread.forking;
        p->thread.real_mm = kmalloc(sizeof(*p->thread.real_mm), GFP_KERNEL);
        *p->thread.real_mm = ((struct mm_struct) EMPTY_MM);
        if(current->thread.forking){
                p->thread.tracing = 0;
                p->thread.want_tracing = 0;
                current->thread.request.op = OP_FORK;
                current->thread.request.u.fork.task = p;
                current->thread.request.u.fork.tramp_stack = alloc_stack();
                usr1_pid(getpid());
        }
        current->need_resched = 1;
        return(0);
}

void add_input_request(int op, int fd, void (*proc)(int))
{
        if((current->thread.request.op == OP_INPUT) &&
           (current->thread.request.u.input_request.op != INPUT_NONE))
                panic("Unfinished input request");
        current->thread.request.op = OP_INPUT;
        current->thread.request.u.input_request.op = op;
        current->thread.request.u.input_request.fd = fd;
        current->thread.request.u.input_request.proc = proc;
        usr1_pid(getpid());
}

struct {
        struct task_struct *from;
        struct task_struct *to;
        int processor;
} switch_record[1024];

int switch_index = 0;

DECLARE_MUTEX(input_sem);
void *input_task;

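/*
 * Carry out a request queued in a task's thread.request.  This runs on
 * behalf of the task (presumably in the tracing thread) and performs
 * the pieces that cannot be done from inside the task's own host
 * process: exec, context switch, kernel thread creation, console input
 * registration, and fork.  Returns the op that was handled.
 */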
int do_proc_op(void *t, int proc_id)
{
        struct task_struct *task, *to, *new;
        int op, new_pid;

        task = t;
        op = task->thread.request.op;
        switch(op){
        case OP_NONE:
                break;
        case OP_EXEC:
                kern_finish_exec(task, task->thread.request.u.exec.ip,
                                 task->thread.request.u.exec.sp,
                                 task->thread.extern_pid);
                break;
        case OP_SWITCH:
                to = task->thread.request.u.cswitch.to;
                switch_record[switch_index].from = task;
                switch_record[switch_index].to = to;
                switch_record[switch_index++].processor = to->processor;
                if(switch_index == 1024) switch_index = 0;
#ifdef __SMP__
                cpu_tasks[proc_id].task = to;
                cpu_tasks[proc_id].pid = to->thread.extern_pid;
                if(cpu_tasks[0].pid == cpu_tasks[1].pid)
                        panic("Scheduled a process on two processors");
#else
                current = to;
#endif
                if(to->thread.request.op == OP_FORK_FINISH){
                        to->thread.request.u.fork_finish.from = task;
                        continue_fork(to->thread.extern_pid,
                                      to->thread.request.u.fork_finish.regs);
                        to->thread.request.op = OP_NONE;
                        set_tracing(to, 1);
                }
                else to->thread.request.u.cswitch.from = task;
                cont_pid(to->thread.extern_pid);
                break;
        case OP_THREAD:
                new_pid = start_kernel_thread(task->thread.request.u.thread.new_task,
                                              task->thread.request.u.thread.proc,
                                              task->thread.request.u.thread.arg,
                                              task->thread.request.u.thread.flags,
                                              task->thread.request.u.thread.cpu);
                task->thread.request.u.thread.new_pid = new_pid;
                break;
        case OP_INPUT:
                task->thread.request.u.input_request.pid = getpid();
                down(&input_sem);
                input_task = task;
                usr1_and_wait(get_main_pid());
                input_task = NULL;
                up(&input_sem);
                break;
        case OP_FORK:
                new = task->thread.request.u.fork.task;
                new_pid = start_fork_tramp(task->thread.request.u.fork.regs,
                                           new->thread.kernel_stack,
                                           task->thread.request.u.fork.tramp_stack);
                new->thread.extern_pid = new_pid;
                new->thread.request.op = OP_FORK_FINISH;
                new->thread.request.u.fork_finish.stack =
                        task->thread.request.u.fork.tramp_stack;
                memcpy(new->thread.request.u.fork_finish.regs,
                       task->thread.request.u.fork.regs,
                       sizeof(task->thread.request.u.fork.regs));
                break;
        default:
                panic("Bad op in do_proc_op");
                break;
        }
        task->thread.request.op = OP_NONE;
        return(op);
}

unsigned long stack_sp(unsigned long page)
{
        return(page + PAGE_SIZE - sizeof(void *));
}

int current_pid(void *t)
{
        struct task_struct *task;

        if(t == NULL) task = current;
        else task = t;
        return(task->pid);
}

static void do_idle(void)
{
        while(1){
                /* endless idle loop with no priority at all */
                current->priority = 0;
                current->counter = -100;

                /*
                 * although we are an idle CPU, we do not want to
                 * get into the scheduler unnecessarily.
                 */
                if (current->need_resched) {
                        schedule();
                        check_pgt_cache();
                }
        }
}

static int idle_proc(void *unused)
{
        del_from_runqueue(current);
        init_idle();
#ifdef __SMP__
        smp_num_cpus++;
#endif
        do_idle();
        return(0);
}

int cpu_idle(void)
{
        int i, pid;

        if(ncpus > 1){
                printk("Starting up other processors:\n");
                for(i = 1; i < ncpus; i++){
                        kernel_thread1(idle_proc, NULL, 0, i, &pid);
                        printk("\t#%d - idle thread pid = %d\n", i, pid);
                }
        }
        do_idle();
        return(0);
}

unsigned long *fork_regs(void *task)
{
        return(((struct task_struct *) task)->thread.request.u.fork.regs);
}

int page_size(void)
{
        return(PAGE_SIZE);
}

int get_input_request(void *t, int *fd_out,
                      void (**proc_out)(int), int *pid_out)
{
        struct task_struct *task;
        int op;

        task = t;
        op = task->thread.request.u.input_request.op;
        *pid_out = -1;
        if(op == INPUT_NEW_FD){
                *fd_out = task->thread.request.u.input_request.fd;
                *proc_out = task->thread.request.u.input_request.proc;
                *pid_out = task->thread.request.u.input_request.pid;
        }
        task->thread.request.op = OP_NONE;
        task->thread.request.u.input_request.op = INPUT_NONE;
        return(op);
}

static unsigned long input_mask = 0;
static spinlock_t input_mask_lock = SPIN_LOCK_UNLOCKED;

extern struct tasklet_struct input_tasklet;

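/*
 * Input notification: input_notify() records which registered
 * descriptor became ready in input_mask and schedules the tasklet;
 * input_handler() then runs as the tasklet, snapshots the mask under
 * the lock, and calls the handler registered for each ready descriptor.
 */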
void input_notify(int index)
{
        tasklet_schedule(&input_tasklet);
        spin_lock(&input_mask_lock);
        input_mask |= (1 << index);
        spin_unlock(&input_mask_lock);
}

void input_handler(unsigned long ignored)
{
        int i;
        unsigned long mask;

        spin_lock(&input_mask_lock);
        mask = input_mask;
        input_mask = 0;
        spin_unlock(&input_mask_lock);
        for(i = 0; i < sizeof(mask) * 8; i++){
                if(mask & (1 << i)){
                        (*reg_fd[i].proc)(reg_fd[i].fd);
                }
        }
}

char *current_comm(void)
{
        return(current->comm);
}

void *current_sigstack(void *t)
{
        struct task_struct *task;

        if(t == NULL) task = current;
        else task = t;
        return((void *) task->thread.kernel_stack);
}

unsigned long forced_fault(void)
{
        unsigned long ret;

        ret = current->thread.forced_fault;
        current->thread.forced_fault = 0;
        return(ret);
}

void set_forced_fault(void *task, unsigned long addr)
{
        ((struct task_struct *) task)->thread.forced_fault = addr;
}

char *current_cmd(void)
{
        return("(Unknown)");
#ifdef notdef
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long arg;

        pgd = pgd_offset(current->mm, current->mm->arg_start);
        pmd = pmd_offset(pgd, current->mm->arg_start);
        if(!pmd_present(*pmd)) return("(unknown)");
        pte = pte_offset(pmd, current->mm->arg_start);
        if(!pte_present(*pte)) return("(unknown)");
        arg = (pte_val(*pte) & PAGE_MASK) +
                (current->mm->arg_start & ~PAGE_MASK);
        return((char *) arg);
#endif
}

void force_sigbus(void)
{
        printk("Killing pid %d because of a lack of memory\n", current->pid);
        lock_kernel();
        sigaddset(&current->signal, SIGBUS);
        recalc_sigpending(current);
        current->flags |= PF_SIGNALED;
        do_exit(SIGBUS | 0x80);
}

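/*
 * Child side of fork, run once the new host process is up: free the
 * trampoline stack used to start it and finish the scheduler's part of
 * the switch with schedule_tail().
 */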
void finish_fork(void)
{
        free_page(current->thread.request.u.fork_finish.stack);
        schedule_tail(current->thread.request.u.fork_finish.from);
}

void *get_current_task(void)
{
        return(current);
}