diff -Naur host/arch/i386/kernel/ldt.c host-ptrace/arch/i386/kernel/ldt.c --- host/arch/i386/kernel/ldt.c Fri Oct 26 00:01:41 2001 +++ host-ptrace/arch/i386/kernel/ldt.c Sun Nov 3 18:37:48 2002 @@ -24,11 +24,12 @@ * assured by user-space anyway. Writes are atomic, to protect * the security checks done on new descriptors. */ -static int read_ldt(void * ptr, unsigned long bytecount) +static int read_ldt(struct task_struct *task, void * ptr, + unsigned long bytecount) { int err; unsigned long size; - struct mm_struct * mm = current->mm; + struct mm_struct * mm = task->mm; err = 0; if (!mm->context.segments) @@ -64,9 +65,10 @@ return err; } -static int write_ldt(void * ptr, unsigned long bytecount, int oldmode) +static int write_ldt(struct task_struct *task, void * ptr, + unsigned long bytecount, int oldmode) { - struct mm_struct * mm = current->mm; + struct mm_struct * mm = task->mm; __u32 entry_1, entry_2, *lp; int error; struct modify_ldt_ldt_s ldt_info; @@ -148,23 +150,29 @@ return error; } -asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount) +int modify_ldt(struct task_struct *task, int func, void *ptr, + unsigned long bytecount) { int ret = -ENOSYS; switch (func) { case 0: - ret = read_ldt(ptr, bytecount); + ret = read_ldt(task, ptr, bytecount); break; case 1: - ret = write_ldt(ptr, bytecount, 1); + ret = write_ldt(task, ptr, bytecount, 1); break; case 2: ret = read_default_ldt(ptr, bytecount); break; case 0x11: - ret = write_ldt(ptr, bytecount, 0); + ret = write_ldt(task, ptr, bytecount, 0); break; } return ret; +} + +asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount) +{ + return(modify_ldt(current, func, ptr, bytecount)); } diff -Naur host/arch/i386/kernel/ptrace.c host-ptrace/arch/i386/kernel/ptrace.c --- host/arch/i386/kernel/ptrace.c Fri Aug 9 15:57:14 2002 +++ host-ptrace/arch/i386/kernel/ptrace.c Sun Nov 3 17:44:55 2002 @@ -147,6 +147,14 @@ put_stack_long(child, EFL_OFFSET, tmp); } +extern long 
do_mmap2(struct task_struct *task, + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); + +extern int modify_ldt(struct task_struct *task, int func, void *ptr, + unsigned long bytecount); + asmlinkage int sys_ptrace(long request, long pid, long addr, long data) { struct task_struct *child; @@ -416,6 +424,112 @@ else child->ptrace &= ~PT_TRACESYSGOOD; ret = 0; + break; + } + + case PTRACE_FAULTINFO: { + struct ptrace_faultinfo fault; + + fault = ((struct ptrace_faultinfo) + { .is_write = child->thread.error_code, + .addr = child->thread.cr2 }); + ret = copy_to_user((unsigned long *) data, &fault, + sizeof(fault)); + if(ret) + break; + break; + } + case PTRACE_SIGPENDING: + ret = copy_to_user((unsigned long *) data, + &child->pending.signal, + sizeof(child->pending.signal)); + break; + + case PTRACE_MMAP: { + struct ptrace_mmap map; + + if(copy_from_user(&map, (unsigned long *) data, sizeof(map))){ + ret = -EIO; + break; + } + + ret = do_mmap2(child, map.addr, map.len, map.prot, + map.flags, map.fd, map.offset >> PAGE_SHIFT); + if((ret & ~PAGE_MASK) == 0) + ret = 0; + break; + } + + case PTRACE_MUNMAP: { + struct ptrace_munmap unmap; + + if(copy_from_user(&unmap, (unsigned long *) data, + sizeof(unmap))){ + ret = -EIO; + break; + } + + down_write(&child->mm->mmap_sem); + ret = do_munmap(child->mm, unmap.addr, unmap.len); + up_write(&child->mm->mmap_sem); + break; + } + + case PTRACE_LDT: { + struct ptrace_ldt ldt; + + if(copy_from_user(&ldt, (unsigned long *) data, + sizeof(ldt))){ + ret = -EIO; + break; + } + ret = modify_ldt(child, ldt.func, ldt.ptr, ldt.bytecount); + break; + } + + case PTRACE_JOIN_MM: { + struct task_struct *sib; + struct mm_struct *mm = child->mm; + + /* Locking - we grab the tasklist_lock to hold the task in + * place. Once we have the task, we need to task_lock() since + * that protects task->mm. That should hold the mm in place. + * This can race with exec or exit. 
With exec, we will get + * either the old mm which the process is abandoning or the + * new one which it has just created, depending on who wins + * the task_lock race. With exit, we will get a NULL mm if it + * beats us. In that case, we return -EINVAL. + */ + ret = -EINVAL; + read_lock(&tasklist_lock); + sib = find_task_by_pid(data); + if(sib == NULL) + goto out_unlock_tasklist; + + if(!(sib->ptrace & PT_PTRACED) || + (sib->p_pptr != child->p_pptr)) + goto out_unlock_tasklist; + + task_lock(sib); + if(sib->mm == NULL) + goto out_unlock_task; + + atomic_inc(&sib->mm->mm_users); + task_unlock(sib); + + read_unlock(&tasklist_lock); + + child->mm = sib->mm; + child->active_mm = child->mm; + mmput(mm); + + ret = 0; + break; + + out_unlock_task: + task_unlock(sib); + out_unlock_tasklist: + read_unlock(&tasklist_lock); break; } diff -Naur host/arch/i386/kernel/sys_i386.c host-ptrace/arch/i386/kernel/sys_i386.c --- host/arch/i386/kernel/sys_i386.c Mon Mar 19 15:35:09 2001 +++ host-ptrace/arch/i386/kernel/sys_i386.c Mon Oct 28 15:41:46 2002 @@ -40,7 +40,7 @@ } /* common code for old and new mmaps */ -static inline long do_mmap2( +long do_mmap2(struct task_struct *task, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) @@ -56,7 +56,7 @@ } down_write(&current->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + error = do_mmap_pgoff(task, file, addr, len, prot, flags, pgoff); up_write(&current->mm->mmap_sem); if (file) @@ -69,7 +69,7 @@ unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { - return do_mmap2(addr, len, prot, flags, fd, pgoff); + return do_mmap2(current, addr, len, prot, flags, fd, pgoff); } /* @@ -100,7 +100,7 @@ if (a.offset & ~PAGE_MASK) goto out; - err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + err = do_mmap2(current, a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); out: return err; } diff -Naur 
host/include/asm-i386/ptrace.h host-ptrace/include/asm-i386/ptrace.h --- host/include/asm-i386/ptrace.h Sun Sep 23 19:20:51 2001 +++ host-ptrace/include/asm-i386/ptrace.h Sun Nov 3 18:37:32 2002 @@ -51,6 +51,45 @@ #define PTRACE_SETOPTIONS 21 +struct ptrace_faultinfo { + int is_write; + unsigned long addr; +}; + +struct ptrace_mmap { + unsigned long addr; + unsigned long len; + unsigned long prot; + unsigned long flags; + unsigned long fd; + unsigned long offset; +}; + +struct ptrace_munmap { + unsigned long addr; + unsigned long len; +}; + +struct ptrace_mprotect { + unsigned long addr; + unsigned long len; + unsigned long prot; +}; + +struct ptrace_ldt { + int func; + void *ptr; + unsigned long bytecount; +}; + +#define PTRACE_FAULTINFO 42 +#define PTRACE_SIGPENDING 43 +#define PTRACE_MMAP 44 +#define PTRACE_MUNMAP 45 +#define PTRACE_MPROTECT 46 +#define PTRACE_LDT 47 +#define PTRACE_JOIN_MM 48 + /* options set using PTRACE_SETOPTIONS */ #define PTRACE_O_TRACESYSGOOD 0x00000001 diff -Naur host/include/linux/mm.h host-ptrace/include/linux/mm.h --- host/include/linux/mm.h Fri Aug 30 15:03:44 2002 +++ host-ptrace/include/linux/mm.h Tue Nov 5 17:34:50 2002 @@ -539,9 +539,10 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long pgoff); +extern unsigned long do_mmap_pgoff(struct task_struct *task, + struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long pgoff); static inline unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -551,7 +552,7 @@ if ((offset + PAGE_ALIGN(len)) < offset) goto out; if (!(offset & ~PAGE_MASK)) - ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); + ret = do_mmap_pgoff(current, file, addr, len, prot, 
flag, offset >> PAGE_SHIFT); out: return ret; } diff -Naur host/mm/mmap.c host-ptrace/mm/mmap.c --- host/mm/mmap.c Fri Aug 9 15:57:31 2002 +++ host-ptrace/mm/mmap.c Mon Oct 28 14:51:45 2002 @@ -390,10 +390,12 @@ return 0; } -unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long pgoff) +unsigned long do_mmap_pgoff(struct task_struct *task, struct file * file, + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long pgoff) { - struct mm_struct * mm = current->mm; + struct mm_struct * mm = task->mm; struct vm_area_struct * vma, * prev; unsigned int vm_flags; int correct_wcount = 0; @@ -434,7 +436,7 @@ if (vm_flags & VM_LOCKED) { unsigned long locked = mm->locked_vm << PAGE_SHIFT; locked += len; - if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur) + if (locked > task->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN; } @@ -489,7 +491,7 @@ /* Check against address space limit. */ if ((mm->total_vm << PAGE_SHIFT) + len - > current->rlim[RLIMIT_AS].rlim_cur) + > task->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; /* Private writable mapping? Check memory availability.. */