1. Process Management
Roy Lee, 20 June 2005
NCTU Computer Operating System Lab
1
2. Process State
Newly Created.
Runnable & Running
Expired
Interrupted
Resume
Terminated
Robert Love, “Linux Kernel Development,” 2nd Edition
2
3. Process Creation – fork()
fork() exec()
Copy the whole address space Discard the current address space
and the page table and load another program
A A A
A W
Parent Parent Child Parent Child
B X
C Y
B B B
...
...
...
...
...
C C C
D Z
D D D
...
...
...
3
4. Process Creation – vfork()
vfork() exec()
Copy the whole address space Discard the current address space
and the page table and load another program
A A A
W
Parent Parent Parent Child
Child X
Y
B B B
...
...
...
...
C C C
Z
D D D
...
...
...
4
5. Process Creation – Copy-on-Write
fork() copy-on-write
Only copy the page table Delay or altogether prevent
copying of data
A A A
B’
Parent Parent Child Parent Child
B B B
...
...
...
...
...
C C C
D D D
...
...
...
5
6. Process Creation – Copy-on-Write
fork() exec()
Only copy the page table Delay or altogether prevent
copying of data
A A A
W
Parent Parent Child Parent Child
X
Y
B B B
...
...
...
...
...
C C C
Z
D D D
...
...
...
6
8. Process Creation - Threads
Threads in Linux
To Linux, threads are just processes that share certain
resources.
clone() – the heart of the Linux implementation of threads
Threads are created like normal tasks, except that the clone() syscall
is passed flags indicating the specific resources to be shared
clone(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, 0);
Actually, both fork() and vfork() are also implemented via the clone()
syscall
clone(SIGCHLD, 0);
clone(CLONE_VFORK | CLONE_VM | SIGCHLD, 0);
8
9. Process Creation Flow
User space
sys_fork() sys_vfork() sys_clone()
Kernel space
[kernel/process.c]
[kernel/fork.c]
do_fork() [kernel/sched.c]
alloc_pidmap()
duplicate the task_struct, initialize it
copy_process()
and setup according to the specified clone_flags
success?
yes
wake_up_new_task() put the child into runqueue
no
free_pidmap() vfork? wait_for_completion()
yes
when the child terminates,
no it wakes up the parent sleeping in the wait queue
return pid
9
10. Process State
preempted
schedule
ready running
fork exit
initial asleep zombie
10
11. Process State
interrupt
preempted
syscall,
exception
schedule kernel user
ready
running running
return
fork exit
initial asleep zombie
11
12. Execution Mode and Context
User mode
application
not allowed
(user) code
Process System
context context
system calls, interrupts,
exceptions system tasks
Kernel mode
Uresh Vahalia, “UNIX Internals – The New Frontiers”
12
13. Execution Mode and Context
User mode A
W
application
not allowed X
(user) code
Process
context System Y
context
system calls, interrupts, B
exceptions system tasks
C
Kernel mode Z
User D
Space
…
…
Kernel
Space
...
...
P0 P1
Uresh Vahalia, “UNIX Internals – The New Frontiers”
13
14. thread_info
/*
 * Per-thread low-level bookkeeping, kept alongside the kernel stack.
 * (Layout as presented for the 2.6 x86 kernel; see the ULK reference
 * cited below.)
 */
struct thread_info {
struct task_struct *task; /* pointer back to the task_struct that owns this thread_info */
struct exec_domain *exec_domain; /* execution domain (personality) of the thread */
__u32 flags; /* low-level flag bits (e.g. TIF_* work flags) -- NOTE(review): specific bits not shown here */
__u32 status; /* synchronous thread flags, e.g. TS_USEDFPU (tested by unlazy_fpu, see dup_task_struct slide) */
__u32 cpu; /* logical number of the CPU the thread last ran on -- confirm against arch code */
int preempt_count; /* non-zero disables kernel preemption -- presumably; verify against preempt_* helpers */
mm_segment_t addr_limit; /* address-space limit for user/kernel access checks (set_fs/get_fs) -- confirm */
struct restart_block restart_block; /* state used to restart interrupted system calls */
};
Daniel P. Bovet, Marco Cesati, “Understanding the Linux Kernel,” 3rd Edition
14
19. copy_process() [kernel/fork.c]
int retval;
struct task_struct *p = NULL;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
19
20. copy_process()
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
retval = -EAGAIN;
if (atomic_read(&p->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
goto bad_fork_free;
}
atomic_inc(&p->user->__count);
atomic_inc(&p->user->processes);
get_group_info(p->group_info);
20
21. dup_task_struct()
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk; #define unlazy_fpu(tsk) do {
struct thread_info *ti; if ((tsk)->thread_info->status & TS_USEDFPU)
save_init_fpu(tsk);
prepare_to_copy(orig); } while (0)
tsk = alloc_task_struct();
if (!tsk) 2
return NULL;
__get_free_pages(GFP_KERNEL,THREAD_ORDER)
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
*ti = *orig->thread_info;
*tsk = *orig;
tsk->thread_info = ti;
ti->task = tsk;
atomic_set(&tsk->usage,2);
return tsk;
}
21
Daniel P. Bovet, Marco Cesati, “Understanding the Linux Kernel,” 3rd Edition
22. copy_process()
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(p->thread_info->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
copy_flags(clone_flags, p);
p->pid = pid;
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
goto bad_fork_cleanup;
...
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
22
12. PID vs. TGID
1. Every process has a unique pid.
2.Each process in the same thread group has the same tgid.
3. The tgid is the pid of the oldest process in that group.
do_fork(){
... pid:1002
copy_process(){ tgid:1002
...
p->pid = pid;
... fork()
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
pid:1003 clone() pid:1005
p->tgid = current->tgid;
tgid:1003 tgid:1003
}
}
pid:1007 clone()
tgid:1004 fork()
asmlinkage long sys_getpid(void) clone() pid:1006
pid:1004 tgid:1003
{
tgid:1004
return current->tgid;
}
23
24. copy_process()
if ((retval = security_task_alloc(p)))
    goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
    goto bad_fork_cleanup_security;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
    goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
    goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
    goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
    goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
    goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
    goto bad_fork_cleanup_signal;
if ((retval = copy_keys(clone_flags, p)))
    goto bad_fork_cleanup_mm;
if ((retval = copy_namespace(clone_flags, p)))
    goto bad_fork_cleanup_keys;

Error-exit ladder (control falls through downward from the label jumped to,
undoing the copies in reverse order):
bad_fork_cleanup_namespace:  exit_namespace(p);
bad_fork_cleanup_keys:       exit_keys(p);
bad_fork_cleanup_mm:         if (p->mm) mmput(p->mm);
bad_fork_cleanup_signal:     exit_signal(p);
bad_fork_cleanup_sighand:    exit_sighand(p);
bad_fork_cleanup_fs:         exit_fs(p);    /* blocking */
bad_fork_cleanup_files:      exit_files(p); /* blocking */
bad_fork_cleanup_semundo:    exit_sem(p);
bad_fork_cleanup_audit:      audit_free(p);
bad_fork_cleanup_security:   security_task_free(p);
bad_fork_cleanup_policy:
24
25. copy_process()
if ((retval = security_task_alloc(p)))
    goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
    goto bad_fork_cleanup_security;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
    goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
    goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
    goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
    goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
    goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
    goto bad_fork_cleanup_signal;
if ((retval = copy_keys(clone_flags, p)))
    goto bad_fork_cleanup_mm;
if ((retval = copy_namespace(clone_flags, p)))
    goto bad_fork_cleanup_keys;

Error-exit ladder (control falls through downward from the label jumped to,
undoing the copies in reverse order):
bad_fork_cleanup_namespace:  exit_namespace(p);
bad_fork_cleanup_keys:       exit_keys(p);
bad_fork_cleanup_mm:         if (p->mm) mmput(p->mm);
bad_fork_cleanup_signal:     exit_signal(p);
bad_fork_cleanup_sighand:    exit_sighand(p);
bad_fork_cleanup_fs:         exit_fs(p);    /* blocking */
bad_fork_cleanup_files:      exit_files(p); /* blocking */
bad_fork_cleanup_semundo:    exit_sem(p);
bad_fork_cleanup_audit:      audit_free(p);
bad_fork_cleanup_security:   security_task_free(p);
bad_fork_cleanup_policy:
25
26. context_switch()
[kernel/sched.c]
1. static inline
2. task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
3. {
4. struct mm_struct *mm = next->mm;
5. struct mm_struct *oldmm = prev->active_mm;
6. if (unlikely(!mm)) {
7. next->active_mm = oldmm;
8. atomic_inc(&oldmm->mm_count);
9. enter_lazy_tlb(oldmm, next);
10. } else
11. switch_mm(oldmm, mm, next);
12. if (unlikely(!prev->mm)) {
13. prev->active_mm = NULL;
14. WARN_ON(rq->prev_mm);
15. rq->prev_mm = oldmm;
16. }
17. /* Here we just switch the register state and the stack. */
18. switch_to(prev, next, prev);
19. return prev;
20. }
26