Kernel Study: Copy_process

Source: Internet
Author: User
Tags: exit, data structures, goto, thread

copy_process() is called from do_fork() and is an important function: it creates the process descriptor and the other data structures required by the child process. It is defined in kernel/fork.c of the Linux 2.6.x source tree.

Only the key sections are commented as follows:

* * This creates a new process as a copy of the "old One", * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as the clone * flags). 
 The actual kick-off is left to the caller.  
                    * * This part of the code is implemented in the 2.6.38/static struct task_struct *copy_process (unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_ size, int __user *child_tidptr, struct pid *pid, int tra  
    CE) {int retval;  
    struct Task_struct *p;//saves the new process descriptor address int cgroup_callbacks_done = 0; /*clone_newns and Clone_fs are conflicting can not be set at the same time, otherwise error * * * (Clone_flags & (clone_newns| CLONE_FS) = = (clone_newns|  
      
    CLONE_FS)) return err_ptr (-einval); /* Thread groups must share signals as, and detached threads * can ONly is started up within the thread group. * */*clone_thread and Clone_sighand are conflicting can not be set at the same time, otherwise error * * * (Clone_flags & Clone_thread) &&! (  
      
    Clone_flags & Clone_sighand)) return err_ptr (-einval); * * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. 
     Blocking This case allows * to various simplifications in the other code. *//*clone_sighand and CLONE_VM conflicts cannot be set at the same time. otherwise error */if (Clone_flags & Clone_sighand) &&! (  
      
    Clone_flags & CLONE_VM)) return err_ptr (-einval); * * Siblings of global Init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * See more highlights of this column: http://www.bianceng.cnhttp://www.bianceng.cn/OS/unix/* multi-rooted Proce 
     SS trees, prevent global and container-inits * from creating siblings. */if (Clone_flags & clone_parent) && 
                Current->signal->flags & Signal_unkillable) return err_ptr (-einval);  
    /* Call the system security framework creation process, when the kernel is configured without selecting Config_security, the system security framework function is NULL/retval = Security_task_create (clone_flags);  
      
    if (retval) goto fork_out;  
    retval =-enomem; /* To copy a process descriptor for the current process, prepare for the child process descriptor * The function creates a new kernel stack for the subprocess, assigns a new process descriptor and thread_info structure, and then copies the process descriptor and thread_info of the parent process. 
    This is a full copy of the child process and the parent process's descriptor is exactly the same.  
    * * p = dup_task_struct (current);  
      
    if (!p) goto fork_out;  
      
    Ftrace_graph_init_task (P);  
      
Rt_mutex_init_task (P);  
    #ifdef config_prove_locking debug_locks_warn_on (!p->hardirqs_enabled);  
DEBUG_LOCKS_WARN_ON (!p->softirqs_enabled);  
    #endif/* To determine whether or not to exceed the set permissions/retval =-eagain;  
        if (Atomic_read (&p->real_cred->user->processes) >= task_rlimit (P, Rlimit_nproc)) { if (!capable (cap_sys_admin) &&!capable (Cap_sys_resource);& p->real_cred->user!= init_user) goto Bad_fork_free;  
    } retval = Copy_creds (P, clone_flags);  
      
    if (retval < 0) goto Bad_fork_free; * * If multiple threads are within copy_process (), then this check * triggers too. 
     This doesn ' t hurt, the check are only there * to stop root fork bombs.  
    * * * To determine whether the number of threads exceeds the allowable range of the system, or release the resources already requested/retval =-eagain;  
                    The IF (nr_threads >= max_threads)//max_threads is defined in Kernel_fork in Fork_init.  
      
      
    System maximum number of processes and system memory related goto Bad_fork_cleanup_count; /* The following code is primarily to initialize and copy the resource of the parent process to the subprocess/* Module Reference count operation/*!try_module_get (Task_thread_info (p)-&G  
    t;exec_domain->module)) goto Bad_fork_cleanup_count;  
    /*EXECVE system call number initialized to 0*/p->did_exec = 0;  Delayacct_tsk_init (P); /* must remain after dup_task_struct () * * * Set the status token, because the current status represents a copy from the parent process/copy_flags (Clone_flags, p);  
    Init_list_head (&p->children);  
    Init_list_head (&p->sibling);  
    Rcu_copy_process (P);  
    P->vfork_done = NULL;  
      
    Spin_lock_init (&p->alloc_lock);  
      
    Init_sigpending (&p->pending);  
    P->utime = Cputime_zero;  
    P->stime = Cputime_zero;  
    P->gtime = Cputime_zero;  
    p->utimescaled = Cputime_zero;  
p->stimescaled = Cputime_zero;  
    #ifndef config_virt_cpu_accounting p->prev_utime = Cputime_zero;  
P->prev_stime = Cputime_zero;  
#endif #if defined (split_rss_counting) memset (&p->rss_stat, 0, sizeof (P-&GT;RSS_STAT));  
      
    #endif P->default_timer_slack_ns = current->timer_slack_ns;  
    Task_io_accounting_init (&AMP;P-&GT;IOAC);  
      
    Acct_clear_integrals (P);  
      
    Posix_cpu_timers_init (P);      P->lock_depth =-1;  
/*-1 = no lock */do_posix_clock_monotonic_gettime (&p->start_time);    P->real_start_time = p->start_time;  
    Monotonic_to_bootbased (&p->real_start_time);  
    P->io_context = NULL;  
    P->audit_context = NULL;  
Cgroup_fork (P);  
    #ifdef Config_numa p->mempolicy = Mpol_dup (P->mempolicy);  
        if (Is_err (P->mempolicy)) {retval = Ptr_err (P->mempolicy);  
        P->mempolicy = NULL;  
    Goto Bad_fork_cleanup_cgroup;  
} mpol_fix_fork_child_flag (P);  
#endif #ifdef Config_trace_irqflags p->irq_events = 0;  
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW p->hardirqs_enabled = 1;  
#else p->hardirqs_enabled = 0;  
    #endif p->hardirq_enable_ip = 0;  
    p->hardirq_enable_event = 0;  
    P->hardirq_disable_ip = _this_ip_;  
    p->hardirq_disable_event = 0;  
    p->softirqs_enabled = 1;  
    P->softirq_enable_ip = _this_ip_;  
    p->softirq_enable_event = 0;  
    p->softirq_disable_ip = 0; p->softirq_disable_event = 0;  
    P->hardirq_context = 0;  
P->softirq_context = 0; #endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0;  
    /* No Locks held yet * * P->curr_chain_key = 0;  
p->lockdep_recursion = 0; #endif #ifdef config_debug_mutexes p->blocked_on = NULL;  
    /* not blocked yet/#endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR p->memcg_batch.do_batch = 0;  
P-&GT;MEMCG_BATCH.MEMCG = NULL; #endif/* Perform scheduler related setup. Assign this task to a CPU.  
      
    * * Sched_fork (P, clone_flags);  
    retval = Perf_event_init_task (p);  
      
    if (retval) goto bad_fork_cleanup_policy;  
    if ((retval = Audit_alloc (p)) goto Bad_fork_cleanup_policy; /* Copy all the process information/if ((retval = Copy_semundo (Clone_flags, p)) Goto Bad_fork_cleanup_au  
    Dit  
    if ((retval = Copy_files (Clone_flags, p)) goto Bad_fork_cleanup_semundo; if (retval = Copy_fs (cLone_flags, p))) goto Bad_fork_cleanup_files;  
    if ((retval = Copy_sighand (Clone_flags, p)) goto BAD_FORK_CLEANUP_FS;  
    if ((retval = copy_signal (Clone_flags, p)) goto Bad_fork_cleanup_sighand;  
    if ((retval = copy_mm (Clone_flags, p)) goto bad_fork_cleanup_signal;  
    if ((retval = copy_namespaces (Clone_flags, p)) goto bad_fork_cleanup_mm;  
    if ((retval = Copy_io (Clone_flags, p)) goto bad_fork_cleanup_namespaces;  
    retval = Copy_thread (Clone_flags, Stack_start, Stack_size, p, regs);  
      
    if (retval) goto bad_fork_cleanup_io;  
        if (PID!= &init_struct_pid) {retval =-enomem;  
        PID = Alloc_pid (P->nsproxy->pid_ns);  
      
        if (!pid) goto bad_fork_cleanup_io;  
            if (Clone_flags & clone_newpid) {retval = Pid_ns_prepare_proc (P->nsproxy->pid_ns); if (retval < 0) Goto bAd_fork_free_pid;  
    } p->pid = PID_NR (PID);  
    P->tgid = p->pid;  
      
    if (Clone_flags & clone_thread) P->tgid = current->tgid;  
        if (current->nsproxy!= p->nsproxy) {retval = Ns_cgroup_clone (P, PID);  
    if (retval) goto bad_fork_free_pid; } P->set_child_tid = (Clone_flags & clone_child_settid)?  
    Child_tidptr:null; 
     /* Clear TID on mm_release ()? * * P->clear_child_tid = (Clone_flags & clone_child_cleartid)?  
Child_tidptr:null;  
#ifdef Config_futex p->robust_list = NULL;  
#ifdef Config_compat p->compat_robust_list = NULL;  
    #endif init_list_head (&p->pi_state_list);  
P->pi_state_cache = NULL; #endif/* Sigaltstack should is cleared when sharing the same VM/if (Clone_flags & (Clone_ vm| clone_vfork) = = CLONE_VM) p->sas_ss_sp = P->sas_ss_size = 0; 
     * * Syscall tracing and stepping should be turned out in the * child regardless of clone_ptrace.  
    * * USER_DISABLE_SINGLE_STEP (P);  
Clear_tsk_thread_flag (P, tif_syscall_trace);  
#ifdef Tif_syscall_emu Clear_tsk_thread_flag (P, Tif_syscall_emu);  
      
    #endif clear_all_latency_tracing (P); /* OK, now we should is set up. * * p->exit_signal = (Clone_flags & clone_thread)?  
    -1: (Clone_flags & Csignal);  
    p->pdeath_signal = 0;  
      
    p->exit_state = 0; 
     * * Ok, make it visible to the rest of the system. 
     * We dont wake it up yet.  
    * * P->group_leader = p;  
      
    Init_list_head (&p->thread_group); /* Now this task is set up, run Cgroup callbacks if * necessary. We need to run them before the task's visible * on the tasklist.  
    * * Cgroup_fork_callbacks (P);  
      
    Cgroup_callbacks_done = 1; /* Need tasklist lock For parent etc handling!  
      
    * * WRITE_LOCK_IRQ (&tasklist_lock); /* Clone_parent re-uses the old PARENT */if (Clone_flags & clone_parent|  
        Clone_thread)) {p->real_parent = current->real_parent;  
    p->parent_exec_id = current->parent_exec_id;  
        else {p->real_parent = current;  
    p->parent_exec_id = current->self_exec_id;  
      
    } spin_lock (¤t->sighand->siglock); * * Process Group and session signals need to is delivered to just the * parent before the fork or both the P Arent and the child after the * fork. 
     Restart if a signal comes in before we add the new process to * it ' s process group. * A fatal signal pending means that current'll exit, so the new * thread can ' t slip out of a OOM kill (or Normal 
     SIGKILL).  
    * * recalc_sigpending ();  
 if (signal_pending (current)) {spin_unlock (¤t->sighand->siglock);       WRITE_UNLOCK_IRQ (&tasklist_lock);  
        retval =-erestartnointr;  
    Goto Bad_fork_free_pid;  
        } if (Clone_flags & clone_thread) {current->signal->nr_threads++;  
        Atomic_inc (¤t->signal->live);  
        Atomic_inc (¤T-&GT;SIGNAL-&GT;SIGCNT);  
        P->group_leader = current->group_leader;  
    List_add_tail_rcu (&p->thread_group, &p->group_leader->thread_group);  
      
        } if (likely (P->pid)) {Tracehook_finish_clone (P, clone_flags, trace); if (Thread_group_leader (p)) {if (Clone_flags & Clone_newpid) P->nsproxy->pid_ns  
      
            ->child_reaper = p;  
            P->signal->leader_pid = pid;  
            P->signal->tty = Tty_kref_get (Current->signal->tty);  
            Attach_pid (P, Pidtype_pgid, TASK_PGRP (current));  
  Attach_pid (P, Pidtype_sid, task_session (current));          List_add_tail (&p->sibling, &p->real_parent->children);  
            List_add_tail_rcu (&p->tasks, &init_task.tasks);  
        __get_cpu_var (process_counts) + +;  
        } attach_pid (P, pidtype_pid, PID);  
    nr_threads++;  
    } total_forks++;  
    Spin_unlock (¤t->sighand->siglock);  
    WRITE_UNLOCK_IRQ (&tasklist_lock);  
    Proc_fork_connector (P);  
    Cgroup_post_fork (P);  
    Perf_event_fork (P);  
      
/* Returns the pointer to the subroutine descriptor/return p;  
Bad_fork_free_pid:if (PID!= &init_struct_pid) free_pid (PID);  
Bad_fork_cleanup_io:if (P->io_context) Exit_io_context (p);  
Bad_fork_cleanup_namespaces:exit_task_namespaces (P);  
        Bad_fork_cleanup_mm:if (p->mm) {task_lock (P);  
        if (P->signal->oom_score_adj = = oom_score_adj_min) Atomic_dec (&p->mm->oom_disable_count);  
        Task_unlock (P); MmpUT (P-&GT;MM); } bad_fork_cleanup_signal:if (!) (  
Clone_flags & Clone_thread)) free_signal_struct (p->signal);  
Bad_fork_cleanup_sighand: __cleanup_sighand (P->sighand); BAD_FORK_CLEANUP_FS:EXIT_FS (P); /* Blocking * * Bad_fork_cleanup_files:exit_files (P);  
/* Blocking * * BAD_FORK_CLEANUP_SEMUNDO:EXIT_SEM (P);  
Bad_fork_cleanup_audit:audit_free (P);  
Bad_fork_cleanup_policy:perf_event_free_task (P);  
#ifdef Config_numa mpol_put (p->mempolicy);  
    Bad_fork_cleanup_cgroup: #endif cgroup_exit (P, Cgroup_callbacks_done);  
    Delayacct_tsk_free (P);  
Module_put (Task_thread_info (p)->exec_domain->module);  
    Bad_fork_cleanup_count:atomic_dec (&p->cred->user->processes);  
Exit_creds (P);  
Bad_fork_free:free_task (P);  
Fork_out:return err_ptr (retval); }

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.