/* Function prototype, kernel version linux-3.0.8. */
struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

/*
 * switch_to(prev, next, last): hand the CPU from `prev` to `next`.
 * `last` receives the task we actually switched away from — needed because
 * by the time we return, the kernel stack (and thus locals) has changed.
 */
#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
} while (0)
// First, let's look at the following macro:
#define offsetof (TYPE, MEMBER) ((size_t) & ((type*) 0)->member)
// Each DEFINE() below records the byte offset of a struct member so that
// assembly code can address it.
/*
The CC_STACKPROTECTOR patch was merged into the mainline kernel by Tejun Heo
to guard against kernel stack overflows.  The option is off in the default
config; set CONFIG_CC_STACKPROTECTOR=y in .config before compiling to enable
it.  It defends against attacks that exploit a kernel stack overflow.  The
principle: when a process starts, a pre-set "stack canary" value is placed
after each buffer.  Think of it as a sentinel — a buffer overflow will
inevitably clobber the canary, and when the kernel finds the canary's value
destroyed it panics the machine immediately.
So how is an overwrite of the stack canary detected?
That part is actually done by GCC: the build adds the -fstack-protector
flag to the compiler invocation.
*/
DEFINE (Tsk_stack_canary, offsetof (struct task_struct,stack_canary));
// (member of struct task_struct; the following offsets are within thread_info)
DEFINE (Ti_task, offsetof (struct thread_info, TASK));
//
/*
* Domain types
*/
/*
#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT   1   // client domain (runs programs, accesses data);
                            // accesses are checked against the permission
                            // bits of the individual sections and pages
                            // that make up the domain
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER  3   // manager domain: controls the behavior of the
                            // domain (its sections, pages and domain access);
                            // permission checks are bypassed
#else
#define DOMAIN_MANAGER  1
#endif
*/
// See the corresponding figure.
// This value is written via the coprocessor into the CP15 Domain Access
// Control register.
DEFINE (Ti_cpu_domain, offsetof (struct thread_info,cpu_domain));
/*
struct cpu_context_save {
	__u32	r4;
	__u32	r5;
	__u32	r6;
	__u32	r7;
	__u32	r8;
	__u32	r9;
	__u32	sl;
	__u32	fp;
	__u32	sp;
	__u32	pc;
	__u32	extra[2];	// XScale 'acc' register, etc.
};
*/
DEFINE (Ti_cpu_save, offsetof (struct thread_info,cpu_context));
/*
In the code below there is a set_tls macro; on my platform it resolves to
set_tls_v6k:
	.macro set_tls_v6k, tp, tmp1, tmp2
	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
	.endm
tp_value is the value written into the TLS register.
In a multithreaded application, all threads of a process share the same
address space, yet there is often a need to keep data that is private to a
single thread.  TLS — thread-local storage — is, as the name suggests, a
fast and efficient way to store per-thread local data.  A thread locates its
local data through the TLS register (implemented in hardware or software),
which points at that thread's own thread control block.
Older ARM cores — including some ARM9 and ARM11 cores — have no such
physical TLS register, so the operating system (Linux included) must emulate
it in software.  The newer generation of ARM cores, Cortex-Ax, does provide
a TLS register (in CP15).
All the kernel needs to do for TLS is allow user space (typically NPTL, a
pthread implementation) to store, at some point, each thread's unique base
address inside that thread's thread_info structure in the kernel.
*/
DEFINE (Ti_tp_value, offsetof (struct thread_info, tp_value));
/*
 * These are the reason codes for the thread notifier.
 */
#define THREAD_NOTIFY_FLUSH	0
#define THREAD_NOTIFY_EXIT	1
#define THREAD_NOTIFY_SWITCH	2
#define THREAD_NOTIFY_COPY	3
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	@ ip = address of the cpu_context save area in the previous thread's
	@ thread_info
	add	ip, r1, #TI_CPU_SAVE
	@ r3 = TLS pointer value of the next thread
	ldr	r3, [r2, #TI_TP_VALUE]
	@ Save r4-sl, fp, sp, lr into thread_info->cpu_context — this saves the
	@ old context.  Implemented separately for ARM and Thumb.
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	@ r6 = DOMAIN attribute of the next thread
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	@ set_tls was analyzed above
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]		@ r7 = next thread's task_struct
	ldr	r8, =__stack_chk_guard		@ r8 = address of __stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]	@ r7 = its stack_canary value
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	@ r5 = previous thread's task_struct; r4 = address of cpu_context in the
	@ next thread's thread_info.  r4/r5 are just temporaries across the call.
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	@ Fire the thread_notify_head notifier chain (example follows below)
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]	@ __stack_chk_guard = next thread_info->task->stack_canary
#endif
 THUMB(	mov	ip, r4	)	@ ip = cpu_context address in next thread_info
	mov	r0, r5		@ r0 again points at the previous task_struct
	@ Mirror of the save above: restore the next thread's context.  The pc
	@ loaded from cpu_context->pc is the lr stored at its own save — i.e.
	@ the place where the next thread resumes running.
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
Experiment Code:
#include <linux/kernel.h> #include <linux/notifier.h> #include <linux/module.h> #include <asm/ Thread_notify.h>module_license ("GPL"); static int test_event (struct notifier_block *this, unsigned long event, void * PTR) { printk (kern_info "in event:event number is%ld\n", Event); return notify_done;} static struct Notifier_block test_notifier ={ . notifier_call = test_event,};static int __init reg_notifier (void) { C3/>int err = 0; PRINTK (kern_info "Begin to register:\n"); Err = Thread_register_notifier (&test_notifier); if (err) { PRINTK (kern_err "register Test_notifier error\n"); Goto fail1; } PRINTK (kern_info "register Reboot_notifier completed\n"); Return 0;FAIL1: return err;} static void __exit Unreg_notifier (void) { thread_unregister_notifier (&test_notifier); PRINTK (kern_info "Unregister finished\n");} Module_init (Reg_notifier); Module_exit (Unreg_notifier);
Printed output:
[screenshot of the console output omitted — the log shows repeated "in event: event number is 2" lines]
Event number 2 is THREAD_NOTIFY_SWITCH; since context switches keep happening, the notifier naturally keeps firing.
Detailed analysis of the Cortex-A9 __switch_to assembly code (process switching)