Scattered notes on the Linux kernel


# The bottom half of the interrupt.
common_interrupt:
	addl $-0x80, (%esp)	/* Adjust vector into the [-256, -1] range */	# once negated, this maps the vector back into the 0 ~ 255 range
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp, %eax
	call do_IRQ		# dispatch to the interrupt handler
	jmp ret_from_intr
ENDPROC(common_interrupt)

unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();

	irq = __get_cpu_var(vector_irq)[vector];

	if (!handle_irq(irq, regs)) {	# call the interrupt routine; this branch is taken only when no handler was found
		ack_APIC_irq();		# still acknowledge the local APIC (Advanced Programmable Interrupt Controller) so the vector is not left pending

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();	# the top half is done; irq_exit may now run the bottom half

	set_irq_regs(old_regs);
	return 1;
}

void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();	# calls __do_softirq

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

#define invoke_softirq() __do_softirq()	# invoke_softirq is simply a macro that expands to __do_softirq

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();	# save the 32-bit bitmap of pending softirqs into a stack variable
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);	# clear the pending softirq bitmap

	local_irq_enable();	# re-enable interrupts on the current CPU

	h = softirq_vec;	# first element of the softirq action array

	do {			# walk the softirq array, one entry per bit of the 32-bit bitmap
		if (pending & 1) {	# if this bit is set, the corresponding softirq needs handling
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);	# run the softirq handler
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;		# next entry
		pending >>= 1;	# shift right after each check: the low bits are checked first, so lower softirq numbers have higher priority
	} while (pending);

	local_irq_disable();	# disable interrupts on the current CPU again

	pending = local_softirq_pending();	# re-read the bitmap to see whether new softirqs were raised while we were processing
	if (pending && --max_restart)	# new work arrived and we are still within the per-invocation restart limit
		goto restart;		# jump back and handle the new softirqs; the softirq response is very prompt

	if (pending)		# softirqs really are being raised too frequently
		wakeup_softirqd();	# wake the kernel thread dedicated to handling softirqs

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable();
}
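
# For orientation, a minimal sketch (mine, not from the original note) of how a handler ends up in
# softirq_vec and how a bit gets set in the pending bitmap that __do_softirq polls. MY_SOFTIRQ is a
# made-up name; a real softirq number has to be added to the enum in <linux/interrupt.h>, so the
# sketch borrows an existing slot purely so it compiles.
#include <linux/interrupt.h>
#include <linux/kernel.h>

#define MY_SOFTIRQ TASKLET_SOFTIRQ	/* hypothetical slot, reused here only for illustration */

static void my_softirq_action(struct softirq_action *h)
{
	/* softirq context: interrupts enabled, preemption off, must not sleep */
	pr_info("softirq ran on CPU %d\n", smp_processor_id());
}

static void my_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);	/* softirq_vec[MY_SOFTIRQ].action = my_softirq_action */
}

static void my_irq_handler(void)	/* typically the "top half", i.e. an interrupt handler */
{
	raise_softirq(MY_SOFTIRQ);	/* sets bit MY_SOFTIRQ in local_softirq_pending() */
}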

# Softirq:   selected via the 32-bit pending bitmap; runs in parallel (the same softirq can run on several CPUs at once under SMP); the ksoftirqd kernel thread helps drain the bitmap; must not sleep
# Tasklet:   built on softirqs but serialized (the same tasklet never runs concurrently); must not sleep
# Workqueue: runs in a kernel thread, so it may sleep
# (see the sketch below for a tasklet and a work item side by side)
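
# To make the comparison concrete, a minimal sketch (my own, with assumed my_* names) of the two
# deferral mechanisms layered on top of softirqs: a tasklet, which must not sleep, and a work item,
# which runs in a worker thread and may sleep.
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/delay.h>

static void my_tasklet_fn(unsigned long data)		/* runs in TASKLET_SOFTIRQ context */
{
	pr_info("tasklet ran, data=%lu\n", data);	/* no sleeping allowed here */
}
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static void my_work_fn(struct work_struct *work)	/* runs in a kernel worker thread */
{
	msleep(10);	/* sleeping is fine here */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_top_half(void)	/* typically called from an interrupt handler */
{
	tasklet_schedule(&my_tasklet);	/* deferred to the softirq bottom half */
	schedule_work(&my_work);	/* deferred to the shared worker threads */
}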
----------------------------------------------------------------------------------------------
# DPL: privilege level required to access a descriptor
# RPL: requested privilege level carried in a segment selector (the requester's claimed level)
# CPL: current privilege level (CS.RPL)
----------------------------------------------------------------------------------------------
# Kernel space is the high 1 GB of every process's otherwise private page-table (virtual address) space, and in every process it maps to the same physical memory.
----------------------------------------------------------------------------------------------
# IPC shared memory works by mapping a part of several processes' private page-table space onto the same physical memory.
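
# A small user-space illustration (mine, not from the note): a System V shared-memory segment
# attached before fork, so parent and child see the same physical pages.
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);	/* one page of shared memory */
	char *p = shmat(id, NULL, 0);				/* map it into this process */

	if (fork() == 0) {					/* the child inherits the mapping */
		strcpy(p, "hello from the child");
		_exit(0);
	}
	wait(NULL);
	printf("parent sees: %s\n", p);				/* same physical pages, so the write is visible */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}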
----------------------------------------------------------------------------------------------
# Semaphores are implemented much like the mutex below: every thread that fails to acquire one is put on a wait queue and sleeps. The difference is that a semaphore allows a configurable number of holders at the same time, while a mutex can be held by only one thread.

# Mutex lock
#define __mutex_fastpath_lock(count, fail_fn)			\
do {								\
	unsigned int dummy;	# dummy output; the atomic_t pointer stays in EAX, which is the argument fail_fn expects \
								\
	typecheck(atomic_t *, count);				# |				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		# \ compile-time checks of the macro parameters \
								\
	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"		# atomically decrement count		\
		     "   jns 1f	\n"				# result still >= 0 (SF clear): the lock was free and we now own it \
		     "   call " #fail_fn "\n"			# result went negative (SF set): contended, call the slow path \
		     "1:\n"					\
		     : "=a" (dummy)				# dummy output; tells the compiler EAX is clobbered \
		     : "a" (count)				# input: the count pointer is passed in EAX \
		     : "memory", "ecx", "edx");			\
} while (0)
# The whole acquisition is a single locked instruction: on a uniprocessor a single instruction is
# already atomic, and on SMP the LOCK prefix makes the decrement atomic. If count drops from 1 to 0
# the caller owns the mutex; if it goes negative, fail_fn (of type void (*)(atomic_t *)) is called to
# put the thread on the wait queue, so later threads obtain the mutex in order.
# fail_fn is substituted with the following function as the real parameter:
static __used noinline void __sched __mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);	# recover the mutex and queue up on it in order

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);	# put the thread into uninterruptible sleep until it is explicitly woken (after an unlock, of course)
}
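
# A user-space rendering of the same fast-path / slow-path split (my sketch, using C11 atomics
# instead of the kernel's inline asm; the toy_* names are made up). count starts at 1; an atomic
# decrement whose result stays non-negative means the lock was free, anything else falls into a
# slow path that, in the kernel, would enqueue the task and sleep.
#include <stdatomic.h>
#include <sched.h>

/* count: 1 = unlocked, 0 = locked, negative = locked with waiters */
typedef struct { atomic_int count; } toy_mutex;
#define TOY_MUTEX_INIT { 1 }

static void toy_mutex_lock(toy_mutex *m)
{
	/* fast path: mirrors "lock decl (%eax); jns 1f; call fail_fn" */
	if (atomic_fetch_sub(&m->count, 1) == 1)
		return;				/* it was 1 and is now 0: we own it */

	/* slow path: the kernel would sleep in TASK_UNINTERRUPTIBLE on a wait queue;
	 * this toy just yields and retries until the owner releases */
	while (atomic_exchange(&m->count, -1) != 1)
		sched_yield();
}

static void toy_mutex_unlock(toy_mutex *m)
{
	/* the kernel's unlock fast path increments count and wakes a waiter if the old
	 * value was negative; the toy simply publishes "unlocked" again */
	atomic_exchange(&m->count, 1);
}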

# Spin lock (a ticket lock; this variant is used when the number of CPUs is below 256)
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"	# atomically: inc receives the old value of the live lock, and the live lock's high byte ("next ticket") is incremented. Because this is atomic, no two threads can ever spin with the same ticket.
		"1:\t"
		"cmpb %h0, %b0\n\t"		# compare my ticket (high byte of inc) with the current owner (low byte). If the live lock was still at its initial value 0 (00000000 00000000), the two bytes are equal.
		"je 2f\n\t"			# equal: we own the lock, jump past the spin; otherwise start spinning
		"rep ; nop\n\t"			# pause instruction
		"movb %1, %b0\n\t"		# while the lock cannot be taken, keep reloading the owner byte of the live lock (note: it changes whenever someone unlocks) into the low byte of inc and compare again
	# Example:
	# Thread A arrives first and tries to take the live lock: xaddw saves the live lock into A's
	# stack variable inc ("the lock as it was then") and adds 1 to the live lock's high byte
	# (i.e. adds 256). A then compares the high and low bytes of its copy; if they are equal it
	# skips the spin and runs the code below. Since the live lock's initial value is 0, both
	# bytes are 0 and A gets the lock.
	# Next, threads B, C and D also ask for the lock (A has not released it yet). Because the
	# xaddw is atomic, each of them bumps the live lock's high byte by 1, so the high bytes of
	# their copies are 1, 2 and 3 respectively, and the live lock's high byte is now 4. For each
	# of them the high and low bytes of their copy are (of course) not equal, so they spin;
	# every iteration reloads the live lock's low byte into the copy and compares again.
	# When A unlocks (the live lock's low byte is incremented by 1), B's copy - whose high byte
	# is 1 - now matches the live lock's low byte, so B takes the lock, and so on: an ordered
	# spin-wait for the unlock. Since only one byte is used for each half, at most 256
	# contenders can queue on the lock at the same time.
	# In short, a waiter keeps comparing its saved ticket (the high byte of the copy it took)
	# with the live lock's low byte; each newcomer bumps the live lock's high byte, so its
	# ticket equals the previous value of that byte, which yields an ordered competition for
	# the spin lock.
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"			# next round of the comparison, i.e. keep spinning
		"2:"
		: "+Q" (inc), "+m" (lock->slock)	# "+Q": inc must sit in a register whose high byte is addressable (one of A, B, C, DX); slock is used directly as the memory operand
		:
		: "memory", "cc");
}
# Unlocking a spin lock
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"	# unlocking just increments the low byte (the owner / "now serving" byte) of the live lock
		: "+m" (lock->slock)
		:
		: "memory", "cc");
}
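
# The same ticket discipline in portable user-space C (my sketch; the kernel packs both bytes into
# one word and uses a single xaddw, here they are kept as two atomic bytes for clarity):
#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

typedef struct {
	_Atomic uint8_t owner;	/* "now serving" - the low byte of slock */
	_Atomic uint8_t next;	/* next ticket to hand out - the high byte of slock */
} toy_ticket_lock;

static void toy_ticket_lock_acquire(toy_ticket_lock *l)
{
	uint8_t my_ticket = atomic_fetch_add(&l->next, 1);	/* like xaddw: grab a ticket */

	while (atomic_load(&l->owner) != my_ticket)	/* like cmpb/movb/jmp: spin until served */
		sched_yield();				/* the kernel uses rep; nop (pause) instead */
}

static void toy_ticket_lock_release(toy_ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);	/* like incb: hand the lock to the next ticket holder */
}
# As in the kernel version, the 8-bit ticket limits the lock to 256 simultaneous contenders.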
----------------------------------------------------------------------------------------------
# Pointer to the current process
#define current get_current()

static __always_inline struct task_struct *get_current(void)
{
	return percpu_read_stable(current_task);
}

DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = &init_task;	# initially the boot task, i.e. swapper/idle

#define DEFINE_PER_CPU(type, name) __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name	# the macro that defines a per-CPU variable

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	__u32			flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */
	mm_segment_t		addr_limit;
	struct restart_block	restart_block;
	void __user		*sysenter_return;
#ifdef CONFIG_X86_32
	unsigned long		previous_esp;	/* ESP of the previous stack in
						   case of nested (IRQ) stacks */
	__u8			supervisor_stack[0];
#endif
	int			uaccess_err;
};

union thread_union {		# a thread's kernel-mode stack
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

# Per-CPU variables
# A private data area is allocated for every CPU and the contents of the .data.percpu section are copied into it, one copy per CPU.
# Because each CPU has its own copy, a per-CPU variable cannot be read directly through per_cpu__name alone;
# the offset __per_cpu_offset[i] has to be added, where i is the CPU id.
# For example, __get_cpu_var(name) is equivalent to __per_cpu_offset[smp_processor_id()] + per_cpu__name,
# which can also be viewed as sizeof(.data.percpu) * cpuid + per_cpu__name. (A usage sketch follows below.)
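
# A minimal usage sketch of the per-CPU API described above (my example; my_counter is a made-up
# variable name):
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* one instance per CPU, placed in .data.percpu */

static void count_event(void)
{
	/* get_cpu_var disables preemption and resolves
	 * __per_cpu_offset[smp_processor_id()] + &per_cpu__my_counter */
	get_cpu_var(my_counter)++;
	put_cpu_var(my_counter);	/* re-enable preemption */
}

static unsigned long read_counter_of(int cpu)
{
	return per_cpu(my_counter, cpu);	/* the same variable, but some other CPU's copy */
}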
----------------------------------------------------------------------------------------------
# Pending signals are checked on the way back to user space: on return from a system call, from an interrupt (including, of course, the timer interrupt that drives the scheduler) or from an exception handler.
----------------------------------------------------------------------------------------------
# fork duplicates the current process's PCB and its data segment, code segment, stack segment and the stack data already written (copy-on-write in practice).
# exec replaces the current process's data and code segments and resets the registers, e.g. EIP, ESP...
# pthread_create duplicates the current process's PCB but shares the same virtual address space; the difference is that each thread gets its own stack.
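
# The classic user-space pattern built on these two calls (my sketch):
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();		/* duplicate the PCB; the address space is shared copy-on-write */

	if (pid == 0) {
		/* child: exec replaces the code/data segments and resets EIP, ESP, ... */
		execlp("ls", "ls", "-l", (char *)NULL);
		_exit(127);		/* only reached if exec failed */
	}
	waitpid(pid, NULL, 0);		/* the parent keeps its original image */
	return 0;
}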
----------------------------------------------------------------------------------------------
# Read the code that sets up the interrupt vector table, to reinforce the memory...
setup_idt:				# set up the Interrupt Descriptor Table
	lea ignore_int, %edx		# put the 32-bit offset of ignore_int into EDX
	movl $(__KERNEL_CS << 16), %eax	# put the kernel code segment selector into the high 16 bits of EAX; in this version __KERNEL_CS is ((12 + 0) * 8), i.e. the kernel code segment descriptor sits in the GDT at index 12 with DPL 0
	movw %dx, %ax			/* selector = 0x0010 = cs */	# the low 16 bits of the ignore_int address go into AX, the low half of EAX
	movw $0x8E00, %dx		/* interrupt gate - dpl=0, present */	# mark the ignore_int gate present, DPL 0, 32-bit, gate type = interrupt gate
	# So the pair holds: (16-bit kernel code segment selector) | (32-bit handler offset) | (1-bit present flag) | (2-bit DPL) | (1-bit S=0, system descriptor) | (4-bit gate type, 1110b for a 32-bit interrupt gate) | (8 reserved bits)
	lea idt_table, %edi		# get the base address of the interrupt vector table
	mov $256, %ecx			# loop count: there are at most 256 vectors
rp_sidt:
	movl %eax, (%edi)		# |
	movl %edx, 4(%edi)		# \ point all 256 vectors at the default ignore_int routine
	addl $8, %edi			# each gate descriptor is 8 bytes... next
	dec %ecx
	jne rp_sidt

.macro set_early_handler handler,trapno	# a macro for installing an early handler; the parameters are the handler's offset and the trap number. The gate it installs uses the kernel code segment, DPL 0 and the interrupt-gate type.
	lea \handler, %edx		# same pattern as above, so no further commentary...
	movl $(__KERNEL_CS << 16), %eax
	movw %dx, %ax
	movw $0x8E00, %dx		/* interrupt gate - dpl=0, present */
	lea idt_table, %edi
	movl %eax, 8*\trapno(%edi)
	movl %edx, 8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err, trapno=0	# call the macro above to install the following handlers...
	set_early_handler handler=early_illegal_opcode, trapno=6
	set_early_handler handler=early_protection_fault, trapno=13
	set_early_handler handler=early_page_fault, trapno=14

	ret
# In summary: first point every vector at a common default routine, then install the specific routines one by one.
# Exceptions:
static inline void _set_gate(int gate, unsigned type, void *addr,	# this inline function installs an interrupt routine with whatever segment selector, DPL and gate type are requested
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;
	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);	# the equivalent of the bit-fiddling above: selector, offset, DPL... are packed into a gate_desc structure
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	write_idt_entry(idt_table, gate, &s);	# given the vector number, the packed descriptor and the IDT base address, write_idt_entry uses memcpy to install the corresponding gate
}
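
# A C sketch (mine) of how the EAX/EDX pair built in setup_idt maps onto the 8-byte gate
# descriptor, matching the 0x8E00 pattern above:
#include <stdint.h>

struct idt_gate { uint32_t low, high; };	/* the two 32-bit words written at (%edi) and 4(%edi) */

static struct idt_gate make_interrupt_gate(uint16_t selector, uint32_t handler)
{
	struct idt_gate g;

	g.low  = ((uint32_t)selector << 16) | (handler & 0xffff);	/* EAX: selector | offset bits 15..0 */
	g.high = (handler & 0xffff0000) | 0x8e00;			/* EDX: offset bits 31..16 | P=1, DPL=0, type=1110b */
	return g;
}

/* e.g. make_interrupt_gate(0x60, handler_addr) -- 0x60 being __KERNEL_CS = 12 * 8 */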
----------------------------------------------------------------------------------------------
# Important files during kernel initialization:
# traps.c	# initializes the various trap/interrupt routines
# entry_32.S	# system call entry, system trace handling and the common interrupt stubs...
# irqinit.c	#
# head_32.S	# early memory setup and the interrupt vector table...
# segment.h	# segment selector definitions for the various modes
# processor.h	# some structure definitions for the task context
# irq_32.c	#
# softirq.c	# the bottom half of interrupts
----------------------------------------------------------------------------------------------
# CR0 has two main control functions: the PG bit turns paging on and off, and the PE bit switches between real mode and protected mode.
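
# A tiny sketch (mine; the kernel's own read_cr0() helper does the same) of inspecting those two
# bits from kernel context:
static inline unsigned long my_read_cr0(void)
{
	unsigned long cr0;
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	return cr0;
}
/* (cr0 & (1UL << 0))  -> PE: protected mode enabled
 * (cr0 & (1UL << 31)) -> PG: paging enabled      */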
----------------------------------------------------------------------------------------------
#define SYSCALL_VECTOR 0x80	# 0x80 is the system-call interrupt vector
# set_system_trap_gate(SYSCALL_VECTOR, &system_call);	# installs the system-call entry routine at idt_table + 128 * 8
----------------------------------------------------------------------------------------------
# System call. First, a look at the approximate flow:

# The int 0x80 instruction's micro-operations decompose into roughly the following
-------------------------------------
if (CS->RPL <= (IDTR + 128*8)->DPL) {
	SS  = TR->Index->GDT->Base->TSS->SS0
	ESP = TR->Index->GDT->Base->TSS->SP0
	PUSH SS(old)
	PUSH ESP(old)
	PUSHF
	PUSH CS
	PUSH EIP
	TF = 0
	if ((IDTR + 128*8)->gateType == 1110b)
		IF = 0		=> CLI
	CS  = (IDTR + 128*8)->segmentSelector
	EIP = (IDTR + 128*8)->handlerOffset
} else
	segmentation fault
------------------------------------
# For a hardware interrupt there is no CS->RPL <= (IDTR + vector*8)->DPL check; execution proceeds directly, so hardware-interrupt code always runs in kernel mode (as dictated by the corresponding IDT entry).
# (IDTR + 128*8)->segmentSelector is initialized to __KERNEL_CS. In other words, once the int 0x80 issued from user mode has executed, the CPU is already in kernel mode.
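
# A user-space illustration of this path (my example; 32-bit x86 only, build with gcc -m32):
# invoke sys_write directly through int 0x80, with EAX holding the call number and EBX/ECX/EDX the
# arguments.
int main(void)
{
	const char msg[] = "hello via int 0x80\n";
	long ret;

	asm volatile("int $0x80"
		     : "=a" (ret)		/* EAX comes back as the return value */
		     : "a" (4),			/* EAX = 4, __NR_write on i386 */
		       "b" (1),			/* EBX = fd 1 (stdout) */
		       "c" (msg),		/* ECX = buffer */
		       "d" (sizeof(msg) - 1)	/* EDX = length */
		     : "memory");
	return ret < 0;
}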

# After int 0x80 has executed successfully:
ENTRY(system_call)
----------
	pushl %eax			# save EAX (the system call number, orig_eax)
	cld				# clear the direction flag
	pushl %gs
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax			# push the registers carrying the arguments passed from user space onto the thread's kernel stack
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
----------
	movl $(15*8 + 3), %edx		# __USER_DS
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	movl $(__KERNEL_STACK_CANARY), %edx
	movl %edx, %gs
	movl $-THREAD_SIZE, %ebp	# #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER); used to locate the current thread's thread_info
	andl %esp, %ebp			#
	testl $_TIF_WORK_SYSCALL_ENTRY, 8(%ebp)	# system call tracing in operation / emulation: if tracing is enabled, go to syscall_trace_entry
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax	# validate the system call number: if it exceeds the maximum, jump to the error path and set the error number
	jae syscall_badsys
syscall_call:
	call *sys_call_table(, %eax, 4)	# index sys_call_table by the call number times 4 bytes (32-bit pointers): this calls the real system call
	movl %eax, 24(%esp)		# store the system call's return value into the saved EAX slot on the stack
syscall_exit:
	LOCKDEP_SYS_EXIT		# lockdep hook at syscall exit; appears to be a debugging aid only
	cli				# disable interrupts
	TRACE_IRQS_OFF
	movl 8(%ebp), %ecx		# fetch the current thread's work/trace flags
	testl $_TIF_ALLWORK_MASK, %ecx	# any pending exit work? if so, go to syscall_exit_work
	jne syscall_exit_work

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS		# the following code is only built when irq-flags tracing is configured
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# were interrupts enabled? if not (IF clear), skip TRACE_IRQS_ON
	jz 1f
	TRACE_IRQS_ON
1:
#endif
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table, "a"
	.align 4
	.long irq_return, iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# allright, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	PER_CPU(gdt_page, %ebx)
	shr $16, %edx
	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx)	/* bits 16..23 */
	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx)	/* bits 24..31 */
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	push %eax			/* new kernel esp */
	CFI_ADJUST_CFA_OFFSET 4
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
----------------------------------------------------------------------------------------------
To be studied further when there is time...

Author: "move against the water, move back-min shao"
