Explanation of nginx.conf parameters in Nginx


# The user (and, after the user name, optionally the group) that the nginx worker processes run as
user www;

# Number of nginx worker processes
worker_processes 2;

# Error log location and level: [debug | info | notice | warn | error | crit]
error_log /var/htdocs/logs/nginx_error.log crit;

# File that stores the master process ID (PID)
pid /usr/local/nginx.pid;
# Maximum number of open file descriptors per worker process; keep this in line with the system limit (ulimit -n)
worker_rlimit_nofile 51200;

events
{
# Event model: use [kqueue | rtsig | epoll | /dev/poll | select | poll];
use epoll; # epoll is the high-performance model on Linux 2.6+
worker_connections 51200; # maximum number of connections per worker process (total maximum connections = worker_connections x worker_processes, here 51200 x 2 = 102400)
}

http
{
# File extension to MIME type mapping table
include mime.types;

# Default file type
default_type application/octet-stream;

# Log file formats
log_format main '$remote_addr - $remote_user [$time_local] $request '
'"$status" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';

log_format download '$remote_addr - $remote_user [$time_local] '
'"$request" $status $bytes_sent '
'"$http_referer" "$http_user_agent" '
'"$http_range" "$sent_http_content_range"';

# Default encoding
charset gb2312,utf-8;

# Hash bucket size for the server_name hash tables
server_names_hash_bucket_size 128;
# Enable efficient file transfer (sendfile)
sendfile on;
# The following two options help prevent network congestion (reference: http://i.cn.yahoo.com/nesta2001zhang/blog/p_104/)
tcp_nopush on;
tcp_nodelay on;

# Keep-alive connection timeout
keepalive_timeout 300;

# FastCGI timeout and buffer settings; the directive names below are largely self-explanatory
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
fastcgi_buffer_size 128k;
fastcgi_buffers 4 256k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
fastcgi_temp_path /dev/shm;

# Enable gzip compression
gzip on;
# Minimum response size to compress
gzip_min_length 1k;
# Compression buffers
gzip_buffers 4 8k;
# HTTP version used for compression (1.1 by default; use 1.0 when a Squid 2.5 front end is in place)
gzip_http_version 1.1;
# MIME types to compress. text/html is always compressed by default, so it does not need to be listed; listing it anyway is harmless but produces a warning
gzip_types text/plain application/x-javascript text/css text/html text/javascript application/xml;
# Error pages
error_page 404 http://www.111cn.net;
error_page 403 http://111cn.net;
# Maximum size of an uploaded file (request body)
client_max_body_size 20m;
# Request header buffer sizes
client_header_buffer_size 16k;
large_client_header_buffers 4 64k;
# Server list for load balancing (see the sketch after this block for how the upstream is referenced)
# If four independent groups of php-cgi processes (8 child processes per group) are started on the same machine, performance should be worse than a single group of 32 child processes, because within one group the eAccelerator PHP bytecode cache in shared memory is shared, which gives a higher hit rate.
# The advantage, however, is that if one group of PHP processes hangs, the other ports can take over; the probability of a 502 error seems to be greatly reduced, or at least I have not run into one yet.
upstream mysvr {
# The weight parameter sets the weight: the higher the weight, the higher the probability that a request is assigned to that server
# Squid listening on port 3128 on this machine
server 192.168.8.1:3128 weight=5;
server 192.168.8.2:80 weight=1;
server 192.168.8.3:80 weight=6;
}
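Note that the server block below proxies to http://127.0.0.1 directly rather than to this upstream group. To actually balance requests across the mysvr servers defined above, the proxy location would point at the upstream by name; a minimal sketch (an assumed variant, not part of the original configuration):

location / {
proxy_pass http://mysvr;
}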
# Virtual host configuration
server
{
listen 80;
server_name www.52crack.com;
index index.html index.htm index.php;
root /var/htdocs/52crack;
# Add a trailing slash to directory requests, e.g. /foo is permanently redirected to http://$host/foo/
if (-d $request_filename)
{
rewrite ^/(.*)([^/])$ http://$host/$1$2/ permanent;
}
# Access log for this virtual host
access_log logs/www.52crack.com.access.log main;

location ~ .*\.php?$
{
include fcgi.conf;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
}
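The included fcgi.conf is not reproduced in this article; it normally carries the standard fastcgi_param definitions that tell the PHP FastCGI backend which script to run. A minimal sketch of what such a file typically contains (assumed, not taken from the author's actual file):

fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;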
# Requests for /img/*, /js/* and /css/* resources are served directly from local files without going through Squid
# This approach is not recommended when there are many files, because the Squid cache works better in that case
location ~ ^/(img|js|css)/ {
root /var/htdocs/52crack;
expires 24h;
}

# Enable load balancing for "/"
location / {
proxy_pass http://127.0.0.1;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;

proxy_temp_file_write_size 64k;
}

# Address for viewing nginx status
location /NginxStatus {
stub_status on;
access_log on;
auth_basic "NginxStatus";
auth_basic_user_file conf/htpasswd;
}
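Besides HTTP basic authentication, access to the status page is often additionally restricted by client address. A hedged sketch of that variant (the subnet is assumed, not part of the original configuration):

location /NginxStatus {
stub_status on;
auth_basic "NginxStatus";
auth_basic_user_file conf/htpasswd;
allow 192.168.8.0/24; # assumed trusted subnet
deny all;
}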
}
}
# Why does kernel.shmmax need to be set to 134217728?
# Isn't the default enough? How was this number calculated?
# Reply from Zhang Yan:
# Because php.ini sets eaccelerator.shm_size="128", the shared memory that eAccelerator may use is 128 MB.
# 134217728 bytes / 1024 / 1024 = 128 MB
# On Linux, the maximum shared memory segment a single process may use is limited by the value (in bytes) in /proc/sys/kernel/shmmax; for example, on CentOS and Red Hat the default shmmax is 33554432 bytes (33554432 / 1024 / 1024 = 32 MB).
# To change the value temporarily:
# echo <number of bytes> > /proc/sys/kernel/shmmax
# The value reverts every time the system restarts. To change it permanently, edit /etc/sysctl.conf and set:
# kernel.shmmax = <number of bytes>   (here, kernel.shmmax = 134217728 for the 128 MB above)
# If your eAccelerator uses the default 32 MB of shared memory, you can leave this value unchanged.
