# User and group that worker processes will run as.
user nginx nginx;
# Number of NGINX workers. The usual upper bound is one worker per CPU core.
# If the server has 2+ CPU cores but still does not receive many requests,
# it is reasonable to keep the value at 1 to avoid creating idle processes.
worker_processes auto;
# Maximum number of open file descriptors allowed per worker process.
worker_rlimit_nofile 65535;
# File holding the NGINX master process id.
pid /run/nginx.pid;
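# A commented-out alternative sketch, assuming you prefer to pin the worker count and
# CPU affinity explicitly instead of relying on "auto" detection (the core count of 4
# is an illustrative assumption, not taken from this server):
# worker_processes 4;
# worker_cpu_affinity auto;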
events {
# How many client connections each worker can maintain at a time.
worker_connections 20480;
# Using epoll is less CPU intensive when serving thousands of connections, because
# instead of scanning all connections to see which file descriptors are ready to read/write,
# it only scans those which are active.
# p.s.: it is only available on Linux kernels 2.6 and later.
use epoll;
# multi_accept makes a worker accept() as many connections as possible once it is notified about a new connection.
multi_accept on;
}
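# A rough capacity estimate from the settings above, assuming for illustration that
# worker_processes resolves to 4 cores: 4 workers * 20480 worker_connections gives a
# theoretical ceiling of about 81920 simultaneous connections. Each proxied request
# consumes two of them (one to the client, one to the upstream), and the
# worker_rlimit_nofile value of 65535 per worker also bounds the practical limit.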
# HTTP configuration module.
http {
# Default charset appended to the "Content-Type" response header.
# The charset directive takes a single charset value.
charset utf-8;
# Note that when SSI is enabled, the Last-Modified and Content-Length headers are not sent.
# Default: off.
# ssi off;
##
# MIME-TYPE
##
# Other config files can be pulled in with the "include" directive.
# Here we include the MIME type map.
include mime.types;
# "application/octet-stream" means the response is treated as a binary file.
# If this directive is not defined, the response type defaults to "text/plain".
default_type application/octet-stream;
##
# LOG
##
# Do not escape non-ASCII characters in log variables.
log_escape_non_ascii off;

log_format main '$remote_addr - $remote_user [$time_iso8601] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" "$request_time" "$upstream_addr" "$upstream_response_time" "$request_body" "$host"';
log_format real_ip '$remote_addr - $remote_user [$time_local] $request_time "$request" '
'$status $body_bytes_sent "$http_referer" '
'$http_user_agent $http_x_forwarded_for';

log_format post_log '$remote_addr - $remote_user [$time_local] $request_time "$request" '
'$status $body_bytes_sent "$request_body" "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" ';
# Access log: path. Without a named format it uses the predefined "combined" format.
access_log /usr/local/openresty/nginx/logs/access.log;
# Error log: path. Without a level it logs at "error" severity and above.
error_log /usr/local/openresty/nginx/logs/error.log;
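# The named formats above are defined but not referenced by the access_log directive.
# A minimal sketch of attaching one of them, reusing this file's existing log path:
# access_log /usr/local/openresty/nginx/logs/access.log main;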
##
# TCP
##
# Optimizes data transfer by copying data between file descriptors inside the kernel
# instead of reading and copying the data through user space.
sendfile on;
# Causes NGINX to attempt to send its HTTP response headers in one packet
# instead of using partial frames. This is useful for prepending headers before calling sendfile,
# or for throughput optimization.
tcp_nopush on;
# Disables the Nagle algorithm.
# Useful for sending frequent small bursts of data in real time.
# tcp_nodelay off;
tcp_nodelay on;
# Timeout during which a keep-alive client connection will stay open on the server side
# to serve further requests.
keepalive_timeout 30s;
##
# GZIP
##
# In production you MUST set gzip to "on" in order to save bandwidth. Web browsers
# that handle compressed responses (all recent ones do) will receive a much smaller
# version of the server response.
gzip on;
# Sets the minimum HTTP version of a client request required to compress the response.
# It is lowered to 1.0 so that responses are still compressed when requests arrive over
# HTTP/1.0, e.g. via proxies (NGINX itself talks HTTP/1.1 to the browser and HTTP/1.0
# to backend servers by default).
gzip_http_version 1.0;
# Compression level, 1 (fastest) to 9 (slowest).
# Commonly suggested value:
# gzip_comp_level 6;
gzip_comp_level 2;

# Enables compression for all proxied requests.
gzip_proxied any;

# Minimum length of the response (bytes). Responses shorter than this length will not be compressed.
# Commonly suggested value:
# gzip_min_length 10000;
gzip_min_length 1k;
# Enables compression for additional MIME types ("text/html" is always compressed).
# Commonly suggested value:
# gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_types text/plain application/x-javascript text/css application/xml;
# Disables gzip compression for User-Agents matching the given regular expression.
# In this case it would disable gzip for old versions of IE that don't support compressed responses.
# gzip_disable "MSIE [1-6]\.";
# Enables or disables inserting the "Vary: Accept-Encoding" response header field
# if the directives gzip, gzip_static, or gunzip are active.
gzip_vary on;
# Sets the number and size of buffers used to compress a response.
# By default, the buffer size is equal to one memory page,
# which is either 4K or 8K depending on the platform.
gzip_buffers 4 16k;
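# A commented-out sketch of a per-location override for one of the vhost server blocks:
# already-compressed formats such as JPEG or PNG gain little from gzip, so compression
# can be switched off for them while the settings above stay global. The location path
# is an illustrative assumption:
# location /static/images/ {
#     gzip off;
# }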
##
# Client
##
# Assigns the maximum number and size of buffers used for reading large client request headers.
# The request line cannot be bigger than one buffer, otherwise NGINX returns "414 Request-URI Too Large".
# A single header line also cannot exceed one buffer, otherwise the client gets "400 Bad Request".
large_client_header_buffers 4 4k;
# Sets the maximum size of the client request body (e.g. file uploads) to 20 MB.
client_max_body_size 20m;
# Sets the buffer size for reading the client request header.
client_header_buffer_size 4k;
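# A commented-out sketch of raising the body-size limit for a single upload endpoint
# inside the relevant vhost server block (the path and the 100m limit are illustrative
# assumptions, not part of this configuration):
# location /upload {
#     client_max_body_size 100m;
# }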
##
# open_file_cache
##
# Specifies the maximum number of entries in the cache.
# When the cache overflows, the least recently used (LRU) items are removed.
# open_file_cache max=65536 inactive=20s;
open_file_cache off;
# Specifies how often to re-check the validity of the information cached about an item.
# open_file_cache_valid 30s;
# Defines the minimum number of file accesses within the period set by the "inactive"
# parameter of open_file_cache. If the file is accessed more often than that,
# its descriptor remains open in the cache.
# open_file_cache_min_uses 2;
# Specifies whether or not to cache errors when searching for a file.
# open_file_cache_errors on;
##
# fastcgi settings
##
# Timeouts for connecting to, sending to, and reading from a FastCGI server.
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
# Buffer for the first part of the FastCGI response, and the buffers for the rest of it.
fastcgi_buffer_size 64k;
fastcgi_buffers 16 64k;
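# A minimal, commented-out sketch of a PHP location that these FastCGI settings would
# apply to; the PHP-FPM address and document root are illustrative assumptions:
# location ~ \.php$ {
#     root /usr/local/openresty/nginx/html;
#     fastcgi_pass 127.0.0.1:9000;
#     fastcgi_index index.php;
#     fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
#     include fastcgi_params;
# }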

##
# fastcgi cache settings
##
# Expose the cache status (HIT/MISS/EXPIRED/...) in a response header for debugging.
add_header rt-Fastcgi-Cache $upstream_cache_status;
# On-disk cache under the given path: a levels=2:2 directory hierarchy, a 10 MB shared
# memory zone named "cgi_cache" for keys, 2h inactivity expiry, and a 2 GB size cap.
fastcgi_cache_path /usr/local/openresty/nginx/cache/fastcgi_cache/ levels=2:2 keys_zone=cgi_cache:10m inactive=2h max_size=2g;
fastcgi_cache_key "$scheme$request_method$host$request_uri";
# Serve stale cached responses when the backend errors out, times out, or returns 500.
fastcgi_cache_use_stale error timeout invalid_header http_500;
# Ignore backend response headers that would otherwise disable or limit caching.
fastcgi_ignore_headers Cache-Control Expires Set-Cookie;
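# A commented-out sketch of opting a PHP location (such as the one sketched above) into
# this cache zone; the validity time is an illustrative assumption:
#     fastcgi_cache cgi_cache;
#     fastcgi_cache_valid 200 301 302 10m;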

##
# proxy cache settings
##
# Sets the path and other parameters of a cache. Cache data are stored in files.
# The file name in a cache is the result of applying the MD5 function to the cache key.
proxy_cache_path /usr/local/openresty/nginx/cache/proxy_cache/ levels=2:2 keys_zone=cgi_proxy:10m inactive=2h max_size=2g;

# Defines a directory for storing temporary files with data received from proxied servers.
# Up to a three-level subdirectory hierarchy can be used underneath the specified directory:
# proxy_temp_path path [level1 [level2 [level3]]];
proxy_temp_path /usr/local/openresty/nginx/cache/temp_path/ 1 2;
# Limits the size of data written to a temporary file at a time; setting
# proxy_max_temp_file_size to 0 disables buffering of responses to temporary files.
proxy_temp_file_write_size 128k;
proxy_max_temp_file_size 0;
# Defines a timeout for establishing a connection with a proxied server.
# Note that this timeout cannot usually exceed 75 seconds.
proxy_connect_timeout 30s;
# Timeouts for transmitting a request to, and reading a response from, the proxied server.
# Each timeout applies only between two successive write/read operations, not to the whole request,
# so if the proxied server transmits or receives nothing within this time, the connection is closed.
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# Sets the size of the buffer used for reading the first part of the response received from the proxied server.
proxy_buffer_size 128k;
# Enables buffering of responses from the proxied server. (When buffering is disabled,
# the response is passed to the client synchronously, as soon as it is received, and the
# largest chunk NGINX can read from the server at a time is set by proxy_buffer_size.)
proxy_buffering on;
# Sets the number and size of the buffers used for reading a response from the proxied server, for a single connection.
# By default, the buffer size is equal to one memory page, which is either 4K or 8K depending on the platform.
proxy_buffers 100 128k;
# When buffering of responses from the proxied server is enabled,
# limits the total size of buffers that can be busy sending a response to the client while the response is not yet fully read.
# In the meantime, the rest of the buffers can be used for reading the response and, if needed, buffering part of it to a temporary file.
proxy_busy_buffers_size 128k;
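# A commented-out sketch of a proxied location wired to the "cgi_proxy" zone defined above.
# The location path and the validity time are illustrative assumptions; the upstream name
# refers to one defined further below:
# location / {
#     proxy_pass http://php_online_backend_http;
#     proxy_cache cgi_proxy;
#     proxy_cache_valid 200 302 10m;
#     proxy_set_header Host $host;
#     proxy_set_header X-Real-IP $remote_addr;
#     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# }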

# Load modular configuration files from the conf/vhost directory.
include vhost/*.conf;
include vhost/pay/development/*.conf;
include vhost/pay/test/*.conf;
include vhost/pay/production/*.conf;

##
# CLUSTERS
##

# The "upstream" blocks below define backend clusters (Java services and a PHP-FPM app)
# that will handle the requests proxied by NGINX.
# java commsunny group

# upstream java_443_backend {
# server 10.30.149.41:50443 weight=30;
# server 10.30.148.149:50443 weight=30;
# server 10.31.74.200:50443 weight=30;
# }
#
# upstream java_8080_backend {
# server 10.31.74.200:58080 weight=30;
# }
# java commsunny group
{% if java_backend_upstream is defined %}
{{ java_backend_upstream }}
{% else %}
upstream java_443_backend {
# server 10.30.149.41:50443 weight=30; # 165
server 10.30.148.149:50443 weight=30; # 243
server 10.31.74.200:50443 weight=30; # 106
}
upstream java_8080_backend {
# server 10.30.149.41:58080 weight=30; # 165
server 10.30.148.149:58080 weight=30; # 243
server 10.31.74.200:58080 weight=30; # 106
}
{% endif %}
upstream opscenter_backend {
server 10.31.74.200:8888;
}
upstream jenkins_backend {
server 10.31.88.120:8080;
}
upstream php_online_backend_http {
server 10.28.225.116:80;
# server 10.25.76.207:80;
ip_hash;
}
upstream php_online_backend_https {
server 10.28.225.116:443;
# server 10.25.76.207:443;
ip_hash;
}
upstream php_test_online_backend_http {
server 10.28.81.15:80;
}
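# A minimal, commented-out sketch of a server block that proxies to one of the upstreams
# defined above. The server_name is an illustrative assumption; the real virtual hosts
# live in the vhost/*.conf files included earlier:
# server {
#     listen 80;
#     server_name example.internal;
#     location / {
#         proxy_pass http://php_online_backend_http;
#         proxy_set_header Host $host;
#         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#     }
# }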
# Include IP deny rules.
include deny.ip;
# Commented-out catch-all default server.
# server {
# listen 80 default;
# listen <%= @ipaddress_em1 %>:80 default;
# listen <%= @ipaddress_eth0 %>:80 default;
# listen 182.18.47.10:80 default;
# return 500;
# }
}