# nginx.conf

# User and group that the worker processes will run as.
user nginx nginx;
# Number of NGINX workers. Usually at most one worker per CPU core is used.
# If the server has 2+ CPU cores but still does not receive many requests,
# keeping the value at 1 avoids idle worker processes.
worker_processes auto;
worker_rlimit_nofile 65535;
# File that stores the NGINX master process id.
pid /run/nginx.pid;
events {
    # How many client connections each worker can maintain at a time.
    worker_connections 20480;
    # Using epoll is less CPU intensive when serving thousands of connections, because
    # instead of scanning all connections to see which file descriptors are ready to read/write,
    # it only scans those which are active.
    # p.s.: It's only available on Linux 2.6+ kernels.
    use epoll;
    # multi_accept makes a worker accept() as many connections as possible after it is notified about a new connection.
    multi_accept on;
}
# HTTP config module
http {
    # Default charset settings.
    charset utf-8,gbk;
    # Note that when SSI is enabled the Last-Modified and Content-Length headers are not sent.
    # default off
    # ssi off;
    ##
    # MIME-TYPE
    ##
    # You can include other config files using the "include" directive.
    # Here we are including the mime-types, for example.
    include mime.types;
    # "application/octet-stream" means the response is treated as a binary file.
    # If this directive is not defined, the response type defaults to "text/plain".
    default_type application/octet-stream;
    ##
    # LOG
    ##
    log_escape_non_ascii off;
    log_format main '$remote_addr - $remote_user [$time_iso8601] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" "$request_time" "$upstream_addr" "$upstream_response_time" "$request_body" "$host"';
    log_format real_ip '$remote_addr - $remote_user [$time_local] $request_time "$request" '
                       '$status $body_bytes_sent "$http_referer" '
                       '$http_user_agent $http_x_forwarded_for';
    log_format post_log '$remote_addr - $remote_user [$time_local] $request_time "$request" '
                        '$status $body_bytes_sent "$request_body" "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for" ';
    # Access log: path (and, optionally, a named log format).
    access_log /usr/local/openresty/nginx/logs/access.log;
    # Error log: path (and, optionally, a severity level).
    error_log /usr/local/openresty/nginx/logs/error.log;
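    # A minimal usage sketch (not active here): the named formats defined above are selected by
    # passing the format name as the second argument of access_log, e.g. in a vhost:
    # access_log /usr/local/openresty/nginx/logs/access.log main;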
    ##
    # TCP
    ##
    # Optimizes data transfer by copying data between one file descriptor and another
    # inside the kernel, instead of reading and copying data to/from user space.
    sendfile on;
    # Causes NGINX to attempt to send its HTTP response headers in one packet,
    # instead of using partial frames. This is useful for prepending headers before calling sendfile,
    # or for throughput optimization.
    tcp_nopush on;
    # Disables the Nagle algorithm.
    # It's useful for sending frequent small bursts of data in real time.
    # tcp_nodelay off;
    tcp_nodelay on;
    # Timeout during which a keep-alive client connection will stay open to serve
    # all the requested files from the server side.
    keepalive_timeout 30s;
    ##
    # GZIP
    ##
    # In production you MUST set gzip to "on" in order to save bandwidth. Web browsers
    # which handle compressed files (all recent ones do) will get a much smaller version
    # of the server response.
    gzip on;
    # Sets the minimum HTTP version of a request required to compress a response.
    # NGINX talks HTTP/1.1 to the browser but HTTP/1.0 to proxied backend servers,
    # so setting 1.0 also allows compressing responses to proxied requests.
    gzip_http_version 1.0;
    # Compression level 1 (fastest) to 9 (slowest).
    # suggested for production:
    # gzip_comp_level 6;
    gzip_comp_level 2;
    # Enables compression for all proxied requests.
    gzip_proxied any;
    # Minimum length of the response (bytes). Responses shorter than this length will not be compressed.
    # suggested for production:
    # gzip_min_length 10000;
    gzip_min_length 1k;
    # Enables compression for additional MIME types.
    # suggested for production:
    # gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_types text/plain application/x-javascript text/css application/xml;
    # Disables gzip compression for User-Agents matching the given regular expression.
    # In this case we've disabled gzip for old versions of IE that don't support compressed responses.
    # gzip_disable "MSIE [1-6]\.";
    # Enables or disables inserting the "Vary: Accept-Encoding" response header field
    # if the directives gzip, gzip_static, or gunzip are active.
    gzip_vary on;
    # Sets the number and size of buffers used to compress a response.
    # By default, the buffer size is equal to one memory page.
    # This is either 4K or 8K, depending on the platform.
    gzip_buffers 4 16k;
    ##
    # Client
    ##
    # Sets the maximum number and size of buffers used for reading large client request headers.
    # The request line cannot be bigger than one buffer, otherwise NGINX returns the error "Request URI too large" (414).
    # A request header field also cannot be bigger than one buffer, otherwise the client gets the error "Bad request" (400).
    large_client_header_buffers 4 4k;
    # Sets the maximum allowed size of the client request body (e.g. file uploads) to 20 MB.
    client_max_body_size 20m;
    # Sets the buffer size for reading the client request header.
    client_header_buffer_size 4k;
    ##
    # open_file_cache
    ##
    # max specifies the maximum number of entries in the cache.
    # When the cache overflows, the least recently used (LRU) items are removed.
    # open_file_cache max=65536 inactive=20s;
    open_file_cache off;
    # Specifies how often to re-check the validity of the information about an item in open_file_cache.
    # open_file_cache_valid 30s;
    # Defines the minimum number of accesses of a file within the period set by the "inactive" parameter of open_file_cache.
    # If a file is used more often than that, its descriptor remains open in the cache.
    # open_file_cache_min_uses 2;
    # Specifies whether or not to cache errors when searching for a file.
    # open_file_cache_errors on;
    ##
    # fastcgi settings
    ##
    fastcgi_connect_timeout 300;
    fastcgi_send_timeout 300;
    fastcgi_read_timeout 300;
    fastcgi_buffer_size 64k;
    fastcgi_buffers 16 64k;
    ##
    # fastcgi cache settings
    ##
    add_header rt-Fastcgi-Cache $upstream_cache_status;
    fastcgi_cache_path /usr/local/openresty/nginx/cache/fastcgi_cache/ levels=2:2 keys_zone=cgi_cache:10m inactive=2h max_size=2g;
    fastcgi_cache_key "$scheme$request_method$host$request_uri";
    fastcgi_cache_use_stale error timeout invalid_header http_500;
    fastcgi_ignore_headers Cache-Control Expires Set-Cookie;
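    # A minimal sketch of how the "cgi_cache" zone defined above could be referenced from a
    # PHP location in one of the included vhost files (the location, socket path and TTLs
    # are assumptions for illustration, not taken from this config):
    # location ~ \.php$ {
    #     fastcgi_pass        unix:/var/run/php-fpm.sock;
    #     include             fastcgi_params;
    #     fastcgi_cache       cgi_cache;
    #     fastcgi_cache_valid 200 301 302 60m;
    # }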
    ##
    # proxy cache settings
    ##
    # Sets the path and other parameters of a cache. Cache data are stored in files.
    # The file name in a cache is a result of applying the MD5 function to the cache key.
    proxy_cache_path /usr/local/openresty/nginx/cache/proxy_cache/ levels=2:2 keys_zone=cgi_proxy:10m inactive=2h max_size=2g;
    # Defines a directory for storing temporary files with data received from proxied servers.
    # Up to a three-level subdirectory hierarchy can be used underneath the specified directory:
    # proxy_temp_path path [level1 [level2 [level3]]];
    proxy_temp_path /usr/local/openresty/nginx/cache/temp_path/ 1 2;
    proxy_temp_file_write_size 128k;
    proxy_max_temp_file_size 0;
    # Defines a timeout for establishing a connection with a proxied server.
    # It should be noted that this timeout cannot usually exceed 75 seconds.
    proxy_connect_timeout 30s;
    # proxy_send_timeout sets a timeout for transmitting a request to the proxied server;
    # proxy_read_timeout sets a timeout for reading a response from it.
    # The timeout applies only between two successive write/read operations, not to the whole request.
    # If the proxied server does not transmit anything within this time, the connection is closed.
    proxy_send_timeout 30s;
    proxy_read_timeout 30s;
    # Sets the size of the buffer used for reading the first part of the response received from the proxied server.
    proxy_buffer_size 128k;
    # With buffering enabled, NGINX reads the response from the proxied server as fast as possible
    # and buffers it before sending it to the client. When buffering is disabled, the response is
    # passed to the client synchronously, as soon as it is received, and the maximum amount of data
    # NGINX can receive from the server at a time is limited by proxy_buffer_size.
    proxy_buffering on;
    # Sets the number and size of the buffers used for reading a response from the proxied server, for a single connection.
    # By default, the buffer size is equal to one memory page. This is either 4K or 8K, depending on the platform.
    proxy_buffers 100 128k;
    # When buffering of responses from the proxied server is enabled,
    # limits the total size of buffers that can be busy sending a response to the client while the response is not yet fully read.
    # In the meantime, the rest of the buffers can be used for reading the response and, if needed, buffering part of the response to a temporary file.
    proxy_busy_buffers_size 128k;
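    # A minimal sketch of how the "cgi_proxy" zone defined above could be used from a vhost
    # location (the location and TTLs are assumptions; php_online_backend_http is one of the
    # upstreams defined further below):
    # location / {
    #     proxy_pass        http://php_online_backend_http;
    #     proxy_cache       cgi_proxy;
    #     proxy_cache_valid 200 302 10m;
    #     proxy_cache_valid 404      1m;
    # }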
    # Load modular configuration files from the conf/vhost directory.
    include vhost/*.conf;
    include vhost/pay/development/*.conf;
    include vhost/pay/test/*.conf;
    include vhost/pay/production/*.conf;
    ##
    # CLUSTERS
    ##
    # The "upstream" blocks below define the backend clusters (Java services and a PHP-FPM app)
    # that will handle the requests proxied by NGINX.
    # java commsunny group
    # upstream java_443_backend {
    #     server 10.30.149.41:50443 weight=30;
    #     server 10.30.148.149:50443 weight=30;
    #     server 10.31.74.200:50443 weight=30;
    # }
    #
    # upstream java_8080_backend {
    #     server 10.31.74.200:58080 weight=30;
    # }
    # java commsunny group
    {% if java_backend_upstream is defined %}
    {{ java_backend_upstream }}
    {% else %}
    upstream java_443_backend {
        # server 10.30.149.41:50443 weight=30; # 165
        server 10.30.148.149:50443 weight=30; # 243
        server 10.31.74.200:50443 weight=30; # 106
    }
    upstream java_8080_backend {
        # server 10.30.149.41:58080 weight=30; # 165
        server 10.30.148.149:58080 weight=30; # 243
        server 10.31.74.200:58080 weight=30; # 106
    }
    {% endif %}
    upstream opscenter_backend {
        server 10.31.74.200:8888;
    }
    upstream jenkins_backend {
        server 10.31.88.120:8080;
    }
    upstream php_online_backend_http {
        server 10.28.225.116:80;
        # server 10.25.76.207:80;
        ip_hash;
    }
    upstream php_online_backend_https {
        server 10.28.225.116:443;
        # server 10.25.76.207:443;
        ip_hash;
    }
    upstream php_test_online_backend_http {
        server 10.28.81.15:80;
    }
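    # A minimal sketch of how a vhost (normally one of the files included from vhost/) might
    # proxy to one of the upstreams above; the server_name and forwarded headers here are
    # assumptions for illustration, not taken from this config:
    # server {
    #     listen 80;
    #     server_name pay.example.com;
    #     location / {
    #         proxy_set_header Host            $host;
    #         proxy_set_header X-Real-IP       $remote_addr;
    #         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    #         proxy_pass http://java_8080_backend;
    #     }
    # }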
    include deny.ip;
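    # deny.ip is expected to hold access-control rules; hypothetical contents, assuming the
    # usual deny/allow directives (the addresses are placeholders, not from this config):
    # deny  203.0.113.4;
    # allow all;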
    # server {
    #     listen 80 default;
    #     listen <%= @ipaddress_em1 %>:80 default;
    #     listen <%= @ipaddress_eth0 %>:80 default;
    #     listen 182.18.47.10:80 default;
    #     return 500;
    # }
}