user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

# Load dynamically built modules.
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 1024;
    multi_accept on;
}

http {
    # MIME type handling
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    charset utf-8;

    # TCP/transfer tuning; hide the nginx version in headers and error pages.
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    server_tokens off;

    log_format main '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    '"$gzip_ratio"';

    log_format cloudflare '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" $http_cf_ray $http_cf_connecting_ip '
                          '$http_x_forwarded_for $http_x_forwarded_proto '
                          '$http_true_client_ip $http_cf_ipcountry '
                          '$http_cf_visitor $http_cdn_loop';

    log_format json_combined escape=json '{'
        '"method":"$request_method",'
        '"scheme":"$scheme",'
        '"domain":"$host",'
        '"uri":"$request_uri",'
        '"query_string":"$query_string",'
        '"referer":"$http_referer",'
        '"content_type":"$sent_http_content_type",'
        '"status": $status,'
        '"bytes_sent":$body_bytes_sent,'
        '"request_time":$request_time,'
        '"user_agent":"$http_user_agent",'
        '"cache":"$upstream_cache_status",'
        '"upstream_time": "$upstream_response_time",'
        '"timestamp":"$time_iso8601",'
        '"ip":"$http_x_forwarded_for"'
        '}';

    # log_format VCOMBINED '$host:$server_port '
    #                      '$remote_addr $remote_user [$time_local] '
    #                      '"$request" $status $body_bytes_sent '
    #                      '"$http_referer" "$http_user_agent"';

    # Access logs track every client request and are vital for traffic
    # analysis and performance monitoring.
    access_log /var/log/nginx/access.log json_combined;

    # Error logs capture internal server issues and help with troubleshooting.
error_log /var/log/nginx/error.log debug; # SSL # Mozilla Intermediate configuration ssl_protocols TLSv1.2 TLSv1.3; ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; ssl_prefer_server_ciphers on; # enable session resumption to improve https performance ssl_session_timeout 10m; ssl_session_cache shared:SSL:10m; ssl_session_tickets off; # Diffie-Hellman parameter for DHE ciphersuites # ssl_dhparam /etc/nginx/dhparam.pem; # OCSP Stapling # ssl_stapling on; # ssl_stapling_verify on; resolver 127.0.0.11 1.1.1.1 1.0.0.1 8.8.8.8 8.8.4.4 208.67.222.222 208.67.220.220 valid=60s; resolver_timeout 2s; # CONTROL RESOURCES AND LIMITS / CONTROLLING BUFFER OVERFLOW ATTACKS # specify the client request body buffer size; default 8k/16k client_body_buffer_size 1k; # sets the headerbuffer size for the request header from client. # 1K sufficient for most requests. # Increase this if you have a custom header or a large cookie sent from the client (e.g., wap client). client_header_buffer_size 1k; # assigns the maximum accepted body size of client request, indicated by the line Content-Length in the header of request. # If size is greater the given one, then the client gets the error “Request Entity Too Large” (413). # Increase this when you are getting file uploads via the POST method. e.g harbor image push, minio uploads client_max_body_size 1k; # assigns the maximum number and size of buffers for large headers to read from client request. # By default the size of one buffer is equal to the size of page, depending on platform this either 4K or 8K, # if at the end of working request connection converts to state keep-alive, # then these buffers are freed. 2x1k will accept 2kB data URI. # This will also help combat bad bots and DoS attacks. 
large_client_header_buffers 4 4k; client_header_timeout 60; client_body_timeout 60; send_timeout 60; # PROXY BUFFERING # NGINX stores the response from a server in internal buffers as it comes in, # and doesn't start sending data to the client until the entire response is buffered # With proxy_buffering disabled, data received from the server is immediately relayed by NGINX, # allowing for minimum Time To First Byte (TTFB). # TLDR - Buffering is needed to ensure that the upstream server can be set free after delivering the response to NGINX, # and NGINX will proceed to deliver the response to the slow client. proxy_buffering off; # for a single server setup (SSL termination of Varnish), where no caching is done in NGINX itself # Defines the amount of memory that NGINX will allocate for each request to the proxied server. # This small chunk of memory will be used for reading and storing the tiny fraction of response – the HTTP headers. # tuning solves the upstream sent too big header while reading response header from upstream error. proxy_buffer_size 8k; # defaults: 4k/8k, should be enough for most PHP websites, or adjust as above # size of buffers, in kilobytes, which can be used for delivering the response to clients # while it was not fully read from the upstream server. proxy_busy_buffers_size 16k; # essentially, proxy_buffer_size + 2 small buffers of 4k # The rule of thumb with this setting is that while we make use of buffering, # it is best that the complete response from upstream can be held in memory, to avoid disk I/O. 
proxy_buffers 8 4k; # Default: 8 4k|8k; should be enough for most PHP websites, adjust as above to get an accurate value # Time to establish TCP connection # Increase only if your app has slow endpoints proxy_connect_timeout 60s; # Time between successive writes to upstream proxy_send_timeout 60; # Time between successive reads from upstream proxy_read_timeout 60; # Gzip Settings gzip on; gzip_vary on; gzip_proxied any; gzip_comp_level 6; gzip_types text/plain text/css text/xml text/javascript application/json application/javascript application/xml+rss application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml; # Include all server configurations # include /etc/nginx/conf.d/*.conf; include /etc/nginx/sites-enabled/*; }