Comments (10)
apisix: # universal configurations
  node_listen: 9080             # APISIX listening port
  enable_heartbeat: true
  enable_admin: true
  enable_admin_cors: true
  enable_debug: false
  enable_dev_mode: false        # Sets nginx worker_processes to 1 if set to true
  enable_reuseport: true        # Enable nginx SO_REUSEPORT switch if set to true.
  enable_ipv6: true             # Enable nginx IPv6 resolver
  enable_server_tokens: true    # Whether the APISIX version number should be shown in Server header
  enable_control: true
  control:
    ip: "0.0.0.0"
    port: 9999
  # proxy_protocol:                   # Proxy Protocol configuration
  #   listen_http_port: 9181          # The port with proxy protocol for http, it differs from node_listen and admin_listen.
  #                                   # This port can only receive http request with proxy protocol, but node_listen & admin_listen
  #                                   # can only receive http request. If you enable proxy protocol, you must use this port to
  #                                   # receive http request with proxy protocol
  #   listen_https_port: 9182         # The port with proxy protocol for https
  #   enable_tcp_pp: true             # Enable the proxy protocol for tcp proxy, it works for stream_proxy.tcp option
  #   enable_tcp_pp_to_upstream: true # Enables the proxy protocol to the upstream server
  proxy_cache: # Proxy Caching configuration
    cache_ttl: 10s              # The default caching time if the upstream does not specify the cache time
    zones:                      # The parameters of a cache
      - name: disk_cache_one    # The name of the cache, administrator can specify
                                # which cache to use by name in the admin api
        memory_size: 50m        # The size of shared memory, it's used to store the cache index
        disk_size: 1G           # The size of disk, it's used to store the cache data
        disk_path: "/tmp/disk_cache_one" # The path to store the cache data
        cache_levels: "1:2"     # The hierarchy levels of a cache
      # - name: disk_cache_two
      #   memory_size: 50m
      #   disk_size: 1G
      #   disk_path: "/tmp/disk_cache_two"
      #   cache_levels: "1:2"
  router:
    http: radixtree_host_uri  # radixtree_uri: match route by uri(base on radixtree)
                              # radixtree_host_uri: match route by host + uri(base on radixtree)
                              # radixtree_uri_with_parameter: match route by uri with parameters
    ssl: 'radixtree_sni'      # radixtree_sni: match route by SNI(base on radixtree)
  # dns_resolver:             # Uncomment to override the resolvers read from /etc/resolv.conf
  #   - 127.0.0.1
  #   - 172.20.0.10
  #   - 114.114.114.114
  #   - 223.5.5.5
  #   - 1.1.1.1
  #   - 8.8.8.8
  dns_resolver_valid: 30      # Override the TTL of DNS answers (seconds)
  data_encryption:
    enable: true
    keyring:
      # NOTE(review): this is the well-known demo key shipped with upstream APISIX
      # examples — replace it with a private key before any production use.
      # Quoted so YAML-special characters (@ ! # $) can never be misparsed.
      - "ABCDEDEDklksl@!#$#@sdklfj!@#$Kl"
  resolver_timeout: 5         # Timeout for DNS resolution (seconds)
  ssl:
    enable: true
    listen:
      - port: 9993
        enable_http2: true
    ssl_protocols: "TLSv1.2 TLSv1.3"
    ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
nginx_config: # config for rendering the template to generate nginx.conf
  error_log: "/dev/stderr"
  error_log_level: "info"     # warn,error
  worker_processes: "auto"
  enable_cpu_affinity: true
  worker_rlimit_nofile: 20480 # the number of files a worker process can open, should be larger than worker_connections
  event:
    worker_connections: 10620
  http:
    enable_access_log: true
    access_log: "/dev/stdout"
    access_log_format: '{"time_local": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forward_for": "$proxy_add_x_forwarded_for", "x_request_id": "$http_x_request_id", "scheme":"$scheme", "server_port": "$server_port" ,"request":"$request","remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "request_body":"$request_body","path": "$uri", "request_query": "$args", "request_length": $request_length, "duration": $request_time,"method": "$request_method", "http_referrer": "$http_referer", "upstream_response_length": "$upstream_response_length", "upstream_response_time": "$upstream_response_time", "upstream_status": "$upstream_status","http_user_agent": "$http_user_agent" , "http_host": "$http_host"}'
    access_log_format_escape: default
    keepalive_timeout: 60s          # timeout during which a keep-alive client connection will stay open on the server side.
    client_header_timeout: 60s      # timeout for reading client request header, then 408 (Request Time-out) error is returned to the client
    client_body_timeout: 60s        # timeout for reading client request body, then 408 (Request Time-out) error is returned to the client
    send_timeout: 10s               # timeout for transmitting a response to the client, then the connection is closed
    underscores_in_headers: "on"    # default enables the use of underscores in client request header fields
    real_ip_header: "X-Real-IP"     # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header
    real_ip_from:                   # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from
      - 127.0.0.1
      - 'unix:'
from apisix.
Adding the header is the tricky part, but here is the fix for the partial response.
from apisix.
One can assign it to me. :)
from apisix.
Adding the header is the tricky part, but here is the fix for the partial response.
the transfer-encoding header may be the reason why curl raises an error; could you try curl -i https://cluster.local/reviews/ -H 'Accept-Encoding: br,gzip' --http1.1
and check the headers? @SilentEntity
from apisix.
oh, HTTP/2 is being used — that's why the Transfer-Encoding header [chunked] is not present.
Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding
And it looks like in HTTP/2 it's not mandatory to send the Content-Length
header in the response; it uses the endStream
flag to signal the end of the content.
Ref: https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.6.
But the fix for the partial response is required. PR: #11087 solves that.
from apisix.
Updated the issue and PR, please check once. @yuweizzz
from apisix.
yes, the fix for the partial response is required, but now I am wondering if we need to clear some headers from the upstream when we use HTTP/2, so that we can avoid HTTP/2 protocol errors.
from apisix.
@yuweizzz don't you think the upstream should handle these protocol version nuances?
from apisix.
Hi @yuweizzz, the body-related headers from the upstream are getting purged in header_filter
ctx.brotli_matched = true
ctx.compressor = compressor
core.response.clear_header_as_body_modified() <--- Here
core.response.add_header("Content-Encoding", "br")
Body-related headers are getting cleared as it's a compression plugin, and will change/compress the response body.
from apisix.
I ran some tests in a local env; this part is handled by OpenResty correctly. core.response.clear_header_as_body_modified
will clear the content-length
header, and the transfer-encoding
header will be controlled by OpenResty.
yes, the fix for the partial response is required, but now I am wondering if we need to clear some headers from the upstream when we use HTTP/2, so that we can avoid HTTP/2 protocol errors.
from apisix.
Related Issues (20)
- ip_restriction plugin is not whitelisting the individual ips HOT 11
- docs: Is it enable here rather than disable about admin_key_required? HOT 6
- help request: 请教如何配置可以进入我自己的server HOT 2
- Http check route to return default response HOT 2
- feat: Using authz-keycloak plugin, I want to add claims to an authz request HOT 5
- help request: 源码安装
- bug: aws-lambda plugin with IAM auth fails with URL-encoded query parameters HOT 4
- Is it possible to get the matched route_id in a custom wasm plugin? HOT 2
- feat: apisix `enable_encrypt_fields` should support plugin fields that are deeply situated, array elements, etc. HOT 1
- bug: 编排模式配置的插件不生效,且路由列表不显示插件名 HOT 1
- help request: HOT 2
- help request: Connect to upstream through proxy server (http_proxy) HOT 2
- APISIX to handle all handshake between my client application and the 3rd party service, including getting token, refreshing token, etc. HOT 2
- help request: how to set "resolver xxx(dns server) valid=10s;" for apisix upstreams HOT 3
- help request: metric occurs even when there is no traffic. HOT 2
- help request: Is there benchmark on large-scale configuration loading scenarios? HOT 1
- bug: 最新版本中支持的指标过期特性似乎会导致指标输出时会丢失部分已过期并重新添加的列表。 HOT 3
- help request: ApisixTls configuration for SSL enabled Upstream HOT 2
- feat: SUSE Linux Enterprise Server 15 support HOT 6
- help request: Use APISIX as HTTP proxy HOT 1
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from apisix.