
splithttp: GET requests time out when using nginx as a reverse proxy #3856

Open
4 tasks done
sakullla opened this issue Sep 28, 2024 · 15 comments

Comments

@sakullla

Completeness requirements

  • I promise that I have read the documentation and understand the meaning of every option I wrote in my configuration files, rather than piling up seemingly useful options or default values.
  • I have provided the complete configuration files and logs, not excerpts selected at my own discretion.
  • I have searched the issues and found no similar problem already reported.
  • The problem can be reproduced on the latest Release.

Description

While using nginx as a reverse proxy, I found two abnormal situations by inspecting the logs.

1. For the same session ID, the POST is handled first and the GET afterwards; the GET then times out. This happens with both H2 and H3.

[28/Sep/2024:21:26:40 +0800] "POST /spre/f7fd9cea-56a8-4472-a793-498a57efcb77/0?x_padding=000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 HTTP/3.0" 200 0 
[28/Sep/2024:21:27:40 +0800] "GET /spre/f7fd9cea-56a8-4472-a793-498a57efcb77?x_padding=0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 HTTP/3.0" 200 0 
nginx error:
2024/09/28 21:27:40 [error] 720#720: *2214 upstream timed out (110: Connection timed out) while reading upstream, client:, request: "GET /spre/f7fd9cea-56a8-4472-a793-498a57efcb77?x_padding=0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 HTTP/3.0", upstream: "http://127.0.0.1:1234/spre/f7fd9cea-56a8-4472-a793-498a57efcb77?x_padding=0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"

How to reproduce

Keep xray running and watch YouTube videos; it takes roughly five or six minutes.

Client configuration

{
  "log": {
    "access": "",
    "error": "",
    "loglevel": "warning"
  },
  "outbounds": [
    {
      "tag": "proxy",
      "protocol": "vless",
      "settings": {
        "vnext": [
          {
            "address": "",
            "port": 443,
            "users": [
              {
                "id": ""
              }
            ]
          }
        ]
      },
      "streamSettings": {
        "network": "splithttp",
        "security": "tls",
        "tlsSettings": {
          "allowInsecure": false,
          "alpn": [
            "h3"
          ],
          "fingerprint": "chrome"
        },
        "splithttpSettings": {
          "path": "spre",
          "maxUploadSize": 1000000,
          "maxConcurrentUploads": 10
        }
      },
      "mux": {
        "enabled": false,
        "concurrency": -1
      }
    }
  ]
}

Server configuration

In the nginx configuration, proxy_read_timeout is set to 30s; changing this parameter changes how long the stream stays broken.

user nginx;
worker_processes auto;
quic_bpf on;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;

events {
worker_connections 1024;
}

http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$request_uri"';
access_log /var/log/nginx/access.log main;

map $http_upgrade $connection_upgrade {
    default upgrade;
    ""      close;
}

map $remote_addr $proxy_forwarded_elem {
    ~^[0-9.]+$        "for=$remote_addr";
    ~^[0-9A-Fa-f:.]+$ "for=\"[$remote_addr]\"";
    default           "for=unknown";
}

map $http_forwarded $proxy_add_forwarded {
    "~^(,[ \\t]*)*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*([ \\t]*,([ \\t]*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*)?)*$" "$http_forwarded, $proxy_forwarded_elem";
    default "$proxy_forwarded_elem";
}

# server {
#     listen 80;
#     listen [::]:80;
#     return 301 https://$host$request_uri;
# }

server {
    listen                  443 ssl default_server;
    listen                  [::]:443 ssl default_server;

    ssl_reject_handshake    on;

    ssl_protocols           TLSv1.2 TLSv1.3;

    ssl_session_timeout     1h;
    ssl_session_cache       shared:SSL:10m;
}

server {
    listen 443 quic reuseport;
    listen 443 ssl reuseport;
    listen [::]:443 quic reuseport;
    listen [::]:443 ssl reuseport;
    http2 on;
    http3 on;
    quic_gso on;
    http3_stream_buffer_size   512k;
    server_name                example.com; # fill in a domain name contained in the SSL certificate loaded by nginx; it is recommended to point the domain at the server's IP

    ssl_certificate            /etc/ssl/private/fullchain.cer;
    ssl_certificate_key        /etc/ssl/private/private.key;

    ssl_protocols              TLSv1.2 TLSv1.3;
    ssl_ciphers                ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers  on;

    ssl_stapling               on;
    ssl_stapling_verify        on;
    resolver                   1.1.1.1 valid=60s;
    resolver_timeout           2s;

    client_header_timeout      1h;
    keepalive_timeout          30m;
    client_header_buffer_size  8k;

    location /spre {
        proxy_pass                          http://127.0.0.1:1234;
        proxy_http_version                  1.1;
        proxy_set_header X-Forwarded-For    $proxy_add_x_forwarded_for;
        proxy_read_timeout                  30s;
        proxy_redirect                      off;
    }
}

}


Xray server configuration

{
    "log": {
        "loglevel": "warning"
    },
    "inbounds": [
        {
            "listen": "127.0.0.1",
            "port": "1234",
            "protocol": "vless",
            "settings": {
                "clients": [
                    {
                        "id": ""
                    }
                ],
                "decryption": "none"
            },
            "streamSettings": {
                "network": "splithttp",
                "splithttpSettings": {
                    "path": "/spre"
                },
                "sockopt": {
                    "tcpFastOpen": true,
                    "tcpMptcp": true,
                    "tcpNoDelay": true
                }
            },
            "sniffing": {
                "enabled": true,
                "destOverride": [
                    "http",
                    "tls",
                    "quic"
                ]
            }
        }
    ]
}

Client log

The stream breaks when using h3; I have not seen it with h2.


2024/09/28 22:20:59 [Warning] [1204054532] app/proxyman/inbound: connection ends > proxy/http: connection ends > proxy/http: failed to write response > write tcp 127.0.0.1:10809->127.0.0.1:55692: wsasend: An established connection was aborted by the software in your host machine.
2024/09/28 22:21:00 from 127.0.0.1:55695 accepted //signaler-pa.youtube.com:443 [http -> proxy]
2024/09/28 22:21:00 [Warning] [966074724] app/proxyman/inbound: connection ends > proxy/http: connection ends > proxy/http: failed to write response > write tcp 127.0.0.1:10809->127.0.0.1:55693: wsasend: An established connection was aborted by the software in your host machine.

Server log

There are no abnormal logs on the server side.


@RPRX
Member

RPRX commented Sep 30, 2024

Try #3819

@sakullla
Author

[screenshot]
Both client and server are running v24.9.30.
Without doing anything, the GET-method timeouts still keep appearing roughly a minute and a half after startup.

@xqzr
Contributor

xqzr commented Sep 30, 2024

@sakullla
Author

sakullla commented Sep 30, 2024

https://github.com/XTLS/Xray-examples/blob/ca705fbd0ba4e302f60ea12a3aeff2c498c8d236/VLESS-WSS-Nginx/nginx.conf#L27

If this GET method does not go through, then once all connections are exhausted the h3/h1 stream breaks and you have to wait for the timeout. With a 5-hour timeout configured, you have to wait 5 hours before it recovers. This problem is not limited to nginx.

@RPRX
Member

RPRX commented Sep 30, 2024

My feeling is that the GET times out because the downlink is inactive, but that timeout should be normal, shouldn't it?

With a 5-hour timeout configured, you have to wait 5 hours before it recovers

That sounds odd.

@sakullla
Author

My feeling is that the GET times out because the downlink is inactive, but that timeout should be normal, shouldn't it?

This is the nginx access log:
[screenshot]
This is the nginx error log:
[screenshot]
The nginx configuration is as follows:

location /spre {
    proxy_pass http://127.0.0.1:1234;
    proxy_http_version 1.1;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_read_timeout 30s;
    proxy_redirect off;
    proxy_buffer_size 16k;
    proxy_buffers 4 32k;
    proxy_busy_buffers_size 64k;
}

Judging from the result, the POST and the GET appear to be issued at the same time, and the GET is only logged 30 s later once it has timed out.
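
For reference, a minimal sketch of the same location block with a longer read timeout. The 300s value is purely illustrative and is an assumption, not a fix confirmed in this thread; it only reflects the observation above that nginx's proxy_read_timeout is the timer that cuts off the idle GET (download) stream when no upstream data arrives within the window.

location /spre {
    proxy_pass         http://127.0.0.1:1234;
    proxy_http_version 1.1;
    proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_redirect     off;
    # Assumed mitigation: keep the read timeout longer than the longest idle gap
    # expected on the download stream, so an inactive GET is not cut at 30s.
    proxy_read_timeout 300s;
    # Optional and untested in this thread: pass the response through without
    # buffering, as is commonly done for long-lived streaming responses.
    proxy_buffering    off;
}

Whether this merely masks the underlying problem (a GET left idle because its paired POST side has already failed) or genuinely avoids it is still open in the discussion below.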

@sakullla
Author

sakullla commented Sep 30, 2024

That sounds odd.

When the stream breaks, I have tested changing the value of proxy_read_timeout; it affects how long recovery takes, and the smaller the value, the faster it recovers.

@RPRX
Member

RPRX commented Oct 3, 2024

That sounds odd.

When the stream breaks, I have tested changing the value of proxy_read_timeout; it affects how long recovery takes, and the smaller the value, the faster it recovers.

Taken literally, shouldn't this be a timeout after some period of inactivity? How did it turn into a cooldown period for the broken stream?

One possibility is that the Xray server behind nginx disconnected from the destination and no longer sends data to nginx, but did not close its TCP connection to nginx, so nginx never closes the connection to the Xray client, and the Xray client still thinks the connection is fine. But how would that precondition be reached in the first place? @mmmray, take a look at whether the code can end up in this state.

@RPRX
Member

RPRX commented Oct 3, 2024

This reminds me of something: if a POST to the Xray server runs into a problem, the server should close the corresponding GET as well @mmmray

As for CloseWrite / CloseRead and the like, let's not worry about them for now.

@RPRX
Member

RPRX commented Oct 3, 2024

To the group chat: h2 has not been removed; h3 was merely added. It only blows up when the client's ALPN is set to h3 alone.

@RPRX
Member

RPRX commented Oct 3, 2024

But I'm not sure whether what @yuhan6665 wrote listens on the UDP port by default, or whether it checks the server-side ALPN value.

@RPRX
Member

RPRX commented Oct 3, 2024

To Var.: from your description it may be a UDP port conflict on the server side. Check whether h3 is listed in the server-side ALPN, and test whether the UDP port is only listened on when h3 is configured.

@mmmray
Collaborator

mmmray commented Oct 7, 2024

These kinds of bugs, where xray would not close the transport properly after the wrapped VLESS stream (or the tunneled connection) has been closed, should have been fixed with #3710.

I also have a setup similar to the one in xray-examples and don't run into this kind of issue. I wonder if there is a difference between nginx, haproxy, and caddy.

@sakullla
Author

sakullla commented Oct 7, 2024

caddy

The server is running Xray-core v24.9.30.

I set up caddy following this Xray example, and the problem still exists: https://github.com/XTLS/Xray-examples/tree/main/VLESS-TLS-SplitHTTP-CaddyNginx
This is my caddy configuration:

example.com {
    tls /etc/ssl/private/fullchain.cer /etc/ssl/private/private.key
    handle /split/* {
        reverse_proxy http://127.0.0.1:1234
    }
}

Watching the caddy logs with journalctl -fu caddy also keeps printing timeout messages like the following:
Oct 07 20:47:01 caddy[858257]: {"level":"error","ts":1728305221.0083828,"logger":"http.handlers.reverse_proxy","msg":"aborting with incomplete response","upstream":"127.0.0.1:1234","duration":0.002180221,"request":{"remote_ip":"...","remote_port":"32221","client_ip":"...","proto":"HTTP/2.0","method":"GET","host":"example.com:443","uri":"/split/847fc030-7330-46f4-aee4-7733d7321c05?x_padding=00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","headers":{"X-Forwarded-Host":["example.com:443"],"Accept-Encoding":["gzip"],"User-Agent":["Go-http-client/2.0"],"X-Forwarded-For":["..."],"X-Forwarded-Proto":["https"]},"tls":{"resumed":false,"version":772,"cipher_suite":4865,"proto":"h2","server_name":"example.com"}},"error":"reading: context canceled"}
Oct 07 20:47:07 caddy[858257]: {"level":"error","ts":1728305227.806477,"logger":"http.handlers.reverse_proxy","msg":"aborting with incomplete response","upstream":"127.0.0.1:1234","duration":0.002216172,"request":{"remote_ip":"...","remote_port":"32221","client_ip":"...","proto":"HTTP/2.0","method":"GET","host":"example.com:443","uri":"/split/08a5c199-9643-4baa-b22e-6759b98b32f7?x_padding=00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","headers":{"User-Agent":["Go-http-client/2.0"],"Accept-Encoding":["gzip"],"X-Forwarded-For":["..."],"X-Forwarded-Proto":["https"],"X-Forwarded-Host":["example.com:443"]},"tls":{"resumed":false,"version":772,"cipher_suite":4865,"proto":"h2","server_name":"example.com"}},"error":"reading: context canceled"}

@DE009

DE009 commented Oct 14, 2024

I ran into a similar problem: nginx 1.27.1, xray 24.9.30.
After I added proxy_read_timeout 120s; to nginx, the upstream timed out (110: Connection timed out) while reading upstream errors no longer appear in nginx's error.log.
Below is my nginx configuration:

location /proxy {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://127.0.0.1:12578;

        proxy_redirect off;
        proxy_set_header Connection "";
        proxy_http_version 1.1;
        proxy_read_timeout 120s;
    }

However, my client now reports protocol error: received DATA after END_STREAM. I'm not sure whether that is related to this issue.
Client configuration below:

  "outbounds": [
    {
      "tag": "proxy",
      "protocol": "vless",
      "settings": {
        "vnext": [
          {
            "address": "",
            "port": 443,
          }
        ]
      },
      "streamSettings": {
        "network": "splithttp",
        "security": "tls",
        "tlsSettings": {
          "allowInsecure": false,
          "serverName": "",
          "alpn": [
            "h3",
            "h2",
            "http/1.1"
          ],
          "fingerprint": "random"
        },
        "splithttpSettings": {
          "path": "/proxy",
          "host": “",
          "maxUploadSize": 1000000,
          "maxConcurrentUploads": 10
        }
      },
      "mux": {
        "enabled": false,
        "concurrency": -1
      }
    },

Server configuration below:

    "inbounds": [
        {
            "port": 12578,
            "protocol": "vless",
            "settings": {
                "clients": [
                    {
                        "id": "", 
                        "level": 0
                    }
                ],
                "decryption": "none",
                "fallbacks": [
                    {
                        "dest": 443, 
                        "xver": 1
                    }
                ]
            },
            "streamSettings": {
                "network": "splithttp",
                "security": "none",
                "splithttpSettings": {
                    "path": "/proxy", 
                    "host": ""
                }
            }
        }
    ],
