最大化从haproxy到jetty应用程序服务器的tcp连接速率

时间:2015-08-10 04:00:41

标签: linux nginx tcp jetty haproxy

我正在使用nginx前置haproxy,对jetty应用服务器进行负载均衡,所有这些都运行在ubuntu-14-04-x64上。

Nginx和haproxy共享0.5G 1CPU VM。每个jetty服务器都运行在4G 2CPU VM上。

Nginx前端haproxy,配置如下:

user myc;
worker_processes 1;
pid /run/nginx.pid;

events {
    worker_connections 65536;
    multi_accept on;
    use epoll;
}

http {
    include mime.types;
    types_hash_max_size 2048;
    default_type application/octet-stream;
    proxy_cache_path /home/myc/cache levels=1:2 keys_zone=one:10m inactive=7d;
    proxy_http_version 1.1;
    proxy_set_header Connection "";

    upstream loadbalancer {
        server unix:/tmp/haproxy.sock;
        keepalive 8192;
    }

    server {
        listen 80 backlog=16384;
        server_name example.org;
        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        gzip on;
        gzip_types text/plain application/javascript application/x-javascript text/javascript text/xml text/css application/json;
        gzip_vary on;
        gzip_min_length 10240;

        open_file_cache max=2000 inactive=20s;
        open_file_cache_valid 60s;
        open_file_cache_min_uses 5;
        open_file_cache_errors off;

        sendfile on;

        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        error_page 502 /502.html;
        location /502.html {
            root /home/myc/html;
        }

        proxy_cache one;

        location / {
            proxy_pass http://loadbalancer/;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            add_header Cache-Control private; # Don't cache any of this publicly, because JSESSIONID will be cached, too

            add_header X-Proxy-Cache $upstream_cache_status;
        }

        location /api {
            proxy_pass http://loadbalancer/api;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            proxy_ignore_headers Expires;
            proxy_hide_header Expires;
            proxy_ignore_headers Set-Cookie; # Make sure JSESSIONID is not cached with static content
            proxy_hide_header Set-Cookie;

            proxy_cache_valid 200 10s;

            add_header X-Proxy-Cache $upstream_cache_status;
        }

        location ~* ^.*\.(css|js|gif|jpe?g|png|ico)$ {
            proxy_pass http://loadbalancer$uri;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            expires 1h;
            access_log off;
            log_not_found off;

            proxy_ignore_headers Expires;
            proxy_hide_header Expires;
            proxy_ignore_headers Set-Cookie; # Make sure JSESSIONID is not cached with static content
            proxy_hide_header Set-Cookie;
            add_header Cache-Control public;

            proxy_cache_valid 200 404 10s;

            add_header X-Proxy-Cache $upstream_cache_status;
        }
    }
}

HAProxy的配置如下:

global
    daemon
    maxconn 262144
    maxconnrate 128
    chroot /home/jail/

defaults
    mode http
    timeout connect 10s
    timeout client 120s
    timeout server 120s
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    compression algo identity
    option http-keep-alive

frontend http-in
    bind /tmp/haproxy.sock user myc
    default_backend makeyourcase

backend makeyourcase
    option httpchk GET /
    appsession JSESSIONID len 52 timeout 1h
    server ny2-b-app01 <redacted>:8080 maxconn 1024 check inter 60000 fall 1 rise 1
    server ny2-b-app02 <redacted>:8080 maxconn 1024 check inter 60000 fall 1 rise 1

在nginx/haproxy机器和两台jetty机器上使用以下参数调优TCP协议栈:

net.core.netdev_max_backlog = 65535
net.core.somaxconn = 65535
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rfc1337 = 1
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_moderate_rcvbuf = 1
net.ipv4.tcp_syncookies = 0

net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 16384 16777216

net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_window_scaling = 1

我正在使用The Grinder来生成负载。一旦负载达到某个程度,处于SYN_SENT状态的连接就会在haproxy一侧堆积。如果我将maxconnrate降低到96,连接速率就会降到SYN_SENT永远不会堆积的程度。后端的CPU占用从未超过20%,平均负载也从未超过1。我在nginx、haproxy和TCP协议栈中尝试了各种调优选项。

似乎应该存在某个TCP参数,能让每秒更多的连接到达jetty服务器,从而让haproxy把jetty服务器完全压满。

另一方面,根据http://cbonte.github.io/haproxy-dconv/configuration-1.5.html,"默认情况下,HAProxy以保持连接(keep-alive)模式运行"。然而,haproxy和应用服务器之间的连接并没有保持打开。是我指定的某个haproxy选项阻止了这一点吗?或者是否有某个选项可以实现它?

1 个答案:

答案 0 :(得分:0)

虽然我花了一些时间来调整设置,如下所示,最终给出更合理结果的变化(大约14K同时连接)是将托管nginx和haproxy的VM的大小从0.5G 1CPU增加到2G 2CPU。

这是当前的nginx配置:

user myc;
worker_processes 1;
pid /run/nginx.pid;
worker_rlimit_nofile 131072;

events {
    worker_connections 65536;
    multi_accept on;
    use epoll;
}

http {
    include mime.types;
    types_hash_max_size 2048;
    default_type application/octet-stream;
    proxy_cache_path /home/myc/cache levels=1:2 keys_zone=one:10m inactive=7d;

    upstream loadbalancer {
        server unix:/tmp/haproxy.sock;
        keepalive 32768;
    }

    upstream haproxy_admin {
        server 127.0.0.1:8081;
    }

    server {
        listen 80 backlog=16384;
        server_name makeyourcase.org;
        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        gzip on;
        gzip_types text/plain application/javascript application/x-javascript text/javascript text/xml text/css application/json;
        gzip_vary on;
        gzip_min_length 10240;

        open_file_cache max=2000 inactive=20s;
        open_file_cache_valid 60s;
        open_file_cache_min_uses 5;
        open_file_cache_errors off;

        sendfile on;

        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        error_page 502 /502.html;
        location /502.html {
            root /home/myc/html;
        }

        proxy_cache one;

        location ~ ^/api/auth/local/.*$ {
            proxy_pass http://loadbalancer$uri;
        }

        location /admin {
            proxy_pass http://loadbalancer/admin;
        }

        location /stats {
            proxy_pass http://haproxy_admin/;
        }

        location / {
            proxy_pass http://loadbalancer/;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            add_header Cache-Control private; # Don't cache any of this publicly, because JSESSIONID will be cached, too

            add_header X-Proxy-Cache $upstream_cache_status;
        }

        location /api {
            proxy_pass http://loadbalancer/api;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            proxy_ignore_headers Expires;
            proxy_hide_header Expires;
            proxy_ignore_headers Set-Cookie; # Make sure JSESSIONID is not cached with static content
            proxy_hide_header Set-Cookie;

            proxy_cache_valid 200 10s;

            add_header X-Proxy-Cache $upstream_cache_status;
        }

        location ~* ^.*\.(css|js|gif|jpe?g|png|ico)$ {
            proxy_pass http://loadbalancer$uri;
            proxy_connect_timeout 60s;
            proxy_read_timeout 60s;

            expires 1h;
            access_log off;
            log_not_found off;

            proxy_ignore_headers Expires;
            proxy_hide_header Expires;
            proxy_ignore_headers Set-Cookie; # Make sure JSESSIONID is not cached with static content
            proxy_hide_header Set-Cookie;
            add_header Cache-Control public;

            proxy_cache_valid 200 404 10s;

            add_header X-Proxy-Cache $upstream_cache_status;
        }
    }
}

和HAProxy:

global
    daemon
    maxconn 32768
    maxconnrate 256
    chroot /home/jail/

defaults
    mode http
    timeout connect 10s
    timeout client 120s
    timeout server 120s
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    compression algo identity

frontend http-in
    bind /tmp/haproxy.sock user myc
    default_backend makeyourcase

backend makeyourcase
    option httpchk GET /
    option http-server-close
    appsession JSESSIONID len 52 timeout 1h
    server ny2-b-app01 <redacted>:8080 maxconn 1024 check inter 60000 fall 1 rise 1
    server ny2-b-app02 <redacted>:8080 maxconn 1024 check inter 60000 fall 1 rise 1

listen stats localhost:8081
    mode   http
    stats  enable
    stats  hide-version
    stats  realm MYC
    stats  uri /

和sysctl:

fs.file-max = 1048576

net.core.netdev_max_backlog = 250000
net.core.somaxconn = 262144
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728

net.ipv4.tcp_rfc1337 = 1
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_fin_timeout = 10
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_moderate_rcvbuf = 1
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_rmem = 4096 87380 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864
net.ipv4.tcp_low_latency = 1

net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_window_scaling = 1

vm.swappiness = 0