Nginx Load Balancing Configuration

绿林寻猫
2023-02-16

Service Preparation

Prepare three identical backend services: IP:8001, IP:8002, IP:8003.
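
Any three HTTP services listening on those ports will do for testing. A minimal sketch for spinning up placeholder backends (assumes python3 is available; this is not part of the original setup):

# start three throwaway backends on ports 8001-8003
python3 -m http.server 8001 &
python3 -m http.server 8002 &
python3 -m http.server 8003 &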

Modify the nginx configuration file

vim /usr/local/nginx/conf/nginx.conf

1. Configure the upstream: inside the http {} block, add an upstream block that points to the three backend services you started.

upstream [pick a name that is easy to recognize, e.g. webname] {
    server  IP:8001;
    server  IP:8002;
    server  IP:8003;
}

2. Inside the server {} block, add a location and configure proxy_pass. (Make sure it does not duplicate an existing location: if a location / is already defined and you add another one, Nginx will report an error when you reload or restart it.)

location / {
    # forward requests to the load-balanced upstream
    proxy_pass http://webname;
}
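
Optionally, the same location can also pass the original host name and client address on to the backends. A common pattern (the proxy_set_header lines are an addition, not part of the original configuration):

location / {
    proxy_pass http://webname;
    # forward the original Host header and client IP to the backends
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}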

The complete configuration is as follows:


#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

pid        /usr/local/nginx/logs/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    upstream webname {
        server  IP:8001;
        server  IP:8002;
        server  IP:8003;
    }

    server {
        listen       9600;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        #location / {
        #    root   html;
        #    index  index.html index.htm;
        #}

        location / {
            # forward requests to the load-balanced upstream
            proxy_pass http://webname;
        }

        # location /test/api2 {
        #     # forward requests to the load-balanced upstream
        #     proxy_pass http://webservers;
        # }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }


    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}


    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;

    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;

    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;

    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

}

3. Reload the configuration

# Reload the configuration (run from the sbin directory under the Nginx installation path)
./nginx -s reload
# Restart the service (if nginx is registered as a system service)
systemctl restart nginx.service

# Or reload using the full path to the binary
/usr/local/nginx/sbin/nginx -s reload
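
After reloading, you can check that requests are actually being spread across the backends, for example with a few curl calls against the port the server block listens on (9600 above; run this on the nginx host or replace localhost accordingly):

# with the default round-robin strategy, responses should rotate across the three backends
for i in 1 2 3 4 5 6; do curl -s http://localhost:9600/; echo; done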

Load Balancing Strategies

ip_hash

Each request is distributed according to the hash of the client IP, so each visitor is consistently routed to the same backend server. To some extent this solves the session-stickiness problem.

upstream webname {
    ip_hash;
    server  IP:8001;
    server  IP:8002;
    server  IP:8003;
}

weight

weight specifies the server's weight; the default is 1. The higher the weight, the more client requests that server receives.

upstream webname {
    server  IP:8001 weight=6;
    server  IP:8002 weight=3;
    server  IP:8003 weight=1;
}

fair (third-party)

Requests are distributed according to backend response time; servers with shorter response times are given priority.

upstream webname {
    fair;
    server  IP:8001;
    server  IP:8002;
    server  IP:8003;
}
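
Note that fair is not built into stock nginx; it comes from a third-party module (commonly nginx-upstream-fair), which has to be compiled in when building nginx from source, roughly as sketched below (the module path is a placeholder):

# rebuild nginx from source with the third-party fair module (path is a placeholder)
./configure --add-module=/path/to/nginx-upstream-fair
make && make install
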
url_hash

Requests are distributed according to the hash of the requested URL, so the same URL is always routed to the same node. This is mainly used to improve cache hit rates: for example, when the backend caches large amounts of data or resource files, this strategy avoids duplicating cache entries across nodes, saving cache space and improving the hit rate.

upstream webname {
    hash $request_uri;
    server  IP:8001;
    server  IP:8002;
    server  IP:8003;
}

least_conn

Requests are distributed according to the number of active connections per node, giving priority to the node with the fewest connections. This strategy mainly addresses the situation where uneven request-processing times leave some nodes overloaded.

upstream webname {
    least_conn;
    server  IP:8001;
    server  IP:8002;
    server  IP:8003;
}
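
Any of these strategies can be combined with the standard per-server parameters of ngx_http_upstream_module, such as weight, max_fails, fail_timeout and backup. A sketch (not from the original text):

upstream webname {
    least_conn;
    # mark a server as failed after 2 errors within 10s, and keep a hot spare
    server  IP:8001 weight=3 max_fails=2 fail_timeout=10s;
    server  IP:8002;
    server  IP:8003 backup;
}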