一 Deployment environment:
master: 192.168.127.129
slave: 192.168.127.130
vip: 192.168.127.100 (in a real deployment this would need to be a public IP)
Each of the two servers hosts the same three sites: web, h5, and app.
The installation of nginx and keepalived is omitted here.
二 Master configuration
1 Load-balancing configuration on master
[root@master vhosts]# vi ld.conf
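Since the installation steps are skipped, a quick sanity check like the sketch below can confirm that both packages are present on each node and that the interface name matches the one referenced later in keepalived.conf (the commands assume the eth0 interface used throughout this article and that the binaries are on the PATH):

# run on both master and slave; prints information only, changes nothing
nginx -v                 # confirm nginx is installed
keepalived -v            # confirm keepalived is installed
ip addr show eth0        # confirm the interface name used in keepalived.conf below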
upstream www {
    server 192.168.127.129:81;
    server 192.168.127.130:81;
    ip_hash;
}
server {
    listen 81;
    server_name www.ihnhlife.com;
    location / {
        proxy_pass http://www/;
        proxy_set_header Host $host;
    }
}
## s.ihnhlife.com, the APP site
upstream s {
    server 192.168.127.129:83;
    server 192.168.127.130:83;
    ip_hash;
}
server {
    listen 83;
    server_name s.ihnhlife.com;
    location / {
        proxy_pass http://s/;
        proxy_set_header Host $host;
    }
}
## m.ihnhlife.com, the H5 site
upstream h5 {
    server 192.168.127.129:82;
    server 192.168.127.130:82;
    ip_hash;
}
server {
    listen 82;
    server_name m.ihnhlife.com;
    location / {
        proxy_pass http://h5/;
        proxy_set_header Host $host;
    }
}
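This file lives in the vhosts/ directory and is only picked up because the main nginx.conf shown in step 5 contains include vhosts/*.conf. After saving it, a syntax check followed by a reload makes the new proxy blocks active (the binary path below assumes the /usr/local/nginx source-install layout used in the main configuration):

# validate the configuration, then reload only if the check passes
/usr/local/nginx/sbin/nginx -t && /usr/local/nginx/sbin/nginx -s reload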
2 Configuration files for the three sites on master:
[root@master vhosts]# cat m.ihnhlife.com.conf
server
{
    listen 192.168.127.129:82;
    server_name localhost;
    index index.html index.htm;
    root /data/wwwroot/m.ihnhlife.com/;
}
[root@master vhosts]# cat s.ihnhlife.com.conf
server
{
    listen 192.168.127.129:83;
    server_name localhost;
    index index.html index.htm;
    root /data/wwwroot/s.ihnhlife.com/;
    location / {
        proxy_pass http://192.168.127.129:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
[root@master vhosts]# cat www.ihnhlife.com.conf
server
{
    listen 192.168.127.129:81;
    server_name localhost;
    index index.html index.htm;
    root /data/wwwroot/www.ihnhlife.com/;
}
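Note that unlike the other two vhosts, s.ihnhlife.com does not serve static files itself: its location / forwards every request to a backend application on port 8080 that is not shown in this article. Before expecting that site to respond, it is worth confirming that something is actually listening there, for example:

# the app backend behind s.ihnhlife.com is assumed to listen on 8080 on each node
ss -lntp | grep ':8080'
curl -I http://192.168.127.129:8080/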
3 Document roots for the three sites on master (/data/wwwroot)
[root@master wwwroot]# ls
m.ihnhlife.com s.ihnhlife.com www.ihnhlife.com
[root@master m.ihnhlife.com]# cat index.html
129 m.ihnhlife.com
[root@master s.ihnhlife.com]# cat index.html
129 s.ihnhlife.com
[root@master www.ihnhlife.com]# cat index.html
129 www.ihnhlife.com
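Each index.html simply identifies the node (129 here, 130 on the slave) so it is obvious which backend answered. With the vhosts and content in place, the sites can already be exercised locally: a request to 127.0.0.1 is handled by the proxy blocks (they are the only servers on the wildcard listen), while a request to 192.168.127.129 reaches the site vhosts directly; the Host header is set just to mirror real browser traffic.

# direct request to the local backend vhost (matches the listen 192.168.127.129:81 block)
curl http://192.168.127.129:81/
# requests through the proxy layer (match the wildcard listen blocks); ip_hash keeps
# one client pinned to one backend, so repeated requests should return the same node
curl -H "Host: www.ihnhlife.com" http://127.0.0.1:81/
curl -H "Host: m.ihnhlife.com" http://127.0.0.1:82/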
4 Keepalived configuration on master
global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from admin@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LTT
}
vrrp_script chk_nginx {         # there are many ways to check whether nginx is running, e.g. checking the process, using a custom script, etc.
    script "killall -0 nginx"   # use a shell command to check whether the nginx process exists
    interval 1                  # run the check once every second
    weight -2                   # when nginx is no longer running, lower the current priority by 2
    fall 2                      # number of consecutive failures before the check is treated as failed
    rise 1                      # number of consecutive successes before the check is treated as healthy
}
vrrp_instance IN_1 {
    state MASTER
    interface eth0
    virtual_router_id 22
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass aaaa
    }
    virtual_ipaddress {
        192.168.127.100
    }
    track_script {
        chk_nginx               # reference the vrrp_script defined above by name
    }
}
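The numbers in this file are what make failover work: when "killall -0 nginx" starts failing, weight -2 lowers the master's priority from 100 to 98, which is below the backup's 99, so the backup takes over the VIP. After starting keepalived, a quick check like the one below shows whether the VIP is currently held by this node (keepalived logs to syslog, /var/log/messages on CentOS):

# the VIP only appears in `ip addr` output, not in ifconfig's default listing
ip addr show eth0 | grep 192.168.127.100
# watch VRRP state transitions (MASTER/BACKUP) as they happen
tail -f /var/log/messages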
5 Main nginx configuration file on master:
user nobody nobody;
worker_processes 2;
error_log /usr/local/nginx/logs/nginx_error.log crit;
pid /usr/local/nginx/logs/nginx.pid;
worker_rlimit_nofile 51200;    # maximum number of open file descriptors per worker process
events
{
    use epoll;
    worker_connections 6000;   # maximum number of concurrent connections per worker process
}
http
{
    include mime.types;
    default_type application/octet-stream;
    server_names_hash_bucket_size 3526;
    server_names_hash_max_size 4096;
    # combined_realip is just the name of this log format and can be anything you like
    log_format combined_realip '$remote_addr $http_x_forwarded_for [$time_local]'
                               ' $host "$request_uri" $status'
                               ' "$http_referer" "$http_user_agent"';
    sendfile on;
    tcp_nopush on;
    keepalive_timeout 30;
    client_header_timeout 3m;
    client_body_timeout 3m;
    send_timeout 3m;
    connection_pool_size 256;
    client_header_buffer_size 1k;
    large_client_header_buffers 8 4k;
    request_pool_size 4k;
    output_buffers 4 32k;
    postpone_output 1460;
    client_max_body_size 10m;
    client_body_buffer_size 256k;
    client_body_temp_path /usr/local/nginx/client_body_temp;
    proxy_temp_path /usr/local/nginx/proxy_temp;
    fastcgi_temp_path /usr/local/nginx/fastcgi_temp;
    fastcgi_intercept_errors on;
    tcp_nodelay on;
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 8k;
    gzip_comp_level 5;
    gzip_http_version 1.1;
    gzip_types text/plain application/x-javascript text/css text/htm application/xml;
    include vhosts/*.conf;    # this is what pulls in ld.conf and the three site configs above
}
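Once the main file and the vhosts are in place, it is easy to confirm from the shell that nginx actually bound the expected ports. With a mix of wildcard and address-specific listen directives on the same ports, nginx normally binds only the wildcard sockets, so the expected output here is *:81, *:82 and *:83:

# list the TCP sockets owned by nginx (or: netstat -lntp | grep nginx)
ss -lntp | grep nginx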
三 Slave configuration files:
Apart from the keepalived configuration, everything on the slave is identical to the master, so the files can simply be copied over (change the contents of the three index.html files so it is easy to tell which node responded).
Keepalived configuration on the slave:
vim keepalived.conf    # this file is copied from the master and only needs two small changes
state BACKUP           # change the original MASTER to BACKUP
priority 99            # change the original 100 to 99
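Those two edits can also be made in one pass while copying the file over, as in the sketch below (the /etc/keepalived/keepalived.conf path is the usual default and is an assumption, since the article does not show where keepalived was installed):

# run on the slave: pull the master's config, then flip the two values that differ
scp root@192.168.127.129:/etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf
sed -i 's/state MASTER/state BACKUP/; s/priority 100/priority 99/' /etc/keepalived/keepalived.conf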
四 Start the services and test
Finally, start the nginx and keepalived services on both hosts, then verify failover:
1 Stop the nginx service on the master and check whether the three sites are still accessible.
2 Stop the keepalived service on the master and check whether the three sites are still accessible.
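One way to run these two checks from the shell, with the node to run each command on noted in the comments (the nginx binary path and the service command are assumptions based on the layout used earlier; adjust them to however the services were installed on your hosts):

# --- test 1: nginx failure on the master ---
/usr/local/nginx/sbin/nginx -s stop                          # on the master: stop nginx
ip addr show eth0 | grep 192.168.127.100                     # on the slave: the VIP should appear here within a few seconds
curl -H "Host: www.ihnhlife.com" http://192.168.127.100:81/  # should now return the 130 page
/usr/local/nginx/sbin/nginx                                  # on the master: start nginx again; the VIP moves back

# --- test 2: keepalived failure on the master ---
service keepalived stop                                      # on the master (or: systemctl stop keepalived)
curl -H "Host: m.ihnhlife.com" http://192.168.127.100:82/    # the sites should still answer via the slave
service keepalived start                                     # restore the master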