redis cluster

Build a Redis cluster of six Redis instances across three machines.
Install Redis for all six instances (see the earlier articles for how to install Redis). The layout is shown in the table below, with a start-up sketch after it.

Host                      IP             Port (redis-master)   Port (redis-slave)
node1 (master && slave)   128.0.0.101    6379                  6380
node2 (master && slave)   128.0.0.102    6379                  6380
node3 (master && slave)   128.0.0.103    6379                  6380
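With this layout, each host runs two instances: a master on 6379 and a slave on 6380. A minimal start-up sketch, assuming the config files live in the working directory shown as `redis` in the prompts below and that the data directories match the `dir` directives in each file:

# run on each node (node1 shown; repeat on node2/node3 with their own configs)
mkdir -p /opt/6379 /opt/6380        # data dirs referenced by "dir" in the configs
redis-server nodes-6379.conf        # master instance on port 6379
redis-server nodes-6380.conf        # slave instance on port 6380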

[root@node1 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.101
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
# important setting
cluster-enabled yes
# important setting
cluster-config-file nodes-6379.conf
# important setting
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
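Once an instance is up with this configuration, cluster mode can be verified from any machine with redis-cli (a quick sanity check, not part of the original walkthrough):

redis-cli -h 128.0.0.101 -p 6379 info cluster                       # expect cluster_enabled:1
redis-cli -h 128.0.0.101 -p 6379 config get cluster-node-timeout    # should return 15000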
[root@node1 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.101
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
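The two files on a host should differ only in their per-instance values; a quick diff makes that easy to confirm (assuming both files sit in the same directory, as in the prompts above):

diff nodes-6379.conf nodes-6380.conf
# expected differences: port, pidfile, logfile, dir and cluster-config-file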
[root@node2 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.102
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6379.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node2 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.102
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node3 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.103
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6379.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node3 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.103
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
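Besides the client port, every Redis Cluster node also talks to its peers over a cluster bus port equal to the data port plus 10000 (16379 and 16380 here), so both ranges must be reachable between the three hosts. A sketch for opening them, assuming firewalld is the firewall in use:

# run on every node
firewall-cmd --permanent --add-port=6379-6380/tcp      # client ports
firewall-cmd --permanent --add-port=16379-16380/tcp    # cluster bus ports (port + 10000)
firewall-cmd --reload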
Install the cluster dependencies
wget http://rubygems.org/downloads/redis-3.3.0.gem    # ruby redis client used by redis-trib.rb
yum install -y ruby
yum install -y rubygems
gem install redis-3.3.0.gem
cp /opt/redis/src/redis-trib.rb /usr/local/bin/       # cluster management script shipped with the redis source
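Before creating the cluster, it is worth confirming that the ruby redis client and redis-trib.rb are actually usable (a quick check, not in the original post):

gem list redis          # should list redis (3.3.0)
redis-trib.rb help      # prints the available subcommands if the gem loads correctly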
Create the cluster && check the cluster
[root@node1 redis]# redis-trib.rb create --replicas 1 128.0.0.101:6379 128.0.0.101:6380 128.0.0.102:6379 128.0.0.102:6380 128.0.0.103:6379 128.0.0.103:6380
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
128.0.0.103:6379
128.0.0.102:6379
128.0.0.101:6379
Adding replica 128.0.0.102:6380 to 128.0.0.103:6379
Adding replica 128.0.0.103:6380 to 128.0.0.102:6379
Adding replica 128.0.0.101:6380 to 128.0.0.101:6379
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 128.0.0.101:6379)
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   slots: (0 slots) slave
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   slots: (0 slots) slave
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   slots: (0 slots) slave
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@node1 redis]# redis-trib.rb check 128.0.0.101:6379
>>> Performing Cluster Check (using node 128.0.0.101:6379)
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   slots: (0 slots) slave
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   slots: (0 slots) slave
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   slots: (0 slots) slave
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
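With all 16384 slots covered, the cluster can be exercised with redis-cli in cluster mode; -c makes the client follow MOVED redirections. A small smoke test (the key and value here are only examples):

redis-cli -c -h 128.0.0.101 -p 6379 set hello world   # may be redirected to the slot owner
redis-cli -c -h 128.0.0.102 -p 6379 get hello         # returns "world" from whichever master holds the slot
redis-cli -h 128.0.0.101 -p 6379 cluster nodes        # lists all six nodes and their roles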

Success
