redis cluster

redis cluster

3台机器去搭建6个redis实例的redis cluster
在3台机器上各安装2个redis实例,共6个(如何安装看前面的文章)

主机

ip

端口(redis-master)

端口(redis-slave)

node1(master&&slave)

128.0.0.101

6379

6380

node2(master&&slave)

128.0.0.102

6379

6380

node3(master&&slave)

128.0.0.103

6379

6380

[root@node1 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.101
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
#重要配置
cluster-enabled yes
#重要配置
cluster-config-file nodes-6379.conf
#重要配置
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node1 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.101
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node2 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.102
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6379.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node2 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.102
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node3 redis]# cat nodes-6379.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.103
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6379.pid"
loglevel notice
logfile "redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6379"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6379.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
[root@node3 redis]# cat nodes-6380.conf | grep -v "#" | grep -v "^$"
bind 128.0.0.103
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile "/var/run/redis_6380.pid"
loglevel notice
logfile "redis_6380.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename "dump.rdb"
dir "/opt/6380"
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file nodes-6380.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
安装集群依赖
wget http://rubygems.org/downloads/redis-3.3.0.gem
yum install -y ruby
yum install -y rubygems
gem install redis-3.3.0.gem
cp /opt/redis/src/redis-trib.rb /usr/local/bin/
启动集群&&检查集群
[root@node1 redis]# redis-trib.rb create --replicas 1 128.0.0.101:6379 128.0.0.101:6380 128.0.0.102:6379 128.0.0.102:6380 128.0.0.103:6379 128.0.0.103:6380
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
128.0.0.103:6379
128.0.0.102:6379
128.0.0.101:6379
Adding replica 128.0.0.102:6380 to 128.0.0.103:6379
Adding replica 128.0.0.103:6380 to 128.0.0.102:6379
Adding replica 128.0.0.101:6380 to 128.0.0.101:6379
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 128.0.0.101:6379)
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   slots: (0 slots) slave
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   slots: (0 slots) slave
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   slots: (0 slots) slave
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@node1 redis]# redis-trib.rb check 128.0.0.101:6379
>>> Performing Cluster Check (using node 128.0.0.101:6379)
M: ed0a6a4c3c93874795f1256ab149897f032e99f0 128.0.0.101:6379
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
M: a3c5f10ba6b4660647a3de7ec6d5e6ef85473361 128.0.0.103:6379
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: 9ad3995fb1b4a58f155e2096aa9b34b929f91f92 128.0.0.102:6379
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 53ab8301c827e2dd78c70632ca70072b3a7f5783 128.0.0.102:6380
   slots: (0 slots) slave
   replicates a3c5f10ba6b4660647a3de7ec6d5e6ef85473361
S: b49f08acfa8ff7a449bb765eed83889c4ad16952 128.0.0.103:6380
   slots: (0 slots) slave
   replicates 9ad3995fb1b4a58f155e2096aa9b34b929f91f92
S: b91983b25071a5cc659a7c61127d645c5f381946 128.0.0.101:6380
   slots: (0 slots) slave
   replicates ed0a6a4c3c93874795f1256ab149897f032e99f0
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

成功

原文发布于微信公众号 - 从零开始的linux(gh_4a9d788f5f27)

原文发表时间:2018-02-01

本文参与腾讯云自媒体分享计划,欢迎正在阅读的你也加入,一起分享。

发表于

我来说两句

0 条评论
登录 后参与评论

相关文章

来自专栏ASP.NET MVC5 后台权限管理系统

ASP.NET MVC5+EF6+EasyUI 后台管理系统(91)-EF 连接 MySql

Navicat Premium 12等同于MSSQL的SQL Server Management Studio,操作过程差不多,我们到时可以建表,建数据库

1141
来自专栏c#开发者

android studio 更新 Gradle错误解决方法

Android Studio每次更新版本都会更新Gradle这个插件,但由于长城的问题每次更新都是失败,又是停止在Refreshing Gradle Proje...

3227
来自专栏张善友的专栏

Ubuntu & Fedora Mono 2.8 安装脚本

在Ubuntu和Fedora这两大系列的Linux发行版上,不像suse的Linux发行版,安装Mono 一般都是需要从源代码开始编译,这里想大家推荐一个 Ub...

1935
来自专栏乐沙弥的世界

Linux 7下MySQL自启动配置(glibc)

使用glibc编译后的mysql二进制安装方法被广泛使用,因为它和Windows下的zip方式一下,简单几个步骤,配置一下环境即可。而在Linux 7版本中,M...

1002
来自专栏黑泽君的专栏

创建redis cluster时,有警告提示

  http://www.php-master.com/post/325868.html

963
来自专栏Java帮帮-微信公众号-技术文章全总结

Web-第三十二天 WebLogic中间件【悟空教程】

中间件(middleware)是基础软件的一大类, 属于可复用软件的范畴. 顾名思义,中间件处于操作系统软件与用户的应用软件的中间.

2813
来自专栏大魏分享(微信公众号:david-share)

IBM PowerHA 6 DARE 的功能介绍

DARE 的功能介绍 PowerHA 6.1 提供了 cluster 动态调整的功能,即在 cluster 处于活动的状态时,动态地对 cluster 拓扑和资...

40112
来自专栏IT探索

qt使用笔记

最近在做跨平台的终端开发,用到了QT,把遇到的问题和解决方法在这里记录下,供大家参考。

1432
来自专栏java闲聊

windows下实用工具推荐

在windwos下启动工具平常的操作是需要在桌面找到图标,然后点击打开,现在你可以去掉第一步操作了,实用Wox会让你爱不释手,只需一步即可打开你想要的软件,快捷...

943
来自专栏WindCoder

node-gyp错误之旅

Node.js 在安装模块的时候一直报错,提示安装node-gyp时报python不可用的情况

2K1

扫码关注云+社区

领取腾讯云代金券