16. Redis Cluster
Reference: https://redis.io/topics/cluster-tutorial
16.1 Cluster Setup
#The following simulates six nodes (3 masters, 3 slaves) on a single machine
[root@redis ~]# cd /application/
[root@redis application]# cd redis
[root@redis redis]# mkdir redis-cluster
[root@redis redis]# cd redis-cluster
[root@redis redis-cluster]# mkdir 700{1..6}
[root@redis redis-cluster]# ls -l
total 24
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7001
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7002
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7003
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7004
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7005
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7006
[root@redis redis-cluster]# cp /application/redis/conf/redis.conf /application/redis/conf/redis.conf.ori2
[root@redis redis-cluster]# cd 7001
[root@redis 7001]# vim redis.conf
#The settings to change are listed below
bind 172.16.1.11 #Each node must bind its own IP. Here a single machine simulates the whole cluster, so every node uses the same IP; with multiple machines each node binds its own address.
port 7001 #Each node gets its own port
daemonize yes
pidfile /var/run/redis_7001.pid #This file disappears from the directory once the Redis service is stopped
loglevel debug
logfile "/application/redis/logs/redis-7001.log"
dir "/application/redis/redis-cluster/7001" #Data directory; every node must use a different directory, otherwise data will be lost!
appendonly yes
appendfsync always
cluster-enabled yes #Enable cluster mode
cluster-config-file nodes-7001.conf #The 700X here should match the port, and every node must use a different file. If you rebuild the cluster, delete all nodes-700* files first or creation will fail; the file is recreated when the cluster is created.
cluster-node-timeout 5000
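#Optional sanity check (a sketch, not part of the original steps): start the 7001 instance on its own and confirm it comes up in cluster mode before cloning the config, then shut it down again.
[root@redis 7001]# redis-server /application/redis/redis-cluster/7001/redis.conf
[root@redis 7001]# redis-cli -h 172.16.1.11 -p 7001 info cluster #should report cluster_enabled:1
[root@redis 7001]# redis-cli -h 172.16.1.11 -p 7001 shutdown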
#For the other five directories, only the occurrences of 7001 in this redis.conf need to be changed to the corresponding port.
#Copy the redis.conf from 7001 into each directory
[root@redis redis-cluster]# for((i=2;i<=6;i++));do cp /application/redis/redis-cluster/7001/redis.conf /application/redis/redis-cluster/700$i;done
[root@redis redis-cluster]# ls 700{2..6}/*
7002/redis.conf 7003/redis.conf 7004/redis.conf 7005/redis.conf 7006/redis.conf
#Replace 7001 with the corresponding port in each directory, keeping a .ori backup of redis.conf
[root@redis redis-cluster]# for((i=2;i<=6;i++));do sed -i.ori "s#7001#700$i#g" /application/redis/redis-cluster/700$i/redis.conf;done
#Verify the replacement in each directory
[root@redis redis-cluster]# for((i=2;i<=6;i++));do grep 700$i /application/redis/redis-cluster/700$i/redis.conf;done
port 7002
pidfile "/var/run/redis_7002.pid"
logfile"/application/redis/logs/redis-7002.log"
dir "/application/redis/redis-cluster/7002"
cluster-config-file nodes-7002.conf
port 7003
pidfile "/var/run/redis_7003.pid"
logfile"/application/redis/logs/redis-7003.log"
dir "/application/redis/redis-cluster/7003"
cluster-config-file nodes-7003.conf
port 7004
pidfile "/var/run/redis_7004.pid"
logfile"/application/redis/logs/redis-7004.log"
dir "/application/redis/redis-cluster/7004"
cluster-config-file nodes-7004.conf
port 7005
pidfile "/var/run/redis_7005.pid"
logfile "/application/redis/logs/redis-7005.log"
dir "/application/redis/redis-cluster/7005"
cluster-config-file nodes-7005.conf
port 7006
pidfile "/var/run/redis_7006.pid"
logfile"/application/redis/logs/redis-7006.log"
dir "/application/redis/redis-cluster/7006"
cluster-config-file nodes-7006.conf
#The cache is distributed: each node stores a different portion of the data. With the six nodes above, every node must know about the other five.
#Redis Cluster (3.x) needs Ruby for the redis-trib.rb tool used below to create the cluster; see "16.4 redis-trib.rb explained".
[root@redis run]# rpm -qa ruby
[root@redis run]# rpm -qa rubygems
[root@redis run]# yum install ruby rubygems -y
[root@redis run]# rpm -qa ruby rubygems
rubygems-1.3.7-5.el6.noarch
ruby-1.8.7.374-4.el6_6.x86_64
[root@redis run]# gem install redis #Install the Ruby client library for Redis
Successfully installed redis-3.3.2
1 gem installed
Installing ri documentation for redis-3.3.2...
Installing RDoc documentation for redis-3.3.2...
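#Optional check (a sketch): confirm the gem is visible to Ruby before running redis-trib.rb
[root@redis run]# gem list redis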
#Start the six Redis instances
[root@redis redis-cluster]# for((i=1;i<=6;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done
[root@redis redis-cluster]# netstat -lntup|grep redis
tcp 0 0 172.16.1.11:7005 0.0.0.0:* LISTEN 4843/redis-server 1
tcp 0 0 172.16.1.11:7006 0.0.0.0:* LISTEN 4845/redis-server 1
tcp 0 0 172.16.1.11:17001 0.0.0.0:* LISTEN 4833/redis-server 1
tcp 0 0 172.16.1.11:17002 0.0.0.0:* LISTEN 4837/redis-server 1
tcp 0 0 172.16.1.11:17003 0.0.0.0:* LISTEN 4839/redis-server 1
tcp 0 0 172.16.1.11:17004 0.0.0.0:* LISTEN 4841/redis-server 1
tcp 0 0 172.16.1.11:17005 0.0.0.0:* LISTEN 4843/redis-server 1
tcp 0 0 172.16.1.11:17006 0.0.0.0:* LISTEN 4845/redis-server 1
tcp 0 0 172.16.1.11:7001 0.0.0.0:* LISTEN 4833/redis-server 1
tcp 0 0 172.16.1.11:7002 0.0.0.0:* LISTEN 4837/redis-server 1
tcp 0 0 172.16.1.11:7003 0.0.0.0:* LISTEN 4839/redis-server 1
tcp 0 0 172.16.1.11:7004 0.0.0.0:* LISTEN 4841/redis-server 1
#Ports 17001 to 17006 are the cluster bus ports (client port + 10000), used for node-to-node communication such as gossip and failure detection
[root@redis redis-cluster]# ps -ef|grep 700|grep -v grep
root 4833 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7001[cluster]
root 4837 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7002[cluster]
root 4839 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7003[cluster]
root 4841 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7004[cluster]
root 4843 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7005[cluster]
root 4845 1 0 19:51 ? 00:00:00 redis-server 172.16.1.11:7006[cluster]
#Go to the Redis source directory and run redis-trib.rb
[root@redis src]# pwd
/home/lly/tools/redis-3.2.5/src
#The 1 after --replicas below is the master-to-replica ratio: one replica per master. With the six nodes listed in order, 7001-7003 become the three masters and 7004-7006 the three slaves; 7001 and 7004 form one master/slave pair, and so on.
#Other ratios can be used with a different number of nodes.
#The IPs given here must match the addresses bound with bind in each node's redis.conf!
#Cluster tests from Java showed that the IP addresses used in the Java code must be exactly the ones used when creating the cluster;
#internal and external IPs are not interchangeable!
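#Optional pre-check (a sketch): redis-trib.rb create expects every node to be empty, so a quick dbsize loop over all six nodes catches leftover data before creating the cluster.
[root@redis src]# for((i=1;i<=6;i++));do redis-cli -h 172.16.1.11 -p 700$i dbsize;done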
[root@redis src]# ./redis-trib.rb create --replicas 1 172.16.1.11:7001 172.16.1.11:7002 172.16.1.11:7003 172.16.1.11:7004 172.16.1.11:7005 172.16.1.11:7006
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
172.16.1.11:7001
172.16.1.11:7002
172.16.1.11:7003
Adding replica 172.16.1.11:7004 to 172.16.1.11:7001
Adding replica 172.16.1.11:7005 to 172.16.1.11:7002
Adding replica 172.16.1.11:7006 to 172.16.1.11:7003
#Below, M means master; 68e9252e2e2404a0ced500a98085acaa5754c7a2 is the node's unique ID
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:0-5460 (5461 slots) master #This master owns 5461 slots. Slots can be thought of as data shards: the key space is split into 16384 slots spread across the masters, which keeps access efficient. Only masters own slots; the slave nodes below have none, so slaves only serve reads, not writes.
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5461-10922 (5462 slots) master
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10923-16383 (5461 slots) master
#S means slave
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
replicates f314dda271d135634d6849cdb649192b58b08d7f
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
Can I set the above configuration? (type 'yes' to accept): yes #type yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:0-5460 (5461 slots) master
1 additional replica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#Stopping the cluster
#My understanding is that the slaves should be stopped first, then the masters
[root@redis ~]# for((i=6;i>=1;i--));do redis-cli -c -h 172.16.1.11 -p 700$i shutdown;done
[root@redis ~]# netstat -lntup|grep redis|grep -v grep
16.2 Viewing information about each cluster node
#For details of the cluster commands see "16.5 Redis Cluster commands"
#-c uses cluster mode, -h is the node IP, -p is the port
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1 #The masters use epochs 1 to 3, and the slaves follow in the same order.
cluster_stats_messages_sent:47331
cluster_stats_messages_received:47331
#The node you are connected to is marked with myself
172.16.1.11:7001> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481381814328 2 connected 5461-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381813826 6 connected
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381813218 4 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 1 connected 0-5460
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481381814328 3 connected 10923-16383
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381813321 5 connected
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7002
172.16.1.11:7002> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:2
cluster_stats_messages_sent:47456
cluster_stats_messages_received:47456
172.16.1.11:7002> cluster nodes
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381818884 6 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481381818380 1 connected 0-5460
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381817372 4 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481381817372 3 connected 10923-16383
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 myself,master - 0 0 2 connected 5461-10922
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381817374 5 connected
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7003
172.16.1.11:7003> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_sent:47279
cluster_stats_messages_received:47279
172.16.1.11:7003> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481381827476 2 connected 5461-10922
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481381826470 1 connected 0-5460
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381828484 5 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381826972 6 connected
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381828482 4 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 myself,master - 0 0 3 connected 10923-16383
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7004
172.16.1.11:7004> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_sent:47229
cluster_stats_messages_received:47229
172.16.1.11:7004> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381832005 5 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481381832511 2 connected 5461-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381831503 6 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481381832006 3 connected 10923-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 myself,slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 0 4 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481381832513 1 connected 0-5460
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7005
172.16.1.11:7005> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:2
cluster_stats_messages_sent:47928
cluster_stats_messages_received:47928
172.16.1.11:7005> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481381836745 2 connected 5461-10922
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 myself,slave f314dda271d135634d6849cdb649192b58b08d7f 0 0 5 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481381837752 3 connected 10923-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381837753 4 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381838259 6 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481381837250 1 connected 0-5460
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7006
172.16.1.11:7006> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_sent:47525
cluster_stats_messages_received:47525
172.16.1.11:7006> cluster nodes
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 myself,slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 0 6 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481381856221 3 connected 10923-16383
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481381857761 2 connected 5461-10922
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381858237 1 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481381857762 1 connected 0-5460
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381856726 5 connected
16.3 Testing the cluster nodes
#First confirm with the keys command that no node holds any data.
#Set a value on 7001
172.16.1.11:7001> set name11 name11val1
-> Redirected to slot [11311] located at 172.16.1.11:7003
OK
#The client is redirected to 7003, where the slot holding the value lives
172.16.1.11:7003> keys *
1. "name11"
172.16.1.11:7003> get name11
"name11val1"
#Query on 7002: keys does not list it, but get still returns the value
172.16.1.11:7002> keys *
(empty list or set)
172.16.1.11:7002> get name11
-> Redirected to slot [11311] located at 172.16.1.11:7003
"name11val1"
#Query on 7003
172.16.1.11:7003> keys *
1. "name11"
172.16.1.11:7003> get name11
"name11val1"
#7004, 7005 and 7006 behave the same as 7002
#In other words, a value set through one node is not necessarily stored in that node's own slots; it may hash to a slot owned by another node, and any node in the cluster can get a value set through any other node, as the check below shows.
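#To see the routing decision directly (a sketch using the key from the test above): CLUSTER KEYSLOT returns the slot a key hashes to, which determines the master that stores it.
172.16.1.11:7001> cluster keyslot name11
(integer) 11311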
16.4 redis-trib.rb explained
[root@redis src]# ./redis-trib.rb #The cluster management tool, written in Ruby; run without arguments it prints the usage below
Usage: redis-trib <command> <options> arguments
info host:port #Show cluster information
set-timeout host:port milliseconds #Set the node-to-node heartbeat timeout
help (show this help)
check host:port #Check the cluster
call host:port command arg arg .. arg #Run a command on every node in the cluster
import host:port #Import data from an external Redis into the cluster
rebalance host:port #Rebalance the number of slots across the nodes
del-node host:port node_id #Remove a node from the cluster
fix host:port #Repair the cluster
add-node new_host:new_port existing_host:existing_port #Add a new node to the cluster
reshard host:port #Migrate slots online
create host1:port1 ... hostN:portN #Create a cluster from the given IP1:PORT1 ... IPN:PORTN
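#For a quick health check of the running cluster, the read-only subcommands can be pointed at any node, for example (a sketch):
[root@redis src]# ./redis-trib.rb check 172.16.1.11:7001
[root@redis src]# ./redis-trib.rb info 172.16.1.11:7001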
16.5 Redis Cluster commands
//Cluster
CLUSTER INFO #Print information about the cluster
CLUSTER NODES #List all nodes currently known to the cluster and their details
//Nodes
CLUSTER MEET <ip> <port> #Add the node at ip:port to the cluster, making it part of the cluster
CLUSTER FORGET node_id #Remove the node identified by node_id from the cluster
CLUSTER REPLICATE node_id #Make the current node a slave of the node identified by node_id
CLUSTER SAVECONFIG #Save the node's cluster configuration to disk
//Slots
CLUSTER ADDSLOTS <slot> [slot ...] #Assign one or more slots to the current node
CLUSTER DELSLOTS <slot> [slot ...] #Remove the assignment of one or more slots from the current node
CLUSTER FLUSHSLOTS #Remove all slots assigned to the current node, leaving it with none
CLUSTER SETSLOT <slot> NODE node_id #Assign slot to the node identified by node_id; if the slot is already assigned to another node, that node drops it first
CLUSTER SETSLOT <slot> MIGRATING node_id #Migrate slot from this node to the node identified by node_id
CLUSTER SETSLOT <slot> IMPORTING node_id #Import slot into this node from the node identified by node_id
CLUSTER SETSLOT <slot> STABLE #Cancel an ongoing import or migration of slot
//Keys
CLUSTER KEYSLOT <key> #Compute which slot the key belongs to
CLUSTER COUNTKEYSINSLOT <slot> #Return the number of keys currently stored in slot
CLUSTER GETKEYSINSLOT <slot> <count> #Return up to count keys from slot
#These commands are specific to Redis Cluster; you must connect to a node before running them
#For example
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_sent:47331
cluster_stats_messages_received:47331
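#The slot and key commands work the same way once connected; a sketch reusing the name11 key from 16.3, which hashes to slot 11311 on 7003:
172.16.1.11:7003> cluster countkeysinslot 11311
(integer) 1
172.16.1.11:7003> cluster getkeysinslot 11311 10
1. "name11"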
16.6 Adding and removing cluster nodes
Reference:
"http://blog.51yip.com/nosql/1726.html/comment-page-1"
16.6.1 Preparing the test nodes
For the node configuration details see "16.1 Cluster Setup"
#Create the new node directories
[root@redis ~]# cd /application/redis/redis-cluster/
[root@redis redis-cluster]# cp -R 7001 7007
[root@redis redis-cluster]# cp -R 7001 7008
[root@redis redis-cluster]# sed -i.ori "s#7001#7007#g" 7007/redis.conf
[root@redis redis-cluster]# sed -i.ori "s#7001#7008#g" 7008/redis.conf
[root@redis redis-cluster]# grep 7007 7007/redis.conf
port 7007
pidfile /var/run/redis_7007.pid
logfile"/application/redis/logs/redis-7007.log"
dir /application/redis/redis-cluster/7007
cluster-config-file nodes-7007.conf
[root@redis redis-cluster]# grep 7008 7008/redis.conf
port 7008
pidfile /var/run/redis_7008.pid
logfile"/application/redis/logs/redis-7008.log"
dir /application/redis/redis-cluster/7008
cluster-config-file nodes-7008.conf
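#Start the two new instances the same way as before (a sketch of the step that precedes the process check below):
[root@redis redis-cluster]# for((i=7;i<=8;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done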
#Check the new node processes
[root@redis redis-cluster]# ps -ef|grep redis|grep -v grep
root 5372 1 0 00:21 ? 00:00:00 redis-server 172.16.1.11:7007[cluster]
root 5376 1 0 00:21 ? 00:00:00 redis-server 172.16.1.11:7008[cluster]
16.6.2 Adding nodes
16.6.2.1 Adding master node 7007
#Start the original six nodes first
[root@redis redis-cluster]# for((i=1;i<=6;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done
#Add the master node: 172.16.1.11:7007 is the new node, 172.16.1.11:7001 is any node already in the cluster
[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node 172.16.1.11:7007 172.16.1.11:7001
>>> Adding node 172.16.1.11:7007 to cluster 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.16.1.11:7007 to make it join the cluster.
[OK] New node added correctly.
#Check the cluster state
[root@redis redis-cluster]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_sent:975
cluster_stats_messages_received:970
172.16.1.11:7001> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481387384680 5 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481387383128 3 connected 10923-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481387382626 4 connected
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481387382626 0 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 1 connected 0-5460
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481387383629 2 connected 5461-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481387384135 6 connected
#Note: the newly added 7007 master holds no data, because it has not been assigned any slots (hash slots), so slots have to be assigned to it manually. For the master to accept writes it must own slots, and slots can be moved to it from the existing nodes. Adding nodes to scale out while the cluster is running does not affect Redis performance.
#Assign slots to 7007 (pick any master in the cluster and run a reshard against it)
[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007
slots: (0 slots) master
0 additional replica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 200
#200 is the number of slots to move to the new node; any number can be chosen. A master that owns no slots will never be selected to store data.
What is the receiving node ID? 4a174a6203795d7f57f4be52534d41f01e164f84
#4a174a6203795d7f57f4be52534d41f01e164f84 is the ID of 7007, the node that will receive the 200 slots. At the next prompt, entering all takes roughly equal shares of those 200 slots from every existing master (7001, 7002, 7003) and prints the resharding plan.
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:all
#Think of the slot allocation like a card game: all means everyone reshuffles and deals again, and the receiving node ID entered above is the player, even a newly joined one, who draws from the reshuffled deck.
Ready to move 200 slots.
Source nodes:
slots:0-5460 (5461 slots) master
1 additional replica(s)
slots:10923-16383 (5461 slots) master
1 additional replica(s)
slots:5461-10922 (5462 slots) master
1 additional replica(s)
Destination node:
slots: (0 slots) master
0 additional replica(s)
Resharding plan: #the plan for moving the slots
Moving slot 5461 from f314dda271d135634d6849cdb649192b58b08d7f
.. (more slots taken from this node)
Moving slot 10923 from 6385ebf9ea346525671b8c339614de4cb2a118cc
Moving slot 10924 from 6385ebf9ea346525671b8c339614de4cb2a118cc
.. (more slots taken from this node)
Moving slot 0 from 68e9252e2e2404a0ced500a98085acaa5754c7a2
.. (more slots taken from this node)
Do you want to proceed with the proposed reshard plan (yes/no)? yes
#Type yes to start the resharding.
Moving slot 5461 from 172.16.1.11:7002 to 172.16.1.11:7007:
......
Moving slot 10923 from 172.16.1.11:7003 to 172.16.1.11:7007:
......
Moving slot 0 from 172.16.1.11:7001 to 172.16.1.11:7007:
......
#Check the cluster state
[root@redis redis-cluster]# redis-cli -c -h 172.16.1.11 -p 7007
172.16.1.11:7007> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:7
cluster_stats_messages_sent:7616
cluster_stats_messages_received:7611
172.16.1.11:7007> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481388706933 2 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481388706427 3 connected 10989-16383
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 myself,master - 0 0 7 connected 0-65 5461-5527 10923-10988
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481388707436 3 connected
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481388706932 1 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481388705922 1 connected 66-5460
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481388706428 2 connected 5528-10922
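#Optional verification (a sketch): after the reshard, redis-trib.rb check should still report that all 16384 slots are covered and that all nodes agree.
[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb check 172.16.1.11:7001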
#Test reads and writes
#Connected on 7007
172.16.1.11:7007> set name17 name17val1
-> Redirected to slot [3305] located at 172.16.1.11:7001
OK
#Redirected to 7001
172.16.1.11:7001> keys *
1. "name17"
172.16.1.11:7001> get name17
"name17val1"
#Connected via 7001
#Because a value held on 7003 was fetched earlier, the client has stayed on 7003
172.16.1.11:7003> keys *
1. "name14"
2. "name11"
172.16.1.11:7003> get name17
-> Redirected to slot [3305] located at 172.16.1.11:7001
"name17val1"
#Whichever node owns the requested key, the client is redirected to that node
172.16.1.11:7001> keys *
1. "name17"
172.16.1.11:7001> get name17
"name17val1"
16.6.2.2 Adding slave node 7008
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node 172.16.1.11:7008 172.16.1.11:7001
#172.16.1.11:7008 is the new node, 172.16.1.11:7001 is any node already in the cluster
(Alternatively you can run
redis-trib.rb add-node --slave --master-id 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7008 172.16.1.11:7001
which specifies the master directly, so there is no need to set it again later with the replicate command.)
>>> Adding node 172.16.1.11:7008 to cluster 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:66-5460 (5395 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10989-16383 (5395 slots) master
1 additional replica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007
slots:0-65,5461-5527,10923-10988 (199 slots) master
0 additional replica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5528-10922 (5395 slots) master
1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.16.1.11:7008 to make it join the cluster.
[OK] New node added correctly.
#Check the cluster state
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:0
cluster_stats_messages_sent:367
cluster_stats_messages_received:367
172.16.1.11:7008> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481389544668 2 connected 5528-10922
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481389544163 1 connected
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481389545171 7 connected 0-65 5461-5527 10923-10988
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481389543657 3 connected 10989-16383
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481389544667 1 connected 66-5460
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481389545673 3 connected
#The line below is the newly added node; it has not been given a master yet
769cee49ada729901b9e9270467aeceff494be13 172.16.1.11:7008 myself,master - 0 0 0 connected
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481389545674 2 connected
#Set the master of the 7008 slave
#The replicate command specifies which master the current (slave) node should replicate.
#First connect to the newly added 7008 node with the client, then use the cluster command to attach the 7008 (slave) node to a master; here we use the 7007 master created earlier, 4a174a6203795d7f57f4be52534d41f01e164f84.
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster replicate 4a174a6203795d7f57f4be52534d41f01e164f84
OK
#Check the cluster information again
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:7
cluster_stats_messages_sent:3199
cluster_stats_messages_received:3199
172.16.1.11:7008> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481390061799 2 connected 5528-10922
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481390061800 1 connected
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481390059784 7 connected 0-65 5461-5527 10923-10988
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481390060289 3 connected 10989-16383
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481390059784 1 connected 66-5460
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481390060791 3 connected
#7008 is now a slave of the 7007 master
769cee49ada729901b9e9270467aeceff494be13 172.16.1.11:7008 myself,slave 4a174a6203795d7f57f4be52534d41f01e164f84 0 0 0 connected
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481390061294 2 connected
#Test from the new node
172.16.1.11:7008> keys *
(empty list or set)
172.16.1.11:7008> get name17
-> Redirected to slot [3305] located at 172.16.1.11:7001
"name17val1"
172.16.1.11:7001>
172.16.1.11:7008> set name18 name18val1
-> Redirected to slot [15622] located at 172.16.1.11:7003
OK
172.16.1.11:7003> get name18
"name18val1"
172.16.1.11:7003> keys *
1. "name14"
2. "name11"
3. "name18"
#If adding the new slave node fails with an error, fix it as follows:
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node --slave --master-id 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7008 172.16.1.11:7001
#Error:
[ERR] Node 172.16.1.11:7008 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
#Fix: stop the corresponding Redis node, delete appendonly.aof, dump.rdb and the node's nodes-700X.conf from its data directory, restart redis-server, and then retry the add-node operation, as in the sketch below.
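#A sketch of that cleanup for the 7008 node (paths follow the layout used in this document):
[root@redis ~]# redis-cli -h 172.16.1.11 -p 7008 shutdown
[root@redis ~]# cd /application/redis/redis-cluster/7008
[root@redis 7008]# rm -f appendonly.aof dump.rdb nodes-7008.conf
[root@redis 7008]# redis-server /application/redis/redis-cluster/7008/redis.conf
#Then run the add-node command again.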
16.6.3 Removing nodes
16.6.3.1 Removing slave node 7008
#769cee49ada729901b9e9270467aeceff494be13 is the ID of 7008
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7008 769cee49ada729901b9e9270467aeceff494be13
>>> Removing node 769cee49ada729901b9e9270467aeceff494be13 from cluster 172.16.1.11:7008
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
#Test whether the 7008 Redis instance can still be reached
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
Could not connect to Redis at 172.16.1.11:7008: Connection refused
Could not connect to Redis at 172.16.1.11:7008: Connection refused
#Check whether the 7008 process is still there; del-node also shuts the service down when it removes the node
[root@redis ~]# ps -ef|grep redis|grep -v grep
root 5372 1 0 00:21 ? 00:00:15 redis-server 172.16.1.11:7007[cluster]
root 5394 1 0 00:26 ? 00:00:15 redis-server 172.16.1.11:7001[cluster]
root 5398 1 0 00:26 ? 00:00:15 redis-server 172.16.1.11:7002[cluster]
root 5402 1 0 00:26 ? 00:00:15 redis-server 172.16.1.11:7003[cluster]
root 5406 1 0 00:26 ? 00:00:14 redis-server 172.16.1.11:7004[cluster]
root 5410 1 0 00:26 ? 00:00:14 redis-server 172.16.1.11:7005[cluster]
root 5414 1 0 00:26 ? 00:00:14 redis-server 172.16.1.11:7006[cluster]
#Check the cluster information
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:1
cluster_stats_messages_sent:21816
cluster_stats_messages_received:21807
172.16.1.11:7001> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481391195529 5 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481391194014 3 connected 10989-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481391194519 4 connected
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481391194519 7 connected 0-65 5461-5527 10923-10988
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 1 connected 66-5460
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481391196028 2 connected 5528-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481391195022 6 connected
#Test whether the value set through 7008 still exists (name18 from "16.6.2.2 Adding slave node 7008")
#Test on 7003
172.16.1.11:7003> keys *
1. "name14"
2. "name11"
3. "name18"
#Although the slave node was removed, name18, which was written through it, still exists
172.16.1.11:7003> get name18
"name18val1"
16.6.3.2 Removing master node 7007
#Before removing the 7007 master, all of its data (its slots) must first be moved to other nodes; only then can the node be removed, otherwise data would be lost. (At present the slots can only be migrated to a single node in one reshard; an even redistribution is not supported.) A quick way to check which slots the node still owns is shown below.
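#A sketch: the node's own line in CLUSTER NODES lists the slot ranges it still owns.
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7007 cluster nodes | grep myself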
16.6.3.2.1 Master with no slave attached
#Move the assigned slots away
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7007
>>> Performing Cluster Check (using node 172.16.1.11:7007)
M: 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007
slots:0-65,5461-5527,10923-10988 (199 slots) master
0 additional replica(s)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:66-5460 (5395 slots) master
1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5528-10922 (5395 slots) master
1 additional replica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:10989-16383 (5395 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 199
#Note the number here is 199; it will not be exactly the 200 that was originally requested
What is the receiving node ID? 68e9252e2e2404a0ced500a98085acaa5754c7a2
#This is the ID of 7001; the slots are moved to 7001
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:4a174a6203795d7f57f4be52534d41f01e164f84
#This is the ID of the 7007 master being removed
Source node #2:done
#Type done to generate the migration plan
Ready to move 199 slots.
Source nodes:
slots:0-65,5461-5527,10923-10988 (199 slots) master
0 additional replica(s)
Destination node:
slots:66-5460 (5395 slots) master
1 additional replica(s)
Resharding plan:
........ (slots being moved)
Do you want to proceed with the proposed reshard plan (yes/no)? yes
#Type yes to start the migration
Moving slot 0 from 172.16.1.11:7007 to 172.16.1.11:7001:
........ (slots being moved)
Moving slot 10988 from 172.16.1.11:7007 to 172.16.1.11:7001:
#Check the cluster information
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:3
cluster_current_epoch:8
cluster_my_epoch:8
cluster_stats_messages_sent:7598
cluster_stats_messages_received:7592
172.16.1.11:7001> cluster nodes
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481393144924 7 connected
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393144412 5 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393143402 6 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481393143908 2 connected 5528-10922
#The slots that were on 7007 have now been assigned to 7001
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 8 connected 0-5527 10923-10988
#Before 7007's slots were moved over, the 7001 entry looked like this:
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 1 connected 66-5460
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481393142899 3 connected 10989-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393143403 8 connected
#Check the cluster processes
[root@redis ~]# ps -ef|grep redis|grep -v grep
root 5613 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7001[cluster]
root 5615 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7002[cluster]
root 5621 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7003[cluster]
root 5625 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7004[cluster]
root 5629 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7005[cluster]
root 5633 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7006[cluster]
#The 7007 process is still running at this point
root 5635 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7007[cluster]
#Remove the 7007 master
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7007 4a174a6203795d7f57f4be52534d41f01e164f84
>>> Removing node 4a174a6203795d7f57f4be52534d41f01e164f84 from cluster 172.16.1.11:7007
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
#Check the cluster information; the 7007 node is no longer listed
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:8
cluster_my_epoch:8
cluster_stats_messages_sent:9384
cluster_stats_messages_received:9378
172.16.1.11:7001> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393501468 5 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393500456 6 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481393500965 2 connected 5528-10922
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 8 connected 0-5527 10923-10988
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481393501468 3 connected 10989-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393499450 8 connected
#Check the Redis processes; the 7007 process has stopped automatically
[root@redis ~]# ps -ef|grep redis|grep -v grep
root 5613 1 0 01:41 ? 00:00:07 redis-server 172.16.1.11:7001[cluster]
root 5615 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7002[cluster]
root 5621 1 0 01:41 ? 00:00:07 redis-server 172.16.1.11:7003[cluster]
root 5625 1 0 01:41 ? 00:00:07 redis-server 172.16.1.11:7004[cluster]
root 5629 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7005[cluster]
root 5633 1 0 01:41 ? 00:00:06 redis-server 172.16.1.11:7006[cluster]
16.6.3.2.2 Master with a slave attached
#First add a 7007 master and a 7008 slave again, following "16.6.2 Adding nodes"
#Node information before the removal
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster nodes
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393974867 8 connected
8c965e903668a7947727edd8ba9288205b0190cc 172.16.1.11:7008 myself,slave 3724b337e95391596114f25f855f97b973994ca3 0 0 0 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 master - 0 1481393974364 8 connected 69-5527 10923-10988
3724b337e95391596114f25f855f97b973994ca3 172.16.1.11:7007 master - 0 1481393973859 9 connected 0-68 5528-5592 10989-11053
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393973864 2 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481393974364 2 connected 5593-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393972856 3 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481393973355 3 connected 11054-16383
#Move the assigned slots away
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7007
>>> Performing Cluster Check (using node 172.16.1.11:7007)
M: 3724b337e95391596114f25f855f97b973994ca3 172.16.1.11:7007
slots:0-68,5528-5592,10989-11053 (199 slots) master
1 additional replica(s)
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
slots:11054-16383 (5330 slots) master
1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
slots: (0 slots) slave
replicates f314dda271d135634d6849cdb649192b58b08d7f
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
slots:69-5527,10923-10988 (5525 slots) master
1 additional replica(s)
S: 8c965e903668a7947727edd8ba9288205b0190cc 172.16.1.11:7008
slots: (0 slots) slave
replicates 3724b337e95391596114f25f855f97b973994ca3
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
slots: (0 slots) slave
replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
slots: (0 slots) slave
replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
slots:5593-10922 (5330 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 199
What is the receiving node ID? 68e9252e2e2404a0ced500a98085acaa5754c7a2
Please enter all the source node IDs.
Type 'all' to useall the nodes as source nodes for the hash slots.
Type 'done' onceyou entered all the source nodes IDs.
Source node #1:3724b337e95391596114f25f855f97b973994ca3
Source node #2:done
Ready to move 199 slots.
Source nodes:
slots:0-68,5528-5592,10989-11053 (199 slots) master
1 additional replica(s)
Destination node:
slots:69-5527,10923-10988 (5525 slots) master
1 additional replica(s)
Resharding plan:
Moving slot 0 from 3724b337e95391596114f25f855f97b973994ca3
.. (other slots being moved)
Moving slot 11053 from 3724b337e95391596114f25f855f97b973994ca3
Do you want to proceed with the proposed reshard plan (yes/no)? yes
Moving slot 0 from 172.16.1.11:7007 to 172.16.1.11:7001:
.. (other slots being moved)
Moving slot 11053 from 172.16.1.11:7007 to 172.16.1.11:7001:
#Remove the 7007 node by its ID
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7007 3724b337e95391596114f25f855f97b973994ca3
>>> Removing node 3724b337e95391596114f25f855f97b973994ca3 from cluster 172.16.1.11:7007
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
#Check the cluster node information
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005 slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481394169450 5 connected
#Note that 7008's master has become 7001
8c965e903668a7947727edd8ba9288205b0190cc 172.16.1.11:7008 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481394169952 10 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006 slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481394170457 6 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002 master - 0 1481394170457 2 connected 5593-10922
#7001 now has two slaves
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 10 connected 0-5592 10923-11053
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003 master - 0 1481394171466 3 connected 11054-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481394170963 10 connected
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:3
cluster_current_epoch:10
cluster_my_epoch:10
cluster_stats_messages_sent:12873
cluster_stats_messages_received:12860
#The 7007 process has already stopped
[root@redis ~]# ps -ef|grep redis|grep -v grep
root 5613 1 0 01:41 ? 00:00:11 redis-server 172.16.1.11:7001[cluster]
root 5615 1 0 01:41 ? 00:00:10 redis-server 172.16.1.11:7002[cluster]
root 5621 1 0 01:41 ? 00:00:10 redis-server 172.16.1.11:7003[cluster]
root 5625 1 0 01:41 ? 00:00:10 redis-server 172.16.1.11:7004[cluster]
root 5629 1 0 01:41 ? 00:00:10 redis-server 172.16.1.11:7005[cluster]
root 5633 1 0 01:41 ? 00:00:10 redis-server 172.16.1.11:7006[cluster]
root 5752 1 0 02:16 ? 00:00:02 redis-server 172.16.1.11:7008[cluster]
Title: Redis summary, part 4 (cluster setup and adding/removing nodes)
Author: yazong
URL: https://blog.llyweb.com/articles/2016/12/11/1578156806878.html