#部署完成后
root@servera:/opt/hadoop/hadoop-3.1.0# jps
14056 SecondaryNameNode
14633 Jps
13706 NameNode
14317 ResourceManager
root@serverb:~# jps
5288 NodeManager
5162 DataNode
5421 Jps
root@serverc:~# jps
4545 NodeManager
4371 DataNode
4678 Jps
如上所示,一共三台机器作为集群,servera 作为 master,其他两台作为 worker。
vim /etc/hosts
10.80.80.110 servera
10.80.80.111 serverb
10.80.80.112 serverc
# Download Oracle JDK 8u172 (cookie header accepts the license).
wget --no-check-certificate --no-cookies \
  --header "Cookie: oraclelicense=accept-securebackup-cookie" \
  http://download.oracle.com/otn-pub/java/jdk/8u172-b11/a58eab1ec242421181065cdc37240b08/jdk-8u172-linux-x64.tar.gz

# Unpack and install under /opt/java (-p: no error if the dir exists).
mkdir -p /opt/java
tar -zxf jdk-8u172-linux-x64.tar.gz
mv jdk1.8.0_172/ /opt/java/

# Create the system-wide JDK environment file:
vim /etc/profile.d/jdk-1.8.sh
#!/bin/sh
# Author:wangxiaolei 王小雷
# Blog: http://blog.csdn.net/dream_an
# Github: https://github.com/wangxiaoleiai
# web: www.xiaolei.wang
# Date: 2018.05
# Path: /etc/profile.d/
export JAVA_HOME=/opt/java/jdk1.8.0_172
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# Apply the environment and verify the install.
source /etc/profile
# JDK 8 only understands '-version'; '--version' was added in JDK 9.
java -version
# Install the SSH server and pdsh (the parallel shell the Hadoop
# start/stop scripts use to reach every node). Run on servera.
apt install ssh pdsh
# pdsh defaults to rsh; without this, start-dfs.sh fails with
# "pdsh@servera: ...: connect: Connection refused".
echo ssh > /etc/pdsh/rcmd_default

# Passwordless SSH key (empty passphrase) for the Hadoop scripts.
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
# First connection prompts for host-key confirmation — answer "yes".
ssh localhost

# Distribute the key to every node, including servera itself.
ssh-copy-id -i ~/.ssh/id_rsa.pub servera
ssh-copy-id -i ~/.ssh/id_rsa.pub serverb
ssh-copy-id -i ~/.ssh/id_rsa.pub serverc
共需要配置/opt/hadoop/hadoop-3.1.0/etc/hadoop/下的六个文件,分别是
hadoop-env.sh、core-site.xml、hdfs-site.xml、yarn-site.xml、mapred-site.xml、workers
# hadoop-env.sh additions: JDK location plus the accounts allowed to
# launch each daemon. Hadoop 3.x refuses to start HDFS/YARN daemons as
# root unless these *_USER variables are set.
export JAVA_HOME=/opt/java/jdk1.8.0_172/
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
<configuration>
  <!-- Default filesystem URI: must point at the NameNode host. The
       master of this cluster is servera; the original value
       "hdfs://ruizhia:9000" names a host that appears nowhere in
       /etc/hosts and would leave the NameNode unreachable. -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://servera:9000</value>
  </property>
  <!-- I/O buffer size in bytes (128 KB) for sequence files. -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
<configuration>
  <!-- Configurations for NameNode: -->
  <!-- Local directory where the NameNode stores the fsimage/edits. -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/var/lib/hadoop/hdfs/name/</value>
  </property>
  <!-- HDFS block size: 268435456 bytes = 256 MB. -->
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
  </property>
  <!-- NameNode RPC handler threads. NOTE: the original had a trailing
       space inside <name> ("dfs.namenode.handler.count "), which can
       prevent the value from being applied to the real property key. -->
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
  <!-- Configurations for DataNode: -->
  <!-- Local directory where DataNodes store block data. -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/var/lib/hadoop/hdfs/data/</value>
  </property>
  <!-- Single replica; with two DataNodes (serverb/serverc) a value of
       2 would give redundancy — 1 means any DataNode loss loses data. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
</configuration>
<configuration>
  <!-- Site specific YARN configuration properties -->
  <!-- ResourceManager runs on the cluster master. -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>servera</value>
  </property>
  <!-- To reach the web UI from outside, uncomment and replace 外网ip
       with the real public IP; otherwise it binds to localhost:8088. -->
  <!-- <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>外网ip:8088</value>
  </property> -->
  <!-- NodeManager: enable the shuffle auxiliary service that
       MapReduce jobs require. -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- Configurations for History Server (needs to be moved elsewhere). -->
</configuration>
<configuration>
  <!-- Execute MapReduce jobs on YARN instead of the local runner. -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- NOTE(review): on Hadoop 3.x, MR jobs often also need
       mapreduce.application.classpath (or HADOOP_MAPRED_HOME in the
       *.env properties) to find the MR framework jars — verify. -->
</configuration>
serverb
serverc
# Create the system-wide Hadoop environment file:
vim /etc/profile.d/hadoop-3.1.0.sh
#!/bin/sh
# Author:wangxiaolei 王小雷
# Blog: http://blog.csdn.net/dream_an
# Github: https://github.com/wangxiaoleiai
# Date: 201805
# web: www.xiaolei.wang
# Path: /etc/profile.d/
export HADOOP_HOME="/opt/hadoop/hadoop-3.1.0"
# sbin holds start-dfs.sh / start-yarn.sh etc.; adding it means the
# daemon control scripts can be run without their full paths.
export PATH="$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH"
export HADOOP_CONF_DIR="$HADOOP_HOME/etc/hadoop"
export YARN_CONF_DIR="$HADOOP_HOME/etc/hadoop"

# Reload so the new variables take effect in the current shell.
source /etc/profile
# One-time format of HDFS on the master; "myClusterName" is the cluster
# name (official docs: hdfs namenode -format <cluster_name>). Re-running
# this wipes NameNode metadata.
/opt/hadoop/hadoop-3.1.0/bin/hdfs namenode -format myClusterName
# Start HDFS: NameNode/SecondaryNameNode on servera, DataNodes on workers.
/opt/hadoop/hadoop-3.1.0/sbin/start-dfs.sh
# Start YARN: ResourceManager on servera, NodeManagers on workers.
/opt/hadoop/hadoop-3.1.0/sbin/start-yarn.sh
# Verify the Java daemons are up (compare with the jps listing above).
jps
# Shut the cluster down.
/opt/hadoop/hadoop-3.1.0/sbin/stop-dfs.sh
/opt/hadoop/hadoop-3.1.0/sbin/stop-yarn.sh
# DANGER: deletes all logs and all HDFS data (name + data dirs).
# Only for starting over from scratch; a re-format is required afterwards.
rm -rf /opt/hadoop/hadoop-3.1.0/logs/*
rm -rf /var/lib/hadoop/
root@servera:/opt/hadoop/hadoop-3.1.0# sbin/start-dfs.sh
Starting namenodes on [servera]
pdsh@servera: servera: connect: Connection refused
Starting datanodes
pdsh@servera: serverc: connect: Connection refused
pdsh@servera: serverb: connect: Connection refused
Starting secondary namenodes [servera]
pdsh@servera: servera: connect: Connection refused
原因:pdsh 默认使用 rsh 连接各节点,改为 ssh 即可解决:
echo ssh > /etc/pdsh/rcmd_default