
Hadoop HA Cluster Setup (5 Nodes)


Base environment preparation (run on all five nodes):
yum install ntpdate lrzsz -y

systemctl stop firewalld
systemctl disable firewalld
systemctl stop NetworkManager
systemctl disable NetworkManager
setenforce 0
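Note that setenforce 0 only disables SELinux until the next reboot. A minimal sketch to make the change persistent, assuming the stock /etc/selinux/config layout:

# switch SELinux to disabled at boot time (assumes the file contains SELINUX=enforcing)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config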

mkdir /home/myapps && cd /home/myapps

hosts file configuration:
cat >> /etc/hosts << EOF
192.168.163.129 node1
192.168.163.131 node2
192.168.163.132 node3
192.168.163.133 node4
192.168.163.128 node5
EOF

Time synchronization configuration:
/usr/sbin/ntpdate ntp1.aliyun.com
crontab -e
*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com
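If you prefer not to edit the crontab interactively, the same entry can be added in one shot (a sketch that appends to whatever root crontab already exists):

( crontab -l 2>/dev/null ; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com" ) | crontab -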

SSH mutual trust (passwordless login) configuration:
ssh-keygen
for ip in 132 129 131 133 128;do ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.163.$ip ;done
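A quick way to confirm passwordless login works to every node (hostnames come from the /etc/hosts entries above):

for h in node1 node2 node3 node4 node5 ;do ssh $h hostname ;done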

Java environment variable configuration:
java -version
vim /etc/profile
export JAVA_HOME=/home/myapps/java
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

source /etc/profile

ZooKeeper cluster setup:
vim /etc/profile

export ZOOKEEPER_HOME=/home/myapps/zookeeper-3.4.11
export PATH=$PATH:$ZOOKEEPER_HOME/bin

cd /home/myapps/zookeeper-3.4.11/
mkdir data
mkdir log
vim conf/zoo.cfg

    # tickTime/initLimit/syncLimit/clientPort are the zoo_sample.cfg defaults;
    # clientPort must match the 2181 used in core-site.xml below
    tickTime=2000
    initLimit=10
    syncLimit=5
    clientPort=2181
    dataDir=/home/myapps/zookeeper-3.4.11/data
    dataLogDir=/home/myapps/zookeeper-3.4.11/log
    server.1=node1:2888:3888
    server.2=node2:2888:3888
    server.3=node3:2888:3888
Push the modified directory to the other nodes:
scp -r zookeeper-3.4.11 root@node2:/home/myapps/
scp -r zookeeper-3.4.11 root@node3:/home/myapps/

Set the myid:
In the directory specified by dataDir above, create a myid file whose content is a single number identifying the current host:
[root@node1 zookeeper-3.4.11]# echo "1" > /home/myapps/zookeeper-3.4.11/data/myid
[root@node2 zookeeper-3.4.11]# echo "2" > /home/myapps/zookeeper-3.4.11/data/myid
[root@node3 zookeeper-3.4.11]# echo "3" > /home/myapps/zookeeper-3.4.11/data/myid
Start the cluster:
[root@node1 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
[root@node2 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
[root@node3 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
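Once a quorum has formed, each node reports its role:

/home/myapps/zookeeper-3.4.11/bin/zkServer.sh status    # Mode: leader on one node, Mode: follower on the other two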

Hadoop cluster setup:
cd /home/myapps/hadoop/hadoop-2.7.5/etc/hadoop

vim hadoop-env.sh

export JAVA_HOME=/home/myapps/java

vim yarn-env.sh

export JAVA_HOME=/home/myapps/java

vim hdfs-site.xml

<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>node1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>node2:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>node2:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node3:8485;node4:8485;node5:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <!-- the key generated by ssh-keygen above (RSA by default) -->
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/myapps/hadoop/node/local/data</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
</configuration>
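A quick sanity check that Hadoop parses the file as intended (full path used because the bin directory may not be on PATH yet):

/home/myapps/hadoop/hadoop-2.7.5/bin/hdfs getconf -confKey dfs.nameservices    # should print: mycluster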

vim core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/myapps/hadoop/data/hadoop/temp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
</configuration>

vim yarn-site.xml

<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>node1</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>
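In a stock Hadoop 2.7.x tarball, mapred-site.xml only ships as a template, so create it first (assuming the default distribution layout, from the etc/hadoop directory entered above):

cp mapred-site.xml.template mapred-site.xml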

vim mapred-site.xml

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description>Execution framework set to Hadoop YARN.</description>
  </property>
</configuration>

vim slaves
node3
node4
node5

Copy the modified Hadoop directory to each node:
for ip in 2 3 4 5 ;do scp -r /home/myapps/hadoop root@node${ip}:/home/myapps/ ;done
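The steps below invoke hdfs without a full path, so it helps to put Hadoop on the PATH on every node as well; a sketch mirroring the Java and ZooKeeper entries added to /etc/profile above:

export HADOOP_HOME=/home/myapps/hadoop/hadoop-2.7.5
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

source /etc/profile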

First, start ZooKeeper (on node1, node2 and node3).
Verify: jps should show a QuorumPeerMain process on each node.
Start the Hadoop cluster:

1. On node3, node4 and node5 run:
    cd /home/myapps/hadoop/hadoop-2.7.5/sbin/
    ./hadoop-daemon.sh start journalnode
   Verify: run jps; if JournalNode appears, this step succeeded.
2. Format HDFS on one of the NameNodes:
    cd /home/myapps/hadoop/hadoop-2.7.5/bin
    ./hdfs namenode -format
   Copy the directory generated by the format to the other NameNode:
    scp -r /home/myapps/hadoop/data root@node2:/home/myapps/hadoop/
   Once that succeeds, on node1 run:
    hdfs zkfc -formatZK
3. Start everything:
    cd /home/myapps/hadoop/hadoop-2.7.5/sbin/
    ./start-all.sh
   Verify the processes with jps, and through a browser:
    http://node1:50070    NameNode 'node1:8020' (active)
    http://node2:50070    NameNode 'node2:8020' (standby)
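The active/standby split can also be checked from the command line with the HA admin tool; nn1 and nn2 are the NameNode IDs defined in hdfs-site.xml:

hdfs haadmin -getServiceState nn1    # expect: active
hdfs haadmin -getServiceState nn2    # expect: standby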

ZooKeeper start/stop script:

#!/bin/bash
# chkconfig: 2345 20 80
# description: start/stop the ZooKeeper cluster
case $1 in
"start")
    # read one hostname per line and start zkServer.sh on each node in parallel
    cat /root/myapps/onekey/zk/slave | while read line
    do
    {
        echo $line
        ssh $line "source /etc/profile;nohup zkServer.sh start >/dev/null 2>&1 &"
    }&
    wait
    done
;;
"stop")
    # kill the QuorumPeerMain process on each node
    cat /root/myapps/onekey/zk/slave | while read line
    do
    {
        echo $line
        ssh $line "source /etc/profile;jps |grep QuorumPeerMain |cut -c 1-5 |xargs kill -s 9"
    }&
    wait
    done
;;
"status")
    jps
;;
*)
    echo "Usage: /etc/init.d/zkd {start|stop|status}"
;;
esac
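The script reads its node list from /root/myapps/onekey/zk/slave. A minimal setup sketch, assuming that file simply lists the three ZooKeeper hosts and that the script is saved as zkd (the name its usage message implies):

mkdir -p /root/myapps/onekey/zk
cat > /root/myapps/onekey/zk/slave << EOF
node1
node2
node3
EOF
cp zkd /etc/init.d/ && chmod +x /etc/init.d/zkd
chkconfig --add zkd    # registers the runlevels from the chkconfig header
/etc/init.d/zkd start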

Hadoop start/stop script:

#!/bin/bash
# chkconfig: 2345 20 80
# description: start/stop the Hadoop cluster
# paths match the install location used above
case $1 in
"start")
    /home/myapps/hadoop/hadoop-2.7.5/sbin/start-all.sh
;;
"stop")
    /home/myapps/hadoop/hadoop-2.7.5/sbin/stop-all.sh
;;
"status")
    jps
;;
*)
    echo "Usage: /etc/init.d/hadoopd {start|stop|status}"
;;
esac
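Installed the same way as the ZooKeeper script, assuming it is saved as hadoopd (again taken from its usage message):

cp hadoopd /etc/init.d/ && chmod +x /etc/init.d/hadoopd
chkconfig --add hadoopd
/etc/init.d/hadoopd start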