node3 ★ ★ ★ 192.168.179.7
node4 ★ ★ ★ 192.168.179.8
node5 ★ ★ ★
yum -y install psmisc
yum -y install psmisc
[root@node1 software]# tar -zxvf ./hadoop-3.3.4.tar.gz
[root@node1 software]# vim /etc/profile
export HADOOP_HOME=/software/hadoop-3.3.4/
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
#使配置生效
source /etc/profile
#导入JAVA_HOME
export JAVA_HOME=/usr/java/jdk1.8.0_181-amd64/
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>node1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>node2:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>node2:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node3:8485;node4:8485;node5:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/data/journal/node/local/data</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
</configuration>
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/data/hadoop/</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node3:2181,node4:2181,node5:2181</value>
  </property>
</configuration>
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>node1</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>node2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>node1:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>node2:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>node3:2181,node4:2181,node5:2181</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
</configuration>
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
[root@node1 ~]# vim /software/hadoop-3.3.4/etc/hadoop/workers
node3
node4
node5
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_JOURNALNODE_USER=root
HDFS_ZKFC_USER=root
YARN_RESOURCEMANAGER_USER=root
YARN_NODEMANAGER_USER=root
[root@node1 ~]# scp -r /software/hadoop-3.3.4 node2:/software/
[root@node1 ~]# scp -r /software/hadoop-3.3.4 node3:/software/
[root@node1 ~]# scp -r /software/hadoop-3.3.4 node4:/software/
[root@node1 ~]# scp -r /software/hadoop-3.3.4 node5:/software/
也可以在对应其他节点上解压对应的安装包后,只发送对应的配置文件,这样速度较快。
Copyright 2015-2022 财务报告网版权所有 备案号: 京ICP备12018864号-19 联系邮箱:29 13 23 6 @qq.com