Hadoop集群配置
-
增加环境变量
export PATH=$PATH:/root/hadoop/bin:/root/hadoop/sbin
-
修改workers配置
vim ${HADOOP_HOME}/etc/hadoop/workers #增加你的workers节点的名称 hadoop2 hadoop3
-
修改core-site.xml
vim ${HADOOP_HOME}/etc/hadoop/core-site.xml <configuration> <property> <name>fs.defaultFS</name> <value>hdfs://hadoop1:9000</value> </property> <property> <name>hadoop.tmp.dir</name> <value>file:/usr/local/hadoop/tmp</value> <description>Abase for other temporary directories.</description> </property> </configuration>
-
修改hdfs-site.xml
vim ${HADOOP_HOME}/etc/hadoop/hdfs-site.xml <configuration> <property> <name>dfs.namenode.secondary.http-address</name> <value>hadoop1:50090</value> </property> <property> <name>dfs.replication</name> <value>1</value> </property> <property> <name>dfs.namenode.name.dir</name> <value>file:/usr/local/hadoop/tmp/dfs/name</value> </property> <property> <name>dfs.datanode.data.dir</name> <value>file:/usr/local/hadoop/tmp/dfs/data</value> </property> </configuration>
-
修改mapred-site.xml
vim ${HADOOP_HOME}/etc/hadoop/mapred-site.xml <configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> <property> <name>mapreduce.jobhistory.address</name> <value>hadoop1:10020</value> </property> <property> <name>mapreduce.jobhistory.webapp.address</name> <value>hadoop1:19888</value> </property> <property> <name>yarn.app.mapreduce.am.env</name> <value>HADOOP_MAPRED_HOME=/root/hadoop</value> </property> <property> <name>mapreduce.map.env</name> <value>HADOOP_MAPRED_HOME=/root/hadoop</value> </property> <property> <name>mapreduce.reduce.env</name> <value>HADOOP_MAPRED_HOME=/root/hadoop</value> </property> </configuration>
-
修改yarn-site.xml
vim ${HADOOP_HOME}/etc/hadoop/yarn-site.xml <configuration> <property> <name>yarn.resourcemanager.hostname</name> <value>hadoop1</value> </property> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> </configuration>
-
将配好的hadoop安装包发送到其他节点
scp -r /root/hadoop hadoop2:/root
scp -r /root/hadoop hadoop3:/root
-
将jdk环境变量等配置到各个节点
-
在master节点启动
sh ${HADOOP_HOME}/sbin/start-dfs.sh sh ${HADOOP_HOME}/sbin/start-yarn.sh sh ${HADOOP_HOME}/sbin/mr-jobhistory-daemon.sh start historyserver