
Big Data Platform Setup Guide: Hadoop


A detailed, step-by-step guide to building a Hadoop platform from scratch.

Create three virtual machines running CentOS 7.

Basic environment configuration

Note: DHCP is not recommended here, because the IP addresses could change.

Configure the IP addresses

1.master

[root@master ~]# nmcli connection add ifname ens32 con-name ens32 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.101/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@master ~]# nmcli con up ens32 

2.slave1

[root@slave1 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.102/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave1 ~]# nmcli con up ens33 

3.slave2

[root@slave2 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.103/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave2 ~]# nmcli con up ens33 
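
To confirm the static addresses took effect (a quick optional check, not part of the original transcript), query the interface on each node; substitute the interface name used above (ens32 on master, ens33 on the slaves):

# run on each node as root
ip -4 addr show ens33                              # should list the node's 192.168.130.x/24 address
nmcli connection show ens33 | grep ipv4.addresses  # should show the manually configured address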

Ping Baidu to verify external connectivity

1.master

[root@master ~]# ping baidu.com
PING baidu.com (39.156.66.10) 56(84) bytes of data.
64 bytes from 39.156.66.10 (39.156.66.10): icmp_seq=1 ttl=128 time=28.5 ms
^C
--- baidu.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 28.587/28.587/28.587/0.000 ms
[root@master ~]# 

2.slave1

[root@slave1 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=34.5 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=34.9 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 34.512/34.708/34.904/0.196 ms
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=33.0 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=35.2 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 33.035/34.138/35.241/1.103 ms
[root@slave2 ~]# 

Disable the firewall and SELinux

1.master

[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
[root@master ~]# 

2.slave1

[root@slave1 ~]# systemctl stop firewalld
[root@slave1 ~]# systemctl disable firewalld
[root@slave1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
[root@slave1 ~]# 
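
The transcript above stops at slave1 and does not show the slave2 or SELinux steps named in this section's title. A minimal sketch for completing the section, to be run as root on all three nodes (slave2 still needs the firewalld commands as well):

systemctl stop firewalld && systemctl disable firewalld                 # repeat on slave2
setenforce 0                                                            # switch SELinux to permissive for the current boot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config     # persist across reboots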

Create the hadoop user
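
The transcript only shows the verification with `id`; the creation command itself is not included. A minimal sketch that matches the uid/gid/wheel output below (run as root on every node):

useradd -G wheel hadoop    # creates the hadoop user and its primary group; wheel matches the id output below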

1.master

[root@master ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@master ~]# 

2.slave1

[root@slave1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave2 ~]# 

Set the hadoop user's password

1.master

[root@master ~]# echo password|passwd --stdin hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@master ~]# 

2.slave1

[root@slave1 ~]# echo 'password' |passwd --stdin  hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# echo 'password' |passwd --stdin hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave2 ~]# 

Install the JDK

Remove the preinstalled JDK packages

1.master

[root@master ~]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.131-11.b12.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
java-1.7.0-openjdk-headless-1.7.0.141-2.6.10.5.el7.x86_64
java-1.7.0-openjdk-1.7.0.141-2.6.10.5.el7.x86_64
java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
[root@master ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@master ~]# rpm -qa |grep java
[root@master ~]# 

2.slave1

[root@slave1 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave1 ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@slave1 ~]# rpm -qa |grep java
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave2 ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@slave2 ~]# rpm -qa |grep java
[root@slave2 ~]# 

Install the new JDK

Step 1: Unpack the installation package
1.master

[root@master software]# tar -zxvf jdk-8u152-linux-x64.tar.gz  -C /usr/local/src/

2.slave1

3.slave2
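
The slave1/slave2 steps are left blank in the original; one way to fill them (consistent with the troubleshooting note near the end of this guide) is to copy the unpacked JDK from master after it has been renamed in Step 2 below. A sketch, assuming root SSH access to the slaves:

# on master, after the mv /usr/local/src/jdk1.8.0_152 -> jdk in Step 2
scp -r /usr/local/src/jdk root@slave1:/usr/local/src/
scp -r /usr/local/src/jdk root@slave2:/usr/local/src/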

Step 2: Configure the JDK environment variables
1.master

[root@master src]# ls
jdk1.8.0_152
[root@master src]# mv jdk1.8.0_152/ jdk    // rename the directory (the versioned name is too long)
[root@master jdk]# vim  /etc/profile
// press G (capital) to jump to the last line, then add the two lines below
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
[root@master jdk]# source /etc/profile
// reload the environment variables
[root@master jdk]# java -version
java version "1.8.0_152"
Java(TM) SE Runtime Environment (build 1.8.0_152-b16)
Java HotSpot(TM) 64-Bit Server VM (build 25.152-b16, mixed mode)
[root@master jdk]# 
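
If you prefer not to edit /etc/profile interactively, the same two lines can be appended non-interactively; a sketch (run as root) that is equivalent to the vim edit above:

cat >> /etc/profile <<'EOF'
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
EOF
source /etc/profile
java -version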

hadoop

Install the Hadoop package

1.master

[root@master software]# tar -xzf hadoop-2.7.1.tar.gz  -C /usr/local/src/
[root@master software]# cd  /usr/local/src/
[root@master src]# mv hadoop-2.7.1/ hadoop

Update the environment variables

[root@master hadoop]# tail -n 3 /etc/profile
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@master hadoop]# source /etc/profile   // reload the environment variables
[root@master hadoop]# hadoop
Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]
  CLASSNAME            run the class named CLASSNAME
 or
  where COMMAND is one of:
  fs                   run a generic filesystem user client
  version              print the version
  jar <jar>            run a jar file
                       note: please use "yarn jar" to launch
                             YARN applications, not this command.
  checknative [-a|-h]  check native hadoop and compression libraries availability
  distcp <srcurl> <desturl> copy file or directories recursively
  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive
  classpath            prints the class path needed to get the
                       Hadoop jar and the required libraries
  credential           interact with credential providers
  daemonlog            get/set the log level for each daemon
  trace                view and modify Hadoop tracing settings

Most commands print help when invoked w/o parameters.
[root@master hadoop]# 

Give the hadoop user ownership of the installation directory

[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@master hadoop]# ll /usr/local/src/
total 0
drwxr-xr-x 9 hadoop hadoop 149 Jun 29  2015 hadoop
drwxr-xr-x 8 hadoop hadoop 255 Sep 14  2017 jdk

Configure hadoop-env.sh

[root@master hadoop]# vi etc/hadoop/hadoop-env.sh 
[root@master hadoop]# cat etc/hadoop/hadoop-env.sh |grep JAVA
# The only required environment variable is JAVA_HOME.  All others are
# set JAVA_HOME in this file, so that it is correctly defined on
export JAVA_HOME=/usr/local/src/jdk

Configure the cluster environment

Hostname resolution

1.master

[root@master hadoop]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@master hadoop]# 

2.slave1

[root@slave1 ~]# vim /etc/hosts
[root@slave1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave2 ~]# 
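
With /etc/hosts in place on all three machines, a quick optional check (not in the original transcript) that every hostname resolves and is reachable from each node:

for h in master slave1 slave2; do getent hosts $h && ping -c 1 $h; done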

Passwordless SSH login

Generate a key pair and append it to the node's own authorized_keys
1.master
[root@master ~]# su - hadoop
Last login: Thu Apr 25 17:45:05 CST 2024 on pts/0
[hadoop@master ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2b+pnJuChp6RkHkx9XYqUgEpyhCf8WHBz3dtE67E9lA hadoop@master
The key's randomart image is:
+---[RSA 2048]----+
|....=+o          |
|...=oo o    E    |
|o.o.+o. o..+ .   |
|.. o +o..=* =    |
|  + o ..S+.= .   |
|   o o .  ...    |
|    o. .    .    |
|    .oo .. o o   |
|   .o.   .*oo    |
+----[SHA256]-----+
[hadoop@master ~]$ 
[hadoop@master ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@master ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys 
[hadoop@master ~]$ ll ~/.ssh/
total 12
-rw------- 1 hadoop hadoop 3358 Apr 25 18:30 authorized_keys
-rw------- 1 hadoop hadoop 1679 Apr 25 17:45 id_rsa
-rw-r--r-- 1 hadoop hadoop  395 Apr 25 17:45 id_rsa.pub
[root@master .ssh]# cat /etc/ssh/sshd_config  |grep Pub
PubkeyAuthentication yes  // open this file with vim and uncomment this line
[root@master .ssh]# systemctl restart sshd
[root@master .ssh]# 
2.slave1
[root@slave1 ~]# su - hadoop
Last login: Tue Apr  7 15:37:22 CST 2020 on :0
[hadoop@slave1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:H5ouJBZdnawOEbyx1xFU3tKt8NEm5XlmnpzghOtQMcY hadoop@slave1
The key's randomart image is:
+---[RSA 2048]----+
|     ... ++B.   .|
|      + . *E+o =.|
|     . * o ++o=.O|
|    . = o o ++oO+|
|     . +S... .o+.|
|    o . .+o.     |
|   . o  o ..     |
|      ..         |
|       ..        |
+----[SHA256]-----+
[hadoop@slave1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave1 ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@slave1 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave1 ~]# systemctl restart sshd
3.slave2
[root@slave2 ~]# su - hadoop
Last login: Tue Apr  7 15:37:22 CST 2020 on :0
[hadoop@slave2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Brs8qBwc6izUBbj10eS/AWBZ6Dtxs1EZ8mVc9fM97Yg hadoop@slave2
The key's randomart image is:
+---[RSA 2048]----+
|  .  o*+ .+o.... |
| . o.+o.ooo.    .|
|  o + oo..     ..|
| .   = *o       =|
|  o . = So     .+|
| + o = +  o  . o.|
|o o . =  .  E . .|
|+. o   .         |
|.oo              |
+----[SHA256]-----+
[root@slave2 ~]# su - hadoop
Last login: Thu Apr 25 17:45:29 CST 2024 on pts/0
[hadoop@slave2 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave2 ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@slave2 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave2 ~]# vim /etc/ssh/sshd_config 
[root@slave2 ~]# cat /etc/ssh/sshd_config  |grep Pub
PubkeyAuthentication yes
[root@slave2 ~]# systemctl restart sshd
Exchange SSH keys between the nodes
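
The original leaves this step without commands. A minimal sketch using ssh-copy-id: run the loop as the hadoop user on each of the three nodes (entering the hadoop password once per target) so every node can reach every other node without a password, then verify:

[hadoop@master ~]$ for h in master slave1 slave2; do ssh-copy-id hadoop@$h; done
[hadoop@master ~]$ ssh slave1 hostname    # should print "slave1" without asking for a password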

Hadoop configuration files (all edits are done on master)

hdfs-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim hdfs-site.xml 



<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/src/hadoop/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/src/hadoop/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
</configuration>

core-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim core-site.xml 



<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.130.101:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/src/hadoop/tmp</value>
    </property>
</configuration>

mapred-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# cp mapred-site.xml.template  mapred-site.xml
[root@master hadoop]# vim mapred-site.xml


<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
</configuration>

yarn-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim yarn-site.xml 

<configuration>
<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
</configuration>

Other files and directories

[root@master hadoop]# vim masters 
192.168.130.101
[root@master hadoop]# vim slaves 
slave1
slave2
[root@master hadoop]# mkdir /usr/local/src/hadoop/tmp
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/name -p
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/data -p
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/hadoop/

Copy Hadoop to the other nodes

1.slave1

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave1:/usr/local/src/
The authenticity of host 'slave1 (192.168.130.102)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * * 

2.slave2

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave2:/usr/local/src/
The authenticity of host 'slave2 (192.168.130.103)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
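
Because the hadoop tree was copied as root, it is owned by root on the slaves; the transcript fixes this only for slave1 later (right before starting its DataNode). A sketch that fixes ownership on both slaves now:

[root@slave1 ~]# chown -R hadoop:hadoop /usr/local/src/
[root@slave2 ~]# chown -R hadoop:hadoop /usr/local/src/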
Configure environment variables on the other nodes (slave1, slave2)

1.slave1

[root@slave1 .ssh]# tail -n 8 /etc/profile
unset -f pathmunge

# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave1 .ssh]# 

2.slave2

[root@slave2 ~]# tail -n 8 /etc/profile

# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

[root@slave2 ~]# 

Start the Hadoop cluster

Format the NameNode (on master)

[hadoop@master hadoop]$ bin/hdfs namenode -format
*****
24/04/25 19:41:28 INFO util.ExitUtil: Exiting with status 0
24/04/25 19:41:28 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.130.101
************************************************************/

Start the NameNode (on master)

[hadoop@master hadoop]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
[hadoop@master hadoop]$ jps  // check the running Java processes
4746 NameNode
4782 Jps
[hadoop@master hadoop]$ 

Start the DataNode on slave1

[root@slave1 hadoop]# chown -R  hadoop:hadoop /usr/local/src/
[root@slave1 hadoop]# su - hadoop
[hadoop@slave1 ~]$ source /etc/profile
[hadoop@slave1 src]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
[hadoop@slave1 src]$ jps
4990 Jps
4943 DataNode
[hadoop@slave1 src]$ 

Note:
If you see the following error:

[hadoop@slave1 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
/usr/local/src/hadoop/bin/hdfs: line 304: /usr/local/src/jdk/bin/java: No such file or directory
//then, on master, run `scp -r jdk/ hadoop@slave1:/usr/local/src/jdk` to copy the JDK to slave1

Start the DataNode on slave2

[hadoop@slave2 hadoop]$ source /etc/profile
[hadoop@slave2 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
[hadoop@slave2 hadoop]$ jps
3598 Jps
3551 DataNode
[hadoop@slave2 hadoop]$ 

Start the SecondaryNameNode (on master)

[hadoop@master src]$  hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master src]$ jps
5009 Jps
4746 NameNode
4974 SecondaryNameNode
[hadoop@master src]$ 

Check the HDFS report

[hadoop@master src]$ hdfs dfsadmin -report
Configured Capacity: 94434762752 (87.95 GB)
Present Capacity: 82971066368 (77.27 GB)
DFS Remaining: 82971058176 (77.27 GB)
DFS Used: 8192 (8 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (2):

Name: 192.168.130.103:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5731614720 (5.34 GB)
DFS Remaining: 41485762560 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024


Name: 192.168.130.102:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5732081664 (5.34 GB)
DFS Remaining: 41485295616 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024


[hadoop@master src]$ 

Check node status in a browser

  1. http://master:50070/ (NameNode and DataNode status)
  2. http://master:50090 (SecondaryNameNode status)
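
If no desktop browser is available, the same checks can be done from the command line; a small sketch (the hostname assumes the /etc/hosts entries above, or use the master IP):

curl -s -o /dev/null -w '%{http_code}\n' http://master:50070/   # expect 200 from the NameNode web UI
curl -s -o /dev/null -w '%{http_code}\n' http://master:50090/   # expect 200 from the SecondaryNameNode web UI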

Start all remaining services

[hadoop@master hadoop]$ start-yarn.sh
[hadoop@master hadoop]$ start-dfs.sh
[hadoop@master hadoop]$ jps
34257 NameNode 
34449 SecondaryNameNode 
34494 Jps 
32847 ResourceManager
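
As a final smoke test (not part of the original transcript), you can run one of the bundled example jobs, assuming the NodeManagers on the slaves were started by start-yarn.sh above; the jar path below is the standard location inside the Hadoop 2.7.1 distribution used in this guide:

[hadoop@master hadoop]$ hdfs dfs -mkdir -p /user/hadoop
[hadoop@master hadoop]$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar pi 2 10
# the job should finish with an "Estimated value of Pi is ..." line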
###################################################
[Day 3 of learning Hadoop: setting up Hive - CSDN blog](https://blog.csdn.net/m0_74752717/article/details/137449938?spm=1001.2014.3001.5501)

From: https://www.cnblogs.com/humlogs/p/18150747
