首页 > 其他分享 >HBase部署

HBase部署

时间:2023-05-28 22:12:23浏览次数:26  
标签:src 部署 hadoop master usr hbase local HBase

HBase部署-基于Hbase自带的Zookeeper

时间同步

#查看三台主机时间是否同步,可容忍5秒内偏差
[root@master ~]# for i in master slave1 slave2;do ssh root@$i 'date';done
Thu Apr  6 17:26:38 CST 2023
Thu Apr  6 17:26:38 CST 2023
Thu Apr  6 17:26:38 CST 2023

#若要实现时间同步,则安装ntpdate包
[root@master ~]# yum -y install ntpdate
#执行命令
[root@master ~]# ntpdate time1.aliyun.com

#注意三个节点都要做

解压安装包,改名

[root@master ~]# ls
apache-hive-2.0.0-bin.tar.gz  mysql
hadoop-2.7.1.tar.gz           mysql-5.7.18.zip
hbase-1.2.1-bin.tar.gz        mysql-connector-java-5.1.46.jar
jdk-8u152-linux-x64.tar.gz    zookeeper-3.4.8.tar.gz
[root@master ~]# tar xf hbase-1.2.1-bin.tar.gz -C /usr/local/src
[root@master ~]# cd /usr/local/src/
[root@master src]# ls
hadoop  hbase-1.2.1  hive  jdk  zookeeper
[root@master src]# mv hbase-1.2.1 hbase
[root@master src]# ls
hadoop  hbase  hive  jdk  zookeeper
[root@master ~]# cd /usr/local/src/hbase/conf/
[root@master conf]# ls
hadoop-metrics2-hbase.properties  hbase-env.sh      hbase-site.xml    regionservers
hbase-env.cmd                     hbase-policy.xml  log4j.properties

配置hbase环境变量

#创建hbase.sh文件
[root@master ~]# vi /etc/profile.d/hbase.sh

#添加如下内容
export HBASE_HOME=/usr/local/src/hbase
export PATH=${HBASE_HOME}/bin:$PATH

#进入/usr/local/src/hbase/conf/目录,在文件hbase-env.sh中修改以下内容
export JAVA_HOME=/usr/local/src/jdk
export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop
export HBASE_MANAGES_ZK=true
#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"

在master节点配置hbase-site.xml

[root@master conf]# vi hbase-site.xml 
#在<configuration></configuration>添加
<configuration>
        <property>
                <name>hbase.rootdir</name>
                <value>hdfs://master:9000/hbase</value>
        </property>
        <property>
                <name>hbase.master.info.port</name>
                <value>60010</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.clientPort</name>
                <value>2181</value>
        </property>
        <property>
                <name>zookeeper.session.timeout</name>
                <value>120000</value>
        </property>
        <property>
                <name>hbase.zookeeper.quorum</name>
                <value>master,slave1,slave2</value>
        </property>
        <property>
                <name>hbase.tmp.dir</name>
                <value>/usr/local/src/hbase/tmp</value>
        </property>
        <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
        </property>
</configuration>

在master节点修改 regionservers文件

[root@master conf]# vi regionservers 
#删除 localhost,每一行写一个 slave 节点主机名
slave1
slave2

在 master 节点创建hbase.tmp.dir目录

[root@master hbase]# mkdir /usr/local/src/hbase/tmp
[root@master hbase]# ls
bin  CHANGES.txt  conf  docs  hbase-webapps  LEGAL  lib  LICENSE.txt  NOTICE.txt  README.txt  tmp

将 master上的 hbase 安装文件同步到 slave1 slave2

[root@master ~]# scp -r /usr/local/src/hbase/ root@slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/hbase/ root@slave2:/usr/local/src/

在所有节点修改hbase 目录权限

#master
[root@master ~]# chown -R hadoop.hadoop /usr/local/src/
[root@master ~]# ll /usr/local/src/
total 4
drwxr-xr-x. 12 hadoop hadoop  183 Mar 15 16:11 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 20:43 hbase
drwxr-xr-x   9 hadoop hadoop  170 Mar 25 18:14 hive
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:56 zookeeper

#slave1
[root@slave1 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave1 ~]# ll /usr/local/src/
total 4
drwxr-xr-x  12 hadoop hadoop  183 Mar 15 16:14 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 20:48 hbase
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:58 zookeeper

#slave2
[root@slave2 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave2 ~]# ll /usr/local/src/
total 4
drwxr-xr-x  12 hadoop hadoop  183 Mar 15 16:14 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 20:48 hbase
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:58 zookeeper

将master配置的hbase.sh环境变量传到slave节点

[root@master ~]# scp /etc/profile.d/hbase.sh root@slave1:/etc/profile.d/
hbase.sh                                                                 100%   75    67.7KB/s   00:00    
[root@master ~]# scp /etc/profile.d/hbase.sh root@slave2:/etc/profile.d/
hbase.sh                                                                 100%   75    75.3KB/s   00:00 

#source导入环境变量后,查看三台主机环境变量
[root@master ~]# source /etc/profile.d/hbase.sh 
[root@master ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hive/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/src/hive/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

[root@slave1 ~]# source /etc/profile.d/hbase.sh 
[root@slave1 ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

[root@slave2 ~]# source /etc/profile.d/hbase.sh 
[root@slave2 ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

切换hadoop用户

[root@master ~]# su - hadoop
Last login: Wed Mar 29 10:20:38 CST 2023 on pts/0
[hadoop@master ~]$ 

[root@slave1 ~]# su - hadoop
Last login: Wed Mar 29 10:20:41 CST 2023 on pts/0
[hadoop@slave1 ~]$ 

[root@slave2 ~]# su - hadoop
Last login: Wed Mar 29 10:20:48 CST 2023 on pts/0
[hadoop@slave2 ~]$ 

启动 HBase

#先启动 Hadoop,然后启动 HBase
#启动Hadoop
[hadoop@master ~]$ start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
192.168.88.30: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
192.168.88.20: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
192.168.88.30: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
192.168.88.20: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
[hadoop@master ~]$ jps
1497 NameNode
1851 ResourceManager
1692 SecondaryNameNode
2111 Jps

[hadoop@slave1 ~]$ jps
1367 NodeManager
1256 DataNode
1487 Jps

[hadoop@slave2 ~]$ jps
1428 NodeManager
1317 DataNode
1549 Jps

#启动Hbase
[hadoop@master ~]$ start-hbase.sh 
slave2: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-slave2.out
master: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-master.out
slave1: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-slave1.out
starting master, logging to /usr/local/src/hbase/logs/hbase-hadoop-master-master.out
slave2: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave2.out
slave1: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave1.out
[hadoop@master ~]$ jps
2519 Jps
1497 NameNode
2393 HMaster
1851 ResourceManager
1692 SecondaryNameNode
2335 HQuorumPeer

[hadoop@slave1 ~]$ jps
1600 HRegionServer
1367 NodeManager
1256 DataNode
1533 HQuorumPeer
1757 Jps

[hadoop@slave2 ~]$ jps
1428 NodeManager
1317 DataNode
1848 Jps
1594 HQuorumPeer
1661 HRegionServer

在浏览器输入 master:60010 出现如下图所示的界面

image-20230406210732854

HBase部署-基于主机安装的Zookeeper

实现时间同步

#安装ntpdate包
[root@master ~]# yum -y install ntpdate
#执行命令
[root@master ~]# ntpdate time1.aliyun.com
#三个节点都要做
#查看三台主机时间是否同步,可容忍5秒内偏差
[root@master ~]# for i in master slave1 slave2;do ssh root@$i 'date';done
Thu Apr  6 22:13:47 CST 2023
Thu Apr  6 22:13:47 CST 2023
Thu Apr  6 22:13:47 CST 2023

解压安装包,改名

[root@master ~]# ls
apache-hive-2.0.0-bin.tar.gz  mysql
hadoop-2.7.1.tar.gz           mysql-5.7.18.zip
hbase-1.2.1-bin.tar.gz        mysql-connector-java-5.1.46.jar
jdk-8u152-linux-x64.tar.gz    zookeeper-3.4.8.tar.gz
[root@master ~]# tar xf hbase-1.2.1-bin.tar.gz -C /usr/local/src
[root@master ~]# cd /usr/local/src/
[root@master src]# ls
hadoop  hbase-1.2.1  hive  jdk  zookeeper
[root@master src]# mv hbase-1.2.1 hbase
[root@master src]# ls
hadoop  hbase  hive  jdk  zookeeper
[root@master ~]# cd /usr/local/src/hbase/conf/
[root@master conf]# ls
hadoop-metrics2-hbase.properties  hbase-env.sh      hbase-site.xml    regionservers
hbase-env.cmd                     hbase-policy.xml  log4j.properties

配置hbase环境变量

#创建hbase.sh文件
[root@master ~]# vi /etc/profile.d/hbase.sh

#添加如下内容
export HBASE_HOME=/usr/local/src/hbase
export PATH=${HBASE_HOME}/bin:$PATH

#进入/usr/local/src/hbase/conf/目录,在文件hbase-env.sh中修改以下内容
export JAVA_HOME=/usr/local/src/jdk
export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop
export HBASE_MANAGES_ZK=false			#不使用默认的zookeeper
#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"

配置Hbase

在master 节点配置hbase-site.xml

[root@master conf]# vi hbase-site.xml 
#在<configuration></configuration>添加
<configuration>
        <property>
                <name>hbase.rootdir</name>
                <value>hdfs://master:9000/hbase</value>
        </property>
        <property>
                <name>hbase.master.info.port</name>
                <value>60010</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.clientPort</name>
                <value>2181</value>
        </property>
        <property>
                <name>zookeeper.session.timeout</name>
                <value>120000</value>
        </property>
        <property>
                <name>hbase.zookeeper.quorum</name>
                <value>master,slave1,slave2</value>
        </property>
        <property>
                <name>hbase.tmp.dir</name>
                <value>/usr/local/src/hbase/tmp</value>
        </property>
        <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
        </property>
</configuration>

在master节点修改regionservers文件

[root@master conf]# vi regionservers 
#删除 localhost,每一行写一个 slave 节点主机名
slave1
slave2

在master节点创建hbase.tmp.dir目录

[root@master hbase]# mkdir /usr/local/src/hbase/tmp
[root@master hbase]# ls
bin  CHANGES.txt  conf  docs  hbase-webapps  LEGAL  lib  LICENSE.txt  NOTICE.txt  README.txt  tmp

将master上的hbase安装文件同步到slave1 slave2

[root@master ~]# scp -r /usr/local/src/hbase/ root@slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/hbase/ root@slave2:/usr/local/src/

在所有节点修改hbase目录权限

#master
[root@master ~]# chown -R hadoop.hadoop /usr/local/src/
[root@master ~]# ll /usr/local/src/
total 4
drwxr-xr-x. 12 hadoop hadoop  183 Mar 15 16:11 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 22:43 hbase
drwxr-xr-x   9 hadoop hadoop  170 Mar 25 18:14 hive
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:56 zookeeper

#slave1
[root@slave1 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave1 ~]# ll /usr/local/src/
total 4
drwxr-xr-x  12 hadoop hadoop  183 Mar 15 16:14 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 22:48 hbase
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:58 zookeeper

#slave2
[root@slave2 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave2 ~]# ll /usr/local/src/
total 4
drwxr-xr-x  12 hadoop hadoop  183 Mar 15 16:14 hadoop
drwxr-xr-x   8 hadoop hadoop  171 Apr  6 22:48 hbase
drwxr-xr-x.  8 hadoop hadoop  255 Sep 14  2017 jdk
drwxr-xr-x  12 hadoop hadoop 4096 Mar 29 09:58 zookeeper

将master配置的hbase.sh环境变量传到slave节点

[root@master ~]# scp /etc/profile.d/hbase.sh root@slave1:/etc/profile.d/
hbase.sh                                                                 100%   75    67.7KB/s   00:00    
[root@master ~]# scp /etc/profile.d/hbase.sh root@slave2:/etc/profile.d/
hbase.sh                                                                 100%   75    75.3KB/s   00:00 

#source导入环境变量后,查看三台主机环境变量
[root@master ~]# source /etc/profile.d/hbase.sh 
[root@master ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hive/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/src/hive/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

[root@slave1 ~]# source /etc/profile.d/hbase.sh 
[root@slave1 ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

[root@slave2 ~]# source /etc/profile.d/hbase.sh 
[root@slave2 ~]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/zookeeper/bin:/usr/local/src/jdk/bin:/root/bin

切换hadoop用户

[root@master ~]# su - hadoop
Last login: Wed Mar 29 10:20:38 CST 2023 on pts/0
[hadoop@master ~]$ 

[root@slave1 ~]# su - hadoop
Last login: Wed Mar 29 10:20:41 CST 2023 on pts/0
[hadoop@slave1 ~]$ 

[root@slave2 ~]# su - hadoop
Last login: Wed Mar 29 10:20:48 CST 2023 on pts/0
[hadoop@slave2 ~]$ 

启动服务

#先启动 Hadoop,然后启动 ZooKeeper,最后启动 HBase
#启动Hadoop
[hadoop@master ~]$ start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
192.168.88.30: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
192.168.88.20: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
192.168.88.30: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
192.168.88.20: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
[hadoop@master ~]$ jps
1497 NameNode
1851 ResourceManager
1692 SecondaryNameNode
2111 Jps

[hadoop@slave1 ~]$ jps
1367 NodeManager
1256 DataNode
1487 Jps

[hadoop@slave2 ~]$ jps
1428 NodeManager
1317 DataNode
1549 Jps

#启动zookeeper
[hadoop@master ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@master ~]$ jps
1473 NameNode
1827 ResourceManager
1640 SecondaryNameNode
2136 Jps
2109 QuorumPeerMain

[hadoop@slave1 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@slave1 ~]$ jps
1301 DataNode
1369 NodeManager
1582 Jps
1551 QuorumPeerMain

[hadoop@slave2 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@slave2 ~]$ jps
1296 DataNode
1571 Jps
1364 NodeManager
1546 QuorumPeerMain

#启动 HBase
[hadoop@master conf]$ start-hbase.sh 
starting master, logging to /usr/local/src/hbase/logs/hbase-hadoop-master-master.out
slave1: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave1.out
slave2: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave2.out
[hadoop@master conf]$ jps
1473 NameNode
1827 ResourceManager
2691 Jps
1640 SecondaryNameNode
2474 HMaster
2109 QuorumPeerMain

[hadoop@slave1 conf]$ jps
1301 DataNode
1369 NodeManager
1771 Jps
1629 HRegionServer
1551 QuorumPeerMain

[hadoop@slave2 conf]$ jps
1296 DataNode
1762 Jps
1619 HRegionServer
1364 NodeManager
1546 QuorumPeerMain

在浏览器输入 master:60010 出现如下图所示的界面

image-20230406225101137

HBase常用 Shell 命令

进入 HBase 命令行

[hadoop@master ~]$ hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016

hbase(main):001:0> 

建立表 scores,两个列簇:grade和course

hbase(main):001:0> create 'scores','grade','course'
0 row(s) in 2.4730 seconds

=> Hbase::Table - scores

image-20230406225214559

查看数据库状态

hbase(main):002:0> status
1 active master, 0 backup masters, 2 servers, 0 dead, 1.5000 average load

查看数据库版本

hbase(main):003:0> version
1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016

查看表

hbase(main):004:0> list
TABLE                                                                                                      
scores                                                                                                     
1 row(s) in 0.0200 seconds

=> ["scores"]

插入记录 1:jie,grade:146cloud

hbase(main):005:0> put 'scores','jie','grade:','146cloud'
0 row(s) in 0.1690 seconds

插入记录2:jie,course:math,86

hbase(main):006:0> put 'scores','jie','course:math','86'
0 row(s) in 0.0250 seconds

插入记录3:jie,course:cloud,92

hbase(main):007:0> put 'scores','jie','course:cloud','92'
0 row(s) in 0.0120 seconds

插入记录 4:shi,grade:133soft

hbase(main):008:0> put 'scores','shi','grade:','133soft'
0 row(s) in 0.0110 seconds

插入记录5:shi,course:math,87

hbase(main):009:0> put 'scores','shi','course:math','87'
0 row(s) in 0.0080 seconds

插入记录6:shi,course:cloud,96

hbase(main):010:0> put 'scores','shi','course:cloud','96'
0 row(s) in 0.0100 seconds

读取jie的记录

hbase(main):011:0> get 'scores','jie'
COLUMN                      CELL                                                                           
 course:cloud               timestamp=1680787218638, value=92                                              
 course:math                timestamp=1680787127230, value=86                                              
 grade:                     timestamp=1680787034649, value=146cloud                                        
3 row(s) in 0.0500 seconds

读取jie的班级

hbase(main):012:0> get 'scores','jie','grade'
COLUMN                      CELL                                                                           
 grade:                     timestamp=1680787034649, value=146cloud                                        
1 row(s) in 0.0100 seconds

查看整个表记录

hbase(main):013:0> scan 'scores'
ROW                         COLUMN+CELL                                                                    
 jie                        column=course:cloud, timestamp=1680787218638, value=92                         
 jie                        column=course:math, timestamp=1680787127230, value=86                          
 jie                        column=grade:, timestamp=1680787034649, value=146cloud                         
 shi                        column=course:cloud, timestamp=1680787431479, value=96                         
 shi                        column=course:math, timestamp=1680787369769, value=87                          
 shi                        column=grade:, timestamp=1680787290357, value=133soft                          
2 row(s) in 0.0400 seconds

按列查看表记录

hbase(main):014:0> scan 'scores',{COLUMN=>'course'}
ROW                         COLUMN+CELL                                                                    
 jie                        column=course:cloud, timestamp=1680787218638, value=92                         
 jie                        column=course:math, timestamp=1680787127230, value=86                          
 shi                        column=course:cloud, timestamp=1680787431479, value=96                         
 shi                        column=course:math, timestamp=1680787369769, value=87                          
2 row(s) in 0.0220 seconds

删除指定记录

hbase(main):015:0> delete 'scores','shi','grade'
0 row(s) in 0.0270 seconds

删除后,执行scan命令

hbase(main):016:0> scan 'scores'
ROW                         COLUMN+CELL                                                                    
 jie                        column=course:cloud, timestamp=1680787218638, value=92                         
 jie                        column=course:math, timestamp=1680787127230, value=86                          
 jie                        column=grade:, timestamp=1680787034649, value=146cloud                         
 shi                        column=course:cloud, timestamp=1680787431479, value=96                         
 shi                        column=course:math, timestamp=1680787369769, value=87                          
2 row(s) in 0.0300 seconds

增加新的列簇

hbase(main):017:0> alter 'scores',NAME=>'age'
Updating all regions with the new schema...
0/1 regions updated.
1/1 regions updated.
Done.
0 row(s) in 2.9720 seconds

查看表结构

hbase(main):018:0> describe 'scores'
Table scores is ENABLED                                                                                    
scores                                                                                                     
COLUMN FAMILIES DESCRIPTION                                                                                
{NAME => 'age', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE',
 DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE =>
 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                                                   
{NAME => 'course', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALS
E', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE
 => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                                                
{NAME => 'grade', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE
', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE 
=> 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                                                 
3 row(s) in 0.0240 seconds

删除列簇

hbase(main):019:0> alter 'scores',NAME=>'age',METHOD=>'delete'
Updating all regions with the new schema...
1/1 regions updated.
Done.
0 row(s) in 2.1690 seconds

删除表

hbase(main):020:0> disable 'scores'
0 row(s) in 2.2660 seconds

hbase(main):021:0> drop 'scores'
0 row(s) in 1.2680 seconds

hbase(main):022:0> list
TABLE                                                                                                      
0 row(s) in 0.0070 seconds

=> []

退出

hbase(main):023:0> quit
[hadoop@master ~]$ 

关闭所有服务

在 master 节点关闭 HBase

[hadoop@master ~]$ stop-hbase.sh 
stopping hbase...............
[hadoop@master ~]$ jps
1473 NameNode
1827 ResourceManager
1640 SecondaryNameNode
2109 QuorumPeerMain
3311 Jps

[hadoop@slave1 ~]$ jps
1301 DataNode
1369 NodeManager
1933 Jps
1551 QuorumPeerMain

[hadoop@slave2 ~]$ jps
1296 DataNode
1922 Jps
1364 NodeManager
1546 QuorumPeerMain

在所有节点关闭 ZooKeeper

[hadoop@master ~]$ zkServer.sh stop
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED
[hadoop@master ~]$ jps
1473 NameNode
1827 ResourceManager
1640 SecondaryNameNode
3338 Jps

[hadoop@slave1 ~]$ zkServer.sh stop
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED
[hadoop@slave1 ~]$ jps
1301 DataNode
1369 NodeManager
1964 Jps

[hadoop@slave2 ~]$ zkServer.sh stop
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED
[hadoop@slave2 ~]$ jps
1296 DataNode
1953 Jps
1364 NodeManager

在 master 节点关闭 Hadoop

[hadoop@master ~]$ stop-all.sh 
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
master: stopping namenode
192.168.88.20: stopping datanode
192.168.88.30: stopping datanode
Stopping secondary namenodes [0.0.0.0]
0.0.0.0: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
192.168.88.20: stopping nodemanager
192.168.88.30: stopping nodemanager
no proxyserver to stop
[hadoop@master ~]$ jps
3771 Jps

[hadoop@slave1 ~]$ jps
2043 Jps

[hadoop@slave2 ~]$ jps
2031 Jps

标签:src,部署,hadoop,master,usr,hbase,local,HBase
From: https://www.cnblogs.com/skyrainmom/p/17438944.html

相关文章

  • Hadoop全分布部署
    安装包下载(百度网盘)链接:https://pan.baidu.com/s/1XrnbpNNqcG20QG_hL4RJoQ?pwd=aec9提取码:aec9基础配置(所有节点)关闭防火墙,selinux安全子系统#关闭防火墙,设置开机自动关闭[root@localhost~]#systemctldisable--nowfirewalldRemoved/etc/systemd/system/multi-user......
  • HA高可用集群部署
    HA高可用集群部署高可用ZooKeeper集群部署zookeeper安装部署注意:需要安装jdk,但jdk已经在第4章装过,这里直接装zookeeper#解压并安装zookeeper[root@master~]#lsanaconda-ks.cfgapache-hive-2.0.0-bin.tar.gzhadoop-2.7.1.tar.gzjdk-8u152-linux-x64.tar.gzmysql-comm......
  • jenkins+gitlab 实现代码自动部署
    Jenkins设置:点击:ManageJenkins:点击:ManageCredentials点击:StoresscopedtoJenkins下的全局点击:添加凭据添加gitlab访问凭据只需要填写如下三个即可。这里填写的是gitlab的账户,不是linux系统账户,这个gitlab账户要对你所需要部署的项目有权限才可以;设置ssh账户密码:选择Mana......
  • 哲讯一文解答SAP S/4HANA的四种部署方式(MTE,STE,HEC,On-Premise)
    SAPS/4HANA是SAP的第4代ERP产品,也是SAP目前最主推的产品。虽然目前SAP推迟了ECC版本的截止支持时间,但越来越多的企业已经开始考虑,如何将传统 SAPECC系统升级或迁移至S/4HANA这个问题。以下将介绍SAPS/4HANA的4种部署方式,并辨析他们之间的不同之处。一、部署方式SAPS/4HANA主要......
  • centos7上Hadoop2.7.2完全分布式部署
    1.规划node1         node2           node3datanode       datanode         datanodenamenode     resourcemanager  secondarynamenodenodemanager   nodemanager     no......
  • 无法在web.xml或使用此应用程序部署的jar文件中解析绝对uri:[http://java.sun.com/jsp/
     第一个错误 刷新后出现第二个 org.apache.jasper.JasperException: java.lang.ClassNotFoundException: org.apache.jsp.admin.adminFace_jsp 解决方法:1.1检查WEB-INF下的lib文件夹里是否导入了写jstl-1.2.jar(jstl.jar),standard1.1.jar(standard.jar)如果没有......
  • centos7.9上hadoop-2.7.2伪分布式部署
    1.安装jdk1.1在Oracle官网上现在jdk1.8,然后上传到Linux服务器中1.2 安装jdk rpm-ivhjdk-8u371-linux-x64.rpm2创建部署用户hadoopuseradd-d/hadoophadoopecho123|passwd--stdinhadoop3修改/etc/hosts4使用Hadoop用户上传hadoop安装包hadoop-2.7......
  • docker部署nacos集群
    1.环境准备准备4台服务器,一台安装nginx和mysql另三台做集群使用IP服务操作系统192.168.3.215Nginx/MysqlCentOS7.9192.168.3.216Nacos-1CentOS7.9192.168.3.217Nacos-2CentOS7.9192.168.3.218Nacos-3CentOS7.9     mysql选用5.7......
  • Linux部署ArcGIS Server 10.7.1简要手册
    一、经过测试可以正常安装并运行ArcGISServer10.7.1的发行版:CentOS7.9,Ubuntu/UbuntuServer22.04.2LTS,RedhatEnterpriseLinux9.1二、ArcGISServer使用的端口:6443,6080,6006,1098,6099,确保这些端口没有被其他程序占用,否则安装完毕之后,管理页面无法打开,详情见官方链接查看......
  • k8s helm 部署kong
    k8s部署kong1.首先对比k8s与helm安装版本[root@VM-8-15-centoskong]#helmversionWARNING:Kubernetesconfigurationfileisgroup-readable.Thisisinsecure.Location:/root/.kube/configWARNING:Kubernetesconfigurationfileisworld-readable.Thisisinsecure......