# Based on [2023 黑马程序员 big data course, beginner to hands-on: Hadoop, Hive, and full cloud-platform projects] https://www.bilibili.com/video/BV1WY4y197g7
# Reference: http://t.csdn.cn/DycaL https://blog.csdn.net/weixin_42837961/article/details/106308562
# Reference: http://t.csdn.cn/OxLEQ https://liuyuhang.blog.csdn.net/article/details/106233902
# Reference: other online resources
# Compiled and organized by fellow students
hadoop 3.1.2
zookeeper 3.8.2 - https://www.apache.org/dyn/closer.lua/zookeeper/zookeeper-3.8.2/apache-zookeeper-3.8.2-bin.tar.gz
hbase 2.5.5 - https://www.apache.org/dyn/closer.lua/hbase/2.5.5/hbase-2.5.5-bin.tar.gz
phoenix 5.1.3 - http://www.apache.org/dyn/closer.lua/phoenix/phoenix-5.1.3/phoenix-hbase-2.5-5.1.3-bin.tar.gz
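# A minimal download sketch (an assumption, not part of the original notes: packages are fetched
# as the hadoop user into /home/hadoop from the Apache archive mirror; adjust the URLs if a closer
# mirror is preferred):
cd /home/hadoop
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.8.2/apache-zookeeper-3.8.2-bin.tar.gz
wget https://archive.apache.org/dist/hbase/2.5.5/hbase-2.5.5-bin.tar.gz
wget https://archive.apache.org/dist/phoenix/phoenix-5.1.3/phoenix-hbase-2.5-5.1.3-bin.tar.gz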
# Extract zookeeper
tar -zxvf /home/hadoop/apache-zookeeper-3.8.2-bin.tar.gz -C /export/server/
# Create a symbolic link for zookeeper
ln -s /export/server/apache-zookeeper-3.8.2-bin/ /export/server/zookeeper
# Rename the sample config to zoo.cfg
mv /export/server/zookeeper/conf/zoo_sample.cfg /export/server/zookeeper/conf/zoo.cfg
# Edit zoo.cfg
vim /export/server/zookeeper/conf/zoo.cfg
dataDir=/export/server/zookeeper/zkData # change the directory where ZooKeeper snapshots are stored
# append the ensemble members: server.<myid>=<host>:<peer-port>:<election-port>
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
# Create the zkData directory
mkdir /export/server/zookeeper/zkData
# Create the myid file
touch /export/server/zookeeper/zkData/myid
# Edit the myid file
vim /export/server/zookeeper/zkData/myid
# write 1 on node1, 2 on node2, 3 on node3
1/2/3
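# The steps above are run on node1; ZooKeeper needs the same installation on node2 and node3, each
# with its own myid. A minimal sketch (assumption: passwordless ssh/scp between the nodes is already
# set up, as the zk/hb scripts below also rely on, and /export/server exists on every node):
for i in node2 node3
do
    scp -r /export/server/apache-zookeeper-3.8.2-bin $i:/export/server/
    ssh $i "ln -s /export/server/apache-zookeeper-3.8.2-bin/ /export/server/zookeeper"
done
ssh node2 "echo 2 > /export/server/zookeeper/zkData/myid"
ssh node3 "echo 3 > /export/server/zookeeper/zkData/myid"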
# As root, edit the environment file
vim /etc/profile
# ZOOKEEPER location
export ZOOKEEPER_HOME=/export/server/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin
# Apply the changes
source /etc/profile
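# Quick sanity check (assumption: the same /etc/profile change is repeated on node2 and node3,
# since the zk script below runs "source /etc/profile && zkServer.sh ..." on every node):
echo $ZOOKEEPER_HOME   # expected: /export/server/zookeeper
which zkServer.sh      # expected: /export/server/zookeeper/bin/zkServer.sh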
# Create the zk script: one command to start/stop ZooKeeper on all three nodes
vim /usr/local/bin/zk
#------------------------------------------------------
#!/bin/bash
case $1 in
"start")
    for i in node1 node2 node3
    do
        echo "****************** $i *********************"
        ssh $i "source /etc/profile && zkServer.sh start"
    done
    ;;
"stop")
    for i in node1 node2 node3
    do
        echo "****************** $i *********************"
        ssh $i "source /etc/profile && zkServer.sh stop"
    done
    ;;
esac
#------------------------------------------------------
# Give the zk one-click ZooKeeper script full permissions (executable for everyone)
chmod 777 /usr/local/bin/zk
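# Usage sketch: start the quorum and check each node's role; one leader and two followers are
# expected (assumption: the installation and the /etc/profile changes are in place on all three nodes):
zk start
for i in node1 node2 node3
do
    ssh $i "source /etc/profile && zkServer.sh status"
done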
# As the hadoop user
# Extract hbase
tar -zxvf /home/hadoop/hbase-2.5.5-bin.tar.gz -C /export/server/
# Create a symbolic link for hbase
ln -s /export/server/hbase-2.5.5/ /export/server/hbase
# Edit hbase-env.sh
vim /export/server/hbase/conf/hbase-env.sh
# The java implementation to use. Java 1.8+ required.
# Set the JDK location to use
export JAVA_HOME=/export/server/jdk/
# Tell HBase whether it should manage its own instance of ZooKeeper or not.
# Set to false: use the external ZooKeeper cluster configured above
export HBASE_MANAGES_ZK=false
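# Quick check that both settings are active in hbase-env.sh (assumption: the stock commented-out
# lines were edited in place rather than new lines appended elsewhere):
grep -E "^export (JAVA_HOME|HBASE_MANAGES_ZK)" /export/server/hbase/conf/hbase-env.sh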
# Edit hbase-site.xml
vim /export/server/hbase/conf/hbase-site.xml
# Change this existing property:
<property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
    <description>Run HBase in fully-distributed mode, with one JVM instance per daemon.</description>
</property>
# Add the following properties:
<property>
    <name>hbase.rootdir</name>
    <value>hdfs://node1:8020/hbase</value>
    <description>HBase root directory on HDFS; the NameNode runs on node1, port 8020.</description>
</property>
<!-- Changed after 0.98: earlier versions had no .port property and the default port was 60000 -->
<property>
    <name>hbase.master.port</name>
    <value>16000</value>
    <description>Port 16000 that the HBase master (node1) binds to.</description>
</property>
<property>
    <name>hbase.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
    <description>Comma-separated list of servers in the ZooKeeper ensemble.</description>
</property>
<property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/export/server/zookeeper/zkData</value>
    <description>Property from ZooKeeper's zoo.cfg: the directory where snapshots are stored.</description>
</property>
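# Note: all of the properties above go inside the single <configuration>...</configuration> element
# of hbase-site.xml. A quick well-formedness check (assumption: xmllint from libxml2 is installed;
# skip it if not available):
xmllint --noout /export/server/hbase/conf/hbase-site.xml && echo "hbase-site.xml is well-formed"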
# Edit the HBase region server list (one host per line)
vim /export/server/hbase/conf/regionservers
node1
node2
node3
# Symlink hadoop's core-site.xml and hdfs-site.xml into hbase's conf directory
ln -s /export/server/hadoop/etc/hadoop/core-site.xml /export/server/hbase/conf/core-site.xml
ln -s /export/server/hadoop/etc/hadoop/hdfs-site.xml /export/server/hbase/conf/hdfs-site.xml
# As root, edit the environment file
vim /etc/profile
# HBASE location
export HBASE_HOME=/export/server/hbase
export PATH=$PATH:$HBASE_HOME/bin
# Apply the changes
source /etc/profile
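# Quick check that the HBase binaries are now on PATH (assumption: run as the hadoop user after
# re-sourcing /etc/profile; repeat the /etc/profile change on node2 and node3 as well):
which hbase      # expected: /export/server/hbase/bin/hbase
hbase version    # should report HBase 2.5.5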
# Important: copy htrace-core4-4.1.0-incubating.jar into hbase's lib directory
cp /export/server/hbase/lib/client-facing-thirdparty/htrace-core4-4.1.0-incubating.jar /export/server/hbase/lib/
# Edit hbase-env.sh again
vim /export/server/hbase/conf/hbase-env.sh
# Keep HBase from adding Hadoop's jars to its classpath
export HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP="true"
# Copy phoenix-server-hbase-2.5-5.1.3.jar into hbase's lib directory
cp /home/hadoop/phoenix-server-hbase-2.5-5.1.3.jar /export/server/hbase/lib/
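# The HBase installation, including the two jars copied above, has to be present on every region
# server. A minimal distribution sketch (assumption: node2 and node3 share the same
# /export/server/hadoop layout, so the symlinked core-site.xml/hdfs-site.xml resolve there too):
for i in node2 node3
do
    scp -r /export/server/hbase-2.5.5 $i:/export/server/
    ssh $i "ln -s /export/server/hbase-2.5.5/ /export/server/hbase"
done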
# Create the hb script: one command to start/stop HDFS, ZooKeeper, and HBase
vim /usr/local/bin/hb
#------------------------------------------------------
#!/bin/bash
case $1 in
"start")
    echo "----------/export/server/hadoop/sbin/start-dfs.sh----------"
    ssh node1 "/export/server/hadoop/sbin/start-dfs.sh"
    echo "----------/usr/local/bin/zk start----------"
    ssh node1 "/usr/local/bin/zk start"
    echo "----------/export/server/hbase/bin/start-hbase.sh----------"
    ssh node1 "/export/server/hbase/bin/start-hbase.sh"
    ;;
"stop")
    echo "----------/export/server/hbase/bin/stop-hbase.sh----------"
    ssh node1 "/export/server/hbase/bin/stop-hbase.sh"
    echo "----------/usr/local/bin/zk stop----------"
    ssh node1 "/usr/local/bin/zk stop"
    echo "----------/export/server/hadoop/sbin/stop-dfs.sh----------"
    ssh node1 "/export/server/hadoop/sbin/stop-dfs.sh"
    ;;
esac
#------------------------------------------------------
# Give the hb one-click HDFS/ZooKeeper/HBase script full permissions (executable for everyone)
chmod 777 /usr/local/bin/hb
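# Usage sketch: bring the whole stack up and verify the daemons with jps (assumption: NameNode and
# HMaster run on node1, while DataNode, HRegionServer and QuorumPeerMain run on all three nodes;
# adjust to your actual HDFS layout):
hb start
for i in node1 node2 node3
do
    echo "****************** $i *********************"
    ssh $i "source /etc/profile && jps"
done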
# Phoenix SQL statements for HBase
-- Make DBeaver use the DEFAULT schema instead of SYSTEM
use DEFAULT;
CREATE TABLE IF NOT EXISTS student(
id VARCHAR primary key,
name VARCHAR);
-- Run the two statements above together as a script: in DBeaver, open the SQL editor and use the third option, Execute SQL Script
UPSERT INTO student (id,name) values ('20216666','fsz'); -- insert or update
select * from student; -- query
delete from student where id='20216666'; -- delete
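# Besides DBeaver, the Phoenix sqlline.py command-line client can run the same statements. A minimal
# sketch (assumption: the phoenix-hbase-2.5-5.1.3-bin.tar.gz listed above is extracted to
# /export/server/phoenix and Python is available on node1):
tar -zxvf /home/hadoop/phoenix-hbase-2.5-5.1.3-bin.tar.gz -C /export/server/
ln -s /export/server/phoenix-hbase-2.5-5.1.3-bin/ /export/server/phoenix
/export/server/phoenix/bin/sqlline.py node1,node2,node3:2181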