首页 > 系统相关 > centos7 使用docker-compose搭建hadoop集群

centos7 使用docker-compose搭建hadoop集群

时间:2023-03-08 16:24:29浏览次数:56  
标签:compose env resourcemanager 0.0 hadoop centos7 CONF namenode

1,安装docker和docker-compose

2,在linux服务器上创建hadoop文件夹,在hadoop文件夹下创建docker-compose.yml文件

docker-compose.yml文件内容如下:

# docker-compose definition for a Hadoop 3.2.1 cluster (bde2020 images):
# 1 namenode, 1 resourcemanager, 1 historyserver, 1 nodemanager, 3 datanodes,
# all attached to a dedicated bridge network.
version: '3'
services:
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    hostname: namenode
    restart: always
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of "digits:digits"
      # port mappings, and for consistency with the other services below.
      - "9870:9870"  # HDFS NameNode web UI
      - "9000:9000"  # HDFS NameNode RPC (must match fs.defaultFS in hadoop.env)
    volumes:
      - ./hadoop/data/hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env
    networks:
      - hadoop_net

  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    hostname: resourcemanager
    restart: always
    ports:
      - "5888:5888"  # YARN ResourceManager web UI (remapped from default 8088)
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    environment:
      - YARN_CONF_yarn_resourcemanager_webapp_address=0.0.0.0:5888
    env_file:
      - ./hadoop.env
    networks:
      - hadoop_net

  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    restart: always
    hostname: historyserver
    ports:
      - "8188:8188"  # YARN timeline / application history web UI
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    volumes:
      - ./hadoop/data/hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env
    networks:
      - hadoop_net

  nodemanager1:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    restart: always
    container_name: nodemanager1
    hostname: nodemanager1
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    env_file:
      - ./hadoop.env
    ports:
      - "8042:8042"  # NodeManager web UI
    networks:
      - hadoop_net

  # Each datanode gets distinct host-side ports (data / IPC / HTTP) so the
  # three containers can coexist on one host; only the HTTP port is published.
  datanode1:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    restart: always
    container_name: datanode1
    hostname: datanode1
    depends_on:
      - namenode
    ports:
      - "5642:5642"
    volumes:
      - ./hadoop/data/hadoop_datanode1:/hadoop/dfs/data
    env_file:
      - ./hadoop.env
    environment:
      - HDFS_CONF_dfs_datanode_address=0.0.0.0:5640
      - HDFS_CONF_dfs_datanode_ipc_address=0.0.0.0:5641
      - HDFS_CONF_dfs_datanode_http_address=0.0.0.0:5642
    networks:
      - hadoop_net

  datanode2:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    restart: always
    container_name: datanode2
    hostname: datanode2
    depends_on:
      - namenode
    ports:
      - "5645:5645"
    volumes:
      - ./hadoop/data/hadoop_datanode2:/hadoop/dfs/data
    env_file:
      - ./hadoop.env
    environment:
      - HDFS_CONF_dfs_datanode_address=0.0.0.0:5643
      - HDFS_CONF_dfs_datanode_ipc_address=0.0.0.0:5644
      - HDFS_CONF_dfs_datanode_http_address=0.0.0.0:5645
    networks:
      - hadoop_net

  datanode3:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    restart: always
    container_name: datanode3
    hostname: datanode3
    depends_on:
      - namenode
    ports:
      - "5648:5648"
    volumes:
      - ./hadoop/data/hadoop_datanode3:/hadoop/dfs/data
    env_file:
      - ./hadoop.env
    environment:
      - HDFS_CONF_dfs_datanode_address=0.0.0.0:5646
      - HDFS_CONF_dfs_datanode_ipc_address=0.0.0.0:5647
      - HDFS_CONF_dfs_datanode_http_address=0.0.0.0:5648
    networks:
      - hadoop_net

networks:
  hadoop_net:
    driver: bridge

3.在hadoop文件夹下创建hadoop环境配置文件,hadoop.env:

hadoop.env如下

# Hadoop site configuration injected by the bde2020 images: each
# PREFIX_CONF_key variable becomes a property in the matching *-site.xml
# (dots in property names are written as "_", literal dashes as "___").
# NOTE: env_file supports full-line "#" comments only; no inline comments.

# core-site.xml
# fs.defaultFS must use the same port the namenode publishes in
# docker-compose.yml ("9000:9000"); the original 8020 here left the
# published 9000 mapping unreachable. 9000 matches the upstream
# bde2020/docker-hadoop sample configuration.
CORE_CONF_fs_defaultFS=hdfs://namenode:9000
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*

# hdfs-site.xml
HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false

# yarn-site.xml
YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
YARN_CONF_yarn_resourcemanager_resource___tracker_address=resourcemanager:8031

4,在hadoop文件夹下创建hadoop/data,然后在hadoop/hadoop/data文件夹下分别创建hadoop_namenode、hadoop_historyserver、hadoop_datanode1、hadoop_datanode2、hadoop_datanode3作为持久化挂载路径

5,执行docker-compose up -d命令拉取镜像并启动容器

docker-compose ps 出现下面画面说明搭建成功

 

 6,使用ip+端口即可访问hadoop界面

 

标签:compose,env,resourcemanager,0.0,hadoop,centos7,CONF,namenode
From: https://www.cnblogs.com/bigfaceWei/p/17192439.html

相关文章

  • CentOS7使用cp命令覆盖时不提示
    平常使用中bbb这个文件存在,想要使用cp命令把aaa文件的内容覆盖到bbb文件中,就会使用cp-faaabbb-f 的意思是遇到同名的文件,不提示,直接覆盖但是还是会提示[roo......
  • Jetpack Compose 之Pager滑动动画
    一般使用state.animateScrollToPage()滑动的话会瞬间划过去。如果想要加入滑动动画的话需要使用suspendfunScrollableState.animateScrollBy(value:Float,animationSpe......
  • CentOS7加入AD域(sssd)
    作者:独笔孤行官网:​​ ​http://anyamaze.com​​公众号:云实战前言AD域(ActiveDirectory)是Windows服务器的活动目录,在目录中可以收录公司的电脑账号,用户账号,组等信息,......
  • Hadoop安全模式和机架感知
    安全模式集群启动时的一个状态安全模式是HDFS的一种工作状态,处于安全模式的状态下,只向客户端提供文件的只读视图,不接受对命名空间的修改;同时NameNode节点也不会进行数据......
  • Linux(Centos7)下rpm方式安装MySQL
    1.卸载已有MySQL1.1.查看是否已安装mysqlrpm-qa|grep-imysql如果系统已安装,请卸载删除。1.2.删除MySQL删除命令:rpm-e--nodeps包名rpm-evmysql-......
  • centos7部署element-plus
    element-plus官网实在是太不稳定了,所以自己搭建了一套在本地,流程如下:环境安装yuminstallnodeyuminstallnpmnodeinstall-gpnpm从gitee拉取代码wget-P/......
  • Centos7.6下corosync+pacemaker+pcs高可用集群环境安装
    在容灾备份领域,有多种高可用方案,corosync+pacemaker无疑是开源方案中的佼佼者。1.什么是corosync、pacemaker、pcs?Corosync是集群管理套件的一部分,通常会与其他资源管......
  • centos7安装python3
    1、首先安装依赖包,centos里面是-devel yum-ygroupinstall"Developmenttools"yuminstallzlib-develbzip2-developenssl-develncurses-develsqlite-develrea......
  • docker-compose搭建redis集群
    1、编写Redis配置文件#创建目录mkdir-p/home/docker/docker-redis/redis-cluster#切换至指定目录cd/home/docker/docker-redis/redis-cluster/#编写re......
  • Hadoop节点的分类与作用
    目录文件的数据类型NameNode(NN)功能性能DataNode(DN)功能SecondaryNameNode传统解决方案SNN解决方案节点的分类与作用汇总图文件的数据类型文件有一个stat命令元数据......