Deploying Zookeeper + Kafka + Storm with Docker

Install Docker

Trust the Docker GPG key:

curl -fsSL https://repo.huaweicloud.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

Add the package repository:

sudo add-apt-repository "deb [arch=amd64] https://repo.huaweicloud.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

Update the package index and install (docker-compose-plugin provides the docker compose command used below):

sudo apt-get update
sudo apt-get install docker-ce docker-compose-plugin
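
A quick sanity check that the daemon is running and the Compose plugin is available:

sudo docker version
sudo docker compose version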

Create a dedicated network

All three Compose stacks below attach to this single bridge network, so the fixed 172.30.0.x addresses resolve across the separate Compose files:

docker network create --subnet 172.30.0.0/24 --gateway 172.30.0.1 zookeeper
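
To verify that the subnet and gateway were applied:

docker network inspect zookeeper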

Create the Zookeeper Compose file

Save the following as docker-compose-zookeeper.yml. Each node gets a unique ZOO_MY_ID and lists itself as 0.0.0.0 in its own ZOO_SERVERS entry.

version: '3'

services: 
    zookeeper1:
        image: zookeeper:3.4
        restart: always
        hostname: zookeeper1
        container_name: zookeeper1
        ports:
            - "2181:2181"
        volumes: 
            - "/home/zk/workspace/volumes/zkcluster/zookeeper1/data:/data"
            - "/home/zk/workspace/volumes/zkcluster/zookeeper1/datalog:/datalog"
        environment: 
            ZOO_MY_ID: 1
            ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zookeeper2:2888:3888 server.3=zookeeper3:2888:3888
        networks:
            zookeeper:
                ipv4_address: 172.30.0.11

    zookeeper2:
        image: zookeeper:3.4
        restart: always
        hostname: zookeeper2
        container_name: zookeeper2
        ports:
            - "2182:2181"
        volumes: 
            - "/home/zk/workspace/volumes/zkcluster/zookeeper2/data:/data"
            - "/home/zk/workspace/volumes/zkcluster/zookeeper2/datalog:/datalog"
        environment: 
            ZOO_MY_ID: 2
            ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zookeeper3:2888:3888
        networks:
            zookeeper:
                ipv4_address: 172.30.0.12

    zookeeper3:
        image: zookeeper:3.4
        restart: always
        hostname: zookeeper3
        container_name: zookeeper3
        ports:
            - "2183:2181"
        volumes: 
            - "/home/zk/workspace/volumes/zkcluster/zookeeper3/data:/data"
            - "/home/zk/workspace/volumes/zkcluster/zookeeper3/datalog:/datalog"
        environment: 
            ZOO_MY_ID: 3
            ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=zookeeper2:2888:3888 server.3=0.0.0.0:2888:3888
        networks:
            zookeeper:
                ipv4_address: 172.30.0.13

networks: 
    zookeeper:
        external: 
            name: zookeeper
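
After this stack is started (see the startup section below), the ensemble can be health-checked; the official zookeeper image keeps zkServer.sh on its PATH, and one node should report Mode: leader while the other two report Mode: follower:

docker exec zookeeper1 zkServer.sh status
docker exec zookeeper2 zkServer.sh status
docker exec zookeeper3 zkServer.sh status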

Create the Kafka Compose file

Save the following as docker-compose-kafka.yml. The kafka network declared at the bottom is just an alias for the external zookeeper network, so the brokers share the 172.30.0.0/24 subnet with the Zookeeper nodes.

version: '3'

services: 
    kafka1:
        image: wurstmeister/kafka:2.12-2.4.1
        restart: always
        hostname: kafka1
        container_name: kafka1
        privileged: true
        ports:
            - 9092:9092
        environment:
              KAFKA_ADVERTISED_HOST_NAME: kafka1
              KAFKA_LISTENERS: PLAINTEXT://kafka1:9092
              KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092
              KAFKA_ADVERTISED_PORT: 9092
              KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
        volumes:
            - /home/zk/workspace/volumes/kafkaCluster/kafka1/logs:/kafka
            - /home/w2110276138/dataset/:/tmp
        networks:
            kafka:
                ipv4_address: 172.30.0.14
        extra_hosts: 
            zookeeper1: 172.30.0.11
            zookeeper2: 172.30.0.12
            zookeeper3: 172.30.0.13

    kafka2:
        image: wurstmeister/kafka:2.12-2.4.1
        restart: always
        hostname: kafka2
        container_name: kafka2
        privileged: true
        ports:
            - 9093:9093
        environment:
              KAFKA_ADVERTISED_HOST_NAME: kafka2
              KAFKA_LISTENERS: PLAINTEXT://kafka2:9093
              KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9093
              KAFKA_ADVERTISED_PORT: 9093
              KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
        volumes:
            - /home/zk/workspace/volumes/kafkaCluster/kafka2/logs:/kafka
            - /home/w2110276138/dataset/:/tmp
        networks:
            kafka:
                ipv4_address: 172.30.0.15
        extra_hosts: 
            zookeeper1: 172.30.0.11
            zookeeper2: 172.30.0.12
            zookeeper3: 172.30.0.13

    kafka3:
        image: wurstmeister/kafka:2.12-2.4.1
        restart: always
        hostname: kafka3
        container_name: kafka3
        privileged: true
        ports:
            - 9094:9094
        environment:
              KAFKA_ADVERTISED_HOST_NAME: kafka3
              KAFKA_LISTENERS: PLAINTEXT://kafka3:9094
              KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9094
              KAFKA_ADVERTISED_PORT: 9094
              KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
        volumes:
            - /home/zk/workspace/volumes/kafkaCluster/kafka3/logs:/kafka
            - /home/w2110276138/dataset/:/tmp
        networks:
            kafka:
                ipv4_address: 172.30.0.16
        extra_hosts: 
            zookeeper1: 172.30.0.11
            zookeeper2: 172.30.0.12
            zookeeper3: 172.30.0.13

networks: 
    kafka:
        external: 
            name: zookeeper
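
Once the brokers are running (see the startup section below), a quick smoke test from inside one of them; the wurstmeister image puts the Kafka CLI scripts on its PATH, and the topic name test is an arbitrary placeholder:

docker exec kafka1 kafka-topics.sh --create --bootstrap-server kafka1:9092 --replication-factor 3 --partitions 3 --topic test
docker exec kafka1 kafka-topics.sh --describe --bootstrap-server kafka1:9092 --topic test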

Create the Storm Compose file

Save the following as docker-compose-storm.yml. The storm network is again an alias for the external zookeeper network, and every service mounts a storm.yaml from the current directory (a sketch of that file follows the Compose listing).

version: '3'

services:
  nimbus:
    image: storm:1.2.2
    command: storm nimbus
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    container_name: nimbus
    hostname: nimbus
    networks:
      storm:
        ipv4_address: 172.30.0.17
    ports:
      - 6627:6627
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
      - "/home/w2110276138/jar:/tmp"
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16
      ui: 172.30.0.18

  ui:
    image: storm:1.2.2
    command: storm ui
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    container_name: ui
    hostname: ui
    networks:
      storm:
        ipv4_address: 172.30.0.18
    ports:
      - 8080:8080
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
    depends_on:
      - nimbus
    links:
      - nimbus:nimbus
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16
      nimbus: 172.30.0.17


  supervisor:
    image: storm:1.2.2
    command: storm supervisor
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    networks:
      - storm
    depends_on:
      - nimbus
    links:
      - nimbus:nimbus
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16

networks:
    storm:
        external:
            name: zookeeper
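
Every Storm service above mounts ./storm.yaml, but the original post does not include its contents. A minimal sketch consistent with the hostnames used here, using standard Storm 1.x configuration keys, might look like this:

storm.zookeeper.servers:
  - "zookeeper1"
  - "zookeeper2"
  - "zookeeper3"
storm.zookeeper.port: 2181
nimbus.seeds: ["nimbus"]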

Start Zookeeper + Kafka + Storm

The three stacks live in separate Compose files and are started one by one; if all the services were merged into a single Compose file, they could be brought up together with one command.

docker compose -f docker-compose-zookeeper.yml up -d
docker compose -f docker-compose-kafka.yml up -d
docker compose -f docker-compose-storm.yml up -d
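
Check that every container reports Up; the Storm UI should then be reachable at http://localhost:8080 on the host:

docker ps --format "table {{.Names}}\t{{.Status}}"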

Scale out the Supervisors

The supervisor service has no container_name, fixed IP, or published ports, so Compose can run multiple replicas of it:

docker compose -f docker-compose-storm.yml up --scale supervisor=4 -d
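
With the cluster up, a topology jar can be submitted from inside the nimbus container; it is visible under /tmp via the volume mount in the nimbus service. The jar name, main class, and topology name below are placeholders:

docker exec -it nimbus storm jar /tmp/my-topology.jar com.example.MyTopology my-topology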

From: https://www.cnblogs.com/Modest-Hamilton/p/17515034.html
