
Setting up a Kafka cluster with Docker Swarm


1. Preparing the docker-compose file

docker-compose-kafka-cluster.yml

version: '3.3'
services:
  zoo1:
    image: confluentinc/cp-zookeeper:7.0.1
    hostname: zoo1
    ports:
      - "2181:2181"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - zookeeper1_data:/var/lib/zookeeper/data   # data dir used by the cp-zookeeper image
      - zookeeper1_datalog:/var/lib/zookeeper/log # transaction log dir used by the cp-zookeeper image
    environment:
      ZOOKEEPER_SERVER_ID: 1 # the cp-zookeeper image reads ZOOKEEPER_* variables (ZOO_* is ignored)
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  zoo2:
    image: confluentinc/cp-zookeeper:7.0.1
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - zookeeper2_data:/var/lib/zookeeper/data
      - zookeeper2_datalog:/var/lib/zookeeper/log
    environment:
      ZOOKEEPER_SERVER_ID: 2
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
      ZOOKEEPER_CLIENT_PORT: 2181 # every node listens on 2181 inside the overlay network; 2182 is only the published host port
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  zoo3:
    image: confluentinc/cp-zookeeper:7.0.1
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - zookeeper3_data:/var/lib/zookeeper/data
      - zookeeper3_datalog:/var/lib/zookeeper/log
    environment:
      ZOOKEEPER_SERVER_ID: 3
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
      ZOOKEEPER_CLIENT_PORT: 2181 # every node listens on 2181 inside the overlay network; 2183 is only the published host port
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  kafka1:
    image: confluentinc/cp-kafka:7.0.1
    hostname: kafka1
    ports:
      - "9092:9092"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      - TZ=Asia/Shanghai
      - KAFKA_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181/kafka # container ports inside the overlay network
      - KAFKA_BROKER_ID=1
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka1:9092,PLAINTEXT_INTERNAL://kafka1:29092
      - ALLOW_PLAINTEXT_LISTENER=yes
    volumes:
      - kafka1_data:/var/lib/kafka/data # data dir used by the cp-kafka image
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  kafka2:
    image: confluentinc/cp-kafka:7.0.1
    hostname: kafka2
    ports:
      - "9093:9092"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      - TZ=Asia/Shanghai
      - KAFKA_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181/kafka
      - KAFKA_BROKER_ID=2
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka2:9092,PLAINTEXT_INTERNAL://kafka2:29093
      - ALLOW_PLAINTEXT_LISTENER=yes
    volumes:
      - kafka2_data:/var/lib/kafka/data
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  kafka3:
    image: confluentinc/cp-kafka:7.0.1
    hostname: kafka3
    ports:
      - "9094:9092"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      - TZ=Asia/Shanghai
      - KAFKA_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181/kafka
      - KAFKA_BROKER_ID=3
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka3:9092,PLAINTEXT_INTERNAL://kafka3:29094
      - ALLOW_PLAINTEXT_LISTENER=yes
    volumes:
      - kafka3_data:/var/lib/kafka/data
    networks:
      - kafka
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits: # hard resource cap
          cpus: "0.50"
          memory: 1G
        reservations: # guaranteed (reserved) resources
          cpus: "0.25"
          memory: 1G
      placement:
        constraints:
          - node.role==manager # run on a manager node

  kafdrop:
    image: obsidiandynamics/kafdrop
    ports:
      - "9090:9000"
    environment:
      KAFKA_BROKERCONNECT: "kafka1:9092,kafka2:9092,kafka3:9092" # container ports on the overlay network, not the published host ports
      JVM_OPTS: "-Xms16M -Xmx48M -Xss180K -XX:-TieredCompilation -XX:+UseStringDeduplication -noverify"
    networks:
      - kafka
    depends_on:
      - "kafka1"
      - "kafka2"
      - "kafka3"
    deploy:
      placement:
        constraints:
          - node.role==manager # run on a manager node
volumes:
  zookeeper1_data:
    driver: local
  zookeeper1_datalog:
    driver: local
  zookeeper2_data:
    driver: local
  zookeeper2_datalog:
    driver: local
  zookeeper3_data:
    driver: local
  zookeeper3_datalog:
    driver: local
  kafka1_data:
    driver: local
  kafka2_data:
    driver: local
  kafka3_data:
    driver: local

networks:
  kafka:
    driver: overlay # must be a Swarm-scoped network (overlay)
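
The overlay network at the end of the file only works if the hosts already form a Swarm. If they do not, a minimal sketch for creating one (the advertise address is a placeholder for the manager node's IP):

$ docker swarm init --advertise-addr <MANAGER-IP>
$ docker swarm join-token manager   # prints the join command for additional manager nodes (the placement constraints above target managers)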

2. Deploying the services

Deploy the services with docker stack deploy; the -c option specifies the compose file and the last argument is the stack name, which becomes the prefix of the service names.

$ docker stack deploy -c docker-compose-kafka-cluster.yml kafka
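
To watch the stack converge after deploying, docker stack services scopes the listing to this stack (kafka is the stack name chosen above); every service should reach REPLICAS 1/1:

$ docker stack services kafka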

To verify Kafka, open the Kafdrop console in a browser at any node's IP on port 9090; a page like the following appears:

(screenshot: Kafdrop cluster overview)

All three Kafka brokers should show up there.

Create a topic; if it is replicated to all Kafka brokers, the cluster has been set up successfully (see the example below).
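
For example, a topic can be created and then described from inside any broker container. This is only a sketch: the topic name test is arbitrary, and the docker ps lookup must run on the node hosting the kafka_kafka1 task (a manager node here), because Swarm generates the actual container name with a task suffix:

$ docker exec -it $(docker ps -qf name=kafka_kafka1) \
    kafka-topics --bootstrap-server kafka1:9092 --create --topic test --partitions 3 --replication-factor 3
$ docker exec -it $(docker ps -qf name=kafka_kafka1) \
    kafka-topics --bootstrap-server kafka1:9092 --describe --topic test

If the --describe output shows partition replicas spread across broker IDs 1, 2 and 3, replication across the cluster is working.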

3. Viewing the services

$ docker service ls
ID             NAME            MODE         REPLICAS   IMAGE                             PORTS
wk0eo0ei9b9v   kafka_kafdrop   replicated   1/1        obsidiandynamics/kafdrop:latest   *:9090->9000/tcp
nkivqnmsikly   kafka_kafka1    replicated   1/1        confluentinc/cp-kafka:7.0.1       *:9092->9092/tcp
n0855vlsjlj1   kafka_kafka2    replicated   1/1        confluentinc/cp-kafka:7.0.1       *:9093->9092/tcp
hhrggo3luh4f   kafka_kafka3    replicated   1/1        confluentinc/cp-kafka:7.0.1       *:9094->9092/tcp
rzjjjzc1cua5   kafka_zoo1      replicated   1/1        confluentinc/cp-zookeeper:7.0.1   *:2181->2181/tcp
m1mj7lniy34y   kafka_zoo2      replicated   1/1        confluentinc/cp-zookeeper:7.0.1   *:2182->2181/tcp
wmki83nxaiuu   kafka_zoo3      replicated   1/1        confluentinc/cp-zookeeper:7.0.1   *:2183->2181/tcp
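
When a service does not reach 1/1 replicas, these two commands are usually the quickest way to find out why (kafka_kafka1 is just an example service name from the listing above):

$ docker stack ps kafka --no-trunc    # per-task node, desired/current state and full error message
$ docker service logs -f kafka_kafka1 # follow the logs of a single service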

4. Troubleshooting common problems

From: https://www.cnblogs.com/JentZhang/p/17051538.html
