当前位置: 首页 > 综合 > 正文

docker-compose多服务器部署kafka集群

来源:博客园    时间:2023-06-21 15:14:12


【资料图】

Kafka 是一个开源的分布式事件流平台,依赖Zookeeper或者KRaft,本文基于Zookeeper。服务器IP配置

本文使用三个服务器来做集群搭建,IP如下:

nodeNameIP
node110.10.210.96
node210.10.210.97
node310.10.210.98
部署zookeeper

工作目录为 /home/zookeeper

node1配置

目录结构
- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml
zoo.cfg
# ZooKeeper node1 configuration (/home/zookeeper/config/zoo.cfg).
# Properties format: every entry must use key=value — the original article
# had "clientPort:2181" (colon), which ZooKeeper does not parse.
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
clientPort=2181
# server.N=host:peerPort:electionPort — N must match this node's ZOO_MY_ID.
# This node (id 1) refers to itself via loopback.
server.1=127.0.0.1:2888:3888
server.2=10.10.210.97:2888:3888
server.3=10.10.210.98:2888:3888
docker-compose.yml
# docker-compose for ZooKeeper node1 (/home/zookeeper/docker-compose.yml).
version: "3"

services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-1
    container_name: zookeeper
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "2181:2181"  # client port
      - "2888:2888"  # follower connections
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      # Must match the server.N entry for this host in zoo.cfg.
      ZOO_MY_ID: "1"
node2配置

目录结构
- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml
zoo.cfg
# ZooKeeper node2 configuration (/home/zookeeper/config/zoo.cfg).
# Properties format: every entry must use key=value — the original article
# had "clientPort:2181" (colon), which ZooKeeper does not parse.
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
clientPort=2181
# server.N=host:peerPort:electionPort — N must match this node's ZOO_MY_ID.
# This node (id 2) refers to itself via loopback.
server.1=10.10.210.96:2888:3888
server.2=127.0.0.1:2888:3888
server.3=10.10.210.98:2888:3888
docker-compose.yml
# docker-compose for ZooKeeper node2 (/home/zookeeper/docker-compose.yml).
version: "3"

services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-2
    container_name: zookeeper
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "2181:2181"  # client port
      - "2888:2888"  # follower connections
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      # Must match the server.N entry for this host in zoo.cfg.
      ZOO_MY_ID: "2"
node3配置

目录结构
- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml
zoo.cfg
# ZooKeeper node3 configuration (/home/zookeeper/config/zoo.cfg).
# Properties format: every entry must use key=value — the original article
# had "clientPort:2181" (colon), which ZooKeeper does not parse.
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
clientPort=2181
# server.N=host:peerPort:electionPort — N must match this node's ZOO_MY_ID.
# This node (id 3) refers to itself via loopback.
server.1=10.10.210.96:2888:3888
server.2=10.10.210.97:2888:3888
server.3=127.0.0.1:2888:3888
docker-compose.yml
# docker-compose for ZooKeeper node3 (/home/zookeeper/docker-compose.yml).
version: "3"

services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-3
    container_name: zookeeper
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "2181:2181"  # client port
      - "2888:2888"  # follower connections
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      # Must match the server.N entry for this host in zoo.cfg.
      ZOO_MY_ID: "3"
在对应服务器的/home/zookeeper目录执行 docker-compose up -d 启动三个Zookeeper服务,通过 docker-compose logs -f 观察启动日志。

ZOO_MY_ID 对应zookeeper的id,多台服务器需设置不同的值,对应zoo.cfg中的server.1等条目,其中 .1 就是对应的ZOO_MY_ID。

zoo.cfg配置信息具体可参考 Zookeeper部署和管理指南。

部署kafka

工作目录为 /home/kafka

node1配置

目录结构
- kafka
  - docker-compose.yml
  - config/server.properties
docker-compose.yml
# docker-compose for Kafka node1 (/home/kafka/docker-compose.yml).
version: "3"

services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-1
    container_name: kafka
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "9092:9092"  # broker listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      # Mount the per-node broker config over the image default.
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties
server.properties
# Kafka broker configuration for node1 (10.10.210.96).
# broker.id must be unique per node in the cluster.
broker.id=1
listeners=PLAINTEXT://:9092
# Address advertised to clients — this node's host IP.
advertised.listeners=PLAINTEXT://10.10.210.96:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# NOTE(review): the value below is empty in the source article — likely
# truncated. Set an explicit mechanism (e.g. PLAIN) or remove the line if
# SASL is not used between brokers.
sasl.mechanism.inter.broker.protocol=
node2配置

目录结构
- kafka
  - docker-compose.yml
  - config/server.properties
docker-compose.yml
# docker-compose for Kafka node2 (/home/kafka/docker-compose.yml).
version: "3"

services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-2
    container_name: kafka
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "9092:9092"  # broker listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      # Mount the per-node broker config over the image default.
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties
server.properties
# Kafka broker configuration for node2 (10.10.210.97).
# broker.id must be unique per node in the cluster.
broker.id=2
listeners=PLAINTEXT://:9092
# Address advertised to clients — this node's host IP.
advertised.listeners=PLAINTEXT://10.10.210.97:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# NOTE(review): the value below is empty in the source article — likely
# truncated. Set an explicit mechanism (e.g. PLAIN) or remove the line if
# SASL is not used between brokers.
sasl.mechanism.inter.broker.protocol=
node3配置

目录结构
- kafka
  - docker-compose.yml
  - config/server.properties
docker-compose.yml
# docker-compose for Kafka node3 (/home/kafka/docker-compose.yml).
version: "3"

services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-3
    container_name: kafka
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "9092:9092"  # broker listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      # Mount the per-node broker config over the image default.
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties
server.properties
# Kafka broker configuration for node3 (10.10.210.98).
# broker.id must be unique per node in the cluster.
broker.id=3
listeners=PLAINTEXT://:9092
# Address advertised to clients — this node's host IP.
advertised.listeners=PLAINTEXT://10.10.210.98:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# NOTE(review): the value below is empty in the source article — likely
# truncated. Set an explicit mechanism (e.g. PLAIN) or remove the line if
# SASL is not used between brokers.
sasl.mechanism.inter.broker.protocol=
在对应服务器的/home/kafka目录执行 docker-compose up -d 启动三个Kafka服务,通过 docker-compose logs -f 观察启动日志。

server.properties配置信息具体可参考 Kafka Broker Configs。

kafka测试使用

通过 Offset Explorer 测试连接kafka是否可用。

后记

如果想要简单配置的情况下,可以通过environment的方式启动kafka,参考如下:

docker-compose.yml
# Simplified single-node Kafka compose using environment variables instead of
# a mounted server.properties. The original article had "volumes:" fused onto
# the JMX_PORT line with stray spaces, which mis-nests the volumes section.
version: "3"

services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node
    container_name: kafka
    # Ports quoted: digit:digit mappings can be misread by YAML 1.1 parsers.
    ports:
      - "9092:9092"  # broker listener
      - "9999:9999"  # JMX
    environment:
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.10.210.96:9092
      - KAFKA_ADVERTISED_HOST_NAME=10.10.210.96
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
      # Required by the Bitnami image to allow a PLAINTEXT listener.
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
      - JMX_PORT=9999
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data

X 关闭

推荐内容

最近更新

Copyright © 2015-2022 西方机械网版权所有  备案号:沪ICP备2020036824号-7  联系邮箱:5626629@qq.com