配置docker-compose
- 执行docker-compose --version如果出现以下报错,则需要配置docker-compose,如果显示版本则不需要配置。
bash: docker-compose: command not found...
- 执行以下命令拉取docker-compose
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- 执行以下命令修改权限
sudo chmod +x /usr/local/bin/docker-compose
- 执行docker-compose --version,显示版本号即表示docker-compose配置成功
搭建zk集群
- 创建zk虚拟网络
docker network create zookeeper_network
- 创建zk集群的docker-compose.yml
version: '3.1'

# Join the pre-created external network so the kafka compose file
# (a separate project) can resolve zoo1/zoo2/zoo3 by hostname.
networks:
  default:
    external:
      name: zookeeper_network

services:
  zoo1:
    image: zookeeper
    restart: always
    container_name: zoo1
    hostname: zoo1
    ports:
      # Quoted: unquoted digit:digit mappings can hit YAML's sexagesimal trap.
      - "2181:2181"
    volumes:
      - "./zoo1/data:/data"
      - "./zoo1/datalog:/datalog"
    environment:
      # Env values quoted so the consumer always receives strings.
      ZOO_MY_ID: "1"
      # server.N=<host>:<peer-port>:<election-port>;<client-port>
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"

  zoo2:
    image: zookeeper
    restart: always
    container_name: zoo2
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - "./zoo2/data:/data"
      - "./zoo2/datalog:/datalog"
    environment:
      ZOO_MY_ID: "2"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"

  zoo3:
    image: zookeeper
    restart: always
    container_name: zoo3
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - "./zoo3/data:/data"
      - "./zoo3/datalog:/datalog"
    environment:
      ZOO_MY_ID: "3"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
- 创建或重建zk集群
docker-compose up -d
搭建kafka集群
- 创建kafka集群的docker-compose.yml
version: '3.1'

# Reuse the external network created for the zookeeper cluster so the
# brokers can reach zoo1/zoo2/zoo3 by hostname.
networks:
  default:
    external:
      name: zookeeper_network

services:
  kafka1:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka1
    hostname: kafka1
    ports:
      - "9092:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: "1"
      # Bind on all interfaces inside the container.
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"
      # NOTE(review): 192.168.75.1 is a hard-coded host IP — adjust to the
      # address clients will actually use to reach this host.
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.75.1:9092"
      # NOTE(review): ADVERTISED_HOST_NAME/PORT are the legacy form of
      # ADVERTISED_LISTENERS; setting both may conflict — verify which one
      # the image actually applies.
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_ADVERTISED_PORT: "9092"
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "./kafka/kafka1/data/:/kafka"

  kafka2:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka2
    hostname: kafka2
    ports:
      - "9093:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: "2"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.75.1:9093"
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_ADVERTISED_PORT: "9093"
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "./kafka/kafka2/data/:/kafka"

  kafka3:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka3
    hostname: kafka3
    ports:
      - "9094:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: "3"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.75.1:9094"
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_ADVERTISED_PORT: "9094"
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "./kafka/kafka3/data/:/kafka"

  # Web UI for cluster inspection, served on host port 9000.
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: unless-stopped
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:
      - kafka1
      - kafka2
      - kafka3
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_BROKERS: "kafka1:9092,kafka2:9093,kafka3:9094"
- 创建或重建kafka集群
docker-compose up -d
注意:如果配置了内存限制(如下),需要使用docker-compose --compatibility -f docker-compose.yml up -d启动
# Per-service resource limits; honored by plain docker-compose only in
# --compatibility mode (see the note above this snippet).
deploy:
  resources:
    limits:
      cpus: '2'
      memory: 2G
    reservations:
      cpus: '0.5'
      memory: 200M
读写验证
进入容器
docker exec -it kafka1 /bin/bash
创建topic
/opt/kafka/bin/kafka-topics.sh --create --zookeeper 172.22.0.2:2181 --replication-factor 1 --partitions 1 --topic my-test1
查看topic列表
/opt/kafka/bin/kafka-topics.sh --list --zookeeper 172.22.0.2:2181
发送消息
/opt/kafka/bin/kafka-console-producer.sh --broker-list 172.22.0.7:9092 --topic my-test1
读取消息
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 172.22.0.7:9092 --topic my-test1 --from-beginning
增加分区
/opt/kafka/bin/kafka-topics.sh --alter --zookeeper 172.22.0.2:2181 --partitions 2 --topic my-test1
指定消费者组,不指定则消费topic的所有分区
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka1:9092 --topic my-test1 --consumer-property group.id=group_mytes
|