我正在尝试为 Confluent 的 Kafka Docker 平台配置 SSL，但在启动时出现错误。
日志:
Command [/usr/local/bin/dub path /etc/kafka/secrets/kafka.server.keystore.jks exists] FAILED! 容器 kafka_kafka-broker1_1_13d7835ad32d 以退出码 1 退出。
Docker配置:
version: '3'
services:
  zookeeper1:
    image: confluentinc/cp-zookeeper:5.1.0
    hostname: zookeeper1
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    environment:
      ZOOKEEPER_SERVER_ID: 1
      # Quoted: digits-and-colons would otherwise risk YAML 1.1 sexagesimal parsing.
      ZOOKEEPER_SERVERS: "0.0.0.0:2888:3888"
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    volumes:
      - zookeeper-data:/var/lib/zookeeper/data
      - zookeeper-log:/var/lib/zookeeper/log
  kafka-broker1:
    image: confluentinc/cp-kafka:5.1.0
    # FIX: the original line had a stray trailing colon ("hostname: kafka-broker1:"),
    # which is not a valid hostname value.
    hostname: kafka-broker1
    ports:
      - "9092:9092"
      - "9093:9093"
    environment:
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafkassl.com:9092,SSL://kafkassl.com:9093"
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper1:2181"
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
      # FIX: only one broker is defined in this file, so the offsets topic cannot
      # have 2 replicas; a factor of 2 prevents __consumer_offsets from being created.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_LOG_RETENTION_HOURS: 168
      KAFKA_OFFSETS_RETENTION_MINUTES: 43800
      # cp-kafka resolves *_FILENAME relative to /etc/kafka/secrets inside the
      # container; the startup check ("dub path ... exists" in the error log)
      # fails when the host directory mounted below does not contain this file.
      KAFKA_SSL_KEYSTORE_FILENAME: kafka.server.keystore.jks
      # FIX: *_LOCATION must use the container-side path (/etc/kafka/secrets),
      # not the host-side path (/ssl) used on the left of the volume mapping.
      KAFKA_SSL_TRUSTSTORE_LOCATION: /etc/kafka/secrets/kafka.server.truststore.jks
      KAFKA_SSL_TRUSTSTORE_PASSWORD: pass
      KAFKA_SSL_KEYSTORE_LOCATION: /etc/kafka/secrets/kafka.server.keystore.jks
      KAFKA_SSL_KEYSTORE_PASSWORD: pass
      KAFKA_SSL_KEY_PASSWORD: pass
    volumes:
      - kafka-data:/var/lib/kafka/data
      # Host directory /ssl must actually contain the keystore/truststore files
      # referenced above, otherwise startup fails with the "dub path ... exists" error.
      - /ssl:/etc/kafka/secrets
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    depends_on:
      - zookeeper1
volumes:
  zookeeper-data:
  zookeeper-log:
  kafka-data:
答案 0（得分：0）
下面是可以使用SSL支持启动kafka docker-compose的步骤(@Senthil已在其评论中提供了一些指导)
有一个 secrets 目录，其中包含用于生成密钥库、信任库和 SSL 密码文件的 shell 脚本。进入该 kafka docker-compose 工程的根目录并运行此脚本（例如：./secrets/create-certs），它将生成所需的文件。
将所有生成的文件复制到secrets目录中
将secrets目录的卷从主机安装到dockerized目录中。将以下内容放在卷部分的docker-compose文件中
volumes:
  - ./secrets/:/etc/kafka/secrets
使用docker-compose up运行
答案 1（得分：0）
FWIW，这是我用来解决此问题的方法以及遇到的问题。下面是我的 docker compose 文件的一部分。如果打开文件 kafka_secret.txt，其中只有一行内容 P@ssword。我要解决的问题是：./kafka/secrets:/etc/kafka/secrets 被创建成了命名卷（volume）而不是绑定挂载（bind mount）。我通过运行 docker container inspect 确认了这一点（先通过 docker container ls 获取容器名称），它显示的是卷挂载而不是绑定挂载。为了修复它，我从 docker 中删除了该卷以重新开始——即使重新创建了容器，之前附着在我的 kafka 容器上的卷仍然会保留。
zookeeper:
  image: zookeeper:3.4.9
  hostname: zookeeper
  ports:
    - '2181:2181'
  environment:
    ZOO_MY_ID: 1
    ZOO_PORT: 2181
    ZOO_SERVERS: server.1=zookeeper:2888:3888
    ZOO_LOG4J_PROP: "${KAFKA_LOG_LEVEL},CONSOLE"
  networks:
    - ms_network
  volumes:
    - ./kafka/zookeeper/data:/data
    - ./kafka/zookeeper/datalog:/datalog
kafka:
  image: confluentinc/cp-kafka:5.5.0
  hostname: kafka
  ports:
    - '19092:19092'
  environment:
    KAFKA_BROKER_ID: 1
    KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
    KAFKA_ADVERTISED_LISTENERS: SSL://kafka:19092
    # *_FILENAME / *_CREDENTIALS are resolved by the cp-kafka image relative to
    # /etc/kafka/secrets; the credentials file holds only the password text.
    KAFKA_SSL_KEYSTORE_FILENAME: keystore.jks
    KAFKA_SSL_KEYSTORE_CREDENTIALS: kafka_secret.txt
    KAFKA_SSL_KEY_CREDENTIALS: kafka_secret.txt
    KAFKA_SSL_TRUSTSTORE_FILENAME: truststore.jks
    KAFKA_SSL_TRUSTSTORE_CREDENTIALS: kafka_secret.txt
    # A single-space value disables SSL endpoint (hostname) verification.
    KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: " "
    KAFKA_SSL_CLIENT_AUTH: requested
    KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
    KAFKA_LOG4J_LOGGERS: 'org.apache.zookeeper=${KAFKA_LOG_LEVEL},org.apache.kafka=${KAFKA_LOG_LEVEL},kafka=${KAFKA_LOG_LEVEL},kafka.cluster=${KAFKA_LOG_LEVEL},kafka.controller=${KAFKA_LOG_LEVEL},kafka.coordinator=${KAFKA_LOG_LEVEL},kafka.log=${KAFKA_LOG_LEVEL},kafka.server=${KAFKA_LOG_LEVEL},kafka.zookeeper=${KAFKA_LOG_LEVEL},state.change.logger=${KAFKA_LOG_LEVEL},kafka.producer.async.DefaultEventHandler=${KAFKA_LOG_LEVEL},kafka.authorizer.logger=${KAFKA_LOG_LEVEL},kafka.log.LogCleaner=${KAFKA_LOG_LEVEL},kafka.request.logger=${KAFKA_LOG_LEVEL}'
    # Quoted so the templated value stays a string even if expansion is empty.
    KAFKA_LOG4J_ROOT_LOGLEVEL: "${KAFKA_LOG_LEVEL}"
    KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
  volumes:
    # Relative host path => bind mount (an accidentally-created named volume
    # here was the problem described in the answer text above).
    - ./kafka/secrets:/etc/kafka/secrets
    - ./kafka/data:/var/lib/kafka/data
  depends_on:
    - zookeeper
  networks:
    - ms_network
答案 2（得分：0）
这些步骤在Windows中对我有用:
1-使用Windows WSL生成密钥:
# Generate the SSL keystores/truststores from the cp-docker-images SSL example.
cd "$(pwd)/examples/kafka-cluster-ssl/secrets"
./create-certs.sh
# Type "yes" for all 'Trust this certificate? [no]:' prompts.
2-使用 PowerShell 设置环境变量 KAFKA_SSL_SECRETS_DIR :
$env:KAFKA_SSL_SECRETS_DIR= "xxxx\cp-docker-images\examples\kafka-cluster-ssl\secrets"
3-使用环境变量来运行kafka-ssl群集节点:
# Run from PowerShell (note the ${env:...} variable syntax in the -v mapping);
# the backtick is PowerShell's line-continuation character — without it this
# command cannot be pasted as multiple lines.
docker run -d --net=host --name=kafka-ssl-1 `
  -e KAFKA_ZOOKEEPER_CONNECT=localhost:22181,localhost:32181,localhost:42181 `
  -e KAFKA_ADVERTISED_LISTENERS=SSL://localhost:29092 `
  -e KAFKA_SSL_KEYSTORE_FILENAME=kafka.broker1.keystore.jks `
  -e KAFKA_SSL_KEYSTORE_CREDENTIALS=broker1_keystore_creds `
  -e KAFKA_SSL_KEY_CREDENTIALS=broker1_sslkey_creds `
  -e KAFKA_SSL_TRUSTSTORE_FILENAME=kafka.broker1.truststore.jks `
  -e KAFKA_SSL_TRUSTSTORE_CREDENTIALS=broker1_truststore_creds `
  -e KAFKA_SECURITY_INTER_BROKER_PROTOCOL=SSL `
  -v ${env:KAFKA_SSL_SECRETS_DIR}:/etc/kafka/secrets `
  confluentinc/cp-kafka:5.0.0