How do I set up Kafka with SSL?

Time: 2019-04-29 14:05:32

Tags: docker ssl apache-kafka

I am trying to run Kafka in Docker. It works over PLAINTEXT, but not over SSL.

I performed the SSL setup according to this documentation:

#!/bin/bash
# Step 1: generate the broker's key pair in the server keystore
keytool -keystore server.keystore.jks -alias localhost -validity 365 -genkey

# Step 2: create a CA certificate and import it into the server and client truststores
openssl req -new -x509 -keyout ca-key -out ca-cert -days 365
keytool -keystore server.truststore.jks -alias CARoot -import -file ca-cert
keytool -keystore client.truststore.jks -alias CARoot -import -file ca-cert

# Step 3: sign the broker certificate with the CA, then import the CA cert
# and the signed certificate into the server keystore
keytool -keystore server.keystore.jks -alias localhost -certreq -file cert-file
openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days 365 -CAcreateserial -passin pass:test1234
keytool -keystore server.keystore.jks -alias CARoot -import -file ca-cert
keytool -keystore server.keystore.jks -alias localhost -import -file cert-signed
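
To sanity-check the keystores these steps produce, keytool can list their contents (diagnostic only; alias names follow the script above):

# The server keystore should now contain both the CARoot entry and the
# CA-signed localhost certificate:
keytool -list -v -keystore server.keystore.jks
# The client truststore should contain the CARoot certificate:
keytool -list -v -keystore client.truststore.jks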

Then I copied all the SSL files into the /tmp/ssl/1/ directory.
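
Something along these lines (a sketch; the exact file set follows from the script above):

mkdir -p /tmp/ssl/1
cp server.keystore.jks server.truststore.jks client.truststore.jks ca-cert /tmp/ssl/1/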

Here is my docker-compose:

version: '2'
volumes:
  data-volume: {}
services:  
  kafka:
    image: wurstmeister/kafka
    ports:
      - "9092:9092"
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=127.0.0.1
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
    volumes:
      - "/tmp/ssl/:/tmp/ssl/"
    depends_on:
      - zookeeper
  zookeeper:
    image: wurstmeister/zookeeper
    ports:
      - "2181:2181"
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=zookeeper
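
For reference, the wurstmeister/kafka entrypoint maps environment variables of the form KAFKA_X_Y onto server.properties keys x.y, which is how SSL settings like the ones in the dump below can be supplied. A sketch of the kafka service with those additions (passwords are placeholders matching the pass:test1234 used above):

  kafka:
    image: wurstmeister/kafka
    ports:
      - "9092:9092"
      - "9093:9093"   # the SSL listener must also be published
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=127.0.0.1
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_LISTENERS=PLAINTEXT://:9092,SSL://:9093
      - KAFKA_SSL_KEYSTORE_LOCATION=/tmp/ssl/1/server.keystore.jks
      - KAFKA_SSL_KEYSTORE_PASSWORD=test1234   # placeholder
      - KAFKA_SSL_KEY_PASSWORD=test1234        # placeholder
      - KAFKA_SSL_TRUSTSTORE_LOCATION=/tmp/ssl/1/server.truststore.jks
      - KAFKA_SSL_TRUSTSTORE_PASSWORD=test1234 # placeholder
    volumes:
      - "/tmp/ssl/:/tmp/ssl/"
    depends_on:
      - zookeeper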

server.properties:

    advertised.host.name = 127.0.0.1
    advertised.listeners = null
    advertised.port = 9092
    alter.config.policy.class.name = null
    alter.log.dirs.replication.quota.window.num = 11
    alter.log.dirs.replication.quota.window.size.seconds = 1
    authorizer.class.name = 
    auto.create.topics.enable = true
    auto.leader.rebalance.enable = true
    background.threads = 10
    broker.id = -1
    broker.id.generation.enable = true
    broker.rack = null
    client.quota.callback.class = null
    compression.type = producer
    connection.failed.authentication.delay.ms = 100
    connections.max.idle.ms = 600000
    connections.max.reauth.ms = 0
    control.plane.listener.name = null
    controlled.shutdown.enable = true
    controlled.shutdown.max.retries = 3
    controlled.shutdown.retry.backoff.ms = 5000
    controller.socket.timeout.ms = 30000
    create.topic.policy.class.name = null
    default.replication.factor = 1
    delegation.token.expiry.check.interval.ms = 3600000
    delegation.token.expiry.time.ms = 86400000
    delegation.token.master.key = null
    delegation.token.max.lifetime.ms = 604800000
    delete.records.purgatory.purge.interval.requests = 1
    delete.topic.enable = true
    fetch.purgatory.purge.interval.requests = 1000
    group.initial.rebalance.delay.ms = 0
    group.max.session.timeout.ms = 300000
    group.max.size = 2147483647
    group.min.session.timeout.ms = 6000
    host.name = 
    inter.broker.listener.name = null
    inter.broker.protocol.version = 2.2-IV1
    kafka.metrics.polling.interval.secs = 10
    kafka.metrics.reporters = []
    leader.imbalance.check.interval.seconds = 300
    leader.imbalance.per.broker.percentage = 10
    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
    listeners = PLAINTEXT://:9092,SSL://:9093
    log.cleaner.backoff.ms = 15000
    log.cleaner.dedupe.buffer.size = 134217728
    log.cleaner.delete.retention.ms = 86400000
    log.cleaner.enable = true
    log.cleaner.io.buffer.load.factor = 0.9
    log.cleaner.io.buffer.size = 524288
    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
    log.cleaner.min.cleanable.ratio = 0.5
    log.cleaner.min.compaction.lag.ms = 0
    log.cleaner.threads = 1
    log.cleanup.policy = [delete]
    log.dir = /tmp/kafka-logs
    log.dirs = /kafka/kafka-logs-935db2aeed2f
    log.flush.interval.messages = 9223372036854775807
    log.flush.interval.ms = null
    log.flush.offset.checkpoint.interval.ms = 60000
    log.flush.scheduler.interval.ms = 9223372036854775807
    log.flush.start.offset.checkpoint.interval.ms = 60000
    log.index.interval.bytes = 4096
    log.index.size.max.bytes = 10485760
    log.message.downconversion.enable = true
    log.message.format.version = 2.2-IV1
    log.message.timestamp.difference.max.ms = 9223372036854775807
    log.message.timestamp.type = CreateTime
    log.preallocate = false
    log.retention.bytes = -1
    log.retention.check.interval.ms = 300000
    log.retention.hours = 168
    log.retention.minutes = null
    log.retention.ms = null
    log.roll.hours = 168
    log.roll.jitter.hours = 0
    log.roll.jitter.ms = null
    log.roll.ms = null
    log.segment.bytes = 1073741824
    log.segment.delete.delay.ms = 60000
    max.connections.per.ip = 2147483647
    max.connections.per.ip.overrides = 
    max.incremental.fetch.session.cache.slots = 1000
    message.max.bytes = 1000012
    metric.reporters = []
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    min.insync.replicas = 1
    num.io.threads = 8
    num.network.threads = 3
    num.partitions = 1
    num.recovery.threads.per.data.dir = 1
    num.replica.alter.log.dirs.threads = null
    num.replica.fetchers = 1
    offset.metadata.max.bytes = 4096
    offsets.commit.required.acks = -1
    offsets.commit.timeout.ms = 5000
    offsets.load.buffer.size = 5242880
    offsets.retention.check.interval.ms = 600000
    offsets.retention.minutes = 10080
    offsets.topic.compression.codec = 0
    offsets.topic.num.partitions = 50
    offsets.topic.replication.factor = 1
    offsets.topic.segment.bytes = 104857600
    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
    password.encoder.iterations = 4096
    password.encoder.key.length = 128
    password.encoder.keyfactory.algorithm = null
    password.encoder.old.secret = null
    password.encoder.secret = null
    port = 9092
    principal.builder.class = null
    producer.purgatory.purge.interval.requests = 1000
    queued.max.request.bytes = -1
    queued.max.requests = 500
    quota.consumer.default = 9223372036854775807
    quota.producer.default = 9223372036854775807
    quota.window.num = 11
    quota.window.size.seconds = 1
    replica.fetch.backoff.ms = 1000
    replica.fetch.max.bytes = 1048576
    replica.fetch.min.bytes = 1
    replica.fetch.response.max.bytes = 10485760
    replica.fetch.wait.max.ms = 500
    replica.high.watermark.checkpoint.interval.ms = 5000
    replica.lag.time.max.ms = 10000
    replica.socket.receive.buffer.bytes = 65536
    replica.socket.timeout.ms = 30000
    replication.quota.window.num = 11
    replication.quota.window.size.seconds = 1
    request.timeout.ms = 30000
    reserved.broker.max.id = 1000
    sasl.client.callback.handler.class = null
    sasl.enabled.mechanisms = [GSSAPI]
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.principal.to.local.rules = [DEFAULT]
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.mechanism.inter.broker.protocol = GSSAPI
    sasl.server.callback.handler.class = null
    security.inter.broker.protocol = PLAINTEXT
    socket.receive.buffer.bytes = 102400
    socket.request.max.bytes = 104857600
    socket.send.buffer.bytes = 102400
    ssl.cipher.suites = []
    ssl.client.auth = none
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    ssl.endpoint.identification.algorithm = https
    ssl.key.password = [hidden]
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.location = /tmp/ssl/1/server.keystore.jks
    ssl.keystore.password = [hidden]
    ssl.keystore.type = JKS
    ssl.principal.mapping.rules = [DEFAULT]
    ssl.protocol = TLS
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.location = /tmp/ssl/1/server.truststore.jks
    ssl.truststore.password = [hidden]
    ssl.truststore.type = JKS
    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
    transaction.max.timeout.ms = 900000
    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
    transaction.state.log.load.buffer.size = 5242880
    transaction.state.log.min.isr = 1
    transaction.state.log.num.partitions = 50
    transaction.state.log.replication.factor = 1
    transaction.state.log.segment.bytes = 104857600
    transactional.id.expiration.ms = 604800000
    unclean.leader.election.enable = false
    zookeeper.connect = zookeeper:2181
    zookeeper.connection.timeout.ms = 6000
    zookeeper.max.in.flight.requests = 10
    zookeeper.session.timeout.ms = 6000
    zookeeper.set.acl = false
    zookeeper.sync.time.ms = 2000
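
Given listeners = PLAINTEXT://:9092,SSL://:9093 above, a quick check of whether the SSL listener presents a certificate at all (a diagnostic sketch; run it inside the container, or publish port 9093 first):

# Should print the broker's certificate chain on success:
openssl s_client -connect localhost:9093 </dev/null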

Server log:

[2019-04-29 13:12:33,935] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2019-04-29 13:12:33,975] INFO [SocketServer brokerId=1001] Created data-plane acceptor and processors for endpoint : EndPoint(null,9092,ListenerName(PLAINTEXT),PLAINTEXT) (kafka.network.SocketServer)
[2019-04-29 13:12:33,975] INFO Awaiting socket connections on 0.0.0.0:9093. (kafka.network.Acceptor)
[2019-04-29 13:12:34,117] INFO [SocketServer brokerId=1001] Created data-plane acceptor and processors for endpoint : EndPoint(null,9093,ListenerName(SSL),SSL) (kafka.network.SocketServer)
[2019-04-29 13:12:34,122] INFO [SocketServer brokerId=1001] Started 2 acceptor threads for data-plane (kafka.network.SocketServer)
[2019-04-29 13:12:34,160] INFO [ExpirationReaper-1001-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-04-29 13:12:34,162] INFO [ExpirationReaper-1001-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-04-29 13:12:34,163] INFO [ExpirationReaper-1001-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-04-29 13:12:34,164] INFO [ExpirationReaper-1001-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-04-29 13:12:34,180] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2019-04-29 13:12:34,264] INFO Creating /brokers/ids/1001 (is it secure? false) (kafka.zk.KafkaZkClient)

/opt/kafka/client-ssl.properties:

security.protocol=SSL

ssl.truststore.location=/tmp/ssl/1/kafka.client.truststore.jks
ssl.truststore.password=test1234

I run the following command:

/opt/kafka/bin/kafka-console-producer.sh --broker-list localhost:9093 --topic sample-sink-data --producer.config /opt/kafka/client-ssl.properties
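
For completeness, the matching consumer invocation would look like this (a sketch reusing the same client config):

/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9093 --topic sample-sink-data --consumer.config /opt/kafka/client-ssl.properties --from-beginning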

and see this in the Kafka server log:

[2019-04-29 13:28:13,654] INFO [SocketServer brokerId=1001] Failed authentication with /127.0.0.1 (SSL handshake failed) (org.apache.kafka.common.network.Selector)
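
To get more detail than "SSL handshake failed", JSSE debugging can be enabled for the console producer via KAFKA_OPTS, which the Kafka CLI scripts pass through to the JVM (a diagnostic sketch):

# Prints each step of the TLS handshake, including the certificates exchanged:
export KAFKA_OPTS="-Djavax.net.debug=ssl,handshake"
/opt/kafka/bin/kafka-console-producer.sh --broker-list localhost:9093 --topic sample-sink-data --producer.config /opt/kafka/client-ssl.properties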

What am I missing?

0 Answers