我正在使用opendistro,并且正在使用安全性插件 我正在使用docker,我正在尝试连接到ldap,但是出现了此错误:
elasticsearch | [2019-07-29T10:25:14,609][WARN ][c.a.d.a.l.b.LDAPAuthorizationBackend] [922e43c02326] Unable to connect to ldapserver example.com:389 due to [org.ldaptive.provider.ConnectionException@1629074075::resultCode=PROTOCOL_ERROR, matchedDn=null, responseControls=null, referralURLs=null, messageId=-1, message=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out], providerException=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out]]. Try next.
elasticsearch | [2019-07-29T10:25:14,615][WARN ][c.a.o.s.a.BackendRegistry] [922e43c02326] Authentication finally failed for admin from 172.23.0.2:52408
我试图更改 /etc/hosts,但没有任何作用。
这是我的docker-compose.yml:
version: '2.2'
services:
  openldap:
    image: osixia/openldap:1.2.4
    container_name: openldap
    environment:
      LDAP_LOG_LEVEL: "256"
      LDAP_ORGANISATION: "Example Inc"
      LDAP_DOMAIN: "example.com"
      LDAP_BASE_DN: "dc=example,dc=com"
      LDAP_ADMIN_PASSWORD: "XXXX"
      LDAP_CONFIG_PASSWORD: "config"
      LDAP_READONLY_USER: "false"
      LDAP_READONLY_USER_USERNAME: "readonly"
      LDAP_READONLY_USER_PASSWORD: "readonly"
      LDAP_RFC2307BIS_SCHEMA: "false"
      LDAP_BACKEND: "mdb"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_PROTOCOL_MIN: "3.1"
      LDAP_TLS_VERIFY_CLIENT: "demand"
      LDAP_REPLICATION: "false"
      KEEP_EXISTING_CONFIG: "false"
      LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
      LDAP_SSL_HELPER_PREFIX: "ldap"
    tty: true
    stdin_open: true
    volumes:
      - /var/lib/ldap
      - /etc/ldap/slapd.d
      - /container/service/slapd/assets/certs/
    ports:
      - "389:389"
      - "636:636"
    domainname: "example.com" # important: same as hostname
    hostname: "example.com"
    networks:
      - esnet
  phpldapadmin:
    image: osixia/phpldapadmin:latest
    container_name: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap"
      PHPLDAPADMIN_HTTPS: "false"
    ports:
      - "8080:80"
    depends_on:
      - openldap
    networks:
      - esnet
  elasticsearch:
    image: amazon/opendistro-for-elasticsearch:1.0.2
    container_name: elasticsearch
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
      - "ES_JAVA_OPTS=-Xms2048m -Xmx2048m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
      - discovery.type=single-node
      - OPENSSL_PATH=/usr/local/ssl/bin
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata1:/usr/share/elasticsearch/data
      #- ./cert/truststore.jks:/usr/share/elasticsearch/config/truststore.jks
      # NOTE(review): this mapping previously targeted config.yml — fixed so it can be re-enabled safely
      # - ./elasticsearch/config/action_groups.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/action_groups.yml:ro
      # - ./elasticsearch/config/internal_users.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/internal_users.yml:ro
      # fixed: target previously ended in ".yml.yml", so the security plugin never loaded this file
      - ./elasticsearch/config/roles_mapping.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/roles_mapping.yml:ro
      # - ./elasticsearch/config/roles.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/roles.yml:ro
      - ./elasticsearch/config/config.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/config.yml:rw
      - ./elasticsearch/security.sh:/usr/share/elasticsearch/security.sh
      # ./elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:
      - "9300:9300"
      - "9200:9200"
      - "9600:9600"
    networks:
      esnet:
        aliases:
          - node-0.example.com
    healthcheck:
      test: if [[ $$(curl -o /dev/null --silent --head --write-out '%{http_code}\n' https://elasticsearch:9200 -u admin:admin --insecure ) == 200 ]]; then exit 0; else exit 1; fi
      interval: 30s
      timeout: 10s
      retries: 100
    user: root

# top-level declarations for the named volume and network referenced above
# (compose fails with "undefined volume/network" without them)
volumes:
  esdata1:
networks:
  esnet:
这是config.yml
# Open Distro security plugin main configuration (securityconfig/config.yml)
_meta:
  type: "config"
  config_version: 2

config:
  dynamic:
    # Set filtered_alias_mode to 'disallow' to forbid more than 2 filtered aliases per index
    # Set filtered_alias_mode to 'warn' to allow more than 2 filtered aliases per index but warns about it (default)
    # Set filtered_alias_mode to 'nowarn' to allow more than 2 filtered aliases per index silently
    http:
      anonymous_auth_enabled: false
      xff:
        enabled: false
        # fixed: this line was a bare comment fragment that made the file invalid YAML
        # internalProxies regex, see Tomcat doc/config/valve.html#Remote_IP_Valve
    authc:
      ldap:
        description: "Authenticate via LDAP or Active Directory"
        http_enabled: true
        transport_enabled: true
        order: 1
        http_authenticator:
          type: basic
          challenge: false
        authentication_backend:
          type: ldap
          config:
            # enable ldaps
            enable_ssl: false
            pemtrustedcas_filepath: /usr/share/elasticsearch/config/root-ca.pem
            # enable start tls, enable_ssl should be false
            enable_start_tls: false
            # send client certificate
            enable_ssl_client_auth: false
            # verify ldap hostname
            verify_hostnames: true
            hosts:
              - "openldap"
            bind_dn: "cn=admin,dc=example,dc=com"
            password: "XXXX"
            userbase: 'ou=people,dc=example,dc=com'
            username_attribute: null
    authz:
      roles_from_myldap:
        description: "Authorize via LDAP or Active Directory"
        http_enabled: false
        transport_enabled: false
        authorization_backend:
          type: ldap
          config:
            enable_ssl: false
            pemtrustedcas_filepath: /usr/share/elasticsearch/config/root-ca.pem
            enable_start_tls: false
            enable_ssl_client_auth: false
            verify_hostnames: true
            hosts:
              - openldap
            bind_dn: null
            password: null
            rolebase: 'ou=groups,dc=example,dc=com'
            rolesearch: '(member={0})'
            userroleattribute: null
            # fixed: the stray line `Default is "name".` was a comment fragment, not valid YAML
            userrolename: disabled # Default is "name".
            resolve_nested_roles: true
            userbase: 'ou=people,dc=example,dc=com'
            usersearch: '(uid={0})'
      roles_from_another_ldap:
        description: "Authorize via another Active Directory"
        http_enabled: false
        transport_enabled: false
        authorization_backend:
          type: ldap
能帮我解决这个问题吗 谢谢, 最好的问候
这是在更改BASEDN后出现的错误
elasticsearch | [2019-07-29T11:58:47,908][WARN ][c.a.d.a.l.b.LDAPAuthorizationBackend] [2219e2665aec] Unable to connect to ldapserver example.com due to [org.ldaptive.provider.ConnectionException@303672734::resultCode=PROTOCOL_ERROR, matchedDn=null, responseControls=null, referralURLs=null, messageId=-1, message=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out], providerException=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out]]. Try next.
elasticsearch | [2019-07-29T11:58:47,911][WARN ][c.a.o.s.a.BackendRegistry] [2219e2665aec] Authentication finally failed for admin from 192.168.32.2:51676
elasticsearch | [2019-07-29T11:59:23,799][WARN ][c.a.d.a.l.b.LDAPAuthorizationBackend] [2219e2665aec] Unable to connect to ldapserver example.com due to [org.ldaptive.provider.ConnectionException@846396247::resultCode=PROTOCOL_ERROR, matchedDn=null, responseControls=null, referralURLs=null, messageId=-1, message=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out], providerException=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out]]. Try next.
elasticsearch | [2019-07-29T11:59:23,800][WARN ][c.a.o.s.a.BackendRegistry] [2219e2665aec] Authentication finally failed for admin from 192.168.32.2:51680
elasticsearch | [2019-07-29T11:59:39,329][ERROR][c.a.o.s.s.h.n.OpenDistroSecuritySSLNettyHttpServerTransport] [2219e2665aec] SSL Problem Received fatal alert: certificate_unknown
elasticsearch | javax.net.ssl.SSLHandshakeException: Received fatal alert: certificate_unknown
elasticsearch | at sun.security.ssl.Alert.createSSLException(Alert.java:128) ~[?:?]
elasticsearch | at sun.security.ssl.Alert.createSSLException(Alert.java:117) ~[?:?]
elasticsearch | at sun.security.ssl.TransportContext.fatal(TransportContext.java:308) ~[?:?]
elasticsearch | at sun.security.ssl.Alert$AlertConsumer.consume(Alert.java:279) ~[?:?]
elasticsearch | at sun.security.ssl.TransportContext.dispatch(TransportContext.java:181) ~[?:?]
elasticsearch | at sun.security.ssl.SSLTransport.decode(SSLTransport.java:164) ~[?:?]
elasticsearch | at sun.security.ssl.SSLEngineImpl.decode(SSLEngineImpl.java:672) ~[?:?]
elasticsearch | at sun.security.ssl.SSLEngineImpl.readRecord(SSLEngineImpl.java:627) ~[?:?]
elasticsearch | at sun.security.ssl.SSLEngineImpl.unwrap(SSLEngineImpl.java:443) ~[?:?]
elasticsearch | at sun.security.ssl.SSLEngineImpl.unwrap(SSLEngineImpl.java:422) ~[?:?]
elasticsearch | at javax.net.ssl.SSLEngine.unwrap(SSLEngine.java:634) ~[?:?]
elasticsearch | at io.netty.handler.ssl.SslHandler$SslEngineType$3.unwrap(SslHandler.java:295) ~[netty-handler-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1301) ~[netty-handler-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1203) ~[netty-handler-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1247) ~[netty-handler-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:502) ~[netty-codec-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:441) ~[netty-codec-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:278) ~[netty-codec-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:656) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:556) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:510) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:470) [netty-transport-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:909) [netty-common-4.1.32.Final.jar:4.1.32.Final]
elasticsearch | at java.lang.Thread.run(Thread.java:834) [?:?]
elasticsearch | [2019-07-29T11:59:59,696][WARN ][c.a.d.a.l.b.LDAPAuthorizationBackend] [2219e2665aec] Unable to connect to ldapserver example.com due to [org.ldaptive.provider.ConnectionException@142684755::resultCode=PROTOCOL_ERROR, matchedDn=null, responseControls=null, referralURLs=null, messageId=-1, message=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out], providerException=javax.naming.CommunicationException: example.com:389 [Root exception is java.net.SocketTimeoutException: connect timed out]]. Try next.
elasticsearch | [2019-07-29T11:59:59,705][WARN ][c.a.o.s.a.BackendRegistry] [2219e2665aec] Authentication finally failed for admin from 192.168.32.2:51702
更新
通过将 config.yml 中 LDAP 的主机名改为 example.com:389,并把 docker 容器重命名为 example.com,我修复了无法连接到 openldap 的问题。
我在这里放置了新的docker-compose:
version: '2.2'
services:
  example.com:
    image: osixia/openldap:1.2.4
    container_name: example.com
    environment:
      #LDAP_ORGANISATION: # Organisation name. Defaults to Example Inc.
      LDAP_DOMAIN: example.com # Ldap domain. Defaults to example.org
      LDAP_BASE_DN: "" # Ldap base DN. If empty automatically set from LDAP_DOMAIN value. Defaults to (empty)
      #LDAP_ADMIN_PASSWORD Ldap Admin password. Defaults to admin
    ports:
      - "389:389"
      - "636:636"
    networks:
      - esnet
  phpldapadmin:
    image: osixia/phpldapadmin:latest
    container_name: phpldapadmin
    environment:
      # fixed: was "example.org", which does not match the LDAP service name "example.com"
      PHPLDAPADMIN_LDAP_HOSTS: "example.com"
      PHPLDAPADMIN_HTTPS: "false"
    ports:
      - "8080:80"
    depends_on:
      - example.com
    networks:
      - esnet
  elasticsearch:
    image: amazon/opendistro-for-elasticsearch:1.0.2
    container_name: elasticsearch
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata1:/usr/share/elasticsearch/data
      - ./elasticsearch/config.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/config.yml
    ports:
      # quoted to avoid YAML 1.1 sexagesimal integer parsing of digit:digit values
      - "9200:9200"
      - "9600:9600"
    networks:
      esnet:
        aliases:
          - node-0.example.com
  kibana:
    image: amazon/opendistro-for-elasticsearch-kibana:1.0.2
    container_name: kibana
    volumes:
      - ./kibana/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
    ports:
      - "5601:5601"
    expose:
      - "5601"
    environment:
      ELASTICSEARCH_HOSTS: https://elasticsearch:9200
    networks:
      - esnet

# top-level declarations for the named volume and network referenced above
volumes:
  esdata1:
networks:
  esnet:
新的config.yml(密码仅用于测试)
# Open Distro security plugin main configuration (securityconfig/config.yml)
_meta:
  type: "config"
  config_version: 2

config:
  dynamic:
    http:
      anonymous_auth_enabled: false
      xff:
        enabled: false
        internalProxies: '192\.168\.0\.10|192\.168\.0\.11' # regex pattern
    authc:
      basic_internal_auth_domain:
        description: "Authenticate via HTTP Basic against internal users database"
        http_enabled: false
        transport_enabled: false
        order: 4
        http_authenticator:
          type: basic
          challenge: false
        authentication_backend:
          type: intern
      ldap:
        description: "Authenticate via LDAP or Active Directory"
        http_enabled: true
        transport_enabled: true
        order: 5
        http_authenticator:
          type: basic
          challenge: true
        authentication_backend:
          # LDAP authentication backend (authenticate users against a LDAP or Active Directory)
          type: ldap
          config:
            # enable ldaps
            enable_ssl: false
            # enable start tls, enable_ssl should be false
            enable_start_tls: false
            # send client certificate
            enable_ssl_client_auth: false
            # verify ldap hostname
            verify_hostnames: true
            hosts:
              - example.com:389
            bind_dn: "cn=admin,dc=example,dc=com"
            password: "admin"
            userbase: 'ou=people,dc=example,dc=com'
            # Filter to search for users (currently in the whole subtree beneath userbase)
            # {0} is substituted with the username
            # fixed: was '(sAMAccountName={0})' — that attribute only exists in Active
            # Directory; the osixia/openldap entries (see ldapsearch output) carry no
            # such attribute, so every user lookup failed. OpenLDAP accounts use uid.
            usersearch: '(uid={0})'
            # Use this attribute from the user as username (if not set then DN is used)
            username_attribute: null
    authz:
      roles_from_myldap:
        description: "Authorize via LDAP or Active Directory"
        http_enabled: false
        transport_enabled: false
        authorization_backend:
          # LDAP authorization backend (gather roles from a LDAP or Active Directory, you have to configure the above LDAP authentication backend settings too)
          type: ldap
          config:
            # enable ldaps
            enable_ssl: false
            # enable start tls, enable_ssl should be false
            enable_start_tls: false
            # send client certificate
            enable_ssl_client_auth: false
            # verify ldap hostname
            verify_hostnames: true
            hosts:
              - localhost:8389
            bind_dn: null
            password: null
            rolebase: 'ou=groups,dc=example,dc=com'
            # Filter to search for roles (currently in the whole subtree beneath rolebase)
            # {0} is substituted with the DN of the user
            # {1} is substituted with the username
            # {2} is substituted with an attribute value from user's directory entry, of the authenticated user. Use userroleattribute to specify the name of the attribute
            rolesearch: '(member={0})'
            # Specify the name of the attribute which value should be substituted with {2} above
            userroleattribute: null
            userrolename: disabled
            resolve_nested_roles: true
            userbase: 'ou=people,dc=example,dc=com'
      roles_from_another_ldap:
        description: "Authorize via another Active Directory"
        http_enabled: false
        transport_enabled: false
        authorization_backend:
          type: ldap
但是我收到一个新错误:
elasticsearch | [2019-07-31T12:48:42,590][WARN ][c.a.o.s.a.BackendRegistry] [28da1860f0c0] Authentication finally failed for cn=admin,dc=example,dc=com from 192.168.64.1:58682
elasticsearch | [2019-07-31T12:48:43,430][WARN ][c.a.o.s.a.BackendRegistry] [28da1860f0c0] Authentication finally failed for kibanaserver from 192.168.64.2:39506
如果我尝试查询此内容:
docker exec example.com ldapsearch -x -H ldap://localhost -b dc=example,dc=com -D "cn=admin,dc=example,dc=com" -w admin
我得到:
extended LDIF
#
# LDAPv3
# base <dc=example,dc=com> with scope subtree
# filter: (objectclass=*)
# requesting: ALL
#
# example.com
dn: dc=example,dc=com
objectClass: top
objectClass: dcObject
objectClass: organization
o: Example Inc.
dc: example
# admin, example.com
dn: cn=admin,dc=example,dc=com
objectClass: simpleSecurityObject
objectClass: organizationalRole
cn: admin
description: LDAP administrator
userPassword:: e1NTSEF9QnJDbHdTYnRRb3dHazJiQlB6MTF3R29MS3dFVmxmZUk=
# search result
search: 2
result: 0 Success
# numResponses: 3
# numEntries: 2
这是我的kibana.yml
server.name: kibana
server.host: "0"
# fixed: was https://localhost:9200 — inside the kibana container "localhost" is the
# kibana container itself; use the compose service name, matching ELASTICSEARCH_HOSTS
# in docker-compose.yml
elasticsearch.hosts: https://elasticsearch:9200
elasticsearch.ssl.verificationMode: none
elasticsearch.username: kibanaserver
elasticsearch.password: kibanaserver
elasticsearch.requestHeadersWhitelist: ["securitytenant", "Authorization"]
opendistro_security.multitenancy.enabled: true
opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
从上面的日志可以看出,除了 LDAP 用户外,kibana 还需要 kibanaserver 用户能够通过认证。
你们有什么暗示吗?我应该使用哪些凭证登录? 谢谢大家的答复!