I have the following docker-compose file:
version: '2'

services:
  # Define a Telegraf service
  telegraf:
    build: Services/Telegraf
    image: jit-systems/telegraf
    environment:
      HOST_PROC: /rootfs/proc
      HOST_SYS: /rootfs/sys
      HOST_ETC: /rootfs/etc
    volumes:
      #- ./etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /sys:/rootfs/sys:ro
      - /proc:/rootfs/proc:ro
      - /etc:/rootfs/etc:ro
      - /var/log/telegraf:/var/log/telegraf
    links:
      - influxdb
    logging:
      driver: json-file
      options:
        max-size: "100m"
        max-file: "3"
    networks:
      - influx
      - default
    depends_on:
      - influxdb
    restart: always

  # Define an InfluxDB service
  influxdb:
    image: influxdb:1.2.0
    volumes:
      #- ./data/influxdb:/var/lib/influxdb
      - influxdb:/var/lib/influxdb
    networks:
      - influx
      - default
    # this port should not be exposed
    ports:
      - "8086:8086"
    logging:
      driver: json-file
      options:
        max-size: "100m"
        max-file: "3"
    restart: always

  # Define a Kapacitor service
  kapacitor:
    image: kapacitor:1.2.0
    environment:
      KAPACITOR_HOSTNAME: kapacitor
      KAPACITOR_INFLUXDB_0_URLS_0: http://influxdb:8086
    volumes:
      - influxdb:/home/docker_containers/kapacitor/volume
      - influxdb:/var/lib/kapacitor
      - /var/log/kapacitor:/var/log/kapacitor
    links:
      - influxdb
    logging:
      driver: json-file
      options:
        max-size: "100m"
        max-file: "3"
    networks:
      - influx
      - default
    depends_on:
      - influxdb
    restart: always

  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    volumes:
      - grafana:/var/lib/grafana
    env_file:
      - config.monitoring
    links:
      - influxdb
    logging:
      driver: json-file
      options:
        max-size: "100m"
        max-file: "3"
    restart: always

volumes:
  influxdb:
  portainer:
  grafana:

networks:
  influx:
All containers build successfully, Telegraf inserts data into InfluxDB, and no errors are thrown. However, this only happens while port 8086 is published. If I remove the port mapping, no data is inserted, yet the database is still visible from Grafana's datasource panel, and saving the connection reports success. Is there a way to get data from the InfluxDB container without exposing port 8086 publicly?
Answer 0 (score: 1)
I'm not sure whether this is available in docker-compose version 2, but:
You can attach all containers to a shared network so that they can reach each other's ports without publishing those ports to the public.
One service then reaches another via its service name and port. Here is an example:
version: "3.1"
## To ensure optimal performance and data persistence elk stack will only run on a node with a label added in the following way: docker node update --label-add app_role=elasticsearch nodeID
networks:
logging:
volumes:
logging_data:
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.3.1
logging:
driver: "json-file"
networks:
- logging
volumes:
- logging_data:/usr/share/elasticsearch/data
environment:
xpack.security.enabled: "false"
deploy:
placement:
constraints: [node.labels.app_role == elasticsearch]
logstash:
image: docker.elastic.co/logstash/logstash:5.3.1
logging:
driver: "json-file"
networks:
- logging
ports:
- "127.0.0.1:12201:12201/udp"
entrypoint: logstash -e 'input { gelf { } }
output { stdout{ } elasticsearch { hosts => ["http://elasticsearch:9200"] } }'
# Add to date{} add_field => { "ElkDebug" => "timestamp matched and was overwritten"} when in doubt about time filter
The logstash output uses the elasticsearch address: the service name resolves inside the shared network, so no published port is needed.
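Applied to the compose file in the question, the same idea would look roughly like the sketch below. This is a minimal, stripped-down illustration, not a drop-in replacement: the service names come from the question's file, the `[[outputs.influxdb]]` `urls` key mentioned in the comments is Telegraf's standard output setting, and the Grafana access-mode note is an assumption worth verifying against your Grafana version. The `ports:` mapping on influxdb is simply dropped; the other services keep reaching it as http://influxdb:8086 over the shared network:

version: '2'

services:
  telegraf:
    build: Services/Telegraf
    image: jit-systems/telegraf
    # In telegraf.conf, point the output at the service name rather than
    # localhost, e.g.:  [[outputs.influxdb]]  urls = ["http://influxdb:8086"]
    networks:
      - influx

  influxdb:
    image: influxdb:1.2.0
    # No ports: section — 8086 stays reachable on the "influx" network
    # (as influxdb:8086) but is not published on the host.
    networks:
      - influx

  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000   # only Grafana's UI is published to the host
    networks:
      - influx
    # In the datasource panel, use http://influxdb:8086 and the
    # "proxy"/"Server" access mode, so Grafana's backend (inside the
    # network) makes the request; "direct"/"Browser" access would
    # require the published port again.

networks:
  influx:

If data stops flowing as soon as the port mapping is removed, that usually means some component is still addressing InfluxDB via the host (localhost:8086) instead of via the service name on the shared network.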