Reputation: 47
I'm using Grafana
and Prometheus
in a Docker swarm
environment. I've set up docker-compose.yml
and prometheus.yml
to scrape metrics from node-exporter and microservices.
My question is: how do I scrape metrics from a microservice when it is running as multiple containers (replicas)?
When I run grafana, I only see the output from one container, not from all of them.
docker-compose.yml
version: '3.8'

networks:
  monitoring:

volumes:
  #prometheus_data: {}
  # Prometheus TSDB stored on an NFS share so data survives the task
  # being rescheduled to another swarm node.
  prometheus_data:
    driver: local
    driver_opts:
      type: nfs
      o: addr=pvevm26,rw
      device: :/srv/nexusshare-mnt/metrics/prometheus_data/
  grafana_data: {}

configs:
  metrics_prometheus_v1.conf:
    external: true
  metrics_grafana_ini_v1.conf:
    external: true

services:
  #####################################################
  # Prometheus
  #####################################################
  prometheus:
    image: prom/prometheus:latest
    configs:
      - source: metrics_prometheus_v1.conf
        target: /etc/prometheus/prometheus.yml
    volumes:
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      # NFS does not support the TSDB lock file reliably.
      - '--storage.tsdb.no-lockfile'
    ports:
      # Quote port mappings so YAML never misparses them as numbers.
      - "9090:9090"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          - node.role == worker
      resources:
        limits:
          cpus: '2'
          memory: 1G
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  #####################################################
  # Node-exporter
  # For each node a separate service need to be added.
  # NOTE(review): a single service with `deploy: mode: global` would
  # place one exporter on every node automatically — consider it.
  #####################################################
  docker-s1-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      # $$ escapes a literal $ in Compose variable interpolation.
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9101:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s1
          - node.platform.os == linux
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s2-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9102:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s2
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s3-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9103:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s3
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s4-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9104:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s4
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s5-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9105:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s5
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s6-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9106:9100"
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s6
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  #####################################################
  # The Grafana
  #####################################################
  grafana:
    image: grafana/grafana:latest
    # image: grafana/grafana:8.2.6
    # NOTE(review): depends_on is ignored when deploying with
    # `docker stack deploy` — harmless here, but do not rely on it.
    depends_on:
      - prometheus
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - "3000:3000"
    configs:
      - source: metrics_grafana_ini_v1.conf
        target: /etc/grafana/grafana.ini
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      resources:
        limits:
          cpus: '2'
          memory: 1G
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
prometheus.yml
# Scrape jobs belong under `scrape_configs`, not `global` — putting a
# list under `global:` is what causes the config-load error.
scrape_configs:
  - job_name: 'my-service-job'
    scrape_interval: 15s
    # DNS service discovery: in swarm mode, `tasks.<service>` resolves
    # to the IPs of ALL running task containers, so every replica is
    # scraped individually instead of one via the load-balanced VIP.
    dns_sd_configs:
      - names: ['tasks.my-service-name']
        # `type` and `port` are fields of the dns_sd_configs entry.
        type: 'A'
        port: 80
UPDATE:
I found out that I should use dns_sd_config inside prometheus.yml
like here:
But I get an error.
Upvotes: 2
Views: 1216
Reputation: 586
What you will probably want is an instance of Prometheus within the swarm to scrape the individual service tasks (rather than the load-balanced endpoints) and then federate that data out to the main Prometheus setup.
This blog post has a pretty good explanation of what to do.
Upvotes: 2