1 Create the working directory
$ mkdir /data/prometheus && cd /data/prometheus
$ mkdir conf rules
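The resulting layout, with conf holding the Prometheus configuration (written in step 2) and rules holding rule files (both directories are mounted into the container in step 3):

/data/prometheus
├── conf
│   └── prometheus.yml   # created in step 2
└── rules                # rule files, mounted into the container in step 3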
2 Create the prometheus.yml configuration file
$ vim conf/prometheus.yml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    scrape_interval: 60s
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets:
          - 'localhost:29090'
        labels:
          hostname: Prometheus-Server
3 Create the Docker Compose file for Prometheus
$ vim docker-compose.yml
version: '3'
services:
  prometheus:
    image: bitnami/prometheus:2.41.0
    restart: always
    container_name: prometheus
    environment:
      TZ: Asia/Shanghai
    ports:
      - "29090:29090"
    volumes:
      - ./conf/prometheus.yml:/usr/local/prometheus/prometheus.yml
      - ./rules:/usr/local/prometheus/rules
    command:
      - /usr/local/prometheus/prometheus
      - --config.file=/usr/local/prometheus/prometheus.yml
      - --web.listen-address=:29090
      - --web.max-connections=1024
      - --query.max-concurrency=100
      - --storage.tsdb.path=/usr/local/prometheus/data
      - --storage.tsdb.retention.time=24h
      - --web.enable-lifecycle
      - --storage.tsdb.no-lockfile
    networks:
      - prometheus
networks:
  prometheus:
    driver: bridge
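Before starting anything, both files can be checked for syntax errors: docker-compose validates the compose file itself, and promtool (shipped with the Prometheus distribution, if it is installed on the host) validates prometheus.yml:

$ docker-compose config -q
$ promtool check config conf/prometheus.yml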
4 Create and start the Prometheus container
$ docker-compose up -d
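Once the container is up, the deployment can be verified from the host. /-/healthy and /-/reload are standard Prometheus HTTP endpoints; the reload endpoint works here because --web.enable-lifecycle is set in the command above:

$ docker-compose ps
$ curl http://localhost:29090/-/healthy
$ # after editing conf/prometheus.yml, reload without restarting the container:
$ curl -X POST http://localhost:29090/-/reload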