---
# my global config
global:
  scrape_interval: 15s  # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# Scrape configuration: a single job that discovers targets dynamically
# from Consul (no static Prometheus self-scrape is configured here).
scrape_configs:
  - job_name: 'consul-services'
    consul_sd_configs:
      # Consul address (templated by the deployment tooling).
      # NOTE(review): the Prometheus consul_sd_config `server` field is
      # documented as <host>:<port>; the scheme normally goes in the
      # separate `scheme` field (default: http). The http:// prefix here
      # may be tolerated by the Consul client — confirm against the
      # rendered config and the Prometheus version in use.
      - server: 'http://{{consul_server}}:8500'
        services: []  # empty = discover all services
        tags: []      # empty = do not filter by tag at discovery time
    relabel_configs:
      # (1) Keep only services whose Consul tags contain "exporter";
      #     everything else discovered from Consul is dropped.
      - source_labels: [__meta_consul_tags]
        regex: '.*exporter.*'
        action: keep
      # (2) Rename discovery metadata labels for readability
      #     (action defaults to `replace`).
      - source_labels: [__meta_consul_service]
        target_label: job
      - source_labels: [__meta_consul_node]
        target_label: instance
      - source_labels: [__meta_consul_tags]
        target_label: tags