From 349f525426b4e92a483d23ef8c68724ffe81d07a Mon Sep 17 00:00:00 2001
From: Brian Christner
Date: Thu, 14 Jul 2016 08:44:16 +0200
Subject: [PATCH] 1 - remove prom directory
 2 - updated docker-compose prometheus section as the config.js was being
     mounted as a directory and not a file in 1.12
 3 - slight format changes in docker-compose

---
 prom/Dockerfile     | 14 --------------
 prom/prometheus.yml | 30 ------------------------------
 2 files changed, 44 deletions(-)
 delete mode 100644 prom/Dockerfile
 delete mode 100644 prom/prometheus.yml

diff --git a/prom/Dockerfile b/prom/Dockerfile
deleted file mode 100644
index 549a383..0000000
--- a/prom/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM prom/prometheus:0.16.1
-
-ADD prometheus.yml /etc/prometheus/prometheus.yml
-
-RUN mkdir -p /etc/prometheus/targets.d
-
-EXPOSE 9090
-
-ENTRYPOINT [ "/bin/prometheus" ]
-CMD [ "-config.file=/etc/prometheus/prometheus.yml", \
-      "-storage.local.path=/prometheus", \
-      "-web.console.libraries=/etc/prometheus/console_libraries", \
-      "-web.console.templates=/etc/prometheus/consoles", \
-      "-web.listen-address=:9090" ]
diff --git a/prom/prometheus.yml b/prom/prometheus.yml
deleted file mode 100644
index f46071b..0000000
--- a/prom/prometheus.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# my global config
-global:
-  scrape_interval:     15s # By default, scrape targets every 15 seconds.
-  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
-  # scrape_timeout is set to the global default (10s).
-
-  # Attach these labels to any time series or alerts when communicating with
-  # external systems (federation, remote storage, Alertmanager).
-  external_labels:
-    monitor: 'my-project'
-
-# Load and evaluate rules in this file every 'evaluation_interval' seconds.
-rule_files:
-  # - "first.rules"
-  # - "second.rules"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # Override the global default and scrape targets from this job every 5 seconds.
-    scrape_interval: 5s
-    scrape_timeout: 10s
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    target_groups:
-      - targets: ['localhost:9090','cadvisor:8080','node-exporter:9100']
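
The docker-compose update itself is not shown in this patch. As a rough sketch only, the Prometheus service section it describes might look like the following, assuming the official prom/prometheus image replaces the deleted prom/ build and the config is bind-mounted as a single file (the service name, host path, and port mapping are illustrative; the image tag and command flags are taken from the removed Dockerfile):

    prometheus:
      image: prom/prometheus:0.16.1
      command:
        - '-config.file=/etc/prometheus/prometheus.yml'
        - '-storage.local.path=/prometheus'
      volumes:
        # Mount the file itself (not its parent directory) so Docker 1.12
        # does not turn the mount point into an empty directory.
        - ./prometheus.yml:/etc/prometheus/prometheus.yml
      ports:
        - '9090:9090'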