update usage

RvDijk 2017-12-18 14:40:21 +01:00
parent 2d0685c0d7
commit a8faca765d
24 changed files with 539 additions and 0 deletions

grafana_s/run (new executable file, 2 lines)
@@ -0,0 +1,2 @@
singularity shell docker://grafana/grafana
# grafana-server -homepath /usr/share/grafana/
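The shell line opens the container interactively; the commented grafana-server line is what to run inside it. Anything Grafana writes inside the container is lost on exit, so a host directory can be bound over the data path. A minimal sketch, assuming a hypothetical ~/grafana_data directory and Grafana's default data path /var/lib/grafana:

mkdir -p ~/grafana_data
singularity exec \
    --bind ~/grafana_data:/var/lib/grafana \
    docker://grafana/grafana \
    grafana-server -homepath /usr/share/grafana/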

2 binary files changed (contents not shown).

influx_s/influxdb.conf (new file, 135 lines)
@@ -0,0 +1,135 @@
reporting-disabled = false
bind-address = "127.0.0.1:8088"
[meta]
dir = "/var/lib/influxdb/meta"
retention-autocreate = true
default-retention-policy-name = "default"
logging-enabled = true
[data]
dir = "/var/lib/influxdb/data"
index-version = "inmem"
wal-dir = "/var/lib/influxdb/wal"
wal-fsync-delay = "0s"
query-log-enabled = true
cache-max-memory-size = 1073741824
cache-snapshot-memory-size = 26214400
cache-snapshot-write-cold-duration = "10m0s"
compact-full-write-cold-duration = "4h0m0s"
max-series-per-database = 1000000
max-values-per-tag = 100000
max-concurrent-compactions = 0
trace-logging-enabled = false
[coordinator]
write-timeout = "10s"
max-concurrent-queries = 0
query-timeout = "0s"
log-queries-after = "0s"
max-select-point = 0
max-select-series = 0
max-select-buckets = 0
[retention]
enabled = true
check-interval = "30m0s"
[shard-precreation]
enabled = true
check-interval = "10m0s"
advance-period = "30m0s"
[monitor]
store-enabled = true
store-database = "_internal"
store-interval = "10s"
[subscriber]
enabled = true
http-timeout = "30s"
insecure-skip-verify = false
ca-certs = ""
write-concurrency = 40
write-buffer-size = 1000
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = true
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
https-private-key = ""
max-row-limit = 0
max-connection-limit = 0
shared-secret = ""
realm = "InfluxDB"
unix-socket-enabled = false
bind-socket = "/var/run/influxdb.sock"
max-body-size = 25000000
[ifql]
enabled = false
log-enabled = true
bind-address = ":8082"
[[graphite]]
enabled = false
bind-address = ":2003"
database = "graphite"
retention-policy = ""
protocol = "tcp"
batch-size = 5000
batch-pending = 10
batch-timeout = "1s"
consistency-level = "one"
separator = "."
udp-read-buffer = 0
[[collectd]]
enabled = false
bind-address = ":25826"
database = "collectd"
retention-policy = ""
batch-size = 5000
batch-pending = 10
batch-timeout = "10s"
read-buffer = 0
typesdb = "/usr/share/collectd/types.db"
security-level = "none"
auth-file = "/etc/collectd/auth_file"
parse-multivalue-plugin = "split"
[[opentsdb]]
enabled = false
bind-address = ":4242"
database = "opentsdb"
retention-policy = ""
consistency-level = "one"
tls-enabled = false
certificate = "/etc/ssl/influxdb.pem"
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"
log-point-errors = true
[[udp]]
enabled = false
bind-address = ":8089"
database = "udp"
retention-policy = ""
batch-size = 5000
batch-pending = 10
read-buffer = 0
batch-timeout = "1s"
precision = ""
[continuous_queries]
log-enabled = true
enabled = true
query-stats-enabled = false
run-interval = "1s"
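With the [http] block above, the API listens on :8086 and auth-enabled is false, so a fresh server can be smoke-tested from the host. A minimal sketch, assuming influxd is reachable on localhost and using the slurm database from the run script below:

# Create the database (idempotent), then write one point in line protocol.
curl -i -XPOST 'http://localhost:8086/query' --data-urlencode 'q=CREATE DATABASE slurm'
curl -i -XPOST 'http://localhost:8086/write?db=slurm' --data-binary 'cpu,host=node01 value=0.64'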

influx_s/run (new executable file, 13 lines)
@@ -0,0 +1,13 @@
singularity shell --bind ~/influx_db:/var/lib/ --bind ~/cpu-profiling/influx_s/influxdb.conf:/etc/influxdb/influxdb.conf docker://influxdb
# Create the database:
#curl -XPOST "http://pg-node045:8086/query?pretty=true" --data-urlencode "q=CREATE DATABASE slurm WITH NAME \"default\""
# Query test:
#curl -G 'http://pg-node045:8086/query?pretty=true' --data-urlencode "db=slurm" --data-urlencode "q=SELECT * FROM cpu"
# Inside the container, start the daemon:
# influxd -config /etc/influxdb/influxdb.conf
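The shell line above is interactive; to start the daemon in a single step instead, singularity exec can run influxd directly. A sketch under the same bind-path assumptions as the script:

singularity exec \
    --bind ~/influx_db:/var/lib/ \
    --bind ~/cpu-profiling/influx_s/influxdb.conf:/etc/influxdb/influxdb.conf \
    docker://influxdb \
    influxd -config /etc/influxdb/influxdb.conf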

test_batch/batch.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash
#SBATCH --time=00:10:00
#SBATCH --nodes=1
#SBATCH --ntasks=10
#SBATCH --job-name=python_cpu
#SBATCH --reservation=ood
#SBATCH --mem=20000
module load Python/3.5.1-foss-2016a
python python_cpu.py
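A sketch of submitting and checking the job, assuming the ood reservation exists on the cluster:

sbatch test_batch/batch.sh   # prints "Submitted batch job <jobid>"
squeue -u $USER              # watch the job state
cat slurm-<jobid>.out        # output lands here by default once the job finishes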

test_batch/python_cpu.py (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/usr/bin/env python

import multiprocessing
import os    # For reading the number of CPUs requested.
import time  # For timing the calculation.

def double(data):
    return data ** 2  # Note: this squares its input, despite the name.

if __name__ == '__main__':
    begin = time.time()
    inputs = list(range(10000))  # Inputs 0..9999.
    poolSize = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])  # Number of CPUs requested.
    pool = multiprocessing.Pool(processes=poolSize)
    poolResults = pool.map(double, inputs)  # Do the calculation in parallel.
    pool.close()  # Stop accepting new work.
    pool.join()   # Wait for the workers to finish.
    print('Pool output:', poolResults)
    elapsedTime = time.time() - begin
    print('Time elapsed for', poolSize, 'workers:', elapsedTime, 'seconds')
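Because poolSize follows SLURM_JOB_CPUS_PER_NODE, scaling can be probed by overriding the task count at submission time (sbatch command-line flags take precedence over #SBATCH directives). A sketch:

for n in 1 2 4 8; do
    sbatch --ntasks=$n test_batch/batch.sh
done
grep 'Time elapsed' slurm-*.out   # compare runtimes once the jobs finish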

9 file diffs suppressed because one or more lines are too long.