23 Commits

Author SHA1 Message Date
525847fdf5 fix(molgenis): Recreate pods upon upgrade
The default upgrade strategy would cause multiple instances of MOLGENIS to run on the same database.
Use Recreate strategy instead.

Fixes #75
2018-09-27 11:46:11 +02:00
5760171c4b Merge branch 'use-molgenis-prod-in-helm' of p281392/molgenis-ops-docker-helm into master 2018-09-26 18:05:06 +02:00
e192f5819a Merge branch 'add-skip-build-config' of p281392/molgenis-ops-docker-helm into master 2018-09-26 18:00:56 +02:00
aaad66b40f updated pvc creation of postgres 2018-09-26 17:39:47 +02:00
b201117f9a updated pvc initialization 2018-09-26 17:27:45 +02:00
74a87892fb bumped patch version 2018-09-26 17:18:33 +02:00
6f995f45bd updated postgres instances and firewall configuration 2018-09-26 16:52:06 +02:00
35c7fd79af Merge branch 'implement-nfs-provisioning-nexus' of p281392/molgenis-ops-docker-helm into master 2018-09-26 16:34:34 +02:00
039c9993f6 fix for postgres volume mount, not available when persistence is not enabled 2018-09-26 16:30:33 +02:00
d4d9d5931d added embedded containers 2018-09-26 16:09:20 +02:00
f10b8d7ea8 updated production chart and removed preview chart 2018-09-26 16:04:22 +02:00
95dc0acabd Merge branch 'add-opencpu-chart' of p281392/molgenis-ops-docker-helm into master 2018-09-26 10:34:48 +02:00
36e2c25f94 Merge branch 'feature/molgenis-it' of P129679/molgenis-ops-docker-helm into master 2018-09-26 10:30:58 +02:00
1ed41d6c36 updated chart to enable and disable ingress on demand 2018-09-25 23:32:25 +02:00
a74507cafb chore(molgenis-jenkins): Add molgenis-it pod template 2018-09-25 13:00:34 +02:00
4c2f9bc035 Merge branch 'feat/helm-in-jenkins' of P129679/molgenis-ops-docker-helm into master 2018-09-24 14:11:44 +02:00
63a08f2264 Merge branch 'doc/helm-role' of P129679/molgenis-ops-docker-helm into master 2018-09-24 13:09:54 +02:00
95d4a1e13e chore(molgenis-jenkins): Add helm container to molgenis pod template. 2018-09-24 11:55:53 +02:00
9dedfc1690 doc: How to configure helm role 2018-09-21 15:17:14 +02:00
bed36a7dd2 added opencpu stack 2018-09-20 20:42:35 +02:00
a4c4d19fe2 renamed service again 2018-09-20 16:54:34 +02:00
d0c9c91ff3 updated nexus to connect to nfs provisioning 2018-09-20 16:50:46 +02:00
7c9a7a143b add plugin for skipping build after release 2018-08-10 08:20:46 +02:00
51 changed files with 697 additions and 1643 deletions

View File

@ -104,6 +104,7 @@ This repository also serves as a catalogue for Rancher. We have several apps
- [Jenkins](molgenis-jenkins/README.md)
- [NEXUS](molgenis-nexus/README.md)
- [HTTPD](molgenis-httpd/README.md)
- [MOLGENIS](molgenis/README.md)
- [MOLGENIS preview](molgenis-preview/README.md)
- [MOLGENIS vault](molgenis-vault/README.md)
@ -122,6 +123,26 @@ Everything you need to know to easily develop and deploy helm-charts
Run it in the root of the project, where the Chart.yaml is located.
It installs a release of a kubernetes stack. You can also store this as an artifact in a helm repository.
- ```helm package .```
Creates a package which can be uploaded to the MOLGENIS helm repository.
- ```helm publish```
You still have to create an ```index.yaml``` for the chart. You can do this by executing this command: ```helm repo index #directory name of helm chart#```
Then you can upload it by executing:
- ```curl -v --user #username#:#password# --upload-file index.yaml https://registry.molgenis.org/repository/helm/#chart name#/index.yml```
- ```curl -v --user #username#:#password# --upload-file #chart name#-#version#.tgz https://registry.molgenis.org/repository/helm/#chart name#/#chart name#-#version#.tgz```
Now you have to add the repository locally so you can use it in your ```requirements.yaml``` (see the example after this list).
- ```helm repo add #repository name# https://registry.molgenis.org/repository/helm/molgenis```
- ```helm dep build```
Builds the dependencies of the helm chart: it creates a ```charts``` directory and installs the dependency charts into it.
- ```helm list```
Lists all installed releases
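For illustration, a minimal ```requirements.yaml``` that pulls a chart from the repository added above could look like the sketch below; the dependency name and version are hypothetical, only the repository URL comes from the steps above.

```yaml
# requirements.yaml: dependencies resolved by `helm dep build`
dependencies:
  - name: molgenis          # hypothetical dependency chart name
    version: 0.1.0          # hypothetical chart version
    repository: https://registry.molgenis.org/repository/helm/molgenis
```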

View File

@ -0,0 +1,6 @@
To be able to run helm inside a jenkins pod, you'll need to
* create a role in the namespace where tiller is installed
* bind that role to the user that jenkins pods run as
This directory contains yaml for these resources.
See also https://github.com/helm/helm/blob/master/docs/rbac.md

View File

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: tiller-jenkins-binding
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tiller-user
subjects:
- kind: ServiceAccount
name: default
namespace: molgenis-jenkins

View File

@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: tiller-user
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- pods/portforward
verbs:
- create
- apiGroups:
- ""
resources:
- pods
verbs:
- list

View File

@ -13,7 +13,7 @@ jenkins:
- blueocean:1.6.2
- github-oauth:0.29
- gogs-webhook:1.0.14
- sauce-ondemand:1.176
- github-scm-trait-commit-skip:0.1.1
Security:
UseGitHub: false
GitHub:
@ -82,6 +82,7 @@ jenkins:
<strategyId>1</strategyId>
<trust class="org.jenkinsci.plugins.github_branch_source.ForkPullRequestDiscoveryTrait$TrustPermission"/>
</org.jenkinsci.plugins.github__branch__source.ForkPullRequestDiscoveryTrait>
<org.jenkinsci.plugins.scm__filter.GitHubCommitSkipTrait plugin="github-scm-trait-commit-skip@0.1.1"/>
</traits>
</org.jenkinsci.plugins.github__branch__source.GitHubSCMNavigator>
</navigators>
@ -416,6 +417,12 @@ jenkins:
key: VAULT_ADDR
secretName: molgenis-pipeline-vault-secret
secretKey: addr
helm:
Image: "lachlanevenson/k8s-helm"
ImageTag: "v2.10.0"
Command: cat
WorkingDir: /home/jenkins
TTY: true
NodeSelector: {}
node:
Label: node-carbon
@ -447,6 +454,69 @@ jenkins:
secretName: molgenis-pipeline-vault-secret
secretKey: addr
NodeSelector: {}
molgenis-it:
InheritFrom: molgenis
Label: molgenis-it
NodeUsageMode: EXCLUSIVE
Containers:
elasticsearch:
Image: docker.elastic.co/elasticsearch/elasticsearch
ImageTag: 5.5.3
resources:
requests:
cpu: "100m"
memory: "1Gi"
limits:
cpu: "1"
memory: "1500Mi"
EnvVars:
- type: KeyValue
key: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
- type: KeyValue
key: cluster.name
value: molgenis
- type: KeyValue
key: bootstrap.memory_lock
value: "true"
- type: KeyValue
key: xpack.security.enabled
value: "false"
- type: KeyValue
key: discovery.type
value: single-node
postgres:
Image: postgres
ImageTag: 9.6-alpine
resources:
requests:
cpu: "100m"
memory: "250Mi"
limits:
cpu: "1"
memory: "250Mi"
EnvVars:
- type: KeyValue
key: POSTGRES_USER
value: molgenis
- type: KeyValue
key: POSTGRES_PASSWORD
value: molgenis
- type: KeyValue
key: POSTGRES_DB
value: molgenis
opencpu:
Image: molgenis/opencpu
AlwaysPullImage: true
resources:
requests:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "1"
memory: "512Mi"
NodeSelector: {}
#secret contains configuration for the kubernetes secrets that jenkins can access
secret:
# vault configures the vault secret

View File

@ -1,34 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
creationTimestamp: null
name: {{ .Values.httpd.name }}
labels:
app: {{ .Values.httpd.name }}
environment: {{ .Values.environment }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: {{ .Values.httpd.strategy.type }}
selector:
matchLabels:
app: {{ .Values.httpd.selector }}
template:
metadata:
labels:
app: {{ .Values.httpd.name }}
creationTimestamp: null
spec:
restartPolicy: {{ .Values.httpd.restartPolicy }}
containers:
- name: {{ .Values.httpd.name }}
image: "{{ .Values.httpd.image.repository }}:{{ .Values.httpd.image.tag }}"
imagePullPolicy: {{ .Values.httpd.image.pullPolicy }}
env:
- name: PROXY_SERVICE
value: "{{ .Values.nexus.name }}:{{ .Values.nexus.port.ui }},{{ .Values.nexus.name }}:{{ .Values.nexus.port.docker }}:{{ .Values.nexus.path.dockerV2 }}"
- name: SERVER_NAME
value: {{ .Values.httpd.hostname }}
ports:
- containerPort: {{ .Values.httpd.port }}
resources: {}

View File

@ -19,26 +19,40 @@ spec:
app: {{ .Values.nexus.name }}
creationTimestamp: null
spec:
volumes:
- name: {{ .Values.persistence.name }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.name }}
restartPolicy: {{ .Values.nexus.restartPolicy }}
initContainers:
- name: volume-mount-nexus
image: busybox
command: ["sh", "-c", "chown -R 200:200 {{ .Values.persistence.mountPath }}"]
volumeMounts:
- name: {{ .Values.persistence.name }}
mountPath: "{{ .Values.persistence.mountPath }}"
containers:
- name: {{ .Values.nexus.name }}
image: "{{ .Values.nexus.image.repository }}:{{ .Values.nexus.image.tag }}"
imagePullPolicy: {{ .Values.nexus.image.pullPolicy }}
ports:
- containerPort: {{ .Values.nexus.port.ui }}
- containerPort: {{ .Values.nexus.port.docker }}
volumeMounts:
- name: {{ .Values.persistence.name }}
mountPath: "/nexus-data"
- name: {{ .Values.nexus.name }}
image: "{{ .Values.nexus.image.repository }}:{{ .Values.nexus.image.tag }}"
imagePullPolicy: {{ .Values.nexus.image.pullPolicy }}
ports:
- containerPort: {{ .Values.nexus.port.ui }}
- containerPort: {{ .Values.nexus.port.docker }}
volumeMounts:
- name: molgenis-nexus-nfs
mountPath: "/nexus-data"
livenessProbe:
httpGet:
path: /
port: {{ .Values.nexus.port.ui }}
initialDelaySeconds: 90
periodSeconds: 20
failureThreshold: 5
successThreshold: 1
readinessProbe:
httpGet:
path: /
port: {{ .Values.nexus.port.ui }}
initialDelaySeconds: 90
periodSeconds: 5
failureThreshold: 5
successThreshold: 1
volumes:
- name: molgenis-nexus-nfs
persistentVolumeClaim:
claimName: {{ .Values.persistence.claim }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@ -0,0 +1,55 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
creationTimestamp: null
name: {{ .Values.nexusProxy.name }}
labels:
app: {{ .Values.nexusProxy.name }}
environment: {{ .Values.environment }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: {{ .Values.nexusProxy.strategy.type }}
selector:
matchLabels:
app: {{ .Values.nexusProxy.selector }}
template:
metadata:
labels:
app: {{ .Values.nexusProxy.name }}
creationTimestamp: null
spec:
restartPolicy: {{ .Values.nexusProxy.restartPolicy }}
containers:
- name: {{ .Values.nexusProxy.name }}
image: "{{ .Values.nexusProxy.image.repository }}:{{ .Values.nexusProxy.image.tag }}"
imagePullPolicy: {{ .Values.nexusProxy.image.pullPolicy }}
env:
- name: PROXY_SERVICE
value: "{{ .Values.nexus.name }}:{{ .Values.nexus.port.ui }},{{ .Values.nexus.name }}:{{ .Values.nexus.port.docker }}:{{ .Values.nexus.path.dockerV2 }}"
- name: SERVER_NAME
value: {{ .Values.nexusProxy.hostname }}
ports:
- containerPort: {{ .Values.nexusProxy.port }}
resources: {}
livenessProbe:
httpGet:
path: /
port: {{ .Values.nexusProxy.port }}
initialDelaySeconds: 90
periodSeconds: 5
failureThreshold: 5
successThreshold: 1
readinessProbe:
httpGet:
path: /
port: {{ .Values.nexusProxy.port }}
initialDelaySeconds: 90
periodSeconds: 5
failureThreshold: 5
successThreshold: 1
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@ -25,8 +25,8 @@ spec:
paths:
- path: {{ default "/" .path }}
backend:
serviceName: httpd
servicePort: 80
serviceName: {{ $.Values.nexusProxy.name }}
servicePort: {{ $.Values.nexusProxy.port }}
{{- if .tls }}
tls:
- hosts:

View File

@ -0,0 +1,15 @@
{{- if .Values.persistence.enabled -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ .Values.persistence.claim }}
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-provisioner-retain"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.persistence.size }}
{{- end }}

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.httpd.name }}
labels:
app: {{ .Values.httpd.name }}
spec:
type: {{ .Values.httpd.service.type }}
ports:
- name: {{ .Values.httpd.name }}
port: {{ .Values.httpd.port }}
selector:
app: {{ .Values.httpd.selector }}

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.nexusProxy.name }}
labels:
app: {{ .Values.nexusProxy.name }}
spec:
type: {{ .Values.nexusProxy.service.type }}
ports:
- name: {{ .Values.nexusProxy.name }}
port: {{ .Values.nexusProxy.port }}
selector:
app: {{ .Values.nexusProxy.selector }}

View File

@ -1,16 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: {{ .Values.persistence.name }}
labels:
name: nfs2
spec:
storageClassName: {{ .Values.persistence.storageClass }}
capacity:
storage: {{ .Values.persistence.size }}
accessModes:
- {{ .Values.persistence.accessMode }}
persistentVolumeReclaimPolicy: {{ .Values.persistence.reclaimPolicy }}
nfs:
server: {{ .Values.persistence.server }}
path: {{ .Values.persistence.mountPath }}

View File

@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ .Values.persistence.name }}
spec:
storageClassName: {{ .Values.persistence.storageClass }}
accessModes:
- {{ .Values.persistence.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.size }}

View File

@ -24,12 +24,12 @@ nexus:
service:
type: ClusterIP
httpd:
name: httpd
nexusProxy:
name: nexus-proxy
hostname: registry.molgenis.org
strategy:
type: Recreate
selector: httpd
selector: nexus-proxy
restartPolicy: Always
image:
repository: registry.webhosting.rug.nl/molgenis/httpd
@ -43,39 +43,22 @@ httpd:
ingress:
enabled: true
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- name: registry.molgenis.org
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
name: molgenis-nexus-data
storageClass: nfs-class
size: 30G
reclaimPolicy: Retain
server: 192.168.64.12
accessMode: ReadWriteMany
mountPath: /gcc/molgenis/nexus
enabled: true
claim: molgenis-nexus
size: 500Gi
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
nodeSelector: {
deployPod: "true"
}
tolerations: []

View File

@ -1,8 +1,8 @@
apiVersion: v1
appVersion: "1.0"
description: MOLGENIS - helm stack for testing purposes
name: molgenis-preview
version: 0.2.0
description: Opencpu stack for MOLGENIS
name: molgenis-opencpu
version: 0.1.1
sources:
- https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm.git
icon: https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm/raw/master/molgenis-preview/catalogIcon-molgenis.svg
icon: https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm/raw/master/molgenis-opencpu/catalogIcon-molgenis-opencpu.svg

View File

@ -0,0 +1,38 @@
# MOLGENIS - OpenCPU Helm Chart
OpenCPU stack for MOLGENIS to deploy on a kubernetes cluster
## Containers
This chart will deploy the following containers:
- OpenCPU
- MOLGENIS-httpd (to proxy the registry and docker to one domain)
## Provisioning
You can choose from which repository you want to pull the OpenCPU image. Experimental builds are pushed to registry.molgenis.org and the stable builds to hub.docker.com.
You need to fill out 2 properties to determine which repository you are going to use.
- ```opencpu.image.repository```
- ```opencpu.image.tag```
You can do this in the questions in Rancher or in the ```values.yaml```.
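For example, a minimal ```values.yaml``` fragment that pulls the stable OpenCPU image from Docker Hub could look like this; the tag is only an example, check the registries for the released tags:

```yaml
opencpu:
  image:
    repository: registry.hub.docker.com   # or registry.molgenis.org for experimental builds
    tag: stable                            # example tag; pick a released tag from the registry
```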
## Development
You can test and install the chart by executing:
```helm lint .```
to check whether your helm chart syntax is right, and:
```helm install . --dry-run --debug```
to test whether your helm chart works, and:
```helm install .```
to deploy it on the cluster.

File diff suppressed because one or more lines are too long (new image file, 245 KiB)

View File

@ -0,0 +1,28 @@
categories:
- MOLGENIS
questions:
- variable: ingress.enabled
label: Enable ingress
default: false
description: "Enable ingress"
type: boolean
required: true
group: "Load balancing"
- variable: opencpu.image.repository
label: Registry
default: "registry.hub.docker.com"
description: "Select a registry to pull from"
type: enum
options:
- "registry.hub.docker.com"
- "registry.molgenis.org"
required: true
group: "Provisioning"
- variable: opencpu.image.tag
label: Version
default: ""
description: "Select a OpenCPU version (check the registry.molgenis.org or hub.docker.com for released tags)"
type: string
required: true
group: "Provisioning"

View File

@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "molgenis.name" -}}
{{- define "opencpu.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "molgenis.fullname" -}}
{{- define "opencpu.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@ -27,6 +27,6 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "molgenis.chart" -}}
{{- define "opencpu.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@ -0,0 +1,35 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "opencpu.fullname" . }}
labels:
app: {{ template "opencpu.name" . }}
chart: {{ template "opencpu.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "opencpu.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "opencpu.name" . }}
release: {{ .Release.Name }}
spec:
containers:
{{- with .Values.opencpu }}
- name: {{ .name }}
image: "{{ .image.repository }}/{{ .image.name }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
ports:
- containerPort: {{ .service.port }}
{{- end }}

View File

@ -0,0 +1,36 @@
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: "{{ $.Release.Name }}-ingress"
labels:
app: {{ $.Values.opencpu.name }}
chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}"
release: "{{ $.Release.Name }}"
heritage: "{{ $.Release.Service }}"
annotations:
{{- if .tls }}
ingress.kubernetes.io/secure-backends: "true"
{{- end }}
{{- range $key, $value := .annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
rules:
- host: {{ .name }}
http:
paths:
- path: {{ default "/" .path }}
backend:
serviceName: {{ $.Values.opencpu.service.name }}
servicePort: {{ $.Values.opencpu.service.port }}
{{- if .tls }}
tls:
- hosts:
- {{ .name }}
secretName: {{ .tlsSecret }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.opencpu.service.name }}
labels:
app: {{ .Values.opencpu.service.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.opencpu.service.type }}
loadBalancerSourceRanges:
{{- range $index, $rule := .Values.opencpu.service.firewall }}
- {{ $rule }}
{{- end }}
ports:
- name: {{ .Values.opencpu.service.name }}
port: {{ .Values.opencpu.service.port }}
selector:
app: {{ template "opencpu.name" . }}
release: {{ .Release.Name }}

View File

@ -0,0 +1,41 @@
# Default values for nexus.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
environment: production
opencpu:
name: opencpu
strategy:
type: Recreate
restartPolicy: Always
image:
repository: registry.hub.docker.com
name: molgenis/opencpu
tag: stable
pullPolicy: Always
service:
name: opencpu
type: LoadBalancer
port: 8004
firewall:
- 145.100.224.1/24
ingress:
enabled: false
annotations: {
kubernetes.io/ingress.class: "nginx",
nginx.ingress.kubernetes.io/proxy-body-size: "0"
}
path: /
hosts:
- name: opencpu.molgenis.org
tls: []
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -1,16 +0,0 @@
# MOLGENIS preview
This chart is used for testing purposes. It can be used by data managers or developers to test MOLGENIS (e.g. integration testing).
## Containers
This chart spins up a complete stack to run MOLGENIS. The created containers are:
- MOLGENIS
- PostgreSQL
- Elasticsearch
- OpenCPU
## Rancher
You can spin up a test instance by navigating to https://rancher.molgenis.org:7777 and login with your LDAP-account.
Go to the test-environment and click on "Launch". Search for MOLGENIS.

File diff suppressed because it is too large (deleted image file, 77 KiB)

View File

@ -1,61 +0,0 @@
categories:
- MOLGENIS
questions:
- variable: ingress.hosts[0].name
default: "test.molgenis.org"
description: "Hostname for your stack"
type: hostname
required: true
group: "Services and Load Balancing"
label: Hostname
- variable: molgenis.image.repository
default: "registry.hub.docker.com"
description: "Select a registry to pull from"
type: enum
options:
- "registry.hub.docker.com"
- "registry.molgenis.org"
required: true
group: "MOLGENIS - Version"
label: Registry
- variable: molgenis.image.tag
default: "stable"
description: "Select a MOLGENIS version (check the registry.molgenis.org or hub.docker.com for other tags)"
type: string
required: true
group: "MOLGENIS - Version"
label: Version
- variable: molgenis.resources.limits.cpu
default: 1
description: "CPU limit for this MOLGENIS instance"
type: enum
options:
- "1"
- "2"
- "3"
- "4"
required: true
group: "MOLGENIS - Resource limits"
label: CPU limit
- variable: molgenis.resources.limits.memory
default: 1250Mi
description: "Memory limit for this MOLGENIS instance"
type: enum
options:
- "1250Mi"
- "1500Mi"
- "2000Mi"
- "2500Mi"
required: true
group: "MOLGENIS - Resource limits"
label: Memory limit
- variable: molgenis.javaOpts
default: "-Xmx1g -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled"
description: "Java runtime options for the MOLGENIS instance"
type: enum
options:
- "-Xmx1g -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled"
- "-Xmx2g -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled"
group: "MOLGENIS - Resource limits"
label: Java memory options

View File

@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "molgenis.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ template "molgenis.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "molgenis.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "molgenis.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}

View File

@ -1,124 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "molgenis.fullname" . }}
labels:
app: {{ template "molgenis.name" . }}
chart: {{ template "molgenis.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "molgenis.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "molgenis.name" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: molgenis
{{- with .Values.molgenis }}
image: "{{ .image.repository }}/{{ .image.name }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
env:
- name: molgenis.home
value: /home/molgenis
- name: opencpu.uri.host
value: localhost
- name: elasticsearch.transport.addresses
value: localhost:9300
- name: elasticsearch.cluster.name
value: {{ $.Values.elasticsearch.clusterName }}
- name: db_uri
value: "jdbc:postgresql://localhost/{{ $.Values.postgres.db }}"
- name: db_user
value: {{ $.Values.postgres.user }}
- name: db_password
value: {{ $.Values.postgres.password }}
- name: admin.password
value: {{ .adminPassword }}
- name: CATALINA_OPTS
value: "{{ .javaOpts }}"
ports:
- containerPort: 8080
# livenessProbe:
# httpGet:
# path: /
# port: 8080
# readinessProbe:
# httpGet:
# path: /api/v2/version
# port: 8080
resources:
{{ toYaml .resources | indent 12 }}
{{- end }}
- name: elasticsearch
{{- with .Values.elasticsearch }}
image: "{{ .image.repository }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
env:
- name: cluster.name
value: {{ .clusterName }}
- name: bootstrap.memory_lock
value: "true"
- name: ES_JAVA_OPTS
value: "{{ .javaOpts }}"
- name: xpack.security.enabled
value: "false"
- name: discovery.type
value: single-node
ports:
- containerPort: 9200
- containerPort: 9300
resources:
{{ toYaml .resources | indent 12 }}
{{- end }}
- name: postgres
{{- with .Values.postgres }}
image: "{{ .image.repository }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
env:
- name: POSTGRES_USER
value: {{ .user }}
- name: POSTGRES_PASSWORD
value: {{ .password }}
- name: POSTGRES_DB
value: {{ .db }}
ports:
- containerPort: 5432
resources:
{{ toYaml .resources | indent 12 }}
{{- end }}
- name: opencpu
{{- with .Values.opencpu }}
image: "{{ .image.repository }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
ports:
- containerPort: 8004
resources:
{{ toYaml .resources | indent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@ -1,38 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "molgenis.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ template "molgenis.name" . }}
chart: {{ template "molgenis.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .name }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: 8080
{{- end }}
{{- end }}

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "molgenis.fullname" . }}
labels:
app: {{ template "molgenis.name" . }}
chart: {{ template "molgenis.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- name: molgenis
port: {{ .Values.service.port }}
selector:
app: {{ template "molgenis.name" . }}
release: {{ .Release.Name }}

View File

@ -1,82 +0,0 @@
# Default values for molgenis.
replicaCount: 1
service:
type: LoadBalancer
port: 8080
ingress:
enabled: true
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
path: /
hosts:
- name: test.molgenis.org
tls: []
molgenis:
image:
repository: registry.molgenis.org
name: molgenis/molgenis-app
tag: 7.0.0-SNAPSHOT
pullPolicy: Always
adminPassword: admin
javaOpts: "-Xmx1g -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled"
resources:
limits:
cpu: 1
memory: 1250Mi
requests:
cpu: 200m
memory: 1Gi
postgres:
image:
repository: postgres
tag: 9.6-alpine
pullPolicy: IfNotPresent
user: molgenis
password: molgenis
db: molgenis
resources:
limits:
cpu: 1
memory: 250Mi
requests:
cpu: 100m
memory: 250Mi
elasticsearch:
image:
repository: docker.elastic.co/elasticsearch/elasticsearch
tag: 5.5.3
pullPolicy: IfNotPresent
javaOpts: "-Xms512m -Xmx512m"
clusterName: molgenis
resources:
limits:
cpu: 1
memory: 1500Mi
requests:
cpu: 100m
memory: 1Gi
opencpu:
image:
repository: molgenis/opencpu
tag: latest
pullPolicy: Always
resources:
limits:
cpu: 1
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -2,5 +2,5 @@ apiVersion: v1
appVersion: "1.0"
description: MOLGENIS vault
name: molgenis-vault
version: 0.2.1
version: 0.1.1
icon: https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm/raw/master/molgenis-vault/catalogIcon-molgenis-vault.svg

View File

@ -13,25 +13,21 @@ See https://github.com/coreos/vault-operator/blob/master/doc/user/vault.md
## Parameters
### Azure cloud credentials
Define credentials for an S3 compatible backup bucket.
See [etcd-operator documentation](https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md).
> Default values backup to the minio play server.
You can host the stable/minio chart to backup to a bucket on the cluster.
Define credentials for backup to the Azure Blob Store.
See [etcd-operator documentation](https://github.com/coreos/etcd-operator/blob/master/doc/user/abs_backup.md).
| Parameter | Description | Default |
| -------------------- | ---------------------------------------- | ------------------------------------------ |
| `s3.accessKeyId` | key id storage account | `Q3AM3UQ867SPQQA43P2F` |
| `s3.secretAccessKey` | secret access key of storage account | `zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG` |
| `s3.region` | region of the storage server | `us-east-1` |
| `s3.endpoint` | endpoint for the storage server | `https://play.minio.io:9000` |
| `s3.bucket` | name of the bucket on the storage server | `vault` |
| Parameter | Description | Default |
| --------------- | ----------------------------- | ------------------ |
| `abs.account` | name of storage account | `fdlkops` |
| `abs.accessKey` | access key of storage account | `xxxx` |
| `abs.cloud` | name of cloud environment | `AzurePublicCloud` |
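As a sketch, these parameters could be overridden in a custom values file like this (the account name and key below are placeholders, not working credentials):

```yaml
abs:
  account: mystorageaccount          # placeholder: your Azure storage account name
  accessKey: "<storage-access-key>"  # placeholder: access key of the storage account
  cloud: AzurePublicCloud
```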
### Backup job
Define the schedule of the backup job
| Parameter | Description | Default |
| -------------------- | ---------------------------- | ------------- |
| `backupJob.suspend` | Suspend backup cronjob | `false` |
| `backupJob.enable` | Enable backup cronjob | `true` |
| `backupJob.schedule` | cron schedule for the backup | `0 12 * * 1` |
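For example, to keep the backup job enabled but run it daily instead of the default weekly schedule (the cron expression below is only an example):

```yaml
backupJob:
  enable: true
  schedule: "0 0 * * *"   # example: daily at midnight; default is "0 12 * * 1"
```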
### UI

View File

@ -1,13 +0,0 @@
apiVersion: "etcd.database.coreos.com/v1beta2"
kind: "EtcdBackup"
metadata:
name: vault-backup
namespace: "vault-operator"
spec:
etcdEndpoints: ["https://vault-etcd-client:2379"]
storageType: S3
clientTLSSecret: vault-etcd-client-tls
s3:
path: vault/backup-manual
awsSecret: aws
endpoint: http://minio.minio.svc:9000

View File

@ -9,8 +9,7 @@ spec:
etcdCluster:
# The namespace is the same as this EtcdRestore CR
name: vault-etcd
backupStorageType: S3
s3:
path: vault/backup-<name>
awsSecret: aws
endpoint: http://minio.minio.svc:9000
backupStorageType: ABS
abs:
path: vault/backup-<specify the backup name>
absSecret: abs

View File

@ -3,15 +3,11 @@ Vault operator created
Next steps:
* Manually create a vault using resources/vault.yaml
* Manually restore a backup using resources/restore.yaml
* Manually restore a backup using resources/backup.yaml
* Unseal the vault pods
{{ if .Values.backupJob.suspend }}
!!!!!! BACKUP JOB SUSPENDED !!!!!!
{{ else }}
{{- if .Values.s3.endpoint -}}
Backing up to non-standard s3 endpoint {{ .Values.s3.endpoint }} {{ else -}}
Backing up to S3 on aws {{ end -}}
in bucket {{ .Values.s3.bucket }}.
{{ if .Values.backupJob.enable }}
!! Make sure to check if the backups succeed !!
{{ else }}
!!!!!! NO BACKUPS CONFIGURED !!!!!!
{{ end }}

View File

@ -0,0 +1,10 @@
# Secret to access microsoft azure blob store
apiVersion: v1
kind: Secret
metadata:
name: abs
type: Opaque
stringData:
storage-account: {{ .Values.abs.account }}
storage-key: {{ .Values.abs.accessKey }}
cloud: {{ .Values.abs.cloud }}

View File

@ -1,10 +0,0 @@
# Secret to access s3 compatible store
apiVersion: v1
kind: Secret
metadata:
name: aws
type: Opaque
data:
config: {{ printf "[default]\nregion = %s" .Values.s3.region | b64enc | quote }}
credentials: {{ printf "[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n" .Values.s3.accessKeyId .Values.s3.secretAccessKey | b64enc | quote }}

View File

@ -11,14 +11,8 @@ data:
generateName: vault-backup-
spec:
etcdEndpoints: ["https://vault-etcd-client:2379"]
storageType: S3
storageType: ABS
clientTLSSecret: vault-etcd-client-tls
s3:
path: {{ .Values.s3.bucket }}/backup.<NOW>
awsSecret: aws
{{- if .Values.s3.endpoint }}
endpoint: {{ .Values.s3.endpoint }}
{{- end }}
{{- if hasKey .Values.s3 "forcePathStyle" }}
forcePathStyle: {{ .Values.s3.forcePathStyle }}
{{- end }}
abs:
path: vault/backup.<NOW>
absSecret: abs

View File

@ -1,10 +1,10 @@
{{- if .Values.backupJob.enable }}
# cronjob that creates etcdbackups using the etcd backup serviceaccount
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: etcd-backup
spec:
suspend: {{ .Values.backupJob.suspend }}
schedule: {{ .Values.backupJob.schedule | quote }}
jobTemplate:
spec:
@ -26,4 +26,5 @@ spec:
volumes:
- name: backup-config
configMap:
name: backup-config
name: backup-config
{{- end }}

View File

@ -2,26 +2,19 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# s3 configures s3 backup storage
s3:
# accessKey for the s3 storage account
accessKeyId: Q3AM3UQ867SPQQA43P2F
# secretAccessKey for the s3 storage account
secretAccessKey: zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
# region
region: us-east-1
# endpoint for the s3 storage
endpoint: https://play.minio.io:9000
# forcePathStyle if set to true forces requests to use path style
# (host/bucket instead of bucket.host)
forcePathStyle: true
# bucket is the name of the bucket
bucket: vault
# abs gives details of the credentials to reach the azure backup storage
abs:
# account is the name of the Storage account
account: fdlkops
# access key for the Storage account
accessKey: xxxx
# default cloud
cloud: AzurePublicCloud
# backupjob describes the backup cronjob
backupJob:
# suspend suspends the backup job
suspend: false
# enable enables the backup job
enable: true
# schedule gives the cron schedule for the backup job
schedule: "0 12 * * 1"
@ -47,12 +40,10 @@ etcd-operator:
tag: v0.9.2
backupOperator:
image:
repository: fdlk/etcd-operator
tag: latest
tag: v0.9.2
restoreOperator:
image:
repository: fdlk/etcd-operator
tag: latest
tag: v0.9.2
ui:
name: "vault-ui"

View File

@ -1,8 +1,8 @@
apiVersion: v1
appVersion: "1.0"
description: MOLGENIS - helm stack (in BETA)
name: molgenis-beta
version: 0.3.0
name: molgenis
version: 0.4.3
sources:
- https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm.git
icon: https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm/raw/master/molgenis/catalogIcon-molgenis.svg

View File

@ -5,6 +5,8 @@ This chart is used for acceptance and production use cases.
This chart spins up a MOLGENIS instance with HTTPD. The created containers are:
- MOLGENIS
- ElasticSearch
- PostgreSQL **(optional)**
## Provisioning
You can choose from which registry you want to pull. There are 2 registries:
@ -21,6 +23,19 @@ The three properties you need to specify are:
Besides determining which image you want to pull, you also have to set an administrator password. You can do this by specifying the following property.
- ```molgenis.adminPassword```
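For illustration, a provisioning fragment of the ```values.yaml``` could look like the sketch below; the image name, tag and password are examples you should replace:

```yaml
molgenis:
  image:
    repository: registry.hub.docker.com   # or registry.molgenis.org for experimental builds
    name: molgenis/molgenis-app            # example image name
    tag: stable                            # example tag; pick the MOLGENIS version you need
  adminPassword: "change-me"               # placeholder: set a real administrator password
```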
### Firewall
It is defined at service level; you can specify these attributes in the values:
- ```service.firewall.enabled``` default 'false'
If set to 'true', the firewall kind also has to be set to one of the options below.
- ```service.firewall.kind: umcg``` (default)
- ```service.firewall.kind: cluster```
UMCG = only available within the UMCG.
Cluster = only available within the GCC cluster environment.
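A minimal sketch of a values override that enables the UMCG-scoped firewall (the CIDR below mirrors the chart default and is a placeholder; replace it with the ranges that should reach the service):

```yaml
service:
  firewall:
    enabled: true
    kind: "umcg"
    umcg:
      rules:
        - 127.0.0.1/32   # placeholder CIDR; replace with the allowed ranges
```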
## Services
When you start MOLGENIS you need:
- an elasticsearch instance (5.5.6)
@ -82,15 +97,16 @@ Select the resources you need dependant on the customer you need to serve.
## Persistence
You can enable persistence on your MOLGENIS stack by specifying the following property.
- ```persistence.enabled```
- ```persistence.enabled``` default 'true'
You can also choose to retain the volume of the NFS.
- ```persistence.retain```
- ```persistence.retain``` default 'false'
The size and claim name can be specified per service. There are now three services that can be persisted (see the example values fragment after the property lists below).
- MOLGENIS
- ElasticSearch
- PostgreSQL **(optional)**
MOLGENIS persistent properties.
- ```molgenis.persistence.claim```
@ -100,6 +116,9 @@ ElasticSearch persistent properties.
- ```elasticsearch.persistence.claim```
- ```elasticsearch.persistence.size```
PostgreSQL persistent properties.
- ```postgres.persistence.claim```
- ```postgres.persistence.size```
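For illustration, the persistence-related values could be set like this; the claim names and sizes below mirror the chart defaults shown further down in this changeset:

```yaml
persistence:
  enabled: true
  retain: false
molgenis:
  persistence:
    claim: molgenis-nfs-claim
    size: 5Gi
elasticsearch:
  persistence:
    claim: elasticsearch-nfs-claim
    size: 5Gi
postgres:
  persistence:
    claim: postgres-nfs-claim
    size: 5Gi
```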
### Resolve your persistent volume
If you do not know which volume is attached to your MOLGENIS instance, you can resolve this by executing:
@ -116,7 +135,4 @@ You can now view the persistent volume claims and the attached volumes.
| pvc-3984723d-220f-14e8-a98a-skjhf88823kk | 30G | RWO | | Delete | Bound | molgenis-test/molgenis-nfs-claim | nfs-provisioner | | | 33d |
You see the ```molgenis-test/molgenis-nfs-claim``` is bound to the volume: ```pvc-3984723d-220f-14e8-a98a-skjhf88823kk```.
When you want to view the data in this volume, you can go to the nfs-provisioning pod and open a shell. Go to the ```export``` directory and look up the directory ```pvc-3984723d-220f-14e8-a98a-skjhf88823kk```.
## Firewall
Is defined at cluster level. This chart does not facilitate firewall configuration.
When you want to view the data in this volume, you can go to the nfs-provisioning pod and open a shell. Go to the ```export``` directory and look up the directory ```pvc-3984723d-220f-14e8-a98a-skjhf88823kk```.

View File

@ -8,7 +8,7 @@ questions:
description: "Hostname for your stack"
type: hostname
required: true
group: "Load Balancing"
group: "Load balancing"
- variable: molgenis.image.repository
label: Registry
default: "registry.hub.docker.com"
@ -33,6 +33,24 @@ questions:
type: password
required: true
group: "Provisioning"
- variable: service.firewall.enabled
label: Firewall enabled
default: false
description: "Firewall enabled (can be cluster or UMCG scoped)"
type: boolean
required: true
group: "Provisioning"
show_subquestion_if: true
subquestions:
- variable: service.firewall.kind
default: "umcg"
description: "Firewall kind. This can be 'umcg' or 'cluster' environment"
type: enum
required: true
options:
- umcg
- cluster
label: Firewall kind
- variable: molgenis.services.opencpu.host
label: OpenCPU cluster
default: "localhost"
@ -40,34 +58,43 @@ questions:
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.host
label: Postgres cluster location
default: "postgresql.molgenis-postgresql.svc"
description: "Set the location of the postgres cluster"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.scheme
label: Database scheme
default: "molgenis"
description: "Set the database scheme"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.user
label: Database username
default: "molgenis"
description: "Set user of the database scheme"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.password
label: Database password
default: "molgenis"
description: "Set the password of the database scheme"
type: string
- variable: molgenis.services.postgres.embedded
label: Postgres embedded
default: false
description: "Do you want an embedded postgres"
type: boolean
required: true
group: "Services"
show_subquestion_if: false
subquestions:
- variable: molgenis.services.postgres.host
label: Postgres cluster location
default: ""
description: "Set the location of the postgres cluster. This can be localhost when the postgres is enabled else you need to specify a cluster location if you do not want a embedded postgres instance)"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.scheme
label: Database scheme
default: "molgenis"
description: "Set the database scheme"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.user
label: Database username
default: "molgenis"
description: "Set user of the database scheme"
type: string
required: true
group: "Services"
- variable: molgenis.services.postgres.password
label: Database password
default: "molgenis"
description: "Set the password of the database scheme"
type: string
required: true
group: "Services"
- variable: molgenis.resources.limits.memory
label: Container memory limit
default: 1250Mi
@ -98,7 +125,7 @@ questions:
- "2g"
group: "Resources"
- variable: persistence.enabled
default: false
default: true
description: "Do you want to use persistence"
type: boolean
required: true
@ -112,20 +139,29 @@ questions:
type: boolean
label: Retain volume
- variable: molgenis.persistence.size
default: "30Gi"
default: "5Gi"
description: "Size of MOLGENIS filestore (PostgreSQL and ElasticSearch excluded)"
type: enum
options:
- "30Gi"
- "50Gi"
- "100Gi"
- "5Gi"
- "10Gi"
- "20Gi"
label: Size MOLGENIS filestore
- variable: elasticsearch.persistence.size
default: "50Gi"
default: "5Gi"
description: "Size of ElasticSearch data (directory that is persist: /usr/share/elasticsearch/data)"
type: enum
options:
- "5Gi"
- "10Gi"
- "50Gi"
- "100Gi"
- "200Gi"
label: Size for ElasticSearch data
label: Size for ElasticSearch data
- variable: postgres.persistence.size
default: "5Gi"
description: "Size of PostgreSQL data (directory that is persist: /var/lib/postgresql/data/pgdata)"
type: enum
options:
- "5Gi"
- "10Gi"
- "50Gi"
label: Size for PostgreSQL data

View File

@ -17,6 +17,8 @@ spec:
matchLabels:
app: {{ template "molgenis.name" . }}
release: {{ .Release.Name }}
strategy:
type: Recreate
template:
metadata:
labels:
@ -97,11 +99,33 @@ spec:
- name: elasticsearch-nfs
mountPath: /usr/share/elasticsearch/data
{{- end }}
resources:
{{ toYaml .resources | indent 12 }}
{{- end }}
- name: postgres
{{- with .Values.postgres }}
image: "{{ .image.repository }}:{{ .image.tag }}"
imagePullPolicy: {{ .image.pullPolicy }}
env:
- name: POSTGRES_USER
value: {{ $.Values.molgenis.services.postgres.user }}
- name: POSTGRES_PASSWORD
value: {{ $.Values.molgenis.services.postgres.password }}
- name: POSTGRES_DB
value: {{ $.Values.molgenis.services.postgres.scheme }}
ports:
- containerPort: 5432
resources:
{{ toYaml .resources | indent 12 }}
{{- if $.Values.persistence.enabled }}
volumeMounts:
- name: postgres-nfs
mountPath: /var/lib/postgresql/data
{{- end }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumes:
- name: molgenis-nfs
@ -110,6 +134,9 @@ spec:
- name: elasticsearch-nfs
persistentVolumeClaim:
claimName: {{ .Values.elasticsearch.persistence.claim }}
- name: postgres-nfs
persistentVolumeClaim:
claimName: {{ .Values.postgres.persistence.claim }}
{{- end }}
{{- with .Values.nodeSelector }}

View File

@ -4,7 +4,7 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
name: "{{ $.Release.Name }}-ingress"
labels:
app: {{ template "molgenis.name" . }}
chart: {{ template "molgenis.chart" . }}
@ -33,6 +33,6 @@ spec:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: 8080
servicePort: {{ $.Values.service.port }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if .Values.molgenis.services.postgres.embedded }}
{{- if .Values.persistence.enabled }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ .Values.postgres.persistence.claim }}
annotations:
{{- if .Values.persistence.retain }}
volume.beta.kubernetes.io/storage-class: "nfs-provisioner-retain"
{{- else }}
volume.beta.kubernetes.io/storage-class: "nfs-provisioner"
{{- end }}
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.postgres.persistence.size }}
{{- end }}
{{- end }}

View File

@ -9,6 +9,18 @@ metadata:
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
{{- if .Values.service.firewall.enabled }}
loadBalancerSourceRanges:
{{- if eq .Values.service.firewall.kind "umcg" }}
{{- range $index, $rule := .Values.service.firewall.umcg.rules }}
- {{ $rule }}
{{- end }}
{{- else }}
{{- range $index, $rule := .Values.service.firewall.cluster.rules }}
- {{ $rule }}
{{- end }}
{{- end }}
{{- end }}
ports:
- name: molgenis
port: {{ .Values.service.port }}

View File

@ -4,6 +4,15 @@ replicaCount: 1
service:
type: LoadBalancer
firewall:
enabled: false
kind: "umcg"
umcg:
rules:
- 127.0.0.1/32
cluster:
rules:
- 127.0.0.1/32
port: 8080
ingress:
@ -33,7 +42,7 @@ molgenis:
memory: 1250Mi
persistence:
claim: molgenis-nfs-claim
size: 30Gi
size: 5Gi
services:
opencpu:
host: localhost
@ -41,6 +50,7 @@ molgenis:
transportAddresses: localhost:9300
clusterName: molgenis
postgres:
embedded: false
host: localhost
scheme: molgenis
user: molgenis
@ -62,10 +72,26 @@ elasticsearch:
memory: 1Gi
persistence:
claim: elasticsearch-nfs-claim
size: 50Gi
size: 5Gi
postgres:
image:
repository: postgres
tag: 9.6-alpine
pullPolicy: IfNotPresent
resources:
limits:
cpu: 1
memory: 250Mi
requests:
cpu: 100m
memory: 250Mi
persistence:
claim: postgres-nfs-claim
size: 5Gi
persistence:
enabled: false
enabled: true
retain: false
nodeSelector: {