1
0

5 Commits

14 changed files with 152 additions and 82 deletions

View File

@ -0,0 +1,6 @@
To be able to run helm inside a jenkins pod, you'll need to
* create a role in the namespace where tiller is installed
* bind that role to the user that jenkins pods run as
This directory contains the YAML definitions for these resources.
See also https://github.com/helm/helm/blob/master/docs/rbac.md

View File

@ -0,0 +1,13 @@
---
# Grants the Jenkins pods' service account (molgenis-jenkins/default) the
# tiller-user Role in kube-system, so helm run from Jenkins can reach tiller.
# NOTE(review): moved off rbac.authorization.k8s.io/v1beta1 (deprecated,
# removed in Kubernetes 1.22); v1 is GA since 1.8 and is a drop-in replacement.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tiller-jenkins-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: tiller-user
subjects:
  - kind: ServiceAccount
    name: default
    namespace: molgenis-jenkins

View File

@ -0,0 +1,18 @@
---
# Minimal Role in the tiller namespace (kube-system) for helm clients:
# they need to list pods (to find the tiller pod) and to open a
# port-forward to it. Bound to the Jenkins service account elsewhere.
# NOTE(review): moved off rbac.authorization.k8s.io/v1beta1 (deprecated,
# removed in Kubernetes 1.22); v1 is GA since 1.8 and is a drop-in replacement.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tiller-user
  namespace: kube-system
rules:
  # create pods/portforward: the helm client tunnels to tiller this way
  - apiGroups:
      - ""
    resources:
      - pods/portforward
    verbs:
      - create
  # list pods: needed to locate the tiller pod before port-forwarding
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - list

View File

@ -416,6 +416,12 @@ jenkins:
key: VAULT_ADDR
secretName: molgenis-pipeline-vault-secret
secretKey: addr
helm:
Image: "lachlanevenson/k8s-helm"
ImageTag: "v2.10.0"
Command: cat
WorkingDir: /home/jenkins
TTY: true
NodeSelector: {}
node:
Label: node-carbon
@ -447,6 +453,69 @@ jenkins:
secretName: molgenis-pipeline-vault-secret
secretKey: addr
NodeSelector: {}
molgenis-it:
InheritFrom: molgenis
Label: molgenis-it
NodeUsageMode: EXCLUSIVE
Containers:
elasticsearch:
Image: docker.elastic.co/elasticsearch/elasticsearch
ImageTag: 5.5.3
resources:
requests:
cpu: "100m"
memory: "1Gi"
limits:
cpu: "1"
memory: "1500Mi"
EnvVars:
- type: KeyValue
key: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
- type: KeyValue
key: cluster.name
value: molgenis
- type: KeyValue
key: bootstrap.memory_lock
value: "true"
- type: KeyValue
key: xpack.security.enabled
value: "false"
- type: KeyValue
key: discovery.type
value: single-node
postgres:
Image: postgres
ImageTag: 9.6-alpine
resources:
requests:
cpu: "100m"
memory: "250Mi"
limits:
cpu: "1"
memory: "250Mi"
EnvVars:
- type: KeyValue
key: POSTGRES_USER
value: molgenis
- type: KeyValue
key: POSTGRES_PASSWORD
value: molgenis
- type: KeyValue
key: POSTGRES_DB
value: molgenis
opencpu:
Image: molgenis/opencpu
AlwaysPullImage: true
resources:
requests:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "1"
memory: "512Mi"
NodeSelector: {}
#secret contains configuration for the kubernetes secrets that jenkins can access
secret:
# vault configures the vault secret

View File

@ -2,5 +2,5 @@ apiVersion: v1
appVersion: "1.0"
description: MOLGENIS vault
name: molgenis-vault
version: 0.2.1
version: 0.1.1
icon: https://git.webhosting.rug.nl/molgenis/molgenis-ops-docker-helm/raw/master/molgenis-vault/catalogIcon-molgenis-vault.svg

View File

@ -13,25 +13,21 @@ See https://github.com/coreos/vault-operator/blob/master/doc/user/vault.md
## Parameters
### Azure cloud credentials
Define credentials for an S3 compatible backup bucket.
See [etcd-operator documentation](https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md).
> Default values backup to the minio play server.
You can host the stable/minio chart to backup to a bucket on the cluster.
Define credentials for backup to the Azure Blob Store.
See [etcd-operator documentation](https://github.com/coreos/etcd-operator/blob/master/doc/user/abs_backup.md).
| Parameter | Description | Default |
| -------------------- | ---------------------------------------- | ------------------------------------------ |
| `s3.accessKeyId` | key id storage account | `Q3AM3UQ867SPQQA43P2F` |
| `s3.secretAccessKey` | secret access key of storage account | `zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG` |
| `s3.region` | region of the storage server | `us-east-1` |
| `s3.endpoint` | endpoint for the storage server | `https://play.minio.io:9000` |
| `s3.bucket` | name of the bucket on the storage server | `vault` |
| --------------- | ----------------------------- | ------------------ |
| `abs.account` | name of storage account | `fdlkops` |
| `abs.accessKey` | access key of storage account | `xxxx` |
| `abs.cloud` | name of cloud environment | `AzurePublicCloud` |
### Backup job
Define the schedule of the backup job
| Parameter | Description | Default |
| -------------------- | ---------------------------- | ------------- |
| `backupJob.suspend` | Suspend backup cronjob | `false` |
| `backupJob.enable` | Enable backup cronjob | `true` |
| `backupJob.schedule` | cron schedule for the backup | `0 12 * * 1` |
### UI

View File

@ -1,13 +0,0 @@
apiVersion: "etcd.database.coreos.com/v1beta2"
kind: "EtcdBackup"
metadata:
name: vault-backup
namespace: "vault-operator"
spec:
etcdEndpoints: ["https://vault-etcd-client:2379"]
storageType: S3
clientTLSSecret: vault-etcd-client-tls
s3:
path: vault/backup-manual
awsSecret: aws
endpoint: http://minio.minio.svc:9000

View File

@ -9,8 +9,7 @@ spec:
etcdCluster:
# The namespace is the same as this EtcdRestore CR
name: vault-etcd
backupStorageType: S3
s3:
path: vault/backup-<name>
awsSecret: aws
endpoint: http://minio.minio.svc:9000
backupStorageType: ABS
abs:
path: vault/backup-<specify the backup name>
absSecret: abs

View File

@ -3,15 +3,11 @@ Vault operator created
Next steps:
* Manually create a vault using resources/vault.yaml
* Manually restore a backup using resources/restore.yaml
* Manually create a backup using resources/backup.yaml
* Unseal the vault pods
{{ if .Values.backupJob.suspend }}
!!!!!! BACKUP JOB SUSPENDED !!!!!!
{{ else }}
{{- if .Values.s3.endpoint -}}
Backing up to non-standard s3 endpoint {{ .Values.s3.endpoint }} {{ else -}}
Backing up to S3 on aws {{ end -}}
in bucket {{ .Values.s3.bucket }}.
{{ if .Values.backupJob.enable }}
!! Make sure to check if the backups succeed !!
{{ else }}
!!!!!! NO BACKUPS CONFIGURED !!!!!!
{{ end }}

View File

@ -0,0 +1,10 @@
# Secret holding the Azure Blob Store (ABS) credentials used as the etcd
# backup target; referenced as `absSecret: abs` by the backup/restore resources.
apiVersion: v1
kind: Secret
metadata:
  name: abs
type: Opaque
stringData:
  # Templated values are quoted so an empty or boolean-looking expansion
  # (e.g. an account name like "no") is still parsed as a string, not null/bool.
  storage-account: "{{ .Values.abs.account }}"
  storage-key: "{{ .Values.abs.accessKey }}"
  cloud: "{{ .Values.abs.cloud }}"

View File

@ -1,10 +0,0 @@
# Secret to access s3 compatible store
apiVersion: v1
kind: Secret
metadata:
name: aws
type: Opaque
data:
config: {{ printf "[default]\nregion = %s" .Values.s3.region | b64enc | quote }}
credentials: {{ printf "[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n" .Values.s3.accessKeyId .Values.s3.secretAccessKey | b64enc | quote }}

View File

@ -11,14 +11,8 @@ data:
generateName: vault-backup-
spec:
etcdEndpoints: ["https://vault-etcd-client:2379"]
storageType: S3
storageType: ABS
clientTLSSecret: vault-etcd-client-tls
s3:
path: {{ .Values.s3.bucket }}/backup.<NOW>
awsSecret: aws
{{- if .Values.s3.endpoint }}
endpoint: {{ .Values.s3.endpoint }}
{{- end }}
{{- if hasKey .Values.s3 "forcePathStyle" }}
forcePathStyle: {{ .Values.s3.forcePathStyle }}
{{- end }}
abs:
path: vault/backup.<NOW>
absSecret: abs

View File

@ -1,10 +1,10 @@
{{- if .Values.backupJob.enable }}
# cronjob that creates etcdbackups using the etcd backup serviceaccount
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: etcd-backup
spec:
suspend: {{ .Values.backupJob.suspend }}
schedule: {{ .Values.backupJob.schedule | quote }}
jobTemplate:
spec:
@ -27,3 +27,4 @@ spec:
- name: backup-config
configMap:
name: backup-config
{{- end }}

View File

@ -2,26 +2,19 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# s3 configures s3 backup storage
s3:
# accessKey for the s3 storage account
accessKeyId: Q3AM3UQ867SPQQA43P2F
# secretAccessKey for the s3 storage account
secretAccessKey: zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
# region
region: us-east-1
# endpoint for the s3 storage
endpoint: https://play.minio.io:9000
# forcePathStyle if set to true forces requests to use path style
# (host/bucket instead of bucket.host)
forcePathStyle: true
# bucket is the name of the bucket
bucket: vault
# abs gives details of the credentials to reach the azure backup storage
abs:
# account is the name of the Storage account
account: fdlkops
# access key for the Storage account
accessKey: xxxx
# default cloud
cloud: AzurePublicCloud
# backupjob describes the backup cronjob
backupJob:
# suspend suspends the backup job
suspend: false
# enable enables the backup job
enable: true
# schedule gives the cron schedule for the backup job
schedule: "0 12 * * 1"
@ -47,12 +40,10 @@ etcd-operator:
tag: v0.9.2
backupOperator:
image:
repository: fdlk/etcd-operator
tag: latest
tag: v0.9.2
restoreOperator:
image:
repository: fdlk/etcd-operator
tag: latest
tag: v0.9.2
ui:
name: "vault-ui"