kubernetes ala Ger

G.J.C. Strikwerda 2018-10-26 10:41:46 +02:00
parent d7e02e1689
commit f1ba34f49b
8 changed files with 324 additions and 0 deletions

kube-cluster/README.md Normal file

@@ -0,0 +1,51 @@
kubernetes ala Ger:
Vagrantfile           : provisions 3 clean CentOS 7 VirtualBox VMs:
                        - master.ger.test  (master node)
                        - worker1.ger.test (worker node)
                        - worker2.ger.test (worker node)
kube-dependencies.yml : installs the Kubernetes dependencies on all nodes
master.yml            : sets up the Kubernetes cluster on the master
workers.yml           : sets up the workers and joins them to the cluster
etc.hosts             : hosts file, copied to /etc/hosts on every node
hosts                 : Ansible inventory
Use:
provision the nodes:
$ vagrant --instance=kubernetes up
install dependencies:
$ ansible-playbook -i hosts ./kube-dependencies.yml
install the master node:
$ ansible-playbook -i hosts ./master.yml
install the worker nodes:
$ ansible-playbook -i hosts ./workers.yml
done:
$ ssh ger@master
[ger@master ~]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.ger.test Ready master 2d v1.12.1
worker1.ger.test Ready worker 47h v1.12.1
worker2.ger.test Ready worker 47h v1.12.1
possible extras:
/etc/sysconfig/kubelet: KUBELET_EXTRA_ARGS=--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice
label node: kubectl label node worker1.ger.test node-role.kubernetes.io/worker=worker
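to apply the kubelet extra arguments, one way (a sketch, assuming root on the node; the cgroup paths are the ones listed above) is:
$ echo 'KUBELET_EXTRA_ARGS=--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice' >> /etc/sysconfig/kubelet
$ systemctl restart kubelet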

kube-cluster/Vagrantfile vendored Normal file

@@ -0,0 +1,93 @@
# coding: utf-8
# -*- mode: ruby -*-
# vi: set ft=ruby :
# GS: post-install provisioning script, run on every node:
$post_script = <<SCRIPT
yum install epel-release -y
yum install htop -y
yum install net-tools -y
# create user 'ger' and install the public key for ger and root
useradd ger
mkdir /home/ger/.ssh
chown ger:ger /home/ger/.ssh/
chmod 700 /home/ger/.ssh/
cat /tmp/ger.pubkey >> /home/ger/.ssh/authorized_keys
mkdir /root/.ssh
chown root:root /root/.ssh/
chmod 700 /root/.ssh/
cat /tmp/ger.pubkey >> /root/.ssh/authorized_keys
# copy in the shared hosts file and the iptables reset script
cp /tmp/hosts /etc/hosts
cp /tmp/fire_stop.sh /root/fire_stop.sh
SCRIPT
# Retrieve instance from command line.
require 'getoptlong'
opts = GetoptLong.new(
  [ '--instance', GetoptLong::OPTIONAL_ARGUMENT ]
)
instance = 'combined'
opts.each do |opt, arg|
  case opt
  when '--instance'
    instance = arg
  end
end
# Configuration variables.
VAGRANTFILE_API_VERSION = "2"
BOX = 'centos/7'
GUI = false
CPU = 1
RAM = 1024
DOMAIN = ".ger.test"
NETWORK = "192.168.50."
NETMASK = "255.255.255.0"
if instance == "kubernetes" then
  HOSTS = {
    "master"  => [NETWORK+"21", CPU, RAM, GUI, BOX],
    "worker1" => [NETWORK+"22", CPU, RAM, GUI, BOX],
    "worker2" => [NETWORK+"23", CPU, RAM, GUI, BOX],
  }
else
  # no machines defined for other instances, so HOSTS.each below is a no-op
  HOSTS = {}
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # config.ssh.insert_key = 'true'
  config.vm.provision "file", source: "/home/ger/.ssh/id_ecdsa.pub", destination: "/tmp/ger.pubkey"
  config.vm.provision "file", source: "/etc/hosts", destination: "/tmp/hosts"
  config.vm.provision "file", source: "/home/ger/fire_stop.sh", destination: "/tmp/fire_stop.sh"

  HOSTS.each do |(name, cfg)|
    ipaddr, cpu, ram, gui, box = cfg
    config.vm.define name do |machine|
      machine.vm.box = box
      machine.vm.provider "virtualbox" do |vbox|
        vbox.gui    = gui
        vbox.cpus   = cpu
        vbox.memory = ram
        vbox.name   = name
        vbox.customize ["guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000]
      end
      machine.vm.hostname = name + DOMAIN
      machine.vm.network 'private_network', ip: ipaddr, netmask: NETMASK
      machine.vm.synced_folder ".", "/vagrant", disabled: true
      machine.vm.provision "shell",
        inline: "sudo timedatectl set-timezone Europe/Amsterdam"
      machine.vm.provision "shell",
        inline: "cat /tmp/ger.pubkey >> /home/vagrant/.ssh/authorized_keys"
      machine.vm.provision "shell",
        inline: $post_script
    end
  end
end
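The --instance flag is parsed by the Vagrantfile itself via GetoptLong, so it is given alongside the usual vagrant subcommand; a minimal sketch of typical invocations under that assumption:
$ vagrant --instance=kubernetes up
$ vagrant --instance=kubernetes status
$ vagrant --instance=kubernetes ssh master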

kube-cluster/etc.hosts Normal file

@@ -0,0 +1,20 @@
127.0.0.1 localhost
127.0.1.1 ger-lpt-werk
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.50.11 portal.ger.test portal
192.168.50.12 icat.ger.test icat
192.168.50.13 resc1.ger.test resc1
192.168.50.14 resc2.ger.test resc2
192.168.50.15 test01
192.168.50.21 master.ger.test master
192.168.50.22 worker1.ger.test worker1
192.168.50.23 worker2.ger.test worker2

kube-cluster/fire_stop.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
# flush all iptables rules, delete custom chains and zero the counters
iptables -F
iptables -X
iptables -Z
# recreate a LOGDROP chain that logs and then drops
iptables -N LOGDROP
iptables -A LOGDROP -j LOG
iptables -A LOGDROP -j DROP
# open up the default policies
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P INPUT ACCEPT
# show the resulting policies
iptables --list | grep policy
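The Vagrant post_script copies this script to /root on every node; one way it could be invoked by hand (a sketch, assuming the root key installed by the post_script):
$ ssh root@worker1 'bash /root/fire_stop.sh'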

kube-cluster/hosts Normal file

@@ -0,0 +1,6 @@
[masters]
master ansible_host=master ansible_user=root
[workers]
worker1 ansible_host=worker1 ansible_user=root
worker2 ansible_host=worker2 ansible_user=root
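Before running the playbooks, the inventory and root SSH access can be checked with (a quick sketch, not part of the original instructions):
$ ansible -i hosts all -m ping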

kube-cluster/kube-dependencies.yml Normal file

@@ -0,0 +1,86 @@
- hosts: all
  become: yes
  tasks:
    - name: remove swap from /etc/fstab
      mount:
        name: swap
        fstype: swap
        state: absent

    - name: disable swap
      command: swapoff -a
      when: ansible_swaptotal_mb > 0

    - name: install Docker
      yum:
        name: docker
        state: present
        update_cache: true

    - name: start Docker
      service:
        name: docker
        state: started

    - name: enable Docker
      service:
        name: docker
        state: started
        enabled: yes

    - name: disable firewalld
      service:
        name: firewalld
        enabled: no

    - name: disable SELinux
      command: setenforce 0

    - name: disable SELinux on reboot
      selinux:
        state: disabled

    - name: ensure net.bridge.bridge-nf-call-ip6tables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-ip6tables
        value: 1
        state: present

    - name: ensure net.bridge.bridge-nf-call-iptables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-iptables
        value: 1
        state: present

    - name: add Kubernetes' YUM repository
      yum_repository:
        name: Kubernetes
        description: Kubernetes YUM repository
        baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
        gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
        gpgcheck: yes

    - name: install kubelet
      yum:
        name: kubelet
        state: present
        update_cache: true

    - name: install kubeadm
      yum:
        name: kubeadm
        state: present

    - name: start kubelet
      service:
        name: kubelet
        enabled: yes
        state: started

- hosts: master
  become: yes
  tasks:
    - name: install kubectl
      yum:
        name: kubectl
        state: present
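After this playbook has run, a quick sanity check across the nodes could look like this (a sketch; the reported versions depend on what the repository serves at install time):
$ ansible -i hosts all -a "kubeadm version -o short"
$ ansible -i hosts all -a "docker --version"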

kube-cluster/master.yml Normal file

@@ -0,0 +1,31 @@
- hosts: master
  become: yes
  tasks:
    - name: initialize the cluster
      shell: kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.50.21 >> cluster_initialized.txt
      args:
        chdir: $HOME
        creates: cluster_initialized.txt

    - name: create .kube directory
      become: yes
      become_user: ger
      file:
        path: $HOME/.kube
        state: directory
        mode: 0755

    - name: copy admin.conf to user's kube config
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /home/ger/.kube/config
        remote_src: yes
        owner: ger

    - name: install Pod network
      become: yes
      become_user: ger
      shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml >> pod_network_setup.txt
      args:
        chdir: $HOME
        creates: pod_network_setup.txt
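When master.yml finishes, the control-plane and flannel pods should come up; one way to verify, using the kube config installed for ger above:
[ger@master ~]$ kubectl get pods -n kube-system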

kube-cluster/workers.yml Normal file

@@ -0,0 +1,22 @@
- hosts: master
  become: yes
  gather_facts: false
  tasks:
    - name: get join command
      shell: kubeadm token create --print-join-command
      register: join_command_raw

    - name: set join command
      set_fact:
        join_command: "{{ join_command_raw.stdout_lines[0] }}"

- hosts: workers
  become: yes
  tasks:
    - name: join cluster
      shell: "{{ hostvars['master'].join_command }} >> node_joined.txt"
      args:
        chdir: $HOME
        creates: node_joined.txt
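The node labelling listed under the README's possible extras could also be automated as one more play at the end of this file; a minimal sketch, assuming the worker FQDNs defined in the Vagrantfile:
- hosts: master
  become: yes
  become_user: ger
  tasks:
    - name: label worker nodes (optional, see README extras)
      command: kubectl label node {{ item }} node-role.kubernetes.io/worker=worker --overwrite
      with_items:
        - worker1.ger.test
        - worker2.ger.test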