diff --git a/.gitignore b/.gitignore
index 66c9b48678b..3f7924496fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
.vagrant
*.retry
-inventory/vagrant_ansible_inventory
+**/vagrant_ansible_inventory
+inventory/credentials/
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
temp
@@ -11,9 +12,9 @@ temp
*.tfstate
*.tfstate.backup
contrib/terraform/aws/credentials.tfvars
-**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
+*~
vagrant/
# Byte-compiled / optimized / DLL files
@@ -23,7 +24,7 @@ __pycache__/
# Distribution / packaging
.Python
-artifacts/
+inventory/*/artifacts/
env/
build/
credentials/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c21bb0c434b..d70a479f8dc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -20,6 +20,7 @@ variables:
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
+ ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
@@ -90,9 +91,9 @@ before_script:
- cd tests && make create-${CI_PLATFORM} -s ; cd -
# Check out latest tag if testing upgrade
- # Uncomment when gitlab kargo repo has tags
+ # Uncomment when gitlab kubespray repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- - test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
+ - test "${UPGRADE_TEST}" != "false" && git checkout 8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -102,14 +103,13 @@ before_script:
# Create cluster
- >
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
- -e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
@@ -122,14 +122,13 @@ before_script:
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e @${CI_TEST_VARS}
- -e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e local_release_dir=${PWD}/downloads
--limit "all:!fake_hosts"
@@ -139,20 +138,20 @@ before_script:
# Tests Cases
## Test Master API
- >
- ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+ ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
## Ping the between 2 pod
- - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+ - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+ - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -169,7 +168,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -184,7 +183,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -201,7 +200,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
- -i inventory/sample/hosts.ini
+ -i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -217,7 +216,7 @@ before_script:
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
- ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
+ ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@@ -241,6 +240,10 @@ before_script:
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
+.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
+# stage: deploy-part1
+ MOVED_TO_GROUP_VARS: "true"
+
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-part1
UPGRADE_TEST: "graceful"
@@ -257,6 +260,10 @@ before_script:
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
+.coreos_cilium_variables: &coreos_cilium_variables
+# stage: deploy-special
+ MOVED_TO_GROUP_VARS: "true"
+
.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
@@ -301,10 +308,18 @@ before_script:
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"
+.coreos_vault_upgrade_variables: &coreos_vault_upgrade_variables
+# stage: deploy-part1
+ UPGRADE_TEST: "basic"
+
.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
+.opensuse_canal_variables: &opensuse_canal_variables
+# stage: deploy-part2
+ MOVED_TO_GROUP_VARS: "true"
+
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
### PR JOBS PART1
@@ -320,6 +335,18 @@ gce_coreos-calico-aio:
only: [/^pr-.*$/]
### PR JOBS PART2
+
+gce_ubuntu18-flannel-aio:
+ stage: deploy-part2
+ <<: *job
+ <<: *gce
+ variables:
+ <<: *ubuntu18_flannel_aio_variables
+ <<: *gce_variables
+ when: manual
+ except: ['triggers']
+ only: [/^pr-.*$/]
+
gce_centos7-flannel-addons:
stage: deploy-part2
<<: *job
@@ -331,6 +358,17 @@ gce_centos7-flannel-addons:
except: ['triggers']
only: [/^pr-.*$/]
+gce_centos-weave-kubeadm:
+ stage: deploy-part2
+ <<: *job
+ <<: *gce
+ variables:
+ <<: *gce_variables
+ <<: *centos_weave_kubeadm_variables
+ when: on_success
+ except: ['triggers']
+ only: [/^pr-.*$/]
+
gce_ubuntu-weave-sep:
stage: deploy-part2
<<: *job
@@ -427,17 +465,6 @@ gce_ubuntu-canal-kubeadm-triggers:
when: on_success
only: ['triggers']
-gce_centos-weave-kubeadm:
- stage: deploy-part2
- <<: *job
- <<: *gce
- variables:
- <<: *gce_variables
- <<: *centos_weave_kubeadm_variables
- when: manual
- except: ['triggers']
- only: ['master', /^pr-.*$/]
-
gce_centos-weave-kubeadm-triggers:
stage: deploy-part2
<<: *job
@@ -459,6 +486,17 @@ gce_ubuntu-contiv-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]
+gce_coreos-cilium:
+ stage: deploy-special
+ <<: *job
+ <<: *gce
+ variables:
+ <<: *gce_variables
+ <<: *coreos_cilium_variables
+ when: manual
+ except: ['triggers']
+ only: ['master', /^pr-.*$/]
+
gce_ubuntu-cilium-sep:
stage: deploy-special
<<: *job
@@ -542,7 +580,7 @@ gce_rhel7-canal-sep:
<<: *rhel7_canal_sep_variables
when: manual
except: ['triggers']
- only: ['master', /^pr-.*$/,]
+ only: ['master', /^pr-.*$/]
gce_rhel7-canal-sep-triggers:
stage: deploy-part2
@@ -575,6 +613,17 @@ gce_centos7-calico-ha-triggers:
when: on_success
only: ['triggers']
+gce_opensuse-canal:
+ stage: deploy-part2
+ <<: *job
+ <<: *gce
+ variables:
+ <<: *gce_variables
+ <<: *opensuse_canal_variables
+ when: manual
+ except: ['triggers']
+ only: ['master', /^pr-.*$/]
+
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
stage: deploy-special
@@ -609,6 +658,17 @@ gce_ubuntu-vault-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]
+gce_coreos-vault-upgrade:
+ stage: deploy-part2
+ <<: *job
+ <<: *gce
+ variables:
+ <<: *gce_variables
+ <<: *coreos_vault_upgrade_variables
+ when: manual
+ except: ['triggers']
+ only: ['master', /^pr-.*$/]
+
gce_ubuntu-flannel-sep:
stage: deploy-special
<<: *job
diff --git a/OWNERS b/OWNERS
index 6ecbee5c928..1883a49304e 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,9 +1,7 @@
# See the OWNERS file documentation:
-# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md
+# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
-owners:
- - Smana
- - ant31
- - bogdando
- - mattymo
- - rsmitty
+approvers:
+ - kubespray-approvers
+reviewers:
+ - kubespray-reviewers
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
new file mode 100644
index 00000000000..a3aa995a7c8
--- /dev/null
+++ b/OWNERS_ALIASES
@@ -0,0 +1,18 @@
+aliases:
+ kubespray-approvers:
+ - ant31
+ - mattymo
+ - atoms
+ - chadswen
+ - rsmitty
+ - bogdando
+ - bradbeam
+ - woopstar
+ - riverzhang
+ - holser
+ - smana
+ kubespray-reviewers:
+ - jjungnickel
+ - archifleks
+ - chapsuk
+ - mirwan
diff --git a/README.md b/README.md
index c9ff3f2c9e4..59686019fde 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,15 @@
-![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
+![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-incubator/kubespray/master/docs/img/kubernetes-logo.png)
Deploy a Production Ready Kubernetes Cluster
============================================
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
+You can get your invite [here](http://slack.k8s.io/).
-- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
-- **High available** cluster
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
+- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
-- Support most popular **Linux distributions**
+- Supports most popular **Linux distributions**
- **Continuous integration tests**
Quick Start
@@ -18,8 +19,11 @@ To deploy the cluster you can use :
### Ansible
+ # Install dependencies from ``requirements.txt``
+ sudo pip install -r requirements.txt
+
# Copy ``inventory/sample`` as ``inventory/mycluster``
- cp -rfp inventory/sample inventory/mycluster
+ cp -rfp inventory/sample/* inventory/mycluster
# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
@@ -32,9 +36,27 @@ To deploy the cluster you can use :
# Deploy Kubespray with Ansible Playbook
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
+Note: When Ansible is already installed via system packages on the control machine, the other Python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) than Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible`, still on Ubuntu).
+As a consequence, the `ansible-playbook` command will fail with:
+```
+ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
+```
+probably pointing to a task that depends on a module listed in requirements.txt (e.g. "unseal vault").
+
+One way of solving this is to uninstall the system Ansible package and install Ansible via pip instead, but that is not always possible.
+A workaround is to set the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip installation location (shown in the Location field of `pip show [package]`) before executing `ansible-playbook`.
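+
+For example, a minimal sketch, assuming pip placed its packages under `/usr/local/lib/python2.7/dist-packages` (check the Location field of `pip show ansible` for the actual path on your machine):
+
+    export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
+    export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml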
+
### Vagrant
- # Simply running `vagrant up` (for tests purposes)
+For Vagrant, we need to install Python dependencies for the provisioning tasks.
+Check if Python and pip are installed:
+
+ python -V && pip -V
+
+If this returns the versions of both tools, you're good to go. If not, download and install Python before continuing.
+Then install the necessary requirements:
+
+ sudo pip install -r requirements.txt
vagrant up
Documents
@@ -52,6 +74,7 @@ Documents
- [Vagrant install](docs/vagrant.md)
- [CoreOS bootstrap](docs/coreos.md)
- [Debian Jessie setup](docs/debian.md)
+- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
@@ -66,28 +89,36 @@ Supported Linux Distributions
-----------------------------
- **Container Linux by CoreOS**
-- **Debian** Jessie
-- **Ubuntu** 16.04
+- **Debian** Jessie, Stretch, Wheezy
+- **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7
- **Fedora/CentOS** Atomic
+- **openSUSE** Leap 42.3/Tumbleweed
Note: Upstart/SysV init based OS types are not supported.
-Versions of supported components
---------------------------------
-
-- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.3
-- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
-- [flanneld](https://github.com/coreos/flannel/releases) v0.9.1
-- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
-- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-- [cilium](https://github.com/cilium/cilium) v1.0.0-rc4
-- [contiv](https://github.com/contiv/install/releases) v1.0.3
-- [weave](http://weave.works/) v2.0.1
-- [docker](https://www.docker.com/) v17.03 (see note)
-- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
-
-Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+Supported Components
+--------------------
+
+- Core
+ - [kubernetes](https://github.com/kubernetes/kubernetes) v1.11.3
+ - [etcd](https://github.com/coreos/etcd) v3.2.18
+ - [docker](https://www.docker.com/) v17.03 (see note)
+ - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
+- Network Plugin
+ - [calico](https://github.com/projectcalico/calico) v3.1.3
+ - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+ - [cilium](https://github.com/cilium/cilium) v1.2.0
+ - [contiv](https://github.com/contiv/install) v1.1.7
+ - [flanneld](https://github.com/coreos/flannel) v0.10.0
+ - [weave](https://github.com/weaveworks/weave) v2.4.0
+- Application
+ - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
+ - [cert-manager](https://github.com/jetstack/cert-manager) v0.4.1
+ - [coredns](https://github.com/coredns/coredns) v1.2.2
+ - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
+
+Note: kubernetes doesn't support newer docker versions ("Version 17.03 is recommended... Versions 17.06+ might work, but have not yet been tested and verified by the Kubernetes node team" cf. [Bootstrapping Clusters with kubeadm](https://kubernetes.io/docs/setup/independent/install-kubeadm/#installing-docker)). Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
Note 2: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
@@ -105,6 +136,9 @@ Requirements
- **Your ssh key must be copied** to all the servers part of your inventory.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should disable your firewall.
+- If kubespray is run from a non-root user account, the correct privilege escalation method
+  must be configured on the target servers, and the `ansible_become` flag or the command
+  parameters `--become` or `-b` should be specified.
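+  For example (a sketch using the sample inventory path from the Quick Start):
+
+      ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root cluster.yml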
Network Plugins
---------------
@@ -117,7 +151,7 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
-- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
+- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
@@ -147,8 +181,6 @@ Tools and projects on top of Kubespray
CI Tests
--------
-![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
-
[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
CI/end-to-end tests sponsored by Google (GCE)
diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS
new file mode 100644
index 00000000000..df77f138252
--- /dev/null
+++ b/SECURITY_CONTACTS
@@ -0,0 +1,13 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+atoms
+mattymo
\ No newline at end of file
diff --git a/Vagrantfile b/Vagrantfile
index 9db4be3a1b9..df650f1e8cd 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -3,7 +3,7 @@
require 'fileutils'
-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
@@ -18,6 +18,8 @@ SUPPORTED_OS = {
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
"centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
+ "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
+ "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
}
# Defaults for config options defined in CONFIG
@@ -42,6 +44,8 @@ $kube_node_instances_with_disks = false
$kube_node_instances_with_disks_size = "20G"
$kube_node_instances_with_disks_number = 2
+$playbook = "cluster.yml"
+
$local_release_dir = "/vagrant/temp"
host_vars = {}
@@ -52,7 +56,7 @@ end
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
-$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
+$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
# if $inventory has a hosts file use it, otherwise copy over vars etc
# to where vagrant expects dynamic inventory to be.
@@ -84,7 +88,6 @@ Vagrant.configure("2") do |config|
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end
-
(1..$num_instances).each do |i|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
config.vm.hostname = vm_name
@@ -110,8 +113,10 @@ Vagrant.configure("2") do |config|
end
end
+ config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+
$shared_folders.each do |src, dst|
- config.vm.synced_folder src, dst
+ config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
end
config.vm.provider :virtualbox do |vb|
@@ -135,12 +140,6 @@ Vagrant.configure("2") do |config|
config.vm.network :private_network, ip: ip
- # workaround for Vagrant 1.9.1 and centos vm
- # https://github.com/hashicorp/vagrant/issues/8096
- if Vagrant::VERSION == "1.9.1" && $os == "centos"
- config.vm.provision "shell", inline: "service network restart", run: "always"
- end
-
# Disable swap for each vm
config.vm.provision "shell", inline: "swapoff -a"
@@ -160,11 +159,11 @@ Vagrant.configure("2") do |config|
# when all the machines are up and ready.
if i == $num_instances
config.vm.provision "ansible" do |ansible|
- ansible.playbook = "cluster.yml"
+ ansible.playbook = $playbook
if File.exist?(File.join(File.dirname($inventory), "hosts"))
ansible.inventory_path = $inventory
end
- ansible.sudo = true
+ ansible.become = true
ansible.limit = "all"
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
diff --git a/ansible.cfg b/ansible.cfg
index 732e3bf6e31..6912e4d0891 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -12,3 +12,6 @@ library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
+inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+[inventory]
+ignore_patterns = artifacts, credentials
diff --git a/cluster.yml b/cluster.yml
index 00c68a5939f..6a9de14da1a 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -26,18 +26,19 @@
setup:
delegate_to: "{{item}}"
delegate_facts: True
- with_items: "{{ groups['k8s-cluster'] + groups['etcd'] }}"
+ with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- - { role: docker, tags: docker }
+ - { role: docker, tags: docker, when: container_manager == 'docker' }
+ - { role: cri-o, tags: crio, when: container_manager == 'crio' }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
environment: "{{proxy_env}}"
- hosts: etcd:k8s-cluster:vault:calico-rr
@@ -51,13 +52,13 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- - { role: etcd, tags: etcd, etcd_cluster_setup: true }
+ - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+ - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
- hosts: etcd:k8s-cluster:vault:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -93,6 +94,7 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
+ - { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md
index c15d3ecf253..b83aeeb9bbf 100644
--- a/contrib/azurerm/README.md
+++ b/contrib/azurerm/README.md
@@ -9,8 +9,8 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a
## Requirements
-- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
-- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
+- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
+- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
- Dedicated Resource Group created in the Azure Portal or through azure-cli
## Configuration through group_vars/all
diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 696be6d57e8..3e9728e715a 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -1,6 +1,6 @@
{% for vm in vm_ip_list %}
-{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
+{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py
index 94e5c7c352c..3ed14cbe394 100644
--- a/contrib/inventory_builder/inventory.py
+++ b/contrib/inventory_builder/inventory.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/contrib/metallb/README.md b/contrib/metallb/README.md
new file mode 100644
index 00000000000..7095d58f816
--- /dev/null
+++ b/contrib/metallb/README.md
@@ -0,0 +1,10 @@
+# Deploy MetalLB into Kubespray/Kubernetes
+> MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
+This playbook aims to automate [this tutorial](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into Kubernetes and sets up a layer 2 load balancer.
+
+## Install
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml
+```
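+
+The defaults in `contrib/metallb/roles/provision/defaults/main.yml` (address pool, resource limits, port) can be overridden like any other Ansible variable; a minimal sketch, passing a replacement `metallb` dict as extra vars (the IP range below is only an example for your network):
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml \
+  -e '{"metallb":{"ip_range":"192.168.1.240-192.168.1.250","limits":{"cpu":"100m","memory":"100Mi"},"port":"7472"}}' \
+  contrib/metallb/metallb.yml
+```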
diff --git a/contrib/metallb/metallb.yml b/contrib/metallb/metallb.yml
new file mode 100644
index 00000000000..618a1d22315
--- /dev/null
+++ b/contrib/metallb/metallb.yml
@@ -0,0 +1,6 @@
+---
+- hosts: kube-master[0]
+ tags:
+ - "provision"
+ roles:
+ - { role: provision }
diff --git a/contrib/metallb/roles/provision/defaults/main.yml b/contrib/metallb/roles/provision/defaults/main.yml
new file mode 100644
index 00000000000..c7d0bf904b3
--- /dev/null
+++ b/contrib/metallb/roles/provision/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+metallb:
+ ip_range: "10.5.0.50-10.5.0.99"
+ limits:
+ cpu: "100m"
+ memory: "100Mi"
+ port: "7472"
diff --git a/contrib/metallb/roles/provision/tasks/main.yml b/contrib/metallb/roles/provision/tasks/main.yml
new file mode 100644
index 00000000000..47c4b624a8d
--- /dev/null
+++ b/contrib/metallb/roles/provision/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+- name: "Kubernetes Apps | Lay Down MetalLB"
+ become: true
+ template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
+ with_items: ["metallb.yml", "metallb-config.yml"]
+ register: "rendering"
+ when:
+ - "inventory_hostname == groups['kube-master'][0]"
+- name: "Kubernetes Apps | Install and configure MetalLB"
+ kube:
+ name: "MetalLB"
+ filename: "{{ kube_config_dir }}/metallb.yml"
+ state: "{{ item.changed | ternary('latest','present') }}"
+ with_items: "{{ rendering.results }}"
+ when:
+ - "inventory_hostname == groups['kube-master'][0]"
diff --git a/contrib/metallb/roles/provision/templates/metallb-config.yml.j2 b/contrib/metallb/roles/provision/templates/metallb-config.yml.j2
new file mode 100644
index 00000000000..2e58f2d5f15
--- /dev/null
+++ b/contrib/metallb/roles/provision/templates/metallb-config.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ namespace: metallb-system
+ name: config
+data:
+ config: |
+ address-pools:
+ - name: loadbalanced
+ protocol: layer2
+ addresses:
+ - {{ metallb.ip_range }}
diff --git a/contrib/metallb/roles/provision/templates/metallb.yml.j2 b/contrib/metallb/roles/provision/templates/metallb.yml.j2
new file mode 100644
index 00000000000..c9532f014ae
--- /dev/null
+++ b/contrib/metallb/roles/provision/templates/metallb.yml.j2
@@ -0,0 +1,254 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: metallb-system
+ labels:
+ app: metallb
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ namespace: metallb-system
+ name: controller
+ labels:
+ app: metallb
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ namespace: metallb-system
+ name: speaker
+ labels:
+ app: metallb
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: metallb-system:controller
+ labels:
+ app: metallb
+rules:
+- apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get", "list", "watch", "update"]
+- apiGroups: [""]
+ resources: ["services/status"]
+ verbs: ["update"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: metallb-system:speaker
+ labels:
+ app: metallb
+rules:
+- apiGroups: [""]
+ resources: ["services", "endpoints", "nodes"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ namespace: metallb-system
+ name: leader-election
+ labels:
+ app: metallb
+rules:
+- apiGroups: [""]
+ resources: ["endpoints"]
+ resourceNames: ["metallb-speaker"]
+ verbs: ["get", "update"]
+- apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ namespace: metallb-system
+ name: config-watcher
+ labels:
+ app: metallb
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+
+## Role bindings
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: metallb-system:controller
+ labels:
+ app: metallb
+subjects:
+- kind: ServiceAccount
+ name: controller
+ namespace: metallb-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: metallb-system:controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: metallb-system:speaker
+ labels:
+ app: metallb
+subjects:
+- kind: ServiceAccount
+ name: speaker
+ namespace: metallb-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: metallb-system:speaker
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ namespace: metallb-system
+ name: config-watcher
+ labels:
+ app: metallb
+subjects:
+- kind: ServiceAccount
+ name: controller
+- kind: ServiceAccount
+ name: speaker
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: config-watcher
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ namespace: metallb-system
+ name: leader-election
+ labels:
+ app: metallb
+subjects:
+- kind: ServiceAccount
+ name: speaker
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election
+---
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+ namespace: metallb-system
+ name: speaker
+ labels:
+ app: metallb
+ component: speaker
+spec:
+ selector:
+ matchLabels:
+ app: metallb
+ component: speaker
+ template:
+ metadata:
+ labels:
+ app: metallb
+ component: speaker
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ metallb.port }}"
+ spec:
+ serviceAccountName: speaker
+ terminationGracePeriodSeconds: 0
+ hostNetwork: true
+ containers:
+ - name: speaker
+ image: metallb/speaker:v0.6.2
+ imagePullPolicy: IfNotPresent
+ args:
+ - --port={{ metallb.port }}
+ - --config=config
+ env:
+ - name: METALLB_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ ports:
+ - name: monitoring
+ containerPort: {{ metallb.port }}
+ resources:
+ limits:
+ cpu: {{ metallb.limits.cpu }}
+ memory: {{ metallb.limits.memory }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ capabilities:
+ drop:
+ - all
+ add:
+ - net_raw
+
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ namespace: metallb-system
+ name: controller
+ labels:
+ app: metallb
+ component: controller
+spec:
+ revisionHistoryLimit: 3
+ selector:
+ matchLabels:
+ app: metallb
+ component: controller
+ template:
+ metadata:
+ labels:
+ app: metallb
+ component: controller
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ metallb.port }}"
+ spec:
+ serviceAccountName: controller
+ terminationGracePeriodSeconds: 0
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 65534 # nobody
+ containers:
+ - name: controller
+ image: metallb/controller:v0.6.2
+ imagePullPolicy: IfNotPresent
+ args:
+ - --port={{ metallb.port }}
+ - --config=config
+ ports:
+ - name: monitoring
+ containerPort: {{ metallb.port }}
+ resources:
+ limits:
+ cpu: {{ metallb.limits.cpu }}
+ memory: {{ metallb.limits.memory }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+
+---
+
+
diff --git a/contrib/network-storage/glusterfs/group_vars b/contrib/network-storage/glusterfs/group_vars
index d64da8dc611..6a3f85e47c9 120000
--- a/contrib/network-storage/glusterfs/group_vars
+++ b/contrib/network-storage/glusterfs/group_vars
@@ -1 +1 @@
-../../../inventory/group_vars
\ No newline at end of file
+../../../inventory/local/group_vars
\ No newline at end of file
diff --git a/contrib/network-storage/glusterfs/inventory.example b/contrib/network-storage/glusterfs/inventory.example
index 41e36c8da3c..15fbad0a815 100644
--- a/contrib/network-storage/glusterfs/inventory.example
+++ b/contrib/network-storage/glusterfs/inventory.example
@@ -12,7 +12,7 @@
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
-# gfs_node1 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
+# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# [kube-master]
# node1
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
index 5ca49386706..b9f0d2d1d3b 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
@@ -2,7 +2,7 @@
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "4.1"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
index 1c876338863..ef9a71eba40 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
@@ -2,7 +2,7 @@
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "3.12"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
index 13c595f745c..e931068ae37 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml
@@ -1,2 +1,2 @@
---
-glusterfs_daemon: glusterfs-server
+glusterfs_daemon: glusterd
diff --git a/contrib/network-storage/heketi/README.md b/contrib/network-storage/heketi/README.md
new file mode 100644
index 00000000000..aa1b656e5c4
--- /dev/null
+++ b/contrib/network-storage/heketi/README.md
@@ -0,0 +1,16 @@
+# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
+This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.
+
+## Client Setup
+Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
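+
+A quick smoke test of the client might look like this (the server address is a placeholder; the admin key is the `heketi_admin_key` value from your inventory):
+```
+heketi-cli -s http://<heketi-endpoint> --user admin --secret <heketi_admin_key> cluster list
+```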
+
+## Install
+Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
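+For example:
+```
+cp contrib/network-storage/heketi/inventory.yml.sample inventory/sample/k8s_heketi_inventory.yml
+```
+Then run the playbook: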
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
+```
+
+## Tear down
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
+```
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
new file mode 100644
index 00000000000..92b9f92d64e
--- /dev/null
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -0,0 +1,9 @@
+---
+- hosts: kube-master[0]
+ roles:
+ - { role: tear-down }
+
+- hosts: heketi-node
+ become: yes
+ roles:
+ - { role: tear-down-disks }
diff --git a/contrib/network-storage/heketi/heketi.yml b/contrib/network-storage/heketi/heketi.yml
new file mode 100644
index 00000000000..3ec719e95b7
--- /dev/null
+++ b/contrib/network-storage/heketi/heketi.yml
@@ -0,0 +1,10 @@
+---
+- hosts: heketi-node
+ roles:
+ - { role: prepare }
+
+- hosts: kube-master[0]
+ tags:
+ - "provision"
+ roles:
+ - { role: provision }
diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample
new file mode 100644
index 00000000000..7d488d1ba14
--- /dev/null
+++ b/contrib/network-storage/heketi/inventory.yml.sample
@@ -0,0 +1,26 @@
+all:
+ vars:
+ heketi_admin_key: "11elfeinhundertundelf"
+ heketi_user_key: "!!einseinseins"
+ children:
+ k8s-cluster:
+ vars:
+ kubelet_fail_swap_on: false
+ children:
+ kube-master:
+ hosts:
+ node1:
+ etcd:
+ hosts:
+ node2:
+ kube-node:
+ hosts: &kube_nodes
+ node1:
+ node2:
+ node3:
+ node4:
+ heketi-node:
+ vars:
+ disk_volume_device_1: "/dev/vdb"
+ hosts:
+ <<: *kube_nodes
diff --git a/contrib/network-storage/heketi/requirements.txt b/contrib/network-storage/heketi/requirements.txt
new file mode 100644
index 00000000000..45c1e038e5f
--- /dev/null
+++ b/contrib/network-storage/heketi/requirements.txt
@@ -0,0 +1 @@
+jmespath
diff --git a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
new file mode 100644
index 00000000000..e4db23365bf
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: "Load lvm kernel modules"
+ become: true
+ with_items:
+ - "dm_snapshot"
+ - "dm_mirror"
+ - "dm_thin_pool"
+ modprobe:
+ name: "{{ item }}"
+ state: "present"
+
+- name: "Install glusterfs mount utils (RedHat)"
+ become: true
+ yum:
+ name: "glusterfs-fuse"
+ state: "present"
+ when: "ansible_os_family == 'RedHat'"
+
+- name: "Install glusterfs mount utils (Debian)"
+ become: true
+ apt:
+ name: "glusterfs-client"
+ state: "present"
+ when: "ansible_os_family == 'Debian'"
diff --git a/contrib/network-storage/heketi/roles/provision/defaults/main.yml b/contrib/network-storage/heketi/roles/provision/defaults/main.yml
new file mode 100644
index 00000000000..ed97d539c09
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/contrib/network-storage/heketi/roles/provision/handlers/main.yml b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
new file mode 100644
index 00000000000..9e876de177b
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: "stop port forwarding"
+ command: "killall "
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
new file mode 100644
index 00000000000..572913a63e0
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -0,0 +1,56 @@
+# Bootstrap heketi
+- name: "Get state of heketi service, deployment and pods."
+ register: "initial_heketi_state"
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
+- name: "Bootstrap heketi."
+ when:
+ - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
+ - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
+ - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
+ include_tasks: "bootstrap/deploy.yml"
+
+# Prepare heketi topology
+- name: "Get heketi initial pod state."
+ register: "initial_heketi_pod"
+ command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
+ changed_when: false
+- name: "Ensure heketi bootstrap pod is up."
+ assert:
+ that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
+- set_fact:
+ initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+- name: "Test heketi topology."
+ changed_when: false
+ register: "heketi_topology"
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+- name: "Load heketi topology."
+ when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
+ include_tasks: "bootstrap/topology.yml"
+
+# Provision heketi database volume
+- name: "Prepare heketi volumes."
+ include_tasks: "bootstrap/volumes.yml"
+
+# Remove bootstrap heketi
+- name: "Tear down bootstrap."
+ include_tasks: "bootstrap/tear-down.yml"
+
+# Prepare heketi storage
+- name: "Test heketi storage."
+ command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
+ changed_when: false
+ register: "heketi_storage_state"
+# ensure endpoints actually exist before trying to move database data to it
+- name: "Create heketi storage."
+ include_tasks: "bootstrap/storage.yml"
+ vars:
+ secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
+ endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
+ service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
+ job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
+ when:
+ - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
new file mode 100644
index 00000000000..3037d8b77ca
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -0,0 +1,24 @@
+---
+- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
+ become: true
+ template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
+- name: "Wait for heketi bootstrap to complete."
+ changed_when: false
+ register: "initial_heketi_state"
+ vars:
+ initial_heketi_state: { stdout: "{}" }
+ pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
+ deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
+ command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
+ until:
+ - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+ - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+ retries: 60
+ delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
new file mode 100644
index 00000000000..be3c42cafa7
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
@@ -0,0 +1,33 @@
+---
+- name: "Test heketi storage."
+ command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
+ changed_when: false
+ register: "heketi_storage_state"
+- name: "Create heketi storage."
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
+ state: "present"
+ vars:
+ secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
+ endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
+ service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
+ job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
+ when:
+ - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+ - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
+ register: "heketi_storage_result"
+- name: "Get state of heketi database copy job."
+ command: "{{ bin_dir }}/kubectl get jobs --output=json"
+ changed_when: false
+ register: "heketi_storage_state"
+ vars:
+ heketi_storage_state: { stdout: "{}" }
+ job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
+ until:
+ - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
+ retries: 60
+ delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
new file mode 100644
index 00000000000..0ffd6f469f2
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
@@ -0,0 +1,14 @@
+---
+- name: "Get existing Heketi deploy resources."
+ command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
+ register: "heketi_resources"
+ changed_when: false
+- name: "Delete bootstrap Heketi."
+ command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
+ when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
+- name: "Ensure there is nothing left over."
+ command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
+ register: "heketi_result"
+ until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+ retries: 60
+ delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
new file mode 100644
index 00000000000..7d2c5981e7e
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -0,0 +1,26 @@
+---
+- name: "Get heketi topology."
+ changed_when: false
+ register: "heketi_topology"
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+- name: "Render heketi topology template."
+ become: true
+ vars: { nodes: "{{ groups['heketi-node'] }}" }
+ register: "render"
+ template:
+ src: "topology.json.j2"
+ dest: "{{ kube_config_dir }}/topology.json"
+- name: "Copy topology configuration into container."
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
+- name: "Load heketi topology."
+ when: "render.changed"
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
+ register: "load_heketi"
+- name: "Get heketi topology."
+ changed_when: false
+ register: "heketi_topology"
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+ until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+ retries: 60
+ delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
new file mode 100644
index 00000000000..d5da1a12588
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
@@ -0,0 +1,41 @@
+---
+- name: "Get heketi volume ids."
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
+ changed_when: false
+ register: "heketi_volumes"
+- name: "Get heketi volumes."
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
+ with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+ loop_control: { loop_var: "volume_id" }
+ register: "volumes_information"
+- name: "Test heketi database volume."
+ set_fact: { heketi_database_volume_exists: true }
+ with_items: "{{ volumes_information.results }}"
+ loop_control: { loop_var: "volume_information" }
+ vars: { volume: "{{ volume_information.stdout|from_json }}" }
+ when: "volume.name == 'heketidbstorage'"
+- name: "Provision database volume."
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
+ when: "heketi_database_volume_exists is undefined"
+- name: "Copy configuration from pod."
+ become: true
+ command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
+- name: "Get heketi volume ids."
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
+ changed_when: false
+ register: "heketi_volumes"
+- name: "Get heketi volumes."
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
+ with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+ loop_control: { loop_var: "volume_id" }
+ register: "volumes_information"
+- name: "Test heketi database volume."
+ set_fact: { heketi_database_volume_created: true }
+ with_items: "{{ volumes_information.results }}"
+ loop_control: { loop_var: "volume_information" }
+ vars: { volume: "{{ volume_information.stdout|from_json }}" }
+ when: "volume.name == 'heketidbstorage'"
+- name: "Ensure heketi database volume exists."
+ assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml b/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml
new file mode 100644
index 00000000000..238f29bc214
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml
@@ -0,0 +1,4 @@
+---
+- name: "Clean up left over jobs."
+ command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
+ changed_when: false
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
new file mode 100644
index 00000000000..e4615996937
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -0,0 +1,38 @@
+---
+- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
+ template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
+ become: true
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
+- name: "Kubernetes Apps | Label GlusterFS nodes"
+ include_tasks: "glusterfs/label.yml"
+ with_items: "{{ groups['heketi-node'] }}"
+ loop_control:
+ loop_var: "node"
+- name: "Kubernetes Apps | Wait for daemonset to become available."
+ register: "daemonset_state"
+ command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
+ changed_when: false
+ vars:
+ daemonset_state: { stdout: "{}" }
+ ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
+ desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
+ until: "ready >= 3"
+ retries: 60
+ delay: 5
+
+- name: "Kubernetes Apps | Lay Down Heketi Service Account"
+ template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
+ become: true
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi Service Account"
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/heketi-service-account.json"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
new file mode 100644
index 00000000000..61729a5e29b
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
@@ -0,0 +1,11 @@
+---
+- register: "label_present"
+ command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
+ changed_when: false
+- name: "Assign storage label"
+ when: "label_present.stdout_lines|length == 0"
+ command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
+- register: "label_present"
+ command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
+ changed_when: false
+- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
new file mode 100644
index 00000000000..029baef947b
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -0,0 +1,26 @@
+---
+- name: "Kubernetes Apps | Lay Down Heketi"
+ become: true
+ template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi"
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/heketi-deployment.json"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
+- name: "Ensure heketi is up and running."
+ changed_when: false
+ register: "heketi_state"
+ vars:
+ heketi_state: { stdout: "{}" }
+ pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
+ deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
+ command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
+ until:
+ - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+ - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+ retries: 60
+ delay: 5
+- set_fact:
+ heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
new file mode 100644
index 00000000000..23a2b4f9c72
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: "Kubernetes Apps | GlusterFS"
+ include_tasks: "glusterfs.yml"
+
+- name: "Kubernetes Apps | Heketi Secrets"
+ include_tasks: "secret.yml"
+
+- name: "Kubernetes Apps | Test Heketi"
+ register: "heketi_service_state"
+ command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+ changed_when: false
+
+- name: "Kubernetes Apps | Bootstrap Heketi"
+ when: "heketi_service_state.stdout == \"\""
+ include_tasks: "bootstrap.yml"
+
+- name: "Kubernetes Apps | Heketi"
+ include_tasks: "heketi.yml"
+
+- name: "Kubernetes Apps | Heketi Topology"
+ include_tasks: "topology.yml"
+
+- name: "Kubernetes Apps | Heketi Storage"
+ include_tasks: "storage.yml"
+
+- name: "Kubernetes Apps | Storage Class"
+ include_tasks: "storageclass.yml"
+
+- name: "Clean up"
+ include_tasks: "cleanup.yml"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
new file mode 100644
index 00000000000..8ca21bcb693
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -0,0 +1,27 @@
+---
+- register: "clusterrolebinding_state"
+ command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+ changed_when: false
+- name: "Kubernetes Apps | Deploy cluster role binding."
+ when: "clusterrolebinding_state.stdout == \"\""
+ command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+- register: "clusterrolebinding_state"
+ command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+ changed_when: false
+- assert: { that: "clusterrolebinding_state.stdout != \"\"", message: "Cluster role binding is not present." }
+
+- register: "secret_state"
+ command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+ changed_when: false
+- name: "Render Heketi secret configuration."
+ become: true
+ template:
+ src: "heketi.json.j2"
+ dest: "{{ kube_config_dir }}/heketi.json"
+- name: "Deploy Heketi config secret"
+ when: "secret_state.stdout == \"\""
+ command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+- register: "secret_state"
+ command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+ changed_when: false
+- assert: { that: "secret_state.stdout != \"\"", message: "Heketi config secret is not present." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
new file mode 100644
index 00000000000..881084bbe1d
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -0,0 +1,11 @@
+---
+- name: "Kubernetes Apps | Lay Down Heketi Storage"
+ become: true
+ vars: { nodes: "{{ groups['heketi-node'] }}" }
+ template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi Storage"
+ kube:
+ name: "GlusterFS"
+ filename: "{{ kube_config_dir }}/heketi-storage.json"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
new file mode 100644
index 00000000000..afd818eb31b
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -0,0 +1,25 @@
+---
+- name: "Test storage class."
+ command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
+ register: "storageclass"
+ changed_when: false
+- name: "Test heketi service."
+ command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
+ register: "heketi_service"
+ changed_when: false
+- name: "Ensure heketi service is available."
+ assert: { that: "heketi_service.stdout != \"\"" }
+- name: "Render storage class configuration."
+ become: true
+ vars:
+ endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
+ template:
+ src: "storageclass.yml.j2"
+ dest: "{{ kube_config_dir }}/storageclass.yml"
+ register: "rendering"
+- name: "Kubernetes Apps | Install and configure Storace Class"
+ kube:
+ name: "GlusterFS"
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "{{ kube_config_dir }}/storageclass.yml"
+ state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
new file mode 100644
index 00000000000..dd1e272beb0
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -0,0 +1,25 @@
+---
+- name: "Get heketi topology."
+ register: "heketi_topology"
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+- name: "Render heketi topology template."
+ become: true
+ vars: { nodes: "{{ groups['heketi-node'] }}" }
+ register: "rendering"
+ template:
+ src: "topology.json.j2"
+ dest: "{{ kube_config_dir }}/topology.json"
+- name: "Copy topology configuration into container."
+ when: "rendering.changed"
+ command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
+- name: "Load heketi topology."
+ when: "rendering.changed"
+ command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
+- name: "Get heketi topology."
+ register: "heketi_topology"
+ changed_when: false
+ command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+ until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+ retries: 60
+ delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2
new file mode 100644
index 00000000000..eddd57eb817
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2
@@ -0,0 +1,144 @@
+{
+ "kind": "DaemonSet",
+ "apiVersion": "extensions/v1beta1",
+ "metadata": {
+ "name": "glusterfs",
+ "labels": {
+ "glusterfs": "deployment"
+ },
+ "annotations": {
+ "description": "GlusterFS Daemon Set",
+ "tags": "glusterfs"
+ }
+ },
+ "spec": {
+ "template": {
+ "metadata": {
+ "name": "glusterfs",
+ "labels": {
+ "glusterfs-node": "daemonset"
+ }
+ },
+ "spec": {
+ "nodeSelector": {
+ "storagenode" : "glusterfs"
+ },
+ "hostNetwork": true,
+ "containers": [
+ {
+ "image": "gluster/gluster-centos:gluster4u0_centos7",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "glusterfs",
+ "volumeMounts": [
+ {
+ "name": "glusterfs-heketi",
+ "mountPath": "/var/lib/heketi"
+ },
+ {
+ "name": "glusterfs-run",
+ "mountPath": "/run"
+ },
+ {
+ "name": "glusterfs-lvm",
+ "mountPath": "/run/lvm"
+ },
+ {
+ "name": "glusterfs-etc",
+ "mountPath": "/etc/glusterfs"
+ },
+ {
+ "name": "glusterfs-logs",
+ "mountPath": "/var/log/glusterfs"
+ },
+ {
+ "name": "glusterfs-config",
+ "mountPath": "/var/lib/glusterd"
+ },
+ {
+ "name": "glusterfs-dev",
+ "mountPath": "/dev"
+ },
+ {
+ "name": "glusterfs-cgroup",
+ "mountPath": "/sys/fs/cgroup"
+ }
+ ],
+ "securityContext": {
+ "capabilities": {},
+ "privileged": true
+ },
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 60,
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "systemctl status glusterd.service"
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 60,
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "systemctl status glusterd.service"
+ ]
+ }
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "glusterfs-heketi",
+ "hostPath": {
+ "path": "/var/lib/heketi"
+ }
+ },
+ {
+ "name": "glusterfs-run"
+ },
+ {
+ "name": "glusterfs-lvm",
+ "hostPath": {
+ "path": "/run/lvm"
+ }
+ },
+ {
+ "name": "glusterfs-etc",
+ "hostPath": {
+ "path": "/etc/glusterfs"
+ }
+ },
+ {
+ "name": "glusterfs-logs",
+ "hostPath": {
+ "path": "/var/log/glusterfs"
+ }
+ },
+ {
+ "name": "glusterfs-config",
+ "hostPath": {
+ "path": "/var/lib/glusterd"
+ }
+ },
+ {
+ "name": "glusterfs-dev",
+ "hostPath": {
+ "path": "/dev"
+ }
+ },
+ {
+ "name": "glusterfs-cgroup",
+ "hostPath": {
+ "path": "/sys/fs/cgroup"
+ }
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2
new file mode 100644
index 00000000000..bdcf3e9588f
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2
@@ -0,0 +1,133 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "items": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "deploy-heketi",
+ "labels": {
+ "glusterfs": "heketi-service",
+ "deploy-heketi": "support"
+ },
+ "annotations": {
+ "description": "Exposes Heketi Service"
+ }
+ },
+ "spec": {
+ "selector": {
+ "name": "deploy-heketi"
+ },
+ "ports": [
+ {
+ "name": "deploy-heketi",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ]
+ }
+ },
+ {
+ "kind": "Deployment",
+ "apiVersion": "extensions/v1beta1",
+ "metadata": {
+ "name": "deploy-heketi",
+ "labels": {
+ "glusterfs": "heketi-deployment",
+ "deploy-heketi": "deployment"
+ },
+ "annotations": {
+ "description": "Defines how to deploy Heketi"
+ }
+ },
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "metadata": {
+ "name": "deploy-heketi",
+ "labels": {
+ "name": "deploy-heketi",
+ "glusterfs": "heketi-pod",
+ "deploy-heketi": "pod"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "heketi-service-account",
+ "containers": [
+ {
+ "image": "heketi/heketi:7",
+ "imagePullPolicy": "Always",
+ "name": "deploy-heketi",
+ "env": [
+ {
+ "name": "HEKETI_EXECUTOR",
+ "value": "kubernetes"
+ },
+ {
+ "name": "HEKETI_DB_PATH",
+ "value": "/var/lib/heketi/heketi.db"
+ },
+ {
+ "name": "HEKETI_FSTAB",
+ "value": "/var/lib/heketi/fstab"
+ },
+ {
+ "name": "HEKETI_SNAPSHOT_LIMIT",
+ "value": "14"
+ },
+ {
+ "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
+ "value": "y"
+ }
+ ],
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "db",
+ "mountPath": "/var/lib/heketi"
+ },
+ {
+ "name": "config",
+ "mountPath": "/etc/heketi"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/hello",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/hello",
+ "port": 8080
+ }
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "db"
+ },
+ {
+ "name": "config",
+ "secret": {
+ "secretName": "heketi-config-secret"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2
new file mode 100644
index 00000000000..5eb71718cc9
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2
@@ -0,0 +1,159 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "items": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "heketi-db-backup",
+ "labels": {
+ "glusterfs": "heketi-db",
+ "heketi": "db"
+ }
+ },
+ "data": {
+ },
+ "type": "Opaque"
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "heketi",
+ "labels": {
+ "glusterfs": "heketi-service",
+ "deploy-heketi": "support"
+ },
+ "annotations": {
+ "description": "Exposes Heketi Service"
+ }
+ },
+ "spec": {
+ "selector": {
+ "name": "heketi"
+ },
+ "ports": [
+ {
+ "name": "heketi",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ]
+ }
+ },
+ {
+ "kind": "Deployment",
+ "apiVersion": "extensions/v1beta1",
+ "metadata": {
+ "name": "heketi",
+ "labels": {
+ "glusterfs": "heketi-deployment"
+ },
+ "annotations": {
+ "description": "Defines how to deploy Heketi"
+ }
+ },
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "metadata": {
+ "name": "heketi",
+ "labels": {
+ "name": "heketi",
+ "glusterfs": "heketi-pod"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "heketi-service-account",
+ "containers": [
+ {
+ "image": "heketi/heketi:7",
+ "imagePullPolicy": "Always",
+ "name": "heketi",
+ "env": [
+ {
+ "name": "HEKETI_EXECUTOR",
+ "value": "kubernetes"
+ },
+ {
+ "name": "HEKETI_DB_PATH",
+ "value": "/var/lib/heketi/heketi.db"
+ },
+ {
+ "name": "HEKETI_FSTAB",
+ "value": "/var/lib/heketi/fstab"
+ },
+ {
+ "name": "HEKETI_SNAPSHOT_LIMIT",
+ "value": "14"
+ },
+ {
+ "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
+ "value": "y"
+ }
+ ],
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/backupdb",
+ "name": "heketi-db-secret"
+ },
+ {
+ "name": "db",
+ "mountPath": "/var/lib/heketi"
+ },
+ {
+ "name": "config",
+ "mountPath": "/etc/heketi"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/hello",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/hello",
+ "port": 8080
+ }
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "db",
+ "glusterfs": {
+ "endpoints": "heketi-storage-endpoints",
+ "path": "heketidbstorage"
+ }
+ },
+ {
+ "name": "heketi-db-secret",
+ "secret": {
+ "secretName": "heketi-db-backup"
+ }
+ },
+ {
+ "name": "config",
+ "secret": {
+ "secretName": "heketi-config-secret"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2
new file mode 100644
index 00000000000..1dbcb9e962c
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2
@@ -0,0 +1,7 @@
+{
+ "apiVersion": "v1",
+ "kind": "ServiceAccount",
+ "metadata": {
+ "name": "heketi-service-account"
+ }
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2
new file mode 100644
index 00000000000..3089256c96f
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2
@@ -0,0 +1,54 @@
+{
+ "apiVersion": "v1",
+ "kind": "List",
+ "items": [
+ {
+ "kind": "Endpoints",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "heketi-storage-endpoints",
+ "creationTimestamp": null
+ },
+ "subsets": [
+{% set nodeblocks = [] %}
+{% for node in nodes %}
+{% set nodeblock %}
+ {
+ "addresses": [
+ {
+ "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
+ }
+ ],
+ "ports": [
+ {
+ "port": 1
+ }
+ ]
+ }
+{% endset %}
+{% if nodeblocks.append(nodeblock) %}{% endif %}
+{% endfor %}
+{{ nodeblocks|join(',') }}
+ ]
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "heketi-storage-endpoints",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "port": 1,
+ "targetPort": 0
+ }
+ ]
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+ ]
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2
new file mode 100644
index 00000000000..5861b684b43
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2
@@ -0,0 +1,44 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port": "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth": true,
+
+ "_jwt": "Private keys for access",
+ "jwt": {
+ "_admin": "Admin has access to all APIs",
+ "admin": {
+ "key": "{{ heketi_admin_key }}"
+ },
+ "_user": "User only has access to /volumes endpoint",
+ "user": {
+ "key": "{{ heketi_user_key }}"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs": {
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor": "kubernetes",
+
+ "_db_comment": "Database file name",
+ "db": "/var/lib/heketi/heketi.db",
+
+ "kubeexec": {
+ "rebalance_on_expansion": true
+ },
+
+ "sshexec": {
+ "rebalance_on_expansion": true,
+ "keyfile": "/etc/heketi/private_key",
+ "fstab": "/etc/fstab",
+ "port": "22",
+ "user": "root",
+ "sudo": false
+ }
+ },
+
+ "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
+ "backup_db_to_kube_secret": false
+}
diff --git a/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 b/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2
new file mode 100644
index 00000000000..c2b64cf6942
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: gluster
+ annotations:
+ storageclass.beta.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{{ endpoint_address }}:8080"
+ restuser: "admin"
+ restuserkey: "{{ heketi_admin_key }}"
diff --git a/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2
new file mode 100644
index 00000000000..b0ac29d7b3b
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2
@@ -0,0 +1,34 @@
+{
+ "clusters": [
+ {
+ "nodes": [
+{% set nodeblocks = [] %}
+{% for node in nodes %}
+{% set nodeblock %}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+ "{{ node }}"
+ ],
+ "storage": [
+ "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
+ ]
+ },
+ "zone": 1
+ },
+ "devices": [
+ {
+ "name": "{{ hostvars[node]['disk_volume_device_1'] }}",
+ "destroydata": false
+ }
+ ]
+ }
+{% endset %}
+{% if nodeblocks.append(nodeblock) %}{% endif %}
+{% endfor %}
+{{ nodeblocks|join(',') }}
+ ]
+ }
+ ]
+}
diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
new file mode 100644
index 00000000000..01e03660c4d
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+- name: "Install lvm utils (RedHat)"
+ become: true
+ yum:
+ name: "lvm2"
+ state: "present"
+ when: "ansible_os_family == 'RedHat'"
+
+- name: "Install lvm utils (Debian)"
+ become: true
+ apt:
+ name: "lvm2"
+ state: "present"
+ when: "ansible_os_family == 'Debian'"
+
+- name: "Get volume group information."
+ become: true
+ shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
+ register: "volume_groups"
+ ignore_errors: true
+ changed_when: false
+
+- name: "Remove volume groups."
+ become: true
+ command: "vgremove {{ volume_group }} --yes"
+ with_items: "{{ volume_groups.stdout_lines }}"
+ loop_control: { loop_var: "volume_group" }
+
+- name: "Remove physical volume from cluster disks."
+ become: true
+ command: "pvremove {{ disk_volume_device_1 }} --yes"
+ ignore_errors: true
+
+- name: "Remove lvm utils (RedHat)"
+ become: true
+ yum:
+ name: "lvm2"
+ state: "absent"
+ when: "ansible_os_family == 'RedHat'"
+
+- name: "Remove lvm utils (Debian)"
+ become: true
+ apt:
+ name: "lvm2"
+ state: "absent"
+ when: "ansible_os_family == 'Debian'"
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
new file mode 100644
index 00000000000..ddc56b256ad
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+- name: "Remove storage class."
+ command: "{{ bin_dir }}/kubectl delete storageclass gluster"
+ ignore_errors: true
+- name: "Tear down heketi."
+ command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
+ ignore_errors: true
+- name: "Tear down heketi."
+ command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
+ ignore_errors: true
+- name: "Tear down bootstrap."
+ include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
+- name: "Ensure there is nothing left over."
+ command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
+ register: "heketi_result"
+ until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+ retries: 60
+ delay: 5
+- name: "Ensure there is nothing left over."
+ command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
+ register: "heketi_result"
+ until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+ retries: 60
+ delay: 5
+- name: "Tear down glusterfs."
+ command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
+ ignore_errors: true
+- name: "Remove heketi storage service."
+ command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
+ ignore_errors: true
+- name: "Remove heketi gluster role binding"
+ command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
+ ignore_errors: true
+- name: "Remove heketi config secret"
+ command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
+ ignore_errors: true
+- name: "Remove heketi db backup"
+ command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
+ ignore_errors: true
+- name: "Remove heketi service account"
+ command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
+ ignore_errors: true
+- name: "Get secrets"
+ command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
+ register: "secrets"
+ changed_when: false
+- name: "Remove heketi storage secret"
+ vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
+ command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
+ when: "storage_query is defined"
+ ignore_errors: true
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index d69811335f1..709d0633faf 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -17,21 +17,20 @@ This project will create:
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
```
-export AWS_ACCESS_KEY_ID="www"
-export AWS_SECRET_ACCESS_KEY ="xxx"
-export AWS_SSH_KEY_NAME="yyy"
-export AWS_DEFAULT_REGION="zzz"
+export TF_VAR_AWS_ACCESS_KEY_ID="www"
+export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
+export TF_VAR_AWS_SSH_KEY_NAME="yyy"
+export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
-- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
Example:
```commandline
-terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
+terraform apply -var-file=credentials.tfvars
```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
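+
+If you want the inventory written somewhere else, the destination can also be overridden on the command line. This is only a sketch: the path below is illustrative, and `inventory_file` is the variable introduced in `variables.tf` above.
+
+```
+terraform apply -var-file=credentials.tfvars -var 'inventory_file=../../../inventory/mycluster/hosts'
+```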
@@ -46,7 +45,7 @@ ssh -F ./ssh-bastion.conf user@$ip
Example (this one assumes you are using CoreOS)
```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
***Using other distrib than CoreOs***
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index 9c0617d8485..1ff584f0c37 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -181,7 +181,7 @@ data "template_file" "inventory" {
resource "null_resource" "inventories" {
provisioner "local-exec" {
- command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
+ command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers {
diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars
index 99ea64eedaf..c5b1dbff1b1 100644
--- a/contrib/terraform/aws/terraform.tfvars
+++ b/contrib/terraform/aws/terraform.tfvars
@@ -31,3 +31,5 @@ default_tags = {
# Env = "devtest"
# Product = "kubernetes"
}
+
+inventory_file = "../../../inventory/hosts"
diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf
index 58dd3138847..37aab2bae0f 100644
--- a/contrib/terraform/aws/variables.tf
+++ b/contrib/terraform/aws/variables.tf
@@ -103,3 +103,7 @@ variable "default_tags" {
description = "Default tags for all resources"
type = "map"
}
+
+variable "inventory_file" {
+ description = "Where to store the generated inventory file"
+}
diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars
index febd29cb3f6..4dd828e8e58 120000
--- a/contrib/terraform/group_vars
+++ b/contrib/terraform/group_vars
@@ -1 +1 @@
-../../inventory/group_vars
\ No newline at end of file
+../../inventory/local/group_vars
\ No newline at end of file
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index ed11bef1ef7..15b101fe17c 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -32,7 +32,11 @@ floating IP addresses or not.
- Kubernetes worker nodes
Note that the Ansible script will report an invalid configuration if you wind up
-with an even number of etcd instances since that is not a valid configuration.
+with an even number of etcd instances since that is not a valid configuration. This
+restriction includes standalone etcd nodes that are deployed in a cluster along with
+master nodes with etcd replicas. As an example, if you have three master nodes with
+etcd replicas and three standalone etcd nodes, the script will fail since there are
+now six total etcd replicas.
### GlusterFS
The Terraform configuration supports provisioning of an optional GlusterFS
@@ -135,7 +139,7 @@ the one you want to use with the environment variable `OS_CLOUD`:
export OS_CLOUD=mycloud
```
-##### Openrc method (deprecated)
+##### Openrc method
When using classic environment variables, Terraform uses default `OS_*`
environment variables. A script suitable for your environment may be available
@@ -218,6 +222,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
#### Terraform state files
@@ -299,11 +305,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil
#### Bastion host
-If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
+Bastion access will be determined by:
-```
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
-```
+ - The number of bastion hosts you choose (set by the `number_of_bastions` terraform variable).
+ - The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
+
+If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
+If you don't have a bastion host, but at least one of your masters/nodes has a floating IP, then ssh traffic will be tunneled through one of these machines.
+
+So, either a bastion host, or at least one master/node with a floating IP, is required.
#### Test access
diff --git a/contrib/terraform/openstack/ansible_bastion_template.txt b/contrib/terraform/openstack/ansible_bastion_template.txt
index cdf0120668a..a304b2c9d5d 100644
--- a/contrib/terraform/openstack/ansible_bastion_template.txt
+++ b/contrib/terraform/openstack/ansible_bastion_template.txt
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index e0dbfd02de7..8e5d05adfbe 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -3,6 +3,7 @@ module "network" {
external_net = "${var.external_net}"
network_name = "${var.network_name}"
+ subnet_cidr = "${var.subnet_cidr}"
cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}"
}
@@ -24,6 +25,7 @@ module "compute" {
source = "modules/compute"
cluster_name = "${var.cluster_name}"
+ az_list = "${var.az_list}"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_etcd = "${var.number_of_etcd}"
@@ -48,6 +50,8 @@ module "compute" {
k8s_master_fips = "${module.ips.k8s_master_fips}"
k8s_node_fips = "${module.ips.k8s_node_fips}"
bastion_fips = "${module.ips.bastion_fips}"
+ supplementary_master_groups = "${var.supplementary_master_groups}"
+ supplementary_node_groups = "${var.supplementary_node_groups}"
network_id = "${module.network.router_id}"
}
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index e0a8eab4a6b..05026ed0b2f 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -59,6 +59,17 @@ resource "openstack_compute_secgroup_v2" "k8s" {
self = true
}
}
+resource "openstack_compute_secgroup_v2" "worker" {
+ name = "${var.cluster_name}-k8s-worker"
+ description = "${var.cluster_name} - Kubernetes worker nodes"
+
+ rule {
+ ip_protocol = "tcp"
+ from_port = "30000"
+ to_port = "32767"
+ cidr = "0.0.0.0/0"
+ }
+}
resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index+1}"
@@ -83,7 +94,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}
provisioner "local-exec" {
- command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+ command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
}
}
@@ -91,6 +102,7 @@ resource "openstack_compute_instance_v2" "bastion" {
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.number_of_k8s_masters}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -107,15 +119,20 @@ resource "openstack_compute_instance_v2" "k8s_master" {
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
+ kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
}
+ provisioner "local-exec" {
+ command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+ }
+
}
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.number_of_k8s_masters_no_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -125,20 +142,26 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
}
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
+ "${openstack_compute_secgroup_v2.bastion.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
]
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-master,k8s-cluster,vault"
+ kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
}
+ provisioner "local-exec" {
+ command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+ }
+
}
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.number_of_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -160,6 +183,7 @@ resource "openstack_compute_instance_v2" "etcd" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -175,7 +199,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
+ kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}
@@ -184,6 +208,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -198,7 +223,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
+ kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}
@@ -207,6 +232,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -217,20 +243,26 @@ resource "openstack_compute_instance_v2" "k8s_node" {
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
+ "${openstack_compute_secgroup_v2.worker.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-node,k8s-cluster"
+ kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
}
+ provisioner "local-exec" {
+ command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+ }
+
}
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -240,12 +272,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
+ "${openstack_compute_secgroup_v2.worker.name}",
"default",
]
metadata = {
ssh_user = "${var.ssh_user}"
- kubespray_groups = "kube-node,k8s-cluster,no-floating"
+ kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
}
@@ -279,6 +312,7 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
+ availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index 518e1506945..50a6e496cb8 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -1,5 +1,9 @@
variable "cluster_name" {}
+variable "az_list" {
+ type = "list"
+}
+
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
@@ -55,3 +59,11 @@ variable "k8s_node_fips" {
variable "bastion_fips" {
type = "list"
}
+
+variable "supplementary_master_groups" {
+ default = ""
+}
+
+variable "supplementary_node_groups" {
+ default = ""
+}
diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf
index 2c461c78483..7c02869d4d7 100644
--- a/contrib/terraform/openstack/modules/network/main.tf
+++ b/contrib/terraform/openstack/modules/network/main.tf
@@ -12,7 +12,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
network_id = "${openstack_networking_network_v2.k8s.id}"
- cidr = "10.0.0.0/24"
+ cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
}
diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf
index a426202b982..e56a792c21c 100644
--- a/contrib/terraform/openstack/modules/network/outputs.tf
+++ b/contrib/terraform/openstack/modules/network/outputs.tf
@@ -2,6 +2,6 @@ output "router_id" {
value = "${openstack_networking_router_interface_v2.k8s.id}"
}
-output "network_id" {
+output "subnet_id" {
value = "${openstack_networking_subnet_v2.k8s.id}"
}
diff --git a/contrib/terraform/openstack/modules/network/variables.tf b/contrib/terraform/openstack/modules/network/variables.tf
index a7952bced71..6494358aa3f 100644
--- a/contrib/terraform/openstack/modules/network/variables.tf
+++ b/contrib/terraform/openstack/modules/network/variables.tf
@@ -7,3 +7,5 @@ variable "cluster_name" {}
variable "dns_nameservers" {
type = "list"
}
+
+variable "subnet_cidr" {}
diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tf b/contrib/terraform/openstack/sample-inventory/cluster.tf
index 7830d2159fb..a793bfaa598 100644
--- a/contrib/terraform/openstack/sample-inventory/cluster.tf
+++ b/contrib/terraform/openstack/sample-inventory/cluster.tf
@@ -41,5 +41,6 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = ""
external_net = ""
+subnet_cidr = ""
floatingip_pool = ""
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index 925750ab1b3..dc4ddae9056 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -2,6 +2,12 @@ variable "cluster_name" {
default = "example"
}
+variable "az_list" {
+ description = "List of Availability Zones available in your OpenStack cluster"
+ type = "list"
+ default = ["nova"]
+}
+
variable "number_of_bastions" {
default = 1
}
@@ -97,6 +103,12 @@ variable "network_name" {
default = "internal"
}
+variable "subnet_cidr" {
+ description = "Subnet CIDR block."
+ type = "string"
+ default = "10.0.0.0/24"
+}
+
variable "dns_nameservers" {
description = "An array of DNS name server names used by hosts in this subnet."
type = "list"
@@ -111,3 +123,13 @@ variable "floatingip_pool" {
variable "external_net" {
description = "uuid of the external/public network"
}
+
+variable "supplementary_master_groups" {
+ description = "supplementary kubespray ansible groups for masters, such kube-node"
+ default = ""
+}
+
+variable "supplementary_node_groups" {
+ description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+ default = ""
+}
diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py
index 955d5155b40..6feaed42afb 100755
--- a/contrib/terraform/terraform.py
+++ b/contrib/terraform/terraform.py
@@ -706,6 +706,10 @@ def query_list(hosts):
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
+ # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
+ # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
+ if not group: group = "all"
+
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
diff --git a/docs/ansible.md b/docs/ansible.md
index 5e17147bedc..21583b5ee30 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -123,7 +123,6 @@ The following tags are defined in playbooks:
| hyperkube | Manipulations with K8s hyperkube image
| k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys
-| kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring static pod kube-controller-manager
| kubectl | Installing kubectl and bash completion
@@ -159,7 +158,7 @@ And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/reso
```
ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
```
-And this prepares all container images localy (at the ansible runner node) without installing
+And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload container to K8s cluster nodes:
```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
diff --git a/docs/aws.md b/docs/aws.md
index 8564f6f1038..28cdc89deb2 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -5,7 +5,7 @@ To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provi
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
-You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
+You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
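+
+As an illustration only (the resource IDs and the cluster name `my-cluster` below are placeholders, not part of this guide), the required tag can be applied with the AWS CLI:
+
+```
+# Tag a subnet, a route table and an instance for a cluster named "my-cluster"
+aws ec2 create-tags --resources subnet-0123456789abcdef0 rtb-0123456789abcdef0 i-0123456789abcdef0 \
+    --tags Key=kubernetes.io/cluster/my-cluster,Value=owned
+```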
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
diff --git a/docs/calico.md b/docs/calico.md
index 7992e57eb6c..f6adaa6622f 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -1,6 +1,13 @@
Calico
===========
+---
+ **N.B. Upgrading from version 2.6.5 to 3.1.1 also upgrades the etcd store to etcdv3.**
+ If you create automated backups of etcdv2, please switch to creating etcdv3 backups, as kubernetes and calico now use etcdv3.
+ After migration you can check the `/tmp/calico_upgrade/` directory for items converted to etcdv3.
+ **PLEASE TEST the upgrade before upgrading a production cluster.**
+ ---
+
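+As a rough sketch of what an etcdv3 backup can look like (the endpoint and certificate paths are environment-specific assumptions, not prescribed by this note):
+
+```
+# Take an etcdv3 snapshot of the cluster store
+ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
+    --cacert=/etc/ssl/etcd/ssl/ca.pem --cert=/etc/ssl/etcd/ssl/member.pem --key=/etc/ssl/etcd/ssl/member-key.pem \
+    snapshot save /var/backups/etcd-snapshot.db
+```
+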
Check if the calico-node container is running
```
@@ -86,7 +93,7 @@ To do so you can deploy BGP route reflectors and peer `calico-node` with them as
recommended here:
* https://hub.docker.com/r/calico/routereflector/
-* http://docs.projectcalico.org/v2.0/reference/private-cloud/l3-interconnect-fabric
+* https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric
You need to edit your inventory and add:
@@ -169,3 +176,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is
```
calico_node_ignorelooserpf: true
```
+
+Note that in OpenStack you must allow `ipip` traffic in your security groups,
+otherwise you will experience timeouts.
+To do this you must add a rule which allows it, for example:
+
+```
+neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
+neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t
+```
diff --git a/docs/cri-o.md b/docs/cri-o.md
new file mode 100644
index 00000000000..43391768aa4
--- /dev/null
+++ b/docs/cri-o.md
@@ -0,0 +1,31 @@
+cri-o
+===============
+
+cri-o is a container runtime developed by the kubernetes project.
+Currently, only basic functionality is supported for cri-o.
+
+* cri-o is supported with kubernetes 1.11.1 or later.
+* helm and other features may not be supported due to the docker dependency.
+* scale.yml and upgrade-cluster.yml are not supported.
+
+To use cri-o instead of docker, set the following variables:
+
+#### all.yml
+
+```
+kubeadm_enabled: true
+...
+download_container: false
+skip_downloads: false
+```
+
+#### k8s-cluster.yml
+
+```
+etcd_deployment_type: host
+kubelet_deployment_type: host
+container_manager: crio
+```
+
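+With these variables in place, the cluster is deployed as usual. This is only a sketch; the inventory path below is an example:
+
+```
+ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v
+```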
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 6215114af40..92689eee5b7 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -52,16 +52,24 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwardet to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``
-#### kubedns
+#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
+#### coredns
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries.
+
+#### coredns_dual
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries. It will also deploy a secondary CoreDNS stack.
+
#### manual
This does not install dnsmasq or kubedns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
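+
+As an illustration (the inventory path is an example), the desired mode can be selected at deploy time by overriding `dns_mode`:
+
+```
+ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -e dns_mode=coredns
+```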
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 961d1a9cfd8..98120b3d749 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -18,11 +18,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
Example inventory generator usage:
-```
-cp -r inventory/sample inventory/mycluster
-declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
-```
+ cp -r inventory/sample inventory/mycluster
+ declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+ CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
Starting custom deployment
--------------------------
@@ -30,45 +28,70 @@ Starting custom deployment
Once you have an inventory, you may want to customize deployment data vars
and start the deployment:
-**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
+**IMPORTANT**: Edit my\_inventory/group\_vars/\*.yaml to override data vars:
-```
-ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
- --private-key=~/.ssh/private_key
-```
+ ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
+ --private-key=~/.ssh/private_key
See more details in the [ansible guide](ansible.md).
Adding nodes
------------
-You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
+You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
+
+- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+
+ ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+ --private-key=~/.ssh/private_key
+
+Remove nodes
+------------
+
+You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, the nodes will be drained, then some kubernetes services will be stopped and some certificates deleted, and finally the kubectl command will be run to delete these nodes. This can be combined with the add node function; this is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove it and install it again.
+
+Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+ ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ --private-key=~/.ssh/private_key
+
+
+We support two ways to select the nodes:
+
+- Use `--extra-vars "node=,"` to select the node you want to delete.
+```
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ --private-key=~/.ssh/private_key \
+ --extra-vars "node=nodename,nodename2"
+```
+or
+- Use `--limit nodename,nodename2` to select the node
```
-ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
- --private-key=~/.ssh/private_key
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+ --private-key=~/.ssh/private_key \
+ --limit nodename,nodename2"
```
Connecting to Kubernetes
------------------------
+
By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
-because kubectl will use http://localhost:8080 to connect. The kubeconfig files
+because kubectl will use `http://localhost:8080` to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](ha-mode.md).
-Kubespray permits connecting to the cluster remotely on any IP of any
-kube-master host on port 6443 by default. However, this requires
-authentication. One could generate a kubeconfig based on one installed
+Kubespray permits connecting to the cluster remotely on any IP of any
+kube-master host on port 6443 by default. However, this requires
+authentication. One could generate a kubeconfig based on one installed
kube-master hosts (needs improvement) or connect with a username and password.
By default, a user with admin rights is created, named `kube`.
-The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+The password can be viewed after deployment by looking at the file
+`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
-file yourself.
+file yourself.
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
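+
+As a rough sketch (assuming the default `credentials_dir`, an inventory under `inventory/mycluster/`, and a master reachable at `MASTER_IP`), the generated credentials could be used like this:
+
+```
+# password generated at deploy time
+cat inventory/mycluster/credentials/kube_user.creds
+
+# basic-auth access to the secure port; skipping TLS verification is only
+# acceptable for a quick test
+kubectl --server=https://MASTER_IP:6443 --username=kube \
+  --password="$(cat inventory/mycluster/credentials/kube_user.creds)" \
+  --insecure-skip-tls-verify=true get nodes
+```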
@@ -77,29 +100,33 @@ Accessing Kubernetes Dashboard
------------------------------
As of kubernetes-dashboard v1.7.x:
-* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
-* Requires RBAC in authorization_modes
-* Only serves over https
-* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL
+
+- New login options that use apiserver auth proxying of token/basic/kubeconfig by default
+- Requires RBAC in authorization\_modes
+- Only serves over https
+- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL; you will be prompted for credentials:
-https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+
+<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
-http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+
+<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
-It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
+It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
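+
+One way to obtain a token for that login prompt (an illustrative approach, not the only option) is to read an existing service-account token from `kube-system`:
+
+```
+# list the token secrets, then print one of them; paste the "token:" value
+# into the dashboard login form
+kubectl -n kube-system get secrets | grep service-account-token
+kubectl -n kube-system describe secret <one-of-the-secrets-listed-above>
+```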
Accessing Kubernetes API
------------------------
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
host and can optionally be configured on your ansible host by setting
-`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
-admin.conf will appear in the artifacts/ directory after deployment. You can
-see a list of nodes by running the following commands:
+`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
+
+- If `kubectl_localhost` is enabled, `kubectl` will be downloaded to `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
+- If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
+
+You can see a list of nodes by running the following commands:
- cd artifacts/
- ./kubectl --kubeconfig admin.conf get nodes
+ cd inventory/mycluster/artifacts
+ ./kubectl.sh get nodes
-If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
+If desired, copy admin.conf to ~/.kube/config.
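+
+For example (assuming `kubeconfig_localhost: true` was used with the `inventory/mycluster` inventory):
+
+```
+mkdir -p ~/.kube
+cp inventory/mycluster/artifacts/admin.conf ~/.kube/config
+kubectl get nodes
+```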
diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index f3bc97e1c6e..61963663306 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -11,12 +11,32 @@ achieve the same goal.
Etcd
----
-The `etcd_access_endpoint` fact provides an access pattern for clients. And the
-`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
-It makes deployed components to access the etcd cluster members
-directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
-do a loadbalancing and handle HA for connections.
+In order to use an external load balancer (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overridden in group_vars:
+* `etcd_access_addresses`
+* `etcd_client_url`
+* `etcd_cert_alt_names`
+* `etcd_cert_alt_ips`
+
+### Example of a VIP w/ FQDN
+```yaml
+etcd_access_addresses: https://etcd.example.com:2379
+etcd_client_url: https://etcd.example.com:2379
+etcd_cert_alt_names:
+ - "etcd.kube-system.svc.{{ dns_domain }}"
+ - "etcd.kube-system.svc"
+ - "etcd.kube-system"
+ - "etcd"
+ - "etcd.example.com" # This one needs to be added to the default etcd_cert_alt_names
+```
+
+### Example of a VIP w/o FQDN (IP only)
+```yaml
+etcd_access_addresses: https://2.3.7.9:2379
+etcd_client_url: https://2.3.7.9:2379
+etcd_cert_alt_ips:
+ - "2.3.7.9"
+```
Kube-apiserver
--------------
diff --git a/docs/img/kubernetes-logo.png b/docs/img/kubernetes-logo.png
new file mode 100644
index 00000000000..2838a1829ff
Binary files /dev/null and b/docs/img/kubernetes-logo.png differ
diff --git a/docs/kubernetes-reliability.md b/docs/kubernetes-reliability.md
index 82ec65f2e0f..e338272d3bc 100644
--- a/docs/kubernetes-reliability.md
+++ b/docs/kubernetes-reliability.md
@@ -33,7 +33,7 @@ Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
[kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).
Kubelet will try to update the status in
-[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L345)
+[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L312)
function. Kubelet uses `http.Client()` Golang method, but has no specified
timeout. Thus there may be some glitches when API Server is overloaded while
TCP connection is established.
diff --git a/docs/netcheck.md b/docs/netcheck.md
index 80679cd7310..46038965f78 100644
--- a/docs/netcheck.md
+++ b/docs/netcheck.md
@@ -25,13 +25,13 @@ There are related application specific variables:
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
-agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
-server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
+agent_img: "mirantis/k8s-netchecker-agent:v1.2.2"
+server_img: "mirantis/k8s-netchecker-server:v1.2.2"
```
Note that the application verifies DNS resolve for FQDNs comprising only the
combination of the ``netcheck_namespace.dns_domain`` vars, for example the
-``netchecker-service.default.cluster.local``. If you want to deploy the application
+``netchecker-service.default.svc.cluster.local``. If you want to deploy the application
to the non default namespace, make sure as well to adjust the ``searchdomains`` var
so the resulting search domain records to contain that namespace, like:
diff --git a/docs/openstack.md b/docs/openstack.md
index 7a4368e2e29..ca1e89ae188 100644
--- a/docs/openstack.md
+++ b/docs/openstack.md
@@ -3,7 +3,7 @@ OpenStack
To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.
-After that make sure to source in your OpenStack credentials like you would do when using `nova-client` by using `source path/to/your/openstack-rc`.
+After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.
The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.
@@ -12,35 +12,34 @@ Unless you are using calico you can now run the playbook.
**Additional step needed when using calico:**
-Calico does not encapsulate all packages with the hosts ip addresses. Instead the packages will be routed with the PODs ip addresses directly.
+Calico does not encapsulate all packets with the hosts' IP addresses. Instead the packets will be routed with the pods' IP addresses directly.
+
OpenStack will filter and drop all packets from IPs it does not know to prevent spoofing.
-In order to make calico work on OpenStack you will need to tell OpenStack to allow calicos packages by allowing the network it uses.
+In order to make calico work on OpenStack you will need to tell OpenStack to allow calico's packets by allowing the network it uses.
First you will need the ids of your OpenStack instances that will run kubernetes:
- nova list --tenant Your-Tenant
+ openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID | Name | Tenant ID | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
-Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports:
+Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now managed through the unified `openstack` CLI):
- neutron port-list -c id -c device_id
+ openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id | device_id |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
-Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
-Note that you have to allow both of `kube_service_addresses` (default `10.233.0.0/18`)
-and `kube_pods_subnet` (default `10.233.64.0/18`.)
+Given the port ids on the left, you can set the two `allowed_address` entries in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).
# allow kube_service_addresses and kube_pods_subnet network
- neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
- neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
+ openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address ip_address=10.233.0.0/18,ip_address=10.233.64.0/18
+ openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address ip_address=10.233.0.0/18,ip_address=10.233.64.0/18
Now you can finally run the playbook.
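+
+To double-check that the change took effect, the ports can be inspected again (a sketch reusing the hypothetical port id from the example above):
+
+```
+openstack port show 5662a4e0-e646-47f0-bf88-d80fbd2d99ef -c allowed_address_pairs
+```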
diff --git a/docs/opensuse.md b/docs/opensuse.md
new file mode 100644
index 00000000000..88fac3790e3
--- /dev/null
+++ b/docs/opensuse.md
@@ -0,0 +1,19 @@
+openSUSE Leap 42.3 and Tumbleweed
+===============
+
+openSUSE Leap installation Notes:
+
+- Install Ansible
+
+ ```
+ sudo zypper ref
+ sudo zypper -n install ansible
+
+ ```
+
+- Install Jinja2 and Python-Netaddr
+
+ ```sudo zypper -n install python-Jinja2 python-netaddr```
+
+
+Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
diff --git a/docs/roadmap.md b/docs/roadmap.md
index cf8fa2d9a32..a0c1a3ffc62 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -9,7 +9,7 @@ Kubespray's roadmap
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
-- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
+- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
- to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
diff --git a/docs/upgrades.md b/docs/upgrades.md
index 6297976ddb5..620e07a6b33 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -81,3 +81,61 @@ kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted out of an abundance of caution
for impact to user deployed pods.
+
+### Component-based upgrades
+
+A deployer may want to upgrade specific components in order to minimize risk
+or save time. This strategy is not covered by CI as of this writing, so it is
+not guaranteed to work.
+
+These commands are useful only for upgrading fully-deployed, healthy, existing
+hosts. This will definitely not work for undeployed or partially deployed
+hosts.
+
+Upgrade docker:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker
+```
+
+Upgrade etcd:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
+```
+
+Upgrade vault:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
+```
+
+Upgrade kubelet:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens
+```
+
+Upgrade Kubernetes master components:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master
+```
+
+Upgrade network plugins:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network
+```
+
+Upgrade all add-ons:
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps
+```
+
+Upgrade just helm (assuming `helm_enabled` is true):
+
+```
+ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm
+```
diff --git a/docs/vagrant.md b/docs/vagrant.md
index 042e8137bd2..de47159fa73 100644
--- a/docs/vagrant.md
+++ b/docs/vagrant.md
@@ -1,7 +1,7 @@
Vagrant Install
=================
-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
with vmware, but is untested) you should be able to launch a 3 node
Kubernetes cluster by simply running `$ vagrant up`.
diff --git a/docs/vars.md b/docs/vars.md
index 3303f6bcbfe..41248b036c8 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -63,7 +63,8 @@ following default cluster parameters:
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
* *dns_setup* - Enables dnsmasq
* *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
-* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
+* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
+* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
@@ -105,9 +106,9 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
-* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
+* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
- is unlikely to work on newer releases. Starting with Kubernetes v1.7
+ is unlikely to work on newer releases. Starting with Kubernetes v1.7
series, this now defaults to ``host``. Before v1.7, the default was Docker.
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
@@ -117,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
* *kubelet_cgroup_driver* - Allows manual override of the
cgroup-driver option for Kubelet. By default autodetection is used
to match Docker configuration.
+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+ For example, labels can be set in the inventory as variables or more widely in group_vars.
+ *node_labels* must be defined as a dict:
+```
+node_labels:
+ label1_name: label1_value
+ label2_name: label2_value
+```
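+
+With the example values above, kubelet would be started with a label flag roughly like:
+
+```
+--node-labels=label1_name=label1_value,label2_name=label2_value
+```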
##### Custom flags for Kube Components
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
@@ -136,6 +145,6 @@ The possible vars are:
By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself or change `kube_api_pwd` var.
diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml
index 6a72a076e91..a669805c7fd 100644
--- a/extra_playbooks/build-cephfs-provisioner.yml
+++ b/extra_playbooks/build-cephfs-provisioner.yml
@@ -8,8 +8,8 @@
version: "{{ item.version }}"
state: "{{ item.state }}"
with_items:
- - { state: "present", name: "docker", version: "2.7.0" }
- - { state: "present", name: "docker-compose", version: "1.18.0" }
+ - { state: "present", name: "docker", version: "3.4.1" }
+ - { state: "present", name: "docker-compose", version: "1.21.2" }
- name: CephFS Provisioner | Check Go version
shell: |
@@ -35,19 +35,19 @@
- name: CephFS Provisioner | Clone repo
git:
repo: https://github.com/kubernetes-incubator/external-storage.git
- dest: "~/go/src/github.com/kubernetes-incubator"
- version: 92295a30
- clone: no
+ dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
+ version: 06fddbe2
+ clone: yes
update: yes
- name: CephFS Provisioner | Build image
shell: |
cd ~/go/src/github.com/kubernetes-incubator/external-storage
- REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
+ REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs
- name: CephFS Provisioner | Push image
docker_image:
- name: quay.io/kubespray/cephfs-provisioner:92295a30
+ name: quay.io/kubespray/cephfs-provisioner:06fddbe2
push: yes
retries: 10
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
deleted file mode 100644
index c107b049f8d..00000000000
--- a/inventory/sample/group_vars/all.yml
+++ /dev/null
@@ -1,136 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-### ETCD: disable peer client cert authentication.
-# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
-#etcd_peer_client_auth: true
-
-## External LB example config
-## apiserver_loadbalancer_domain_name: "elb.some.domain"
-#loadbalancer_apiserver:
-# address: 1.2.3.4
-# port: 1234
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-#kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When azure is used, you need to also set the following variables.
-## see docs/azure.md for details on how to get these values
-#azure_tenant_id:
-#azure_subscription_id:
-#azure_aad_client_id:
-#azure_aad_client_secret:
-#azure_resource_group:
-#azure_location:
-#azure_subnet_name:
-#azure_security_group_name:
-#azure_vnet_name:
-#azure_route_table_name:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-## To enable automatic floating ip provisioning, specify a subnet.
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-## Override default LBaaS behavior
-#openstack_lbaas_use_octavia: False
-#openstack_lbaas_method: "ROUND_ROBIN"
-#openstack_lbaas_provider: "haproxy"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
-## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
-#etcd_memory_limit: "512M"
-
-# The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
-#kube_read_only_port: 10255
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
new file mode 100644
index 00000000000..faf65eb1ad4
--- /dev/null
+++ b/inventory/sample/group_vars/all/all.yml
@@ -0,0 +1,82 @@
+## Valid bootstrap options (required): ubuntu, coreos, centos, none
+## If the OS is not listed here, it means it doesn't require extra/bootstrap steps.
+## For example, python is not available on 'coreos' so it must be installed before
+## anything else. By contrast, Debian already has all its dependencies fulfilled, so bootstrap_os should be set to `none`.
+bootstrap_os: none
+
+## Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+#loadbalancer_apiserver:
+# address: 1.2.3.4
+# port: 1234
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+#kubelet_load_modules: false
+
+## With calico it is possible to distributed routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each nodes will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+## Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
+#kube_read_only_port: 10255
+
+## Set true to download and cache container
+#download_container: true
diff --git a/inventory/sample/group_vars/all/azure.yml b/inventory/sample/group_vars/all/azure.yml
new file mode 100644
index 00000000000..78d49c9b4d4
--- /dev/null
+++ b/inventory/sample/group_vars/all/azure.yml
@@ -0,0 +1,14 @@
+## When azure is used, you need to also set the following variables.
+## see docs/azure.md for details on how to get these values
+
+#azure_tenant_id:
+#azure_subscription_id:
+#azure_aad_client_id:
+#azure_aad_client_secret:
+#azure_resource_group:
+#azure_location:
+#azure_subnet_name:
+#azure_security_group_name:
+#azure_vnet_name:
+#azure_vnet_resource_group:
+#azure_route_table_name:
diff --git a/inventory/sample/group_vars/all/coreos.yml b/inventory/sample/group_vars/all/coreos.yml
new file mode 100644
index 00000000000..a48f24ebbc3
--- /dev/null
+++ b/inventory/sample/group_vars/all/coreos.yml
@@ -0,0 +1,2 @@
+## Does coreos need auto upgrade, default is true
+#coreos_auto_upgrade: true
diff --git a/inventory/sample/group_vars/all/docker.yml b/inventory/sample/group_vars/all/docker.yml
new file mode 100644
index 00000000000..c1a1dd85a6b
--- /dev/null
+++ b/inventory/sample/group_vars/all/docker.yml
@@ -0,0 +1,61 @@
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RHEL 7.
+docker_container_storage_setup: false
+
+## A disk path must be defined for docker_container_storage_setup_devs,
+## otherwise docker-storage-setup will be executed incorrectly.
+#docker_container_storage_setup_devs: /dev/vdb
+
+## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+docker_dns_servers_strict: false
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## Used to set docker daemon iptables options to true
+docker_iptables_enabled: "false"
+
+# Docker log options
+# Rotate container stderr/stdout logs at 50m and keep last 5
+docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
+
+# define docker bin_dir
+docker_bin_dir: "/usr/bin"
+
+## An obvious use case is allowing insecure-registry access to self hosted registries.
+## Can be an IP address or a domain name.
+## For example: 172.19.16.11 or mirror.registry.io
+#docker_insecure_registries:
+# - mirror.registry.io
+# - 172.19.16.11
+
+## Add other registries, for example a China registry mirror.
+#docker_registry_mirrors:
+# - https://registry.docker-cn.com
+# - https://mirror.aliyuncs.com
+
+## If non-empty, overrides the default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which control whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for system default
+#docker_mount_flags:
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+docker_options: >-
+ {%- if docker_insecure_registries is defined -%}
+ {{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
+ {%- endif %}
+ {% if docker_registry_mirrors is defined -%}
+ {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
+ {%- endif %}
+ --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
+ {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+ --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+ --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+ --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+ {%- endif -%}
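+
+## Illustrative expansion (assuming the example insecure registries and registry
+## mirrors above are uncommented and the defaults in this file are kept),
+## DOCKER_OPTS would then contain roughly:
+##   --insecure-registry=mirror.registry.io --insecure-registry=172.19.16.11
+##   --registry-mirror=https://registry.docker-cn.com --registry-mirror=https://mirror.aliyuncs.com
+##   --graph=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5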
diff --git a/inventory/sample/group_vars/all/oci.yml b/inventory/sample/group_vars/all/oci.yml
new file mode 100644
index 00000000000..fd83080dd80
--- /dev/null
+++ b/inventory/sample/group_vars/all/oci.yml
@@ -0,0 +1,15 @@
+## When Oracle Cloud Infrastructure is used, set these variables
+#oci_private_key:
+#oci_region_id:
+#oci_tenancy_id:
+#oci_user_id:
+#oci_user_fingerprint:
+#oci_compartment_id:
+#oci_vnc_id:
+#oci_subnet1_id:
+#oci_subnet2_id:
+## Override these default behaviors if you wish
+#oci_security_list_management: All
+# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
+#oci_use_instance_principals: false
+#oci_cloud_controller_version: 0.5.0
diff --git a/inventory/sample/group_vars/all/openstack.yml b/inventory/sample/group_vars/all/openstack.yml
new file mode 100644
index 00000000000..6347d0522f2
--- /dev/null
+++ b/inventory/sample/group_vars/all/openstack.yml
@@ -0,0 +1,15 @@
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+## To enable automatic floating ip provisioning, specify a subnet.
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+## Override default LBaaS behavior
+#openstack_lbaas_use_octavia: False
+#openstack_lbaas_method: "ROUND_ROBIN"
+#openstack_lbaas_provider: "haproxy"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
diff --git a/inventory/sample/group_vars/etcd.yml b/inventory/sample/group_vars/etcd.yml
new file mode 100644
index 00000000000..6f5347cb956
--- /dev/null
+++ b/inventory/sample/group_vars/etcd.yml
@@ -0,0 +1,18 @@
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+#etcd_memory_limit: "512M"
+
+## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
+## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
+## etcd documentation for more information.
+#etcd_quota_backend_bytes: "2G"
+
+### ETCD: disable peer client cert authentication.
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
+#etcd_peer_client_auth: true
diff --git a/inventory/sample/group_vars/k8s-cluster/addons.yml b/inventory/sample/group_vars/k8s-cluster/addons.yml
new file mode 100644
index 00000000000..7c9057e715e
--- /dev/null
+++ b/inventory/sample/group_vars/k8s-cluster/addons.yml
@@ -0,0 +1,54 @@
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Registry deployment
+registry_enabled: false
+# registry_namespace: "{{ system_namespace }}"
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
+
+# Local volume provisioner deployment
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: "{{ system_namespace }}"
+# local_volume_provisioner_base_dir: /mnt/disks
+# local_volume_provisioner_mount_dir: /mnt/disks
+# local_volume_provisioner_storage_class: local-storage
+
+# CephFS provisioner deployment
+cephfs_provisioner_enabled: false
+# cephfs_provisioner_namespace: "cephfs-provisioner"
+# cephfs_provisioner_cluster: ceph
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# cephfs_provisioner_admin_id: admin
+# cephfs_provisioner_secret: secret
+# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
+
+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+# ingress_nginx_nodeselector:
+# node-role.kubernetes.io/master: "true"
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+# map-hash-bucket-size: "128"
+# ssl-protocols: "SSLv2"
+# ingress_nginx_configmap_tcp_services:
+# 9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+# 53: "kube-system/kube-dns:53"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
similarity index 61%
rename from inventory/sample/group_vars/k8s-cluster.yml
rename to inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index a31963f160c..efc6845b236 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -1,12 +1,11 @@
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -20,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.9.3
+kube_version: v1.11.3
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -29,15 +28,18 @@ local_release_dir: "/tmp/releases"
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
+# Directory where credentials will be stored
+credentials_dir: "{{ inventory_dir }}/credentials"
+
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
kube_users:
kube:
pass: "{{kube_api_pwd}}"
@@ -59,35 +61,30 @@ kube_users:
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:
# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
-weave_mtu: 1376
-
-# Enable kubernetes network policies
-enable_network_policy: false
+# Weave deployment
+# weave_password: ~
+# weave_checkpoint_disable: false
+# weave_conn_limit: 100
+# weave_hairpin_mode: true
+# weave_ipalloc_range: {{ kube_pods_subnet }}
+# weave_expect_npc: {{ enable_network_policy }}
+# weave_kube_peers: ~
+# weave_ipalloc_init: ~
+# weave_expose_ip: ~
+# weave_metrics_addr: ~
+# weave_status_addr: ~
+# weave_mtu: 1376
+# weave_no_masq_local: true
+# weave_extra_args: ~
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
@@ -111,14 +108,22 @@ kube_apiserver_insecure_port: 8080 # (http)
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
-kube_proxy_mode: iptables
+kube_proxy_mode: iptables
+
+# Kube-proxy nodeport address.
+# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest
+kube_proxy_nodeport_addresses: false
+# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
+
+## Encrypting Secret Data at Rest (experimental)
+kube_encrypt_secret_data: false
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
-# Can be dnsmasq_kubedns, kubedns, manual or none
+# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x
@@ -129,21 +134,15 @@ resolvconf_mode: docker_dns
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
+## Container runtime
+## docker for docker and crio for cri-o.
+container_manager: docker
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
+## Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
@@ -152,60 +151,15 @@ helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Registry deployment
-registry_enabled: false
-
-# Local volume provisioner deployment
-local_volume_provisioner_enabled: false
-# local_volume_provisioner_namespace: "{{ system_namespace }}"
-local_volume_provisioner_base_dir: /mnt/disks
-local_volume_provisioner_mount_dir: /mnt/disks
-# local_volume_provisioner_storage_class: local-storage
-
-# CephFS provisioner deployment
-cephfs_provisioner_enabled: false
-# cephfs_provisioner_namespace: "{{ system_namespace }}"
-# cephfs_provisioner_cluster: ceph
-# cephfs_provisioner_monitors:
-# - 172.24.0.1:6789
-# - 172.24.0.2:6789
-# - 172.24.0.3:6789
-# cephfs_provisioner_admin_id: admin
-# cephfs_provisioner_secret: secret
-# cephfs_provisioner_storage_class: cephfs
-
-# Nginx ingress controller deployment
-ingress_nginx_enabled: false
-# ingress_nginx_namespace: "ingress-nginx"
-# ingress_nginx_insecure_port: 80
-# ingress_nginx_secure_port: 443
-# ingress_nginx_configmap:
-# map-hash-bucket-size: "128"
-# ssl-protocols: "SSLv2"
-# ingress_nginx_configmap_tcp_services:
-# 9000: "default/example-go:8080"
-# ingress_nginx_configmap_udp_services:
-# 53: "kube-system/kube-dns:53"
+# audit log for kubernetes
+kubernetes_audit: false
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: false
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false
# dnsmasq
@@ -228,3 +182,20 @@ persistent_volumes_enabled: false
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
+# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported now)
+persistent_volumes_enabled: false
+
+## Container Engine Acceleration
+## Enable the container acceleration feature, for example to use GPU acceleration in containers
+# nvidia_accelerator_enabled: true
+## Nvidia GPU driver install. The install will be done by an (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes, leave empty or commented if you don't want to install drivers.
+## Labels and taints won't be set to nodes if they are not in the array.
+# nvidia_gpu_nodes:
+# - kube-gpu-001
+# nvidia_driver_version: "384.111"
+## flavor can be tesla or gtx
+# nvidia_gpu_flavor: gtx
+
+
diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini
index 13cc3612e33..80c854d0a17 100644
--- a/inventory/sample/hosts.ini
+++ b/inventory/sample/hosts.ini
@@ -1,14 +1,15 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
-# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
-# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
-# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
-# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
-# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
-# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
+# ## We should set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set this value, or can set it to an empty string.
+# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1
+# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2
+# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3
+# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4
+# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5
+# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6
# ## configure a bastion host if your nodes are not directly reachable
-# bastion ansible_ssh_host=x.x.x.x
+# bastion ansible_host=x.x.x.x ansible_user=some_user
# [kube-master]
# node1
@@ -26,6 +27,6 @@
# node5
# node6
-# [k8s-cluster:children]
-# kube-node
-# kube-master
+[k8s-cluster:children]
+kube-master
+kube-node
diff --git a/remove-node.yml b/remove-node.yml
index fbc5bc8ba6f..0fae1a99425 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -1,9 +1,11 @@
---
- hosts: all
+ vars:
+ ansible_ssh_pipelining: true
gather_facts: true
-- hosts: etcd:k8s-cluster:vault:calico-rr
+- hosts: "{{ node | default('etcd:k8s-cluster:vault:calico-rr') }}"
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -20,8 +22,9 @@
roles:
- { role: remove-node/pre-remove, tags: pre-remove }
-- hosts: kube-node
+- hosts: "{{ node | default('kube-node') }}"
roles:
+ - { role: kubespray-defaults }
- { role: reset, tags: reset }
- hosts: kube-master
diff --git a/requirements.txt b/requirements.txt
index 80ffd74ed51..01ff9f23cc1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,6 @@
-pbr>=1.6
ansible>=2.4.0
-netaddr
jinja2>=2.9.6
+netaddr
+pbr>=1.6
+ansible-modules-hashivault>=3.9.4
+hvac
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 43ec8ebbb9a..3854ec4119a 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -7,7 +7,7 @@
- name: User | Create User
user:
comment: "{{user.comment|default(omit)}}"
- createhome: "{{user.create_home|default(omit)}}"
+ createhome: "{{user.createhome|default(omit)}}"
group: "{{user.group|default(user.name)}}"
home: "{{user.home|default(omit)}}"
shell: "{{user.shell|default(omit)}}"
diff --git a/roles/bootstrap-os/defaults/main.yml b/roles/bootstrap-os/defaults/main.yml
index c191ebd2b3c..5d2f7321a79 100644
--- a/roles/bootstrap-os/defaults/main.yml
+++ b/roles/bootstrap-os/defaults/main.yml
@@ -4,3 +4,6 @@ pip_python_coreos_modules:
- six
override_system_hostname: true
+
+
+coreos_auto_upgrade: true
diff --git a/roles/bootstrap-os/files/bootstrap.sh b/roles/bootstrap-os/files/bootstrap.sh
index a2ad29b6c51..dbef6c8b103 100644
--- a/roles/bootstrap-os/files/bootstrap.sh
+++ b/roles/bootstrap-os/files/bootstrap.sh
@@ -18,7 +18,11 @@ mv -n pypy-$PYPY_VERSION-linux64 pypy
## library fixup
mkdir -p pypy/lib
-ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
+if [ -f /lib64/libncurses.so.5.9 ]; then
+ ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
+elif [ -f /lib64/libncurses.so.6.1 ]; then
+ ln -snf /lib64/libncurses.so.6.1 $BINDIR/pypy/lib/libtinfo.so.5
+fi
cat > $BINDIR/python <<EOF
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)
+ when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0)
+
+- name: ensure docker-engine repository public key is installed
+ action: "{{ dockerproject_repo_key_info.pkg_key }}"
+ args:
+ id: "{{item}}"
+ url: "{{dockerproject_repo_key_info.url}}"
+ state: present
+ register: keyserver_task_result
+ until: keyserver_task_result|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
+ when:
+ - not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
+ - use_docker_engine is defined and use_docker_engine
+
+- name: ensure docker-engine repository is enabled
+ action: "{{ dockerproject_repo_info.pkg_repo }}"
+ args:
+ repo: "{{item}}"
+ state: present
+ with_items: "{{ dockerproject_repo_info.repos }}"
+ when:
+ - use_docker_engine is defined and use_docker_engine
+ - not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
- name: Configure docker repository on RedHat/CentOS
template:
src: "rh_docker.repo.j2"
- dest: "/etc/yum.repos.d/docker.repo"
+ dest: "{{ yum_repo_dir }}/docker.repo"
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+- name: Copy yum.conf for editing
+ copy:
+ src: "{{ yum_conf }}"
+ dest: "{{ docker_yum_conf }}"
+ remote_src: yes
+ when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+ lineinfile:
+ path: "{{ docker_yum_conf }}"
+ state: present
+ regexp: '^obsoletes='
+ line: 'obsoletes=0'
+ when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+
- name: ensure docker packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args:
pkg: "{{item.name}}"
force: "{{item.force|default(omit)}}"
+ conf_file: "{{item.yum_conf|default(omit)}}"
state: present
+ update_cache: yes
register: docker_task_result
until: docker_task_result|succeeded
retries: 4
@@ -70,6 +126,32 @@
with_items: "{{ docker_package_info.pkgs }}"
notify: restart docker
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
+ ignore_errors: true
+
+- name: get available packages on Ubuntu
+ command: apt-cache policy docker-ce
+ when: docker_task_result|failed
+ register: available_packages
+
+- name: show available packages on ubuntu
+ fail:
+ msg: "{{available_packages}}"
+ when: docker_task_result|failed
+
+# This is required to ensure any apt upgrade will not break kubernetes
+- name: Set docker pin priority to apt_preferences on Debian family
+ template:
+ src: "apt_preferences.d/debian_docker.j2"
+ dest: "/etc/apt/preferences.d/docker"
+ owner: "root"
+ mode: 0644
+ when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
+
+- name: ensure service is started if docker packages are already present
+ service:
+ name: docker
+ state: started
+ when: docker_task_result is not changed
- name: flush handlers so we can wait for docker to come up
meta: flush_handlers
diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml
new file mode 100644
index 00000000000..9d5d27ef6c3
--- /dev/null
+++ b/roles/docker/tasks/pre-upgrade.yml
@@ -0,0 +1,32 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+ package:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - docker
+ - docker-engine
+ - docker.io
+ when:
+ - ansible_os_family == 'Debian'
+ - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+ package:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - docker
+ - docker-common
+ - docker-engine
+ - docker-selinux
+ - docker-client
+ - docker-client-latest
+ - docker-latest
+ - docker-latest-logrotate
+ - docker-logrotate
+ - docker-engine-selinux
+ when:
+ - ansible_os_family == 'RedHat'
+ - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+ - not is_atomic
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 7152b442b18..8303e63ea9d 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -3,8 +3,10 @@
- name: set dns server for docker
set_fact:
docker_dns_servers: |-
- {%- if dns_mode == 'kubedns' -%}
+ {%- if dns_mode in ['kubedns', 'coredns'] -%}
{{ [ skydns_server ] }}
+ {%- elif dns_mode == 'coredns_dual' -%}
+ {{ [ skydns_server ] + [ skydns_server_secondary ] }}
{%- elif dns_mode == 'dnsmasq_kubedns' -%}
{{ [ dnsmasq_dns_server ] }}
{%- elif dns_mode == 'manual' -%}
@@ -24,7 +26,7 @@
- name: add upstream dns servers (only when dnsmasq is not used)
set_fact:
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
- when: dns_mode == 'kubedns'
+ when: dns_mode in ['kubedns', 'coredns', 'coredns_dual']
- name: add global searchdomains
set_fact:
@@ -54,7 +56,7 @@
- name: check number of nameservers
fail:
- msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
+    msg: "Too many nameservers. You can relax this check by setting docker_dns_servers_strict=false in all.yml and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
- name: rtrim number of nameservers to 3
diff --git a/roles/docker/templates/apt_preferences.d/debian_docker.j2 b/roles/docker/templates/apt_preferences.d/debian_docker.j2
new file mode 100644
index 00000000000..f21008b6c14
--- /dev/null
+++ b/roles/docker/templates/apt_preferences.d/debian_docker.j2
@@ -0,0 +1,3 @@
+Package: docker-ce
+Pin: version {{ docker_version }}.*
+Pin-Priority: 1001
\ No newline at end of file
diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2
index c70f3d89fb8..296f5a8a10e 100644
--- a/roles/docker/templates/docker-options.conf.j2
+++ b/roles/docker/templates/docker-options.conf.j2
@@ -1,3 +1,5 @@
[Service]
-Environment="DOCKER_OPTS={{ docker_options | default('') }} \
---iptables=false"
+Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
+{% if docker_mount_flags is defined and docker_mount_flags != "" %}
+MountFlags={{ docker_mount_flags }}
+{% endif %}
diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2
index 29abb6d53bb..8dc82bbb205 100644
--- a/roles/docker/templates/docker.service.j2
+++ b/roles/docker/templates/docker.service.j2
@@ -7,6 +7,9 @@ Wants=docker-storage-setup.service
{% elif ansible_os_family == "Debian" %}
After=network.target docker.socket
Wants=docker.socket
+{% elif ansible_os_family == "Suse" %}
+After=network.target containerd.socket containerd.service
+Requires=containerd.socket containerd.service
{% endif %}
[Service]
@@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID
Delegate=yes
KillMode=process
ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
+{% if ansible_os_family == "Suse" %}
+ --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
+{% endif %}
$DOCKER_OPTS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
@@ -31,7 +37,10 @@ LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=1min
-Restart=on-abnormal
+# restart the docker process if it exits prematurely
+Restart=on-failure
+StartLimitBurst=3
+StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
diff --git a/roles/docker/templates/rh_docker.repo.j2 b/roles/docker/templates/rh_docker.repo.j2
index 7cb728625dd..fe2aeac1c2e 100644
--- a/roles/docker/templates/rh_docker.repo.j2
+++ b/roles/docker/templates/rh_docker.repo.j2
@@ -1,7 +1,15 @@
-[dockerrepo]
-name=Docker Repository
+[docker-ce]
+name=Docker-CE Repository
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
+
+[docker-engine]
+name=Docker-Engine Repository
+baseurl={{ dockerproject_rh_repo_base_url }}
+enabled=1
+gpgcheck=1
+gpgkey={{ dockerproject_rh_repo_gpgkey }}
+{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml
index 0a43c7c79ea..8138996c10b 100644
--- a/roles/docker/vars/debian.yml
+++ b/roles/docker/vars/debian.yml
@@ -2,12 +2,14 @@
docker_kernel_min_version: '3.10'
# https://download.docker.com/linux/debian/
+# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
docker_versioned_pkg:
'latest': docker-ce
'1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
'1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
'17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+ '17.09': docker-ce=17.09.0~ce-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}
@@ -30,3 +32,17 @@ docker_repo_info:
deb {{ docker_debian_repo_base_url }}
{{ ansible_distribution_release|lower }}
stable
+
+dockerproject_repo_key_info:
+ pkg_key: apt_key
+ url: '{{ dockerproject_apt_repo_gpgkey }}'
+ repo_keys:
+ - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+ pkg_repo: apt_repository
+ repos:
+ - >
+ deb {{ dockerproject_apt_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
diff --git a/roles/docker/vars/redhat-aarch64.yml b/roles/docker/vars/redhat-aarch64.yml
new file mode 100644
index 00000000000..0bad0593de5
--- /dev/null
+++ b/roles/docker/vars/redhat-aarch64.yml
@@ -0,0 +1,28 @@
+---
+docker_kernel_min_version: '0'
+
+# override defaults; 17.03 is missing for aarch64
+docker_version: '1.13'
+
+# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
+# or do 'yum --showduplicates list docker'
+docker_versioned_pkg:
+ 'latest': docker
+ '1.12': docker-1.12.6-48.git0fdc778.el7
+ '1.13': docker-1.13.1-63.git94f4240.el7
+
+# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
+# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
+
+docker_package_info:
+ pkg_mgr: yum
+ pkgs:
+ - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+
+docker_repo_key_info:
+ pkg_key: ''
+ repo_keys: []
+
+docker_repo_info:
+ pkg_repo: ''
+ repos: []
diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml
index 96950719e9c..57970eb50e7 100644
--- a/roles/docker/vars/redhat.yml
+++ b/roles/docker/vars/redhat.yml
@@ -3,6 +3,7 @@ docker_kernel_min_version: '0'
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
+# https://yum.dockerproject.org/repo/main/centos/7
# or do 'yum --showduplicates list docker-engine'
docker_versioned_pkg:
'latest': docker-ce
@@ -10,11 +11,15 @@ docker_versioned_pkg:
'1.12': docker-engine-1.12.6-1.el7.centos
'1.13': docker-engine-1.13.1-1.el7.centos
'17.03': docker-ce-17.03.2.ce-1.el7.centos
+ '17.09': docker-ce-17.09.0.ce-1.el7.centos
'stable': docker-ce-17.03.2.ce-1.el7.centos
- 'edge': docker-ce-17.03.2.ce-1.el7.centos
+ 'edge': docker-ce-17.12.1.ce-1.el7.centos
docker_selinux_versioned_pkg:
'latest': docker-ce-selinux
+ '1.11': docker-engine-selinux-1.11.2-1.el7.centos
+ '1.12': docker-engine-selinux-1.12.6-1.el7.centos
+ '1.13': docker-engine-selinux-1.13.1-1.el7.centos
'17.03': docker-ce-selinux-17.03.2.ce-1.el7.centos
'stable': docker-ce-selinux-17.03.2.ce-1.el7.centos
'edge': docker-ce-selinux-17.03.2.ce-1.el7.centos
@@ -24,7 +29,9 @@ docker_package_info:
pkg_mgr: yum
pkgs:
- name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+ yum_conf: "{{ docker_yum_conf }}"
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
+ yum_conf: "{{ docker_yum_conf }}"
docker_repo_key_info:
pkg_key: ''
diff --git a/roles/docker/vars/suse.yml b/roles/docker/vars/suse.yml
new file mode 100644
index 00000000000..d89a50a7f4b
--- /dev/null
+++ b/roles/docker/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+docker_kernel_min_version: '0'
+
+docker_package_info:
+ pkg_mgr: zypper
+ pkgs:
+ - name: docker
+
+docker_repo_key_info:
+ pkg_key: ''
+ repo_keys: []
+
+docker_repo_info:
+ pkg_repo: ''
+ repos: []
diff --git a/roles/docker/vars/ubuntu-amd64.yml b/roles/docker/vars/ubuntu-amd64.yml
new file mode 100644
index 00000000000..b88800b4c4f
--- /dev/null
+++ b/roles/docker/vars/ubuntu-amd64.yml
@@ -0,0 +1,49 @@
+---
+docker_kernel_min_version: '3.10'
+
+# https://download.docker.com/linux/ubuntu/
+docker_versioned_pkg:
+ 'latest': docker-ce
+ '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
+ '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '17.09': docker-ce=17.09.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '17.12': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '18.06': docker-ce=18.06.1~ce~3-0~ubuntu
+ 'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ 'edge': docker-ce=18.06.1~ce~3-0~ubuntu
+
+docker_package_info:
+ pkg_mgr: apt
+ pkgs:
+ - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+ force: yes
+
+docker_repo_key_info:
+ pkg_key: apt_key
+ url: '{{ docker_ubuntu_repo_gpgkey }}'
+ repo_keys:
+ - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+
+docker_repo_info:
+ pkg_repo: apt_repository
+ repos:
+ - >
+ deb {{ docker_ubuntu_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ stable
+
+dockerproject_repo_key_info:
+ pkg_key: apt_key
+ url: '{{ dockerproject_apt_repo_gpgkey }}'
+ repo_keys:
+ - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+ pkg_repo: apt_repository
+ repos:
+ - >
+ deb {{ dockerproject_apt_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
diff --git a/roles/docker/vars/ubuntu.yml b/roles/docker/vars/ubuntu-arm64.yml
similarity index 54%
rename from roles/docker/vars/ubuntu.yml
rename to roles/docker/vars/ubuntu-arm64.yml
index 897c23ce003..3dbb1a0e4f4 100644
--- a/roles/docker/vars/ubuntu.yml
+++ b/roles/docker/vars/ubuntu-arm64.yml
@@ -1,13 +1,12 @@
---
docker_kernel_min_version: '3.10'
-
+docker_version: 17.09
# https://download.docker.com/linux/ubuntu/
docker_versioned_pkg:
'latest': docker-ce
- '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
- '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
- '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
- '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+ '17.09': docker-ce=17.09.1~ce-0~ubuntu
+ 'stable': docker-ce=17.09.1~ce-0~ubuntu
+ 'edge': docker-ce=17.12.1~ce-0~ubuntu
docker_package_info:
pkg_mgr: apt
@@ -28,3 +27,17 @@ docker_repo_info:
deb {{ docker_ubuntu_repo_base_url }}
{{ ansible_distribution_release|lower }}
stable
+
+dockerproject_repo_key_info:
+ pkg_key: apt_key
+ url: '{{ dockerproject_apt_repo_gpgkey }}'
+ repo_keys:
+ - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+ pkg_repo: apt_repository
+ repos:
+ - >
+ deb {{ dockerproject_apt_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
diff --git a/roles/docker/vars/ubuntu-bionic.yml b/roles/docker/vars/ubuntu-bionic.yml
new file mode 100644
index 00000000000..3f1fa900012
--- /dev/null
+++ b/roles/docker/vars/ubuntu-bionic.yml
@@ -0,0 +1,32 @@
+---
+docker_kernel_min_version: '3.10'
+
+use_docker_engine: false
+
+docker_versioned_pkg:
+ 'latest': docker-ce
+ '17.03': docker-ce=17.03.2~ce-0~ubuntu-xenial
+ '18.03': docker-ce=18.03.1~ce-3-0~ubuntu
+ '18.06': docker-ce=18.06.1~ce~3-0~ubuntu
+ 'stable': docker-ce=18.06.1~ce~3-0~ubuntu
+ 'edge': docker-ce=18.06.1~ce~3-0~ubuntu
+
+docker_package_info:
+ pkg_mgr: apt
+ pkgs:
+ - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+ force: yes
+
+docker_repo_key_info:
+ pkg_key: apt_key
+ url: '{{ docker_ubuntu_repo_gpgkey }}'
+ repo_keys:
+ - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+
+docker_repo_info:
+ pkg_repo: apt_repository
+ repos:
+ - >
+ deb [arch=amd64] {{ docker_ubuntu_repo_base_url }}
+ xenial
+ stable
\ No newline at end of file
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index d87f4b9239c..c27224b14ff 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -1,5 +1,5 @@
---
-local_release_dir: /tmp
+local_release_dir: /tmp/releases
# Used to only evaluate vars from download role
skip_downloads: false
@@ -10,6 +10,9 @@ skip_downloads: false
download_run_once: False
download_compress: 1
+# if this is set to true, the role will download container images
+download_container: True
+
# if this is set to true, uses the localhost for download_run_once mode
# (requires docker and sudo to access docker). You may want this option for
# local caching of docker images or for Container Linux by CoreOS cluster nodes.
@@ -23,39 +26,50 @@ download_always_pull: False
# Use the first kube-master if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+# Arch of Docker images and needed packages
+image_arch: "{{host_architecture | default('amd64')}}"
+
# Versions
-kube_version: v1.9.3
+kube_version: v1.11.3
kubeadm_version: "{{ kube_version }}"
-etcd_version: v3.2.4
+etcd_version: v3.2.18
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
-calico_version: "v2.6.2"
-calico_ctl_version: "v1.6.1"
-calico_cni_version: "v1.11.0"
-calico_policy_version: "v1.0.0"
-calico_rr_version: "v0.4.0"
-flannel_version: "v0.9.1"
+calico_version: "v3.1.3"
+calico_ctl_version: "v3.1.3"
+calico_cni_version: "v3.1.3"
+calico_policy_version: "v3.1.3"
+calico_rr_version: "v0.6.1"
+flannel_version: "v0.10.0"
flannel_cni_version: "v0.3.0"
-istio_version: "0.2.6"
-vault_version: 0.8.1
-weave_version: 2.2.0
-pod_infra_version: 3.0
+vault_version: 0.10.1
+weave_version: "2.4.0"
+pod_infra_version: 3.1
contiv_version: 1.1.7
-cilium_version: "v1.0.0-rc4"
+cilium_version: "v1.2.0"
# Download URLs
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
-kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"
-vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
+kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
+vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
+etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
+hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/hyperkube"
# Checksums
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-kubeadm_checksum: 9ebbb1fbf3a9e72d7df3f0dc02500dc8f957f39489b22cf577498c8a7c6b39b1
+etcd_checksum: b729db0732448064271ea6fdcb901773c4fe917763ca07776f22d0e5e0bd4097
+hyperkube_checksum: dac8da16dd6688e52b5dc510f5dd0a20b54350d52fb27ceba2f018ba2c8be692
+kubeadm_checksum: 422a7a32ed9a7b1eaa2a4f9d121674dfbe80eb41e206092c13017d097f75aaec
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
# Containers
+# In some cases we need a way to set --registry-mirror or --insecure-registry for docker;
+# this helps a lot for local private development or bare-metal environments.
+# To do so, define --registry-mirror or --insecure-registry for docker and point the
+# following image repository urls at your own registry.
+# Example:
+# you deploy a kubernetes cluster for local private development,
+# provide the address of your own private registry,
+# and pass the --insecure-registry option to docker.
etcd_image_repo: "quay.io/coreos/etcd"
-etcd_image_tag: "{{ etcd_version }}"
+etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
@@ -70,16 +84,16 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
-pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube-{{ image_arch }}"
+hyperkube_image_tag: "{{ kube_version }}"
+pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "xueshanf/install-socat"
install_socat_image_tag: "latest"
-netcheck_version: "v1.0"
-netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
+netcheck_version: "v1.2.2"
+netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
netcheck_agent_tag: "{{ netcheck_version }}"
-netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
+netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
netcheck_server_tag: "{{ netcheck_version }}"
weave_kube_image_repo: "weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
@@ -89,46 +103,67 @@ contiv_image_repo: "contiv/netplugin"
contiv_image_tag: "{{ contiv_version }}"
contiv_auth_proxy_image_repo: "contiv/auth_proxy"
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
+contiv_etcd_init_image_repo: "ferest/etcd-initer"
+contiv_etcd_init_image_tag: latest
cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
-
nginx_image_repo: nginx
nginx_image_tag: 1.13
dnsmasq_version: 2.78
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
-kubedns_version: 1.14.8
-kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
+kubedns_version: 1.14.10
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
kubedns_image_tag: "{{ kubedns_version }}"
-dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
+
+coredns_version: "{%- if image_arch != 'amd64' -%}1.1.3{%- else -%}1.2.2{%- endif -%}"
+coredns_image_repo: "gcr.io/google-containers/coredns"
+coredns_image_tag: "{{ coredns_version }}{%- if image_arch != 'amd64' -%}__{{ image_arch}}_linux{%- endif -%}"
+
+
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-{{ image_arch }}"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
-dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-{{ image_arch }}"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
dnsmasqautoscaler_version: 1.1.2
-dnsmasqautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
+dnsmasqautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-{{ image_arch }}"
dnsmasqautoscaler_image_tag: "{{ dnsmasqautoscaler_version }}"
kubednsautoscaler_version: 1.1.2
-kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
+kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-{{ image_arch }}"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
-elasticsearch_version: "v2.4.1"
-elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
+elasticsearch_version: "v5.6.4"
+elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
-fluentd_version: "1.22"
-fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
+fluentd_version: "v2.0.4"
+fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
-kibana_version: "v4.6.1"
-kibana_image_repo: "gcr.io/google_containers/kibana"
+kibana_version: "5.6.4"
+kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
-
-helm_version: "v2.8.1"
+helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
tiller_image_tag: "{{ helm_version }}"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
+registry_image_repo: "registry"
+registry_image_tag: "2.6"
+registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
+registry_proxy_image_tag: "0.4"
+local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
+local_volume_provisioner_image_tag: "v2.1.0"
+cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
+cephfs_provisioner_image_tag: "v2.1.0-k8s1.11"
+ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
+ingress_nginx_controller_image_tag: "0.19.0"
+ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
+ingress_nginx_default_backend_image_tag: "1.4"
+cert_manager_version: "v0.4.1"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:
@@ -137,18 +172,37 @@ downloads:
repo: "{{ netcheck_server_img_repo }}"
tag: "{{ netcheck_server_tag }}"
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
netcheck_agent:
enabled: "{{ deploy_netchecker }}"
container: true
repo: "{{ netcheck_agent_img_repo }}"
tag: "{{ netcheck_agent_tag }}"
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
etcd:
enabled: true
container: true
repo: "{{ etcd_image_repo }}"
tag: "{{ etcd_image_tag }}"
sha256: "{{ etcd_digest_checksum|default(None) }}"
+ groups:
+ - etcd
+ etcd_file:
+ enabled: true
+ file: true
+ version: "{{ etcd_version }}"
+ dest: "etcd-{{ etcd_version }}-linux-amd64.tar.gz"
+ sha256: "{{ etcd_checksum }}"
+ source_url: "{{ etcd_download_url }}"
+ url: "{{ etcd_download_url }}"
+ unarchive: true
+ owner: "root"
+ mode: "0755"
+ groups:
+ - etcd
kubeadm:
enabled: "{{ kubeadm_enabled }}"
file: true
@@ -160,145 +214,207 @@ downloads:
unarchive: false
owner: "root"
mode: "0755"
- istioctl:
- enabled: "{{ istio_enabled }}"
- file: true
- version: "{{ istio_version }}"
- dest: "istio/istioctl"
- sha256: "{{ istioctl_checksum }}"
- source_url: "{{ istioctl_download_url }}"
- url: "{{ istioctl_download_url }}"
- unarchive: false
- owner: "root"
- mode: "0755"
+ groups:
+ - k8s-cluster
hyperkube:
enabled: true
container: true
repo: "{{ hyperkube_image_repo }}"
tag: "{{ hyperkube_image_tag }}"
sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
+ hyperkube_file:
+ enabled: true
+ file: true
+ version: "{{ kube_version }}"
+ dest: "hyperkube"
+ sha256: "{{ hyperkube_checksum }}"
+ source_url: "{{ hyperkube_download_url }}"
+ url: "{{ hyperkube_download_url }}"
+ unarchive: false
+ owner: "root"
+ mode: "0755"
+ groups:
+ - k8s-cluster
cilium:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_image_repo }}"
tag: "{{ cilium_image_tag }}"
sha256: "{{ cilium_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
flannel:
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ flannel_image_repo }}"
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
flannel_cni:
enabled: "{{ kube_network_plugin == 'flannel' }}"
container: true
repo: "{{ flannel_cni_image_repo }}"
tag: "{{ flannel_cni_image_tag }}"
sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
calicoctl:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calicoctl_image_repo }}"
tag: "{{ calicoctl_image_tag }}"
sha256: "{{ calicoctl_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
calico_node:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_node_image_repo }}"
tag: "{{ calico_node_image_tag }}"
sha256: "{{ calico_node_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
calico_cni:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_cni_image_repo }}"
tag: "{{ calico_cni_image_tag }}"
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
calico_policy:
enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_policy_image_repo }}"
tag: "{{ calico_policy_image_tag }}"
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
calico_rr:
enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
container: true
repo: "{{ calico_rr_image_repo }}"
tag: "{{ calico_rr_image_tag }}"
sha256: "{{ calico_rr_digest_checksum|default(None) }}"
+ groups:
+ - calico-rr
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_kube_image_repo }}"
tag: "{{ weave_kube_image_tag }}"
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
weave_npc:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_npc_image_repo }}"
tag: "{{ weave_npc_image_tag }}"
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
contiv:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_image_repo }}"
tag: "{{ contiv_image_tag }}"
sha256: "{{ contiv_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
contiv_auth_proxy:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_auth_proxy_image_repo }}"
tag: "{{ contiv_auth_proxy_image_tag }}"
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
+ contiv_etcd_init:
+ enabled: "{{ kube_network_plugin == 'contiv' }}"
+ container: true
+ repo: "{{ contiv_etcd_init_image_repo }}"
+ tag: "{{ contiv_etcd_init_image_tag }}"
+ sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
pod_infra:
enabled: true
container: true
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
install_socat:
enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
container: true
repo: "{{ install_socat_image_repo }}"
tag: "{{ install_socat_image_tag }}"
sha256: "{{ install_socat_digest_checksum|default(None) }}"
+ groups:
+ - k8s-cluster
nginx:
- enabled: true
+ enabled: "{{ loadbalancer_apiserver_localhost }}"
container: true
repo: "{{ nginx_image_repo }}"
tag: "{{ nginx_image_tag }}"
sha256: "{{ nginx_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
dnsmasq:
enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
container: true
repo: "{{ dnsmasq_image_repo }}"
tag: "{{ dnsmasq_image_tag }}"
sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
kubedns:
- enabled: true
+ enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ kubedns_image_repo }}"
tag: "{{ kubedns_image_tag }}"
sha256: "{{ kubedns_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ coredns:
+ enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
+ container: true
+ repo: "{{ coredns_image_repo }}"
+ tag: "{{ coredns_image_tag }}"
+ sha256: "{{ coredns_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
dnsmasq_nanny:
- enabled: true
+ enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ dnsmasq_nanny_image_repo }}"
tag: "{{ dnsmasq_nanny_image_tag }}"
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
dnsmasq_sidecar:
- enabled: true
+ enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ dnsmasq_sidecar_image_repo }}"
tag: "{{ dnsmasq_sidecar_image_tag }}"
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
kubednsautoscaler:
- enabled: true
+ enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ kubednsautoscaler_image_repo }}"
tag: "{{ kubednsautoscaler_image_tag }}"
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
testbox:
- enabled: true
+ enabled: false
container: true
repo: "{{ test_image_repo }}"
tag: "{{ test_image_tag }}"
@@ -309,35 +425,45 @@ downloads:
repo: "{{ elasticsearch_image_repo }}"
tag: "{{ elasticsearch_image_tag }}"
sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
fluentd:
enabled: "{{ efk_enabled }}"
container: true
repo: "{{ fluentd_image_repo }}"
tag: "{{ fluentd_image_tag }}"
sha256: "{{ fluentd_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
kibana:
enabled: "{{ efk_enabled }}"
container: true
repo: "{{ kibana_image_repo }}"
tag: "{{ kibana_image_tag }}"
sha256: "{{ kibana_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
helm:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ helm_image_repo }}"
tag: "{{ helm_image_tag }}"
sha256: "{{ helm_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
tiller:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ tiller_image_repo }}"
tag: "{{ tiller_image_tag }}"
sha256: "{{ tiller_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
vault:
enabled: "{{ cert_management == 'vault' }}"
container: "{{ vault_deployment_type != 'host' }}"
file: "{{ vault_deployment_type == 'host' }}"
- dest: "vault/vault_{{ vault_version }}_linux_amd64.zip"
+ dest: "vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
mode: "0755"
owner: "vault"
repo: "{{ vault_image_repo }}"
@@ -347,6 +473,64 @@ downloads:
unarchive: true
url: "{{ vault_download_url }}"
version: "{{ vault_version }}"
+ groups:
+ - vault
+ registry:
+ enabled: "{{ registry_enabled }}"
+ container: true
+ repo: "{{ registry_image_repo }}"
+ tag: "{{ registry_image_tag }}"
+ sha256: "{{ registry_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ registry_proxy:
+ enabled: "{{ registry_enabled }}"
+ container: true
+ repo: "{{ registry_proxy_image_repo }}"
+ tag: "{{ registry_proxy_image_tag }}"
+ sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ local_volume_provisioner:
+ enabled: "{{ local_volume_provisioner_enabled }}"
+ container: true
+ repo: "{{ local_volume_provisioner_image_repo }}"
+ tag: "{{ local_volume_provisioner_image_tag }}"
+ sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ cephfs_provisioner:
+ enabled: "{{ cephfs_provisioner_enabled }}"
+ container: true
+ repo: "{{ cephfs_provisioner_image_repo }}"
+ tag: "{{ cephfs_provisioner_image_tag }}"
+ sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ ingress_nginx_controller:
+ enabled: "{{ ingress_nginx_enabled }}"
+ container: true
+ repo: "{{ ingress_nginx_controller_image_repo }}"
+ tag: "{{ ingress_nginx_controller_image_tag }}"
+ sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ ingress_nginx_default_backend:
+ enabled: "{{ ingress_nginx_enabled }}"
+ container: true
+ repo: "{{ ingress_nginx_default_backend_image_repo }}"
+ tag: "{{ ingress_nginx_default_backend_image_tag }}"
+ sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
+ cert_manager_controller:
+ enabled: "{{ cert_manager_enabled }}"
+ container: true
+ repo: "{{ cert_manager_controller_image_repo }}"
+ tag: "{{ cert_manager_controller_image_tag }}"
+ sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+ groups:
+ - kube-node
download_defaults:
container: false
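Each download entry now carries a groups list, so a host only handles items whose groups intersect its own inventory groups. A sketch of what an entry looks like under the new scheme (the name and values are hypothetical):

downloads:
  my_sidecar:                      # hypothetical extra image
    enabled: true
    container: true
    repo: "example.org/sidecar"
    tag: "v0.1"
    sha256: "{{ my_sidecar_digest_checksum|default(None) }}"
    groups:
      - kube-node                  # only kube-node hosts would pull it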
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index bbf7cec85f3..7e3923606fb 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -2,7 +2,7 @@
- name: container_download | Make download decision if pull is required by tag or sha256
include_tasks: set_docker_image_facts.yml
delegate_to: "{{ download_delegate if download_run_once or omit }}"
- delegate_facts: no
+ delegate_facts: yes
run_once: "{{ download_run_once }}"
when:
- download.enabled
@@ -38,3 +38,4 @@
- download.enabled
- download.container
- pull_required|default(download_always_pull)
+ - group_names | intersect(download.groups) | length
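A short worked example of how that last condition gates the pull, using hypothetical hosts: if a host's group_names is ['kube-node', 'k8s-cluster'] and the download's groups is ['etcd'], the intersection is empty and the task is skipped; if the download's groups is ['k8s-cluster'], the intersection is non-empty and the pull proceeds.

# illustrative evaluation of the when-clause (not part of the change)
# group_names:      ['kube-node', 'k8s-cluster']
# download.groups:  ['etcd']         -> intersect -> []               -> skip
# download.groups:  ['k8s-cluster']  -> intersect -> ['k8s-cluster']  -> run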
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 664fa472858..832fec41ea0 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -13,6 +13,7 @@
when:
- download.enabled
- download.file
+ - group_names | intersect(download.groups) | length
- name: file_download | Download item
get_url:
@@ -28,6 +29,7 @@
when:
- download.enabled
- download.file
+ - group_names | intersect(download.groups) | length
- name: file_download | Extract archives
unarchive:
@@ -40,3 +42,4 @@
- download.enabled
- download.file
- download.unarchive|default(False)
+ - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/download_prep.yml b/roles/download/tasks/download_prep.yml
index 1fd7abf2fb2..b44da45dae7 100644
--- a/roles/download/tasks/download_prep.yml
+++ b/roles/download/tasks/download_prep.yml
@@ -7,6 +7,7 @@
failed_when: false
changed_when: false
check_mode: no
+ when: download_container
- name: container_download | Create dest directory for saved/loaded container images
file:
@@ -15,6 +16,7 @@
recurse: yes
mode: 0755
owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
+ when: download_container
- name: container_download | create local directory for saved/loaded container images
file:
@@ -28,5 +30,6 @@
when:
- download_run_once
- download_delegate == 'localhost'
+ - download_container
tags:
- localhost
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index c6e910e5df5..1984f626d70 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -11,6 +11,7 @@
when:
- not skip_downloads|default(false)
- item.value.enabled
+ - (not (item.value.container|default(False))) or (item.value.container and download_container)
- name: "Sync container"
include_tasks: sync_container.yml
@@ -20,5 +21,6 @@
when:
- not skip_downloads|default(false)
- item.value.enabled
- - item.value.container
+ - "{{ item.value.container | default(False) }}"
- download_run_once
+ - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml
index 7a9b73e38b5..87a73cfbd71 100644
--- a/roles/download/tasks/set_docker_image_facts.yml
+++ b/roles/download/tasks/set_docker_image_facts.yml
@@ -9,7 +9,7 @@
- name: Register docker images info
raw: >-
- {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
+ {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} if .RepoTags {{ '}}' }}{{ '{{' }} (index .RepoTags 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}{{ '{{' }} if .RepoDigests {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}" | tr '\n' ','
no_log: true
register: docker_images
failed_when: false
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index a15f78cde44..c7e37d7f3d6 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -17,6 +17,7 @@
- download.enabled
- download.container
- download_run_once
+
tags:
- facts
@@ -42,7 +43,7 @@
- name: container_download | Stat saved container image
stat:
- path: "{{fname}}"
+ path: "{{ fname }}"
register: img
changed_when: false
delegate_to: "{{ download_delegate }}"
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 4986ad25739..57e1bc078c0 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -1,6 +1,10 @@
---
# Set to false to only do certificate management
etcd_cluster_setup: true
+etcd_events_cluster_setup: false
+
+# Set to true to separate k8s events into a different etcd cluster
+etcd_events_cluster_enabled: false
etcd_backup_prefix: "/var/backups"
etcd_data_dir: "/var/lib/etcd"
@@ -12,22 +16,38 @@ etcd_cert_group: root
# Note: This does not set up DNS entries. It simply adds the following DNS
# entries to the certificate
etcd_cert_alt_names:
- - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
- - "etcd.{{ system_namespace }}.svc"
- - "etcd.{{ system_namespace }}"
+ - "etcd.kube-system.svc.{{ dns_domain }}"
+ - "etcd.kube-system.svc"
+ - "etcd.kube-system"
- "etcd"
+etcd_cert_alt_ips: []
etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
+# etcd_snapshot_count: "10000"
+
+# Parameters for ionice
+# -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
+# -n takes an integer between 0 (highest priority) and 7 (lowest priority)
+# etcd_ionice: "-c2 -n0"
+
etcd_metrics: "basic"
+## A dictionary of extra environment variables to add to etcd.env, formatted like:
+## etcd_extra_vars:
+## ETCD_VAR1: "value1"
+## ETCD_VAR2: "value2"
+etcd_extra_vars: {}
+
# Limits
# Limit memory only if <4GB memory on host. 0=unlimited
etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}"
+# etcd_quota_backend_bytes: "2G"
+
# Uncomment to set CPU share for etcd
# etcd_cpu_limit: 300m
@@ -37,7 +57,7 @@ etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr',
etcd_compaction_retention: "8"
-etcd_vault_mount_path: etcd
+etcd_vault_mount_path: "/etcd"
# Force clients like etcdctl to use TLS certs (different than peer security)
etcd_secure_client: true
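The new defaults expose several optional knobs: a separate events cluster, extra certificate SANs, arbitrary extra env vars, and a backend quota. A hedged group_vars sketch combining them, with illustrative values only:

etcd_events_cluster_enabled: true     # run a second etcd cluster for k8s events
etcd_cert_alt_ips:
  - "192.0.2.10"                      # example VIP added to the etcd certificates
etcd_extra_vars:
  ETCD_VAR1: "value1"                 # any extra variable ends up in etcd.env
etcd_quota_backend_bytes: "2G"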
diff --git a/roles/etcd/files/make-ssl-etcd.sh b/roles/etcd/files/make-ssl-etcd.sh
index 5544d6639fb..d661a2a0d05 100755
--- a/roles/etcd/files/make-ssl-etcd.sh
+++ b/roles/etcd/files/make-ssl-etcd.sh
@@ -65,7 +65,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then
cp $SSLDIR/{ca.pem,ca-key.pem} .
else
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
- openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
+ openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
fi
# ETCD member
@@ -75,12 +75,12 @@ if [ -n "$MASTERS" ]; then
# Member key
openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1
- openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+ openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
# Admin key
openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1
- openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+ openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
done
fi
@@ -90,9 +90,14 @@ if [ -n "$HOSTS" ]; then
cn="${host%%.*}"
openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1
- openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+ openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
done
fi
# Install certs
+if [ -e "$SSLDIR/ca-key.pem" ]; then
+ # Do not overwrite the existing CA
+ rm -f ca.pem ca-key.pem
+fi
+
mv *.pem ${SSLDIR}/
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 247b2ae004c..d40a3740de0 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -39,6 +39,8 @@
environment:
ETCDCTL_API: 2
retries: 3
+ register: backup_v2_command
+ until: backup_v2_command.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
- name: Backup etcd v3 data
@@ -48,7 +50,9 @@
snapshot save {{ etcd_backup_directory }}/snapshot.db
environment:
ETCDCTL_API: 3
- ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
retries: 3
+ register: etcd_backup_v3_command
+ until: etcd_backup_v3_command.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index a72cbd515bf..3a46978a68d 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -10,7 +10,7 @@
- name: restart etcd-events
command: /bin/true
notify:
- - etcd-events | reload systemd
+ - etcd | reload systemd
- reload etcd-events
- wait for etcd-events up
@@ -19,9 +19,6 @@
- name: etcd | reload systemd
command: systemctl daemon-reload
-- name: etcd-events | reload systemd
- command: systemctl daemon-reload
-
- name: reload etcd
service:
name: etcd
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index d7d3920c65b..674d202e0fd 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -1,20 +1,20 @@
---
-- name: Configure | Check if member is in etcd cluster
- shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
- register: etcd_member_in_cluster
+- name: Configure | Check if etcd cluster is healthy
+ shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+ register: etcd_cluster_is_healthy
ignore_errors: true
changed_when: false
check_mode: no
- when: is_etcd_master
+ when: is_etcd_master and etcd_cluster_setup
tags:
- facts
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-- name: Configure | Check if member is in etcd-events cluster
- shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
- register: etcd_events_member_in_cluster
+- name: Configure | Check if etcd-events cluster is healthy
+ shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+ register: etcd_events_cluster_is_healthy
ignore_errors: true
changed_when: false
check_mode: no
@@ -22,47 +22,112 @@
tags:
- facts
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+ when: is_etcd_master
- name: Configure | Copy etcd.service systemd file
template:
src: "etcd-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd.service
backup: yes
- when: is_etcd_master
- notify: restart etcd
+ when: is_etcd_master and etcd_cluster_setup
- name: Configure | Copy etcd-events.service systemd file
template:
- src: "etcd-events-host.service.j2"
+ src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-events.service
backup: yes
- when: is_etcd_master and etcd_deployment_type == "host" and etcd_events_cluster_setup
- notify: restart etcd-events
+ when: is_etcd_master and etcd_events_cluster_setup
-- name: Configure | Copy etcd-events.service systemd file
- template:
- src: "etcd-events-docker.service.j2"
- dest: /etc/systemd/system/etcd-events.service
- backup: yes
- when: is_etcd_master and etcd_deployment_type == "docker" and etcd_events_cluster_setup
- notify: restart etcd-events
+- name: Configure | reload systemd
+ command: systemctl daemon-reload
+ when: is_etcd_master
+
+- name: Configure | Ensure etcd is running
+ service:
+ name: etcd
+ state: started
+ enabled: yes
+ when: is_etcd_master and etcd_cluster_setup
+
+- name: Configure | Ensure etcd-events is running
+ service:
+ name: etcd-events
+ state: started
+ enabled: yes
+ when: is_etcd_master and etcd_events_cluster_setup
+
+- name: Configure | Check if etcd cluster is healthy
+ shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+ register: etcd_cluster_is_healthy
+ until: etcd_cluster_is_healthy.rc == 0
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ ignore_errors: false
+ changed_when: false
+ check_mode: no
+ when: is_etcd_master and etcd_cluster_setup
+ tags:
+ - facts
+ environment:
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if etcd-events cluster is healthy
+ shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+ register: etcd_events_cluster_is_healthy
+ until: etcd_events_cluster_is_healthy.rc == 0
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ ignore_errors: false
+ changed_when: false
+ check_mode: no
+ when: is_etcd_master and etcd_events_cluster_setup
+ tags:
+ - facts
+ environment:
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if member is in etcd cluster
+ shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+ register: etcd_member_in_cluster
+ ignore_errors: true
+ changed_when: false
+ check_mode: no
+ when: is_etcd_master and etcd_cluster_setup
+ tags:
+ - facts
+ environment:
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if member is in etcd-events cluster
+ shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+ register: etcd_events_member_in_cluster
+ ignore_errors: true
+ changed_when: false
+ check_mode: no
+ when: is_etcd_master and etcd_events_cluster_setup
+ tags:
+ - facts
+ environment:
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
- name: Configure | Join member(s) to etcd cluster one at a time
include_tasks: join_etcd_member.yml
vars:
target_node: "{{ item }}"
- loop_control:
- pause: 10
with_items: "{{ groups['etcd'] }}"
- when: inventory_hostname == item and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
+ when: inventory_hostname == item and etcd_cluster_setup and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
- name: Configure | Join member(s) to etcd-events cluster one at a time
- include_tasks: join_etcd-evetns_member.yml
+ include_tasks: join_etcd-events_member.yml
vars:
target_node: "{{ item }}"
- loop_control:
- pause: 10
with_items: "{{ groups['etcd'] }}"
when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 3fbafc52a48..8ef9a3dccec 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -15,6 +15,7 @@
owner: root
mode: 0700
run_once: yes
+ when: inventory_hostname == groups['etcd'][0]
delegate_to: "{{groups['etcd'][0]}}"
- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
@@ -26,6 +27,7 @@
recurse: yes
mode: 0700
run_once: yes
+ when: inventory_hostname == groups['etcd'][0]
delegate_to: "{{groups['etcd'][0]}}"
- name: Gen_certs | write openssl config
@@ -34,7 +36,9 @@
dest: "{{ etcd_config_dir }}/openssl.conf"
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
- when: gen_certs|default(false)
+ when:
+ - gen_certs|default(false)
+ - inventory_hostname == groups['etcd'][0]
- name: Gen_certs | copy certs generation script
copy:
@@ -43,8 +47,9 @@
mode: 0700
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
- when: gen_certs|default(false)
-
+ when:
+ - gen_certs|default(false)
+ - inventory_hostname == groups['etcd'][0]
- name: Gen_certs | run cert generation script
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
@@ -61,7 +66,9 @@
{% endfor %}"
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
- when: gen_certs|default(false)
+ when:
+ - gen_certs|default(false)
+ - inventory_hostname == groups['etcd'][0]
notify: set etcd_secret_changed
- set_fact:
@@ -160,5 +167,5 @@
group: "{{ etcd_cert_group }}"
state: directory
owner: kube
- mode: "u=rwX,g-rwx,o-rwx"
+ mode: "640"
recurse: yes
diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml
index aa3274bd742..4e3325b4f09 100644
--- a/roles/etcd/tasks/gen_certs_vault.yml
+++ b/roles/etcd/tasks/gen_certs_vault.yml
@@ -26,6 +26,9 @@
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
+ {%- for cert_alt_ip in etcd_cert_alt_ips -%}
+ "{{ cert_alt_ip }}",
+ {%- endfor -%}
"127.0.0.1","::1"
]
issue_cert_path: "{{ item }}"
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index 58e1485a54a..7859134b013 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -9,22 +9,22 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
+ when: etcd_cluster_setup
- name: Install etcd launch script
template:
src: etcd.j2
dest: "{{ bin_dir }}/etcd"
owner: 'root'
- mode: 0755
+ mode: 0750
backup: yes
- notify: restart etcd
+ when: etcd_cluster_setup
- name: Install etcd-events launch script
template:
src: etcd-events.j2
dest: "{{ bin_dir }}/etcd-events"
owner: 'root'
- mode: 0755
+ mode: 0750
backup: yes
when: etcd_events_cluster_setup
- notify: restart etcd-events
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
index 9e83905bc89..0dc226e666e 100644
--- a/roles/etcd/tasks/install_host.yml
+++ b/roles/etcd/tasks/install_host.yml
@@ -1,12 +1,21 @@
---
-- name: Install | Copy etcdctl and etcd binary from docker container
- command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy;
- {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
- {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcdctl {{ bin_dir }}/etcdctl &&
- {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcd {{ bin_dir }}/etcd &&
- {{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy"
- register: etcd_task_result
- until: etcd_task_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
+- name: install | Copy etcd binary from download dir
+ shell: |
+ rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcd" "{{ bin_dir }}/etcd"
+ rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcdctl" "{{ bin_dir }}/etcdctl"
changed_when: false
+ when: etcd_cluster_setup
+
+- name: install | Set etcd binary permissions
+ file:
+ path: "{{ bin_dir }}/etcd"
+ mode: "0755"
+ state: file
+ when: etcd_cluster_setup
+
+- name: install | Set etcdctl binary permissions
+ file:
+ path: "{{ bin_dir }}/etcdctl"
+ mode: "0755"
+ state: file
+ when: etcd_cluster_setup
\ No newline at end of file
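The host deployment now copies the etcd binaries from the download role's release directory instead of extracting them from the container image. A sketch of the layout it expects, assuming the defaults introduced earlier in this change:

# with local_release_dir: /tmp/releases and etcd_version: v3.2.18,
# the etcd_file download unpacks to:
#   /tmp/releases/etcd-v3.2.18-linux-amd64/etcd
#   /tmp/releases/etcd-v3.2.18-linux-amd64/etcdctl
# and the rsync above installs both into {{ bin_dir }}.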
diff --git a/roles/etcd/tasks/install_rkt.yml b/roles/etcd/tasks/install_rkt.yml
index 5df623c8b4b..2f693b37115 100644
--- a/roles/etcd/tasks/install_rkt.yml
+++ b/roles/etcd/tasks/install_rkt.yml
@@ -11,6 +11,7 @@
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
environment: "{{proxy_env}}"
+ when: etcd_cluster_setup
- name: Install | Copy etcdctl binary from rkt container
command: >-
@@ -26,3 +27,4 @@
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
environment: "{{proxy_env}}"
+ when: etcd_cluster_setup
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 104ef22dfbf..d5df065f92f 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,5 +1,5 @@
---
-- name: Join Member | Add member to cluster
+- name: Join Member | Add member to etcd-events cluster
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} member add {{ etcd_member_name }} {{ etcd_events_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0
@@ -7,8 +7,8 @@
delay: "{{ retry_stagger | random + 3 }}"
when: target_node == inventory_hostname
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
- include_tasks: refresh_config.yml
vars:
@@ -23,17 +23,6 @@
{%- endfor -%}
when: target_node == inventory_hostname
-- name: Join Member | reload systemd
- command: systemctl daemon-reload
- when: target_node == inventory_hostname
-
-- name: Join Member | Ensure etcd-events is running
- service:
- name: etcd-events
- state: started
- enabled: yes
- when: target_node == inventory_hostname
-
- name: Join Member | Ensure member is in etcd-events cluster
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
register: etcd_events_member_in_cluster
@@ -43,5 +32,5 @@
- facts
when: target_node == inventory_hostname
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index b7801f0c916..60cfd16cdb6 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -1,5 +1,5 @@
---
-- name: Join Member | Add member to cluster
+- name: Join Member | Add member to etcd cluster
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0
@@ -7,8 +7,8 @@
delay: "{{ retry_stagger | random + 3 }}"
when: target_node == inventory_hostname
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
- include_tasks: refresh_config.yml
vars:
@@ -23,18 +23,7 @@
{%- endfor -%}
when: target_node == inventory_hostname
-- name: Join Member | reload systemd
- command: systemctl daemon-reload
- when: target_node == inventory_hostname
-
-- name: Join Member | Ensure etcd is running
- service:
- name: etcd
- state: started
- enabled: yes
- when: target_node == inventory_hostname
-
-- name: Join Member | Ensure member is in cluster
+- name: Join Member | Ensure member is in etcd cluster
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
register: etcd_member_in_cluster
changed_when: false
@@ -43,5 +32,5 @@
- facts
when: target_node == inventory_hostname
environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+ ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_member.yml b/roles/etcd/tasks/join_member.yml
deleted file mode 100644
index b7801f0c916..00000000000
--- a/roles/etcd/tasks/join_member.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Join Member | Add member to cluster
- shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
- register: member_add_result
- until: member_add_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when: target_node == inventory_hostname
- environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-
-- include_tasks: refresh_config.yml
- vars:
- etcd_peer_addresses: >-
- {% for host in groups['etcd'] -%}
- {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
- {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2380,
- {%- endif -%}
- {%- if loop.last -%}
- {{ etcd_member_name }}={{ etcd_peer_url }}
- {%- endif -%}
- {%- endfor -%}
- when: target_node == inventory_hostname
-
-- name: Join Member | reload systemd
- command: systemctl daemon-reload
- when: target_node == inventory_hostname
-
-- name: Join Member | Ensure etcd is running
- service:
- name: etcd
- state: started
- enabled: yes
- when: target_node == inventory_hostname
-
-- name: Join Member | Ensure member is in cluster
- shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
- register: etcd_member_in_cluster
- changed_when: false
- check_mode: no
- tags:
- - facts
- when: target_node == inventory_hostname
- environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index bb299126be4..db59a983fa7 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -18,58 +18,45 @@
register: "etcd_client_cert_serial_result"
changed_when: false
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
+ tags:
+ - master
+ - network
- name: Set etcd_client_cert_serial
set_fact:
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout }}"
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
+ tags:
+ - master
+ - network
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master
tags:
- upgrade
-- import_tasks: set_cluster_health.yml
- when: is_etcd_master and etcd_cluster_setup
-
-- import_tasks: configure.yml
- when: is_etcd_master and etcd_cluster_setup
+- include_tasks: configure.yml
+ when: is_etcd_master
-- import_tasks: refresh_config.yml
- when: is_etcd_master and etcd_cluster_setup
+- include_tasks: refresh_config.yml
+ when: is_etcd_master
- name: Restart etcd if certs changed
- command: /bin/true
- notify: restart etcd
- when: is_etcd_master and etcd_secret_changed|default(false)
-
-- name: Restart etcd-events if certs changed
- command: /bin/true
- notify: restart etcd
- when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
-
-# reload-systemd
-- meta: flush_handlers
-
-- name: Ensure etcd is running
service:
name: etcd
- state: started
+ state: restarted
enabled: yes
- when: is_etcd_master and etcd_cluster_setup
+ when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false)
-- name: Ensure etcd-events is running
+- name: Restart etcd-events if certs changed
service:
name: etcd-events
- state: started
+ state: restarted
enabled: yes
- when: is_etcd_master and etcd_events_cluster_setup
+ when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`
# state instead of `new`.
-- import_tasks: set_cluster_health.yml
- when: is_etcd_master and etcd_cluster_setup
-
-- import_tasks: refresh_config.yml
- when: is_etcd_master and etcd_cluster_setup
+- include_tasks: refresh_config.yml
+ when: is_etcd_master
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index 92766330130..21c308fb018 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -4,7 +4,7 @@
src: etcd.env.j2
dest: /etc/etcd.env
notify: restart etcd
- when: is_etcd_master
+ when: is_etcd_master and etcd_cluster_setup
- name: Refresh config | Create etcd-events config file
template:
diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml
deleted file mode 100644
index 68e738031a5..00000000000
--- a/roles/etcd/tasks/set_cluster_health.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Configure | Check if etcd cluster is healthy
- shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
- register: etcd_cluster_is_healthy
- ignore_errors: true
- changed_when: false
- check_mode: no
- when: is_etcd_master
- tags:
- - facts
- environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-
-- name: Configure | Check if etcd-events cluster is healthy
- shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
- register: etcd_events_cluster_is_healthy
- ignore_errors: true
- changed_when: false
- check_mode: no
- when: is_etcd_master and etcd_events_cluster_setup
- tags:
- - facts
- environment:
- ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/sync_etcd_master_certs.yml b/roles/etcd/tasks/sync_etcd_master_certs.yml
index b810ff775ab..3990e569df3 100644
--- a/roles/etcd/tasks/sync_etcd_master_certs.yml
+++ b/roles/etcd/tasks/sync_etcd_master_certs.yml
@@ -13,6 +13,8 @@
sync_file: "{{ item }}"
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: [ "{{ inventory_hostname }}" ]
+ sync_file_owner: kube
+ sync_file_group: root
sync_file_is_cert: true
with_items: "{{ etcd_master_cert_list|d([]) }}"
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index dd36554fb95..0ff3638601b 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -8,6 +8,8 @@
/etc/pki/ca-trust/source/anchors/etcd-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/etcd-ca.pem
+ {%- elif ansible_os_family == "Suse" -%}
+ /etc/pki/trust/anchors/etcd-ca.pem
{%- endif %}
tags:
- facts
@@ -19,9 +21,9 @@
remote_src: true
register: etcd_ca_cert
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates
- when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+ when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
diff --git a/roles/etcd/templates/etcd-events-rkt.service.j2 b/roles/etcd/templates/etcd-events-rkt.service.j2
new file mode 100644
index 00000000000..7886a038b84
--- /dev/null
+++ b/roles/etcd/templates/etcd-events-rkt.service.j2
@@ -0,0 +1,31 @@
+[Unit]
+Description=etcd events rkt wrapper
+Documentation=https://github.com/coreos/etcd
+Wants=network.target
+
+[Service]
+Restart=on-failure
+RestartSec=10s
+TimeoutStartSec=0
+LimitNOFILE=40000
+
+ExecStart=/usr/bin/rkt run \
+--uuid-file-save=/var/run/etcd-events.uuid \
+--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
+--mount volume=hosts,target=/etc/hosts \
+--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
+--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
+--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
+--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
+--volume=etcd-data-dir,kind=host,source={{ etcd_events_data_dir }},readOnly=false \
+--mount=volume=etcd-data-dir,target={{ etcd_events_data_dir }} \
+--set-env-file=/etc/etcd-events.env \
+--stage1-from-dir=stage1-fly.aci \
+{{ etcd_image_repo }}:{{ etcd_image_tag }} \
+--name={{ etcd_member_name | default("etcd-events") }}
+
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd-events.uuid
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd-events.uuid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/etcd/templates/etcd-events.env.j2 b/roles/etcd/templates/etcd-events.env.j2
index c168ab03e42..6caeb1cf9da 100644
--- a/roles/etcd/templates/etcd-events.env.j2
+++ b/roles/etcd/templates/etcd-events.env.j2
@@ -1,7 +1,7 @@
ETCD_DATA_DIR={{ etcd_events_data_dir }}
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
-ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
+ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
ETCD_METRICS={{ etcd_metrics }}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2381,https://127.0.0.1:2381
@@ -13,6 +13,12 @@ ETCD_NAME={{ etcd_member_name }}-events
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}
ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}
+{% if etcd_quota_backend_bytes is defined %}
+ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}
+{% endif %}
# TLS settings
ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
@@ -24,3 +30,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
+
+{% if host_architecture != "amd64" -%}
+ETCD_UNSUPPORTED_ARCH={{host_architecture}}
+{%- endif %}
diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2
index 6a917d127a3..a682f1f71e0 100644
--- a/roles/etcd/templates/etcd.env.j2
+++ b/roles/etcd/templates/etcd.env.j2
@@ -1,3 +1,4 @@
+# Environment file for etcd {{ etcd_version }}
ETCD_DATA_DIR={{ etcd_data_dir }}
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
@@ -13,6 +14,12 @@ ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
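+# Optional tuning; each setting below is rendered only when the variable is defined in the inventory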
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}
+{% if etcd_quota_backend_bytes is defined %}
+ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}
+{% endif %}
# TLS settings
ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
@@ -24,3 +31,11 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
+
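+# Pass through any extra ETCD_* settings supplied via etcd_extra_vars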
+{% for key, value in etcd_extra_vars.items() %}
+{{ key }}={{ value }}
+{% endfor %}
+
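+# etcd only officially supports amd64; other architectures must set ETCD_UNSUPPORTED_ARCH to start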
+{% if host_architecture != "amd64" -%}
+ETCD_UNSUPPORTED_ARCH={{host_architecture}}
+{%- endif %}
diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2
old mode 100644
new mode 100755
index 9ac08e0738a..a6628d8fb97
--- a/roles/etcd/templates/etcd.j2
+++ b/roles/etcd/templates/etcd.j2
@@ -6,16 +6,19 @@
-v /etc/ssl/certs:/etc/ssl/certs:ro \
-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
-v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \
- {% if etcd_memory_limit is defined %}
+{% if etcd_memory_limit is defined %}
--memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
- {% endif %}
- {% if etcd_cpu_limit is defined %}
+{% endif %}
+{% if etcd_cpu_limit is defined %}
--cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
- {% endif %}
- {% if etcd_blkio_weight is defined %}
+{% endif %}
+{% if etcd_blkio_weight is defined %}
--blkio-weight={{ etcd_blkio_weight }} \
- {% endif %}
+{% endif %}
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
+{% if etcd_ionice is defined %}
+ /bin/ionice {{ etcd_ionice }} \
+{% endif %}
/usr/local/bin/etcd \
"$@"
diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2
index 48327f0bfa6..402417827c1 100644
--- a/roles/etcd/templates/openssl.conf.j2
+++ b/roles/etcd/templates/openssl.conf.j2
@@ -1,4 +1,4 @@
-[req]
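+{# Running counters for the DNS.* / IP.* SAN indices; increment() bumps the matching counter after each entry #}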
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
@@ -25,19 +25,21 @@ authorityKeyIdentifier=keyid:always,issuer
[alt_names]
DNS.1 = localhost
{% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
{% endif %}
-{% set idx = groups['etcd'] | length | int + 3 %}
{% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
{% endfor %}
{% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
+{% endfor %}
+{% for cert_alt_ip in etcd_cert_alt_ips %}
+IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
{% endfor %}
-{% set idx = groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index 350f663a132..6a60e09d84e 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -1,6 +1,6 @@
---
# Versions
-kubedns_version: 1.14.8
+kubedns_version: 1.14.10
kubednsautoscaler_version: 1.1.2
# Limits for dnsmasq/kubedns apps
@@ -10,14 +10,19 @@ dns_memory_requests: 70Mi
kubedns_min_replicas: 2
kubedns_nodes_per_replica: 10
+# CoreDNS
+coredns_replicas: 2
+
# Images
-kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
+image_arch: "{{host_architecture}}"
+
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
kubedns_image_tag: "{{ kubedns_version }}"
-dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-{{ image_arch }}"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
-dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-{{ image_arch }}"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
-kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
+kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-{{ image_arch }}"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
# Netchecker
@@ -38,10 +43,16 @@ netchecker_server_memory_limit: 256M
netchecker_server_cpu_requests: 50m
netchecker_server_memory_requests: 64M
+# SecurityContext when PodSecurityPolicy is enabled
+netchecker_agent_user: 1000
+netchecker_server_user: 1000
+netchecker_agent_group: 1000
+netchecker_server_group: 1000
+
# Dashboard
dashboard_enabled: true
-dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-amd64
-dashboard_image_tag: v1.8.3
+dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-{{ image_arch }}
+dashboard_image_tag: v1.10.0
# Limits for dashboard
dashboard_cpu_limit: 100m
@@ -56,11 +67,9 @@ dashboard_certs_secret_name: kubernetes-dashboard-certs
dashboard_tls_key_file: dashboard.key
dashboard_tls_cert_file: dashboard.crt
+# Override dashboard default settings
+dashboard_token_ttl: 900
+
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
-
-rbac_resources:
- - sa
- - clusterrole
- - clusterrolebinding
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
new file mode 100644
index 00000000000..e77f1e7991e
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -0,0 +1,54 @@
+---
+- name: Kubernetes Apps | Delete old CoreDNS resources
+ kube:
+ name: "coredns"
+ namespace: "kube-system"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item }}"
+ state: absent
+ with_items:
+ - 'deploy'
+ - 'configmap'
+ - 'svc'
+ tags:
+ - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm CoreDNS
+ kube:
+ name: "coredns"
+ namespace: "kube-system"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "deploy"
+ state: absent
+ when:
+ - kubeadm_enabled|default(false)
+ - kubeadm_init.changed|default(false)
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Delete old KubeDNS resources
+ kube:
+ name: "kube-dns"
+ namespace: "kube-system"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item }}"
+ state: absent
+ with_items:
+ - 'deploy'
+ - 'svc'
+ tags:
+ - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm KubeDNS
+ kube:
+ name: "kube-dns"
+ namespace: "kube-system"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item }}"
+ state: absent
+ with_items:
+ - 'deploy'
+ - 'svc'
+ when:
+ - kubeadm_enabled|default(false)
+ - kubeadm_init.changed|default(false)
+ - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml
new file mode 100644
index 00000000000..c52cf7ba8fc
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml
@@ -0,0 +1,37 @@
+---
+- name: Kubernetes Apps | Lay Down CoreDNS Template
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/{{ item.file }}"
+ with_items:
+ - { name: coredns, file: coredns-config.yml, type: configmap }
+ - { name: coredns, file: coredns-sa.yml, type: sa }
+ - { name: coredns, file: coredns-deployment.yml, type: deployment }
+ - { name: coredns, file: coredns-svc.yml, type: svc }
+ - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
+ - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
+ register: coredns_manifests
+ vars:
+ clusterIP: "{{ skydns_server }}"
+ when:
+ - dns_mode in ['coredns', 'coredns_dual']
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - coredns
+
+- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
+ template:
+ src: "{{ item.src }}.j2"
+ dest: "{{ kube_config_dir }}/{{ item.file }}"
+ with_items:
+ - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
+ - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
+ register: coredns_secondary_manifests
+ vars:
+ clusterIP: "{{ skydns_server_secondary }}"
+ coredns_ordinal_suffix: "-secondary"
+ when:
+ - dns_mode == 'coredns_dual'
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - coredns
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index ce56bd5d10a..4c9ad5c7426 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -22,7 +22,7 @@
- name: Kubernetes Apps | Start dashboard
kube:
name: "{{ item.item.name }}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/ansible/tasks/kubedns.yml b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
new file mode 100644
index 00000000000..e7bf8298fa2
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
@@ -0,0 +1,42 @@
+---
+
+- name: Kubernetes Apps | Lay Down KubeDNS Template
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/{{ item.file }}"
+ with_items:
+ - { name: kube-dns, file: kubedns-sa.yml, type: sa }
+ - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
+ - { name: kube-dns, file: kubedns-svc.yml, type: svc }
+ - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
+ - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
+ - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
+ - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
+ register: kubedns_manifests
+ when:
+ - dns_mode in ['kubedns','dnsmasq_kubedns']
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - dnsmasq
+ - kubedns
+
+# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
+- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
+ command: >
+ {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
+ --patch='{
+ "rules": [
+ {
+ "apiGroups" : [""],
+ "resources" : ["endpoints", "services"],
+ "verbs": ["list", "watch", "get"]
+ }
+ ]
+ }'
+ when:
+ - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+ - inventory_hostname == groups['kube-master'][0]
+ - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+ tags:
+ - dnsmasq
+ - kubedns
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index a25d595ebb6..62169d27d49 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -11,84 +11,56 @@
delay: 2
when: inventory_hostname == groups['kube-master'][0]
-- name: Kubernetes Apps | Delete old kubedns resources
- kube:
- name: "kubedns"
- namespace: "{{ system_namespace }}"
- kubectl: "{{ bin_dir }}/kubectl"
- resource: "{{ item }}"
- state: absent
- with_items:
- - 'deploy'
- - 'svc'
+- name: Kubernetes Apps | Cleanup DNS
+ import_tasks: tasks/cleanup_dns.yml
+ when:
+ - inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
+ - dnsmasq
+ - coredns
+ - kubedns
-- name: Kubernetes Apps | Delete kubeadm kubedns
- kube:
- name: "kubedns"
- namespace: "{{ system_namespace }}"
- kubectl: "{{ bin_dir }}/kubectl"
- resource: "deploy"
- state: absent
+- name: Kubernetes Apps | CoreDNS
+ import_tasks: "tasks/coredns.yml"
when:
- - kubeadm_enabled|default(false)
- - kubeadm_init.changed|default(false)
+ - dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Lay Down KubeDNS Template
- template:
- src: "{{ item.file }}.j2"
- dest: "{{ kube_config_dir }}/{{ item.file }}"
- with_items:
- - { name: kube-dns, file: kubedns-sa.yml, type: sa }
- - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
- - { name: kube-dns, file: kubedns-svc.yml, type: svc }
- - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
- - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
- - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
- register: manifests
- when:
- - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- - rbac_enabled or item.type not in rbac_resources
tags:
- - dnsmasq
+ - coredns
-# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
-- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
- command: >
- {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
- --patch='{
- "rules": [
- {
- "apiGroups" : [""],
- "resources" : ["endpoints", "services"],
- "verbs": ["list", "watch", "get"]
- }
- ]
- }'
+- name: Kubernetes Apps | KubeDNS
+ import_tasks: "tasks/kubedns.yml"
when:
- - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+ - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+ - inventory_hostname == groups['kube-master'][0]
tags:
- dnsmasq
- name: Kubernetes Apps | Start Resources
kube:
name: "{{ item.item.name }}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
- with_items: "{{ manifests.results }}"
+ with_items:
+ - "{{ kubedns_manifests.results | default({}) }}"
+ - "{{ coredns_manifests.results | default({}) }}"
+ - "{{ coredns_secondary_manifests.results | default({}) }}"
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube-master'][0]
- not item|skipped
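+  # Retry a few times in case the apiserver is still settling when these resources are first applied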
+ register: resource_result
+ until: resource_result|succeeded
+ retries: 4
+ delay: 5
tags:
- dnsmasq
+ - coredns
+ - kubedns
- name: Kubernetes Apps | Netchecker
import_tasks: tasks/netchecker.yml
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index c07f07bba37..655ef744b0f 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Check if netchecker-server manifest already exists
stat:
- path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2"
+ path: "{{ kube_config_dir }}/netchecker-server-deployment.yml"
register: netchecker_server_manifest
tags:
- facts
@@ -20,22 +20,35 @@
tags:
- upgrade
+- name: Kubernetes Apps | Netchecker Templates list
+ set_fact:
+ netchecker_templates:
+ - {file: netchecker-agent-sa.yml, type: sa, name: netchecker-agent}
+ - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
+ - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
+ - {file: netchecker-server-sa.yml, type: sa, name: netchecker-server}
+ - {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server}
+ - {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
+ - {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
+ - {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
+ netchecker_templates_for_psp:
+ - {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy}
+ - {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent}
+ - {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent}
+
+- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
+ set_fact:
+ netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates}}"
+ when: podsecuritypolicy_enabled
+
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
- src: "{{item.file}}"
+ src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
- with_items:
- - {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- - {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- - {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
- - {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
- - {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
- - {file: netchecker-server-deployment.yml.j2, type: deployment, name: netchecker-server}
- - {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
+ with_items: "{{ netchecker_templates }}"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- - rbac_enabled or item.type not in rbac_resources
- name: Kubernetes Apps | Purge old Netchecker server
kube:
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2
new file mode 100644
index 00000000000..4136d603e98
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ addonmanager.kubernetes.io/mode: Reconcile
+ name: system:coredns
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - watch
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
new file mode 100644
index 00000000000..89becd5b44b
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ addonmanager.kubernetes.io/mode: EnsureExists
+ name: system:coredns
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:coredns
+subjects:
+- kind: ServiceAccount
+ name: coredns
+ namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
new file mode 100644
index 00000000000..34cd4b77e4d
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: coredns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: EnsureExists
+data:
+ Corefile: |
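+    # Serve the cluster domain from the Kubernetes API and forward everything else to the host resolvers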
+ .:53 {
+ errors
+ health
+ kubernetes {{ dns_domain }} in-addr.arpa ip6.arpa {
+ pods insecure
+ upstream /etc/resolv.conf
+ fallthrough in-addr.arpa ip6.arpa
+ }
+ prometheus :9153
+ proxy . /etc/resolv.conf
+ cache 30
+ }
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
new file mode 100644
index 00000000000..27c0576a1f7
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -0,0 +1,95 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: coredns{{ coredns_ordinal_suffix | default('') }}
+ namespace: kube-system
+ labels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "CoreDNS"
+spec:
+ replicas: {{ coredns_replicas }}
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 0
+ maxSurge: 10%
+ selector:
+ matchLabels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ template:
+ metadata:
+ labels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ serviceAccountName: coredns
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
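+      # Hard anti-affinity spreads the replicas across nodes; node affinity only prefers the masters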
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
+ containers:
+ - name: coredns
+ image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ resources:
+ # TODO: Set memory limits when we've profiled the container for large
+ # clusters, then set request = limit to keep this container in
+ # guaranteed class. Currently, this container falls into the
+        # "burstable" category so the kubelet doesn't back off from restarting it.
+ limits:
+ memory: {{ dns_memory_limit }}
+ requests:
+ cpu: {{ dns_cpu_requests }}
+ memory: {{ dns_memory_requests }}
+ args: [ "-conf", "/etc/coredns/Corefile" ]
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/coredns
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9153
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ dnsPolicy: Default
+ volumes:
+ - name: config-volume
+ configMap:
+ name: coredns
+ items:
+ - key: Corefile
+ path: Corefile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
new file mode 100644
index 00000000000..64d9c4dae27
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: coredns
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
new file mode 100644
index 00000000000..193de10eb97
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: coredns{{ coredns_ordinal_suffix | default('') }}
+ namespace: kube-system
+ labels:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "CoreDNS"
+spec:
+ selector:
+ k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+ clusterIP: {{ clusterIP }}
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index b1ba1481de2..41f6716e7ff 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -25,7 +25,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
- namespace: {{ system_namespace }}
+ namespace: kube-system
type: Opaque
---
@@ -37,7 +37,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
- namespace: {{ system_namespace }}
+ namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
@@ -46,7 +46,7 @@ kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
@@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
- namespace: {{ system_namespace }}
+ namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -89,7 +89,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
- namespace: {{ system_namespace }}
+ namespace: kube-system
---
# ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
@@ -103,7 +103,7 @@ rules:
resources: ["services/proxy"]
resourceNames: ["https:kubernetes-dashboard:"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
@@ -128,7 +128,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
- namespace: {{ system_namespace }}
+ namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
@@ -166,6 +166,7 @@ spec:
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
+ - --token-ttl={{ dashboard_token_ttl }}
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
@@ -199,8 +200,9 @@ apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
+ kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
- namespace: {{ system_namespace }}
+ namespace: kube-system
spec:
ports:
- port: 443
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
index f80d3d90c17..e29ed4dac54 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
@@ -17,7 +17,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cluster-proportional-autoscaler
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups: [""]
resources: ["nodes"]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
index eb76f2d4ecb..3b11c6b9fcb 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cluster-proportional-autoscaler
- namespace: {{ system_namespace }}
+ namespace: kube-system
subjects:
- kind: ServiceAccount
name: cluster-proportional-autoscaler
- namespace: {{ system_namespace }}
+ namespace: kube-system
roleRef:
kind: ClusterRole
name: cluster-proportional-autoscaler
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
index 542ae86cec4..4c440f653f1 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
@@ -17,4 +17,4 @@ kind: ServiceAccount
apiVersion: v1
metadata:
name: cluster-proportional-autoscaler
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index df92ee6156b..e726e8d2a7f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubedns-autoscaler
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: kubedns-autoscaler
kubernetes.io/cluster-service: "true"
@@ -28,9 +28,29 @@ spec:
labels:
k8s-app: kubedns-autoscaler
spec:
+      # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
- operator: Exists
+ operator: Equal
+ key: node-role.kubernetes.io/master
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: kubedns-autoscaler
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
containers:
- name: autoscaler
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
@@ -40,13 +60,11 @@ spec:
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- - --namespace={{ system_namespace }}
+ - --namespace=kube-system
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
-{% if rbac_enabled %}
serviceAccountName: cluster-proportional-autoscaler
-{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index 682bdf49130..96ef72283ef 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
- namespace: "{{system_namespace}}"
+ namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
@@ -27,11 +27,31 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
+      # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- - effect: NoSchedule
- operator: Exists
+ - effect: "NoSchedule"
+ operator: "Equal"
+ key: "node-role.kubernetes.io/master"
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - "true"
volumes:
- name: kube-dns-config
configMap:
@@ -110,6 +130,7 @@ spec:
- --
- -k
- --cache-size=1000
+ - --dns-loop-detect
- --log-facility=-
- --server=/{{ dns_domain }}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@@ -154,6 +175,4 @@ spec:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
-{% if rbac_enabled %}
serviceAccountName: kube-dns
-{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
index f399fd6f4b4..296a3a93820 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
index 1c4710db13b..6bc5f9240e1 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
kind: Service
metadata:
name: kube-dns
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
index 4f32214ebd9..a2c4850c442 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
@@ -15,6 +15,9 @@ spec:
tolerations:
- effect: NoSchedule
operator: Exists
+      # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
containers:
- name: netchecker-agent
image: "{{ agent_img }}"
@@ -40,6 +43,10 @@ spec:
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
+ securityContext:
+ runAsUser: {{ netchecker_agent_user | default('0') }}
+ runAsGroup: {{ netchecker_agent_group | default('0') }}
+ serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2
new file mode 100644
index 00000000000..f364b428388
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2
@@ -0,0 +1,14 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:netchecker-agent-hostnet
+ namespace: {{ netcheck_namespace }}
+rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - netchecker-agent-hostnet
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2
new file mode 100644
index 00000000000..766faa28cb5
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2
@@ -0,0 +1,13 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:netchecker-agent-hostnet
+ namespace: {{ netcheck_namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: netchecker-agent-hostnet
+ namespace: {{ netcheck_namespace }}
+roleRef:
+ kind: ClusterRole
+ name: psp:netchecker-agent-hostnet
+ apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
index 76fca481283..f046e8f4b58 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
@@ -13,6 +13,9 @@ spec:
app: netchecker-agent-hostnet
spec:
hostNetwork: True
+      # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
@@ -44,6 +47,10 @@ spec:
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
+ securityContext:
+ runAsUser: {{ netchecker_agent_user | default('0') }}
+ runAsGroup: {{ netchecker_agent_group | default('0') }}
+ serviceAccountName: netchecker-agent
updateStrategy:
rollingUpdate:
maxUnavailable: 100%
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2
new file mode 100644
index 00000000000..32fb0c1a0e1
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-psp.yml.j2
@@ -0,0 +1,45 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: netchecker-agent-hostnet
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
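+  # hostNetwork is allowed because the hostnet agent needs it; everything else stays locked down (non-root, all capabilities dropped)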
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: true
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAsNonRoot'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2
new file mode 100644
index 00000000000..d842faa6c07
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: netchecker-agent
+ namespace: {{ netcheck_namespace }}
+ labels:
+ kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
index 7a8c1d2731d..19bdc8b1fa1 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
@@ -7,3 +7,6 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ['*']
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index 6e2738e6fc7..1a858683d2a 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -23,6 +23,9 @@ spec:
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
+ securityContext:
+ runAsUser: {{ netchecker_server_user | default('0') }}
+ runAsGroup: {{ netchecker_server_group | default('0') }}
ports:
- containerPort: 8081
args:
@@ -33,6 +36,4 @@ spec:
tolerations:
- effect: NoSchedule
operator: Exists
-{% if rbac_enabled %}
serviceAccountName: netchecker-server
-{% endif %}
diff --git a/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml
deleted file mode 100644
index 9a3bca1ef81..00000000000
--- a/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner
-cephfs_provisioner_image_tag: 92295a30
-
-cephfs_provisioner_namespace: "{{ system_namespace }}"
-cephfs_provisioner_cluster: ceph
-cephfs_provisioner_monitors: []
-cephfs_provisioner_admin_id: admin
-cephfs_provisioner_secret: secret
-cephfs_provisioner_storage_class: cephfs
diff --git a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml
deleted file mode 100644
index 6e854f05ea7..00000000000
--- a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-
-- name: CephFS Provisioner | Create addon dir
- file:
- path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
- owner: root
- group: root
- mode: 0755
- recurse: true
-
-- name: CephFS Provisioner | Create manifests
- template:
- src: "{{ item.file }}.j2"
- dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
- with_items:
- - { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- - { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- - { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- - { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- - { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: cephfs-provisioner-deploy, file: cephfs-provisioner-deploy.yml, type: deploy }
- - { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- - { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
- register: cephfs_manifests
- when: inventory_hostname == groups['kube-master'][0]
-
-- name: CephFS Provisioner | Apply manifests
- kube:
- name: "{{ item.item.name }}"
- namespace: "{{ system_namespace }}"
- kubectl: "{{ bin_dir }}/kubectl"
- resource: "{{ item.item.type }}"
- filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
- state: "latest"
- with_items: "{{ cephfs_manifests.results }}"
- when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml b/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml
new file mode 100644
index 00000000000..ccfb7007712
--- /dev/null
+++ b/roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+oci_security_list_management: All
+oci_use_instance_principals: false
+oci_cloud_controller_version: 0.5.0
diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml
new file mode 100644
index 00000000000..b6098686f96
--- /dev/null
+++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/credentials-check.yml
@@ -0,0 +1,56 @@
+---
+
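+# The API signing key variables below are only checked when oci_use_instance_principals is false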
+- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
+ fail:
+ msg: "oci_private_key is missing"
+ when: (oci_use_instance_principals == false) and
+ (oci_private_key is not defined or oci_private_key == "")
+
+- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
+ fail:
+ msg: "oci_region_id is missing"
+ when: (oci_use_instance_principals == false) and
+ (oci_region_id is not defined or oci_region_id == "")
+
+- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
+ fail:
+ msg: "oci_tenancy_id is missing"
+ when: (oci_use_instance_principals == false) and
+ (oci_tenancy_id is not defined or oci_tenancy_id == "")
+
+- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
+ fail:
+ msg: "oci_user_id is missing"
+ when: (oci_use_instance_principals == false) and
+ (oci_user_id is not defined or oci_user_id == "")
+
+- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
+ fail:
+ msg: "oci_user_fingerprint is missing"
+ when: (oci_use_instance_principals == false) and
+ (oci_user_fingerprint is not defined or oci_user_fingerprint == "")
+
+- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
+ fail:
+ msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
+ when: oci_compartment_id is not defined or oci_compartment_id == ""
+
+- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
+ fail:
+    msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
+ when: oci_vnc_id is not defined or oci_vnc_id == ""
+
+- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
+ fail:
+ msg: "oci_subnet1_id is missing. This is the first subnet to which loadbalancers will be added"
+ when: oci_subnet1_id is not defined or oci_subnet1_id == ""
+
+- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
+ fail:
+ msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
+ when: oci_subnet2_id is not defined or oci_subnet2_id == ""
+
+- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
+ fail:
+ msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
+ when: oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]
\ No newline at end of file
diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
new file mode 100644
index 00000000000..37e5962d3dc
--- /dev/null
+++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+
+- include: credentials-check.yml
+ tags: oci
+
+- name: "OCI Cloud Controller | Generate Configuration"
+ template:
+ src: controller-manager-config.yml.j2
+ dest: /tmp/controller-manager-config.yml
+ register: controller_manager_config
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
+
+- name: "OCI Cloud Controller | Encode Configuration"
+ set_fact:
+ controller_manager_config_base64: "{{ lookup('file', '/tmp/controller-manager-config.yml') | b64encode }}"
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
+
+- name: "OCI Cloud Controller | Apply Configuration To Secret"
+ template:
+ src: cloud-provider.yml.j2
+ dest: /tmp/cloud-provider.yml
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
+
+- name: "OCI Cloud Controller | Apply Configuration"
+ kube:
+ kubectl: "{{ bin_dir }}/kubectl"
+ filename: "/tmp/cloud-provider.yml"
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
+
+- name: "OCI Cloud Controller | Download Controller Manifest"
+ get_url:
+ url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{oci_cloud_controller_version}}/manifests/oci-cloud-controller-manager.yaml"
+ dest: "/tmp/oci-cloud-controller-manager.yml"
+ force: yes
+ register: result
+ until: "'OK' in result.msg"
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
+
+- name: "OCI Cloud Controller | Apply Controller Manifest"
+ kube:
+ kubectl: "{{ bin_dir }}/kubectl"
+ filename: "/tmp/oci-cloud-controller-manager.yml"
+ when: inventory_hostname == groups['kube-master'][0]
+ tags: oci
diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/cloud-provider.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/cloud-provider.yml.j2
new file mode 100644
index 00000000000..bff3ab43b60
--- /dev/null
+++ b/roles/kubernetes-apps/cloud_controller/oci/templates/cloud-provider.yml.j2
@@ -0,0 +1,8 @@
+apiVersion: v1
+data:
+ cloud-provider.yaml: {{ controller_manager_config_base64 }}
+kind: Secret
+metadata:
+ name: oci-cloud-controller-manager
+ namespace: kube-system
+type: Opaque
diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2
new file mode 100644
index 00000000000..38c7ba86ce2
--- /dev/null
+++ b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2
@@ -0,0 +1,56 @@
+auth:
+
+{% if oci_use_instance_principals %}
+ # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
+  # Ensure you have set up the following OCI policies and that your Kubernetes nodes belong to the dynamic group they reference:
+ # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
+ # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
+ # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
+ useInstancePrincipals: true
+{% else %}
+ useInstancePrincipals: false
+
+ region: {{ oci_region_id }}
+ tenancy: {{ oci_tenancy_id }}
+ user: {{ oci_user_id }}
+ key: |
+ {{ oci_private_key }}
+
+ {% if oci_private_key_passphrase is defined %}
+ passphrase: {{ oci_private_key_passphrase }}
+ {% endif %}
+
+
+ fingerprint: {{ oci_user_fingerprint }}
+{% endif %}
+
+# compartment configures Compartment within which the cluster resides.
+compartment: {{ oci_compartment_id }}
+
+# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
+vcn: {{ oci_vnc_id }}
+
+loadBalancer:
+ # subnet1 configures one of two subnets to which load balancers will be added.
+ # OCI load balancers require two subnets to ensure high availability.
+ subnet1: {{ oci_subnet1_id }}
+
+ # subnet2 configures the second of two subnets to which load balancers will be
+ # added. OCI load balancers require two subnets to ensure high availability.
+ subnet2: {{ oci_subnet2_id }}
+
+ # SecurityListManagementMode configures how security lists are managed by the CCM.
+ # "All" (default): Manage all required security list rules for load balancer services.
+ # "Frontend": Manage only security list rules for ingress to the load
+ # balancer. Requires that the user has setup a rule that
+ # allows inbound traffic to the appropriate ports for kube
+ # proxy health port, node port ranges, and health check port ranges.
+ # E.g. 10.82.0.0/16 30000-32000.
+ # "None": Disables all security list management. Requires that the
+ # user has setup a rule that allows inbound traffic to the
+ # appropriate ports for kube proxy health port, node port
+ # ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
+  #                 Additionally requires the user to manage rules to allow
+ # inbound traffic to load balancers.
+ securityListManagementMode: {{ oci_security_list_management }}
+
diff --git a/roles/kubernetes-apps/cluster_roles/defaults/main.yml b/roles/kubernetes-apps/cluster_roles/defaults/main.yml
new file mode 100644
index 00000000000..2a99fc0fd7e
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+oci_cloud_controller_version: 0.5.0
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 3f696a9fec1..229e497e42a 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -11,12 +11,54 @@
delay: 6
when: inventory_hostname == groups['kube-master'][0]
+- name: Kubernetes Apps | Check AppArmor status
+ command: which apparmor_parser
+ register: apparmor_status
+ when:
+ - podsecuritypolicy_enabled
+ - inventory_hostname == groups['kube-master'][0]
+ failed_when: false
+
+- name: Kubernetes Apps | Set apparmor_enabled
+ set_fact:
+ apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
+ when:
+ - podsecuritypolicy_enabled
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Render templates for PodSecurityPolicy
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/{{ item.file }}"
+ register: psp_manifests
+ with_items:
+ - {file: psp.yml, type: psp, name: psp}
+ - {file: psp-cr.yml, type: clusterrole, name: psp-cr}
+ - {file: psp-crb.yml, type: rolebinding, name: psp-crb}
+ when:
+ - podsecuritypolicy_enabled
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
+ kube:
+ name: "{{item.item.name}}"
+ kubectl: "{{bin_dir}}/kubectl"
+ resource: "{{item.item.type}}"
+ filename: "{{kube_config_dir}}/{{item.item.file}}"
+ state: "latest"
+ with_items: "{{ psp_manifests.results }}"
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ - not item|skipped
+
- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
template:
src: "node-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-crb.yml"
register: node_crb_manifest
- when: rbac_enabled
+ when:
+ - rbac_enabled
+ - inventory_hostname == groups['kube-master'][0]
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
kube:
@@ -28,6 +70,7 @@
when:
- rbac_enabled
- node_crb_manifest.changed
+ - inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
template:
@@ -37,6 +80,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
+ - inventory_hostname == groups['kube-master'][0]
tags: node-webhook
- name: Apply webhook ClusterRole
@@ -50,6 +94,7 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_cr_manifest.changed
+ - inventory_hostname == groups['kube-master'][0]
tags: node-webhook
- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
@@ -60,6 +105,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
+ - inventory_hostname == groups['kube-master'][0]
tags: node-webhook
- name: Grant system:nodes the webhook ClusterRole
@@ -73,33 +119,58 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_crb_manifest.changed
+ - inventory_hostname == groups['kube-master'][0]
tags: node-webhook
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
+- name: Check if vsphere-cloud-provider ClusterRole exists
+ command: "{{ bin_dir }}/kubectl get clusterroles system:vsphere-cloud-provider"
+ register: vsphere_cloud_provider
+ ignore_errors: true
+ when:
+ - rbac_enabled
+ - cloud_provider is defined
+ - cloud_provider == 'vsphere'
+ - kube_version | version_compare('v1.9.0', '>=')
+ - kube_version | version_compare('v1.9.3', '<=')
+ - inventory_hostname == groups['kube-master'][0]
+ tags: vsphere
+
+- name: Write vsphere-cloud-provider ClusterRole manifest
template:
- src: namespace.j2
- dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
- when: inventory_hostname == groups['kube-master'][0]
- tags:
- - apps
+ src: "vsphere-rbac.yml.j2"
+ dest: "{{ kube_config_dir }}/vsphere-rbac.yml"
+ register: vsphere_rbac_manifest
+ when:
+ - rbac_enabled
+ - cloud_provider is defined
+ - cloud_provider == 'vsphere'
+ - vsphere_cloud_provider.rc is defined
+ - vsphere_cloud_provider.rc != 0
+ - kube_version | version_compare('v1.9.0', '>=')
+ - kube_version | version_compare('v1.9.3', '<=')
+ - inventory_hostname == groups['kube-master'][0]
+ tags: vsphere
-- name: Check if kube system namespace exists
- command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
- register: 'kubesystem'
- changed_when: False
- failed_when: False
- when: inventory_hostname == groups['kube-master'][0]
- tags:
- - apps
+- name: Apply vsphere-cloud-provider ClusterRole
+ kube:
+ name: "system:vsphere-cloud-provider"
+ kubectl: "{{bin_dir}}/kubectl"
+ resource: "clusterrolebinding"
+ filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
+ state: latest
+ when:
+ - rbac_enabled
+ - cloud_provider is defined
+ - cloud_provider == 'vsphere'
+ - vsphere_cloud_provider.rc is defined
+ - vsphere_cloud_provider.rc != 0
+ - kube_version | version_compare('v1.9.0', '>=')
+ - kube_version | version_compare('v1.9.3', '<=')
+ - inventory_hostname == groups['kube-master'][0]
+ tags: vsphere
-- name: Create kube system namespace
- command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- register: create_system_ns
- until: create_system_ns.rc == 0
- changed_when: False
- when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
- tags:
- - apps
+- include_tasks: oci.yml
+ tags: oci
+ when:
+ - cloud_provider is defined
+ - cloud_provider == 'oci'
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
new file mode 100644
index 00000000000..fb89a85e82f
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -0,0 +1,23 @@
+---
+- name: Get OCI ClusterRole, and ClusterRoleBinding
+ get_url:
+ url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{oci_cloud_controller_version}}/manifests/oci-cloud-controller-manager-rbac.yaml"
+ dest: "/tmp/oci-cloud-controller-manager-rbac.yaml"
+ force: yes
+ register: result
+ until: "'OK' in result.msg"
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - cloud_provider is defined
+ - cloud_provider == 'oci'
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Apply OCI ClusterRole, and ClusterRoleBinding
+ kube:
+ kubectl: "{{bin_dir}}/kubectl"
+ filename: "/tmp/oci-cloud-controller-manager-rbac.yaml"
+ when:
+ - cloud_provider is defined
+ - cloud_provider == 'oci'
+ - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
index 9bdf201a21a..f2e115a6acf 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
- name: "{{system_namespace}}"
+ name: "kube-system"
diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp-cr.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/psp-cr.yml.j2
new file mode 100644
index 00000000000..9fa333ddc62
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/psp-cr.yml.j2
@@ -0,0 +1,35 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:privileged
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:restricted
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - restricted
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp-crb.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/psp-crb.yml.j2
new file mode 100644
index 00000000000..6cade2883b7
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/psp-crb.yml.j2
@@ -0,0 +1,55 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: psp:any:restricted
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:restricted
+subjects:
+- kind: Group
+ name: system:authenticated
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:kube-system:privileged
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: Group
+ name: system:masters
+ apiGroup: rbac.authorization.k8s.io
+- kind: Group
+ name: system:serviceaccounts:kube-system
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:nodes:privileged
+ namespace: kube-system
+ annotations:
+ kubernetes.io/description: 'Allow nodes to create privileged pods. Should
+ be used in combination with the NodeRestriction admission plugin to limit
+ nodes to mirror pods bound to themselves.'
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/cluster-service: 'true'
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+ - kind: Group
+ apiGroup: rbac.authorization.k8s.io
+ name: system:nodes
+ - kind: User
+ apiGroup: rbac.authorization.k8s.io
+ # Legacy node ID
+ name: kubelet
diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/psp.yml.j2
new file mode 100644
index 00000000000..a9d32a6e6bb
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/psp.yml.j2
@@ -0,0 +1,77 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: restricted
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAsNonRoot'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: privileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - '*'
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
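
As context for the restricted policy above: a workload admitted under it must run as non-root, must not allow privilege escalation, must drop all capabilities, and may only use the whitelisted volume types. A minimal pod sketch that would satisfy those constraints (illustrative only, not part of this change):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: psp-restricted-example
spec:
  securityContext:
    runAsNonRoot: true          # required by the MustRunAsNonRoot rule
    runAsUser: 1000
    fsGroup: 2000               # within the MustRunAs range 1-65535
  containers:
    - name: app
      image: busybox:1.29
      command: ["sleep", "3600"]
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop: ["ALL"]
      volumeMounts:
        - name: scratch
          mountPath: /scratch
  volumes:
    - name: scratch
      emptyDir: {}              # emptyDir is one of the whitelisted volume types
```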
diff --git a/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2
new file mode 100644
index 00000000000..99da0462f80
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2
@@ -0,0 +1,35 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: system:vsphere-cloud-provider
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: system:vsphere-cloud-provider
+roleRef:
+ kind: ClusterRole
+ name: system:vsphere-cloud-provider
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+ name: vsphere-cloud-provider
+ namespace: kube-system
diff --git a/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml b/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml
new file mode 100644
index 00000000000..c82c5d86bd4
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/meta/main.yml
@@ -0,0 +1,8 @@
+---
+dependencies:
+ - role: kubernetes-apps/container_engine_accelerator/nvidia_gpu
+ when: nvidia_accelerator_enabled
+ tags:
+ - apps
+ - nvidia_gpu
+ - container_engine_accelerator
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml
new file mode 100644
index 00000000000..34aea1c473c
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+nvidia_accelerator_enabled: false
+nvidia_driver_version: "390.87"
+nvidia_gpu_tesla_base_url: https://us.download.nvidia.com/tesla/
+nvidia_gpu_gtx_base_url: http://us.download.nvidia.com/XFree86/Linux-x86_64/
+nvidia_gpu_flavor: tesla
+nvidia_url_end: "{{nvidia_driver_version}}/NVIDIA-Linux-x86_64-{{nvidia_driver_version}}.run"
+nvidia_driver_install_container: false
+nvidia_driver_install_supported: false
+nvidia_gpu_nodes: []
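
A hedged example of how these defaults might be overridden in inventory group_vars to turn the accelerator role on; the node name is a placeholder and the variable file path is an assumption, not something introduced by this change:

```yaml
# e.g. inventory/sample/group_vars/k8s-cluster.yml (illustrative)
nvidia_accelerator_enabled: true
nvidia_gpu_flavor: tesla          # "tesla" or "gtx" selects the driver download base URL
nvidia_driver_version: "390.87"
nvidia_gpu_nodes:
  - kube-gpu-001
```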
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
new file mode 100644
index 00000000000..50822be7d46
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
@@ -0,0 +1,54 @@
+---
+
+- name: Container Engine Acceleration Nvidia GPU | Gather OS specific variables
+ include_vars: "{{ item }}"
+ with_first_found:
+ - files:
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
+ - "{{ ansible_distribution|lower }}.yml"
+ - "{{ ansible_os_family|lower }}.yml"
+ skip: true
+
+- name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
+ set_fact:
+ nvidia_driver_download_url_default: "{{nvidia_gpu_tesla_base_url}}{{nvidia_url_end}}"
+ when: nvidia_gpu_flavor|lower == "tesla"
+
+- name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
+ set_fact:
+ nvidia_driver_download_url_default: "{{nvidia_gpu_gtx_base_url}}{{nvidia_url_end}}"
+ when: nvidia_gpu_flavor|lower == "gtx"
+
+- name: Container Engine Acceleration Nvidia GPU | Create addon dir
+ file:
+ path: "{{ kube_config_dir }}/addons/container_engine_accelerator"
+ owner: root
+ group: root
+ mode: 0755
+ recurse: true
+
+- name: Container Engine Acceleration Nvidia GPU | Create manifests for nvidia accelerators
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.file }}"
+ with_items:
+ - { name: nvidia-driver-install-daemonset, file: nvidia-driver-install-daemonset.yml, type: daemonset }
+ - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
+ register: container_engine_accelerator_manifests
+ when:
+ - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container
+
+- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
+ kube:
+ name: "{{ item.item.name }}"
+ namespace: "kube-system"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item.item.type }}"
+ filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}"
+ state: "latest"
+ with_items:
+ - "{{container_engine_accelerator_manifests.results}}"
+ when:
+ - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2
new file mode 100644
index 00000000000..84f440442ee
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2
@@ -0,0 +1,61 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: nvidia-gpu-device-plugin
+ namespace: kube-system
+ labels:
+ k8s-app: nvidia-gpu-device-plugin
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nvidia-gpu-device-plugin
+ template:
+ metadata:
+ labels:
+ k8s-app: nvidia-gpu-device-plugin
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ priorityClassName: system-node-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: "nvidia.com/gpu"
+ operator: Exists
+ tolerations:
+ - operator: "Exists"
+ effect: "NoExecute"
+ - operator: "Exists"
+ effect: "NoSchedule"
+ hostNetwork: true
+ hostPID: true
+ volumes:
+ - name: device-plugin
+ hostPath:
+ path: /var/lib/kubelet/device-plugins
+ - name: dev
+ hostPath:
+ path: /dev
+ containers:
+ - image: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
+ command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
+ name: nvidia-gpu-device-plugin
+ resources:
+ requests:
+ cpu: 50m
+ memory: 10Mi
+ limits:
+ cpu: 50m
+ memory: 10Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: device-plugin
+ mountPath: /device-plugin
+ - name: dev
+ mountPath: /dev
+ updateStrategy:
+ type: RollingUpdate
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2
new file mode 100644
index 00000000000..a1adede5a29
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2
@@ -0,0 +1,80 @@
+# Copyright 2017 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: nvidia-driver-installer
+ namespace: kube-system
+spec:
+ template:
+ metadata:
+ labels:
+ name: nvidia-driver-installer
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ priorityClassName: system-node-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: "nvidia.com/gpu"
+ operator: Exists
+ tolerations:
+ - key: "nvidia.com/gpu"
+ effect: "NoSchedule"
+ operator: "Exists"
+ hostNetwork: true
+ hostPID: true
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: nvidia-install-dir-host
+ hostPath:
+ path: /home/kubernetes/bin/nvidia
+ - name: root-mount
+ hostPath:
+ path: /
+ initContainers:
+ - image: "{{nvidia_driver_install_container}}"
+ name: nvidia-driver-installer
+ resources:
+ requests:
+ cpu: 0.15
+ securityContext:
+ privileged: true
+ env:
+ - name: NVIDIA_INSTALL_DIR_HOST
+ value: /home/kubernetes/bin/nvidia
+ - name: NVIDIA_INSTALL_DIR_CONTAINER
+ value: /usr/local/nvidia
+ - name: ROOT_MOUNT_DIR
+ value: /root
+ - name: NVIDIA_DRIVER_VERSION
+ value: "{{nvidia_driver_version}}"
+ - name: NVIDIA_DRIVER_DOWNLOAD_URL
+ value: "{{nvidia_driver_download_url_default}}"
+ volumeMounts:
+ - name: nvidia-install-dir-host
+ mountPath: /usr/local/nvidia
+ - name: dev
+ mountPath: /dev
+ - name: root-mount
+ mountPath: /root
+ containers:
+ - image: "gcr.io/google-containers/pause:2.0"
+ name: pause
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml
new file mode 100644
index 00000000000..5f6adfde759
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml
@@ -0,0 +1,3 @@
+---
+nvidia_driver_install_container: atzedevries/nvidia-centos-driver-installer:2
+nvidia_driver_install_supported: true
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml
new file mode 100644
index 00000000000..04b9e0ac9be
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml
@@ -0,0 +1,3 @@
+---
+nvidia_driver_install_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:eea7309dc4fa4a5c9d716157e74b90826e0a853aa26c7219db4710ddcd1ad8bc
+nvidia_driver_install_supported: true
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml
new file mode 100644
index 00000000000..04b9e0ac9be
--- /dev/null
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml
@@ -0,0 +1,3 @@
+---
+nvidia_driver_install_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:eea7309dc4fa4a5c9d716157e74b90826e0a853aa26c7219db4710ddcd1ad8bc
+nvidia_driver_install_supported: true
diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
index 8abbe231711..888cbd189ce 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
@@ -7,15 +7,13 @@
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true
- when: rbac_enabled
- name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
- command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
with_items:
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
run_once: true
- when: rbac_enabled
- name: "ElasticSearch | Write ES deployment"
template:
@@ -24,7 +22,7 @@
register: es_deployment_manifest
- name: "ElasticSearch | Create ES deployment"
- command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
run_once: true
when: es_deployment_manifest.changed
@@ -35,6 +33,6 @@
register: es_service_manifest
- name: "ElasticSearch | Create ES service"
- command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
run_once: true
when: es_service_manifest.changed
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index a5aba61aef5..4b9ab006737 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -1,13 +1,16 @@
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
- namespace: {{ system_namespace }}
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk
- namespace: {{ system_namespace }}
+ namespace: kube-system
roleRef:
kind: ClusterRole
name: cluster-admin
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index e79e26be87f..01e774e9666 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: efk
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 6d5382e09ad..ad1adc536c6 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -1,15 +1,17 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
-kind: Deployment
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
metadata:
- name: elasticsearch-logging-v1
- namespace: "{{ system_namespace }}"
+ name: elasticsearch-logging
+ namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:
@@ -30,12 +32,12 @@ spec:
limits:
cpu: {{ elasticsearch_cpu_limit }}
{% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
- mem: {{ elasticsearch_mem_limit }}
+ memory: "{{ elasticsearch_mem_limit }}"
{% endif %}
requests:
cpu: {{ elasticsearch_cpu_requests }}
{% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
- mem: {{ elasticsearch_mem_requests }}
+ memory: "{{ elasticsearch_mem_requests }}"
{% endif %}
ports:
- containerPort: 9200
@@ -50,7 +52,11 @@ spec:
volumes:
- name: es-persistent-storage
emptyDir: {}
-{% if rbac_enabled %}
serviceAccountName: efk
-{% endif %}
+ initContainers:
+ - image: alpine:3.6
+ command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+ name: elasticsearch-logging-init
+ securityContext:
+ privileged: true
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
index b7558f9d938..789ecb215a0 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
kind: Service
metadata:
name: elasticsearch-logging
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
index e8d93732c57..0305a5f7a7c 100644
--- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
@@ -1,7 +1,7 @@
---
fluentd_cpu_limit: 0m
-fluentd_mem_limit: 200Mi
+fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
-fluentd_config_dir: /etc/kubernetes/fluentd
-fluentd_config_file: fluentd.conf
+fluentd_config_dir: /etc/fluent/config.d
+# fluentd_config_file: fluentd.conf
diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
index c91bf68276e..f444c79b62f 100644
--- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
@@ -17,6 +17,6 @@
register: fluentd_ds_manifest
- name: "Fluentd | Create fluentd daemonset"
- command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
run_once: true
when: fluentd_ds_manifest.changed
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index 8a8ebbceca8..0b0229f69fd 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -1,10 +1,19 @@
+---
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
data:
- {{ fluentd_config_file }}: |
+  system.conf: |-
+    <system>
+      root_dir /tmp/fluentd-buffers/
+    </system>
+
+ containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
@@ -18,7 +27,6 @@ data:
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
- # Maintainer: Jimmi Dyson
#
# Example
# =======
@@ -99,63 +107,87 @@ data:
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
- #
- # TODO: Propagate the labels associated with a container along with its logs
- # so users can query logs using labels as well as or instead of the pod name
- # and container name. This is simply done via configuration of the Kubernetes
- # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
- # problem yet to be solved as secrets are not usable in static pods which the fluentd
- # pod must be until a per-node controller is available in Kubernetes.
- # Prevent fluentd from handling records containing its own logs. Otherwise
- # it can lead to an infinite loop, when error in sending one message generates
- # another message which also fails to be sent and so on.
-
- type null
-
- # Example:
+
+ # Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+ # CRI Log Example:
+ # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
+
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @id raw.kubernetes
+      @type detect_exceptions
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+
+ system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
+
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
+
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+ # TODO(random-liu): Remove this after cri container runtime rolls out.
+
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
+
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
+
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
+
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
+
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
+
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
+
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
+
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+
+ # Logs from systemd-journal for interesting services.
+ # TODO(random-liu): Remove this after cri container runtime rolls out.
+
+
+ #
+
+
+
+
+
+  forward.input.conf: |-
+    # Takes the messages sent over TCP
+    <source>
+      @type forward
+    </source>
+
+  monitoring.conf: |-
+    # Prometheus Exporter Plugin
+    # input plugin that exports metrics
+    <source>
+      @type prometheus
+    </source>
+
+    <source>
+      @type monitor_agent
+    </source>
+
+    # input plugin that collects metrics from MonitorAgent
+    <source>
+      @type prometheus_monitor
+    </source>
+
+    # input plugin that collects metrics for output plugin
+    <source>
+      @type prometheus_output_monitor
+    </source>
+
+    # input plugin that collects metrics for in_tail plugin
+    <source>
+      @type prometheus_tail_monitor
+    </source>
+
+  output.conf: |-
+    # Enriches records with Kubernetes metadata
+    <filter kubernetes.**>
- type kubernetes_metadata
+      @type kubernetes_metadata
+    </filter>
- ## Prometheus Exporter Plugin
- ## input plugin that exports metrics
- #
- #
- #
- ## input plugin that collects metrics from MonitorAgent
- #
- ## input plugin that collects metrics for output plugin
- #
- ## input plugin that collects metrics for in_tail plugin
- #
+
- type elasticsearch
- user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
- password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
- log_level info
- include_tag_key true
- host elasticsearch-logging
- port 9200
- logstash_format true
- # Set the chunk limit the same as for fluentd-gcp.
- buffer_chunk_limit 2M
- # Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
- buffer_queue_limit 32
- flush_interval 5s
- # Never wait longer than 5 minutes between retries.
- max_retry_wait 30
- # Disable the limit on the number of retries (retry forever).
- disable_retry_limit
- # Use multiple threads for processing.
- num_threads 8
-
+    <match **>
+      @id elasticsearch
+      @type elasticsearch
+      @log_level info
+      include_tag_key true
+      host elasticsearch-logging
+      port 9200
+      logstash_format true
+      <buffer>
+        @type file
+        path /var/log/fluentd-buffers/kubernetes.system.buffer
+        flush_mode interval
+        retry_type exponential_backoff
+        flush_thread_count 2
+        flush_interval 5s
+        retry_forever
+        retry_max_interval 30
+        chunk_limit_size 2M
+        queue_limit_length 8
+        overflow_action block
+      </buffer>
+    </match>
\ No newline at end of file
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index 960a79e89e7..03b118f8d70 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -1,32 +1,43 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
+apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: "fluentd-es-v{{ fluentd_version }}"
- namespace: "{{ system_namespace }}"
+ name: "fluentd-es-{{ fluentd_version }}"
+ namespace: "kube-system"
labels:
k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ version: "{{ fluentd_version }}"
+ # This annotation ensures that fluentd does not get evicted if the node
+ # supports critical pod annotation based priority scheme.
+ # Note that this does not guarantee admission on the nodes (#40573).
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
- tolerations:
- - effect: NoSchedule
- operator: Exists
+ priorityClassName: system-node-critical
+ serviceAccountName: efk
+      # Without this nodeSelector, this pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
- command:
- - '/bin/sh'
- - '-c'
- - '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
+ env:
+ - name: FLUENTD_ARGS
+ value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -34,27 +45,24 @@ spec:
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
- cpu: {{ fluentd_cpu_requests }}
+ cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- - name: config
+ - name: config-volume
mountPath: "{{ fluentd_config_dir }}"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- - name: config
- configMap:
+ - name: config-volume
+ configMap:
name: fluentd-config
-{% if rbac_enabled %}
- serviceAccountName: efk
-{% endif %}
diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
index baf07cdf23e..c76e3e71009 100644
--- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
@@ -4,4 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
-kibana_base_url: "/api/v1/proxy/namespaces/kube-system/services/kibana-logging"
+kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
index ea85682864b..424b313b80c 100644
--- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
@@ -10,7 +10,7 @@
filename: "{{kube_config_dir}}/kibana-deployment.yaml"
kubectl: "{{bin_dir}}/kubectl"
name: "kibana-logging"
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
resource: "deployment"
state: "latest"
with_items: "{{ kibana_deployment_manifest.changed }}"
@@ -27,7 +27,7 @@
filename: "{{kube_config_dir}}/kibana-service.yaml"
kubectl: "{{bin_dir}}/kubectl"
name: "kibana-logging"
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
resource: "svc"
state: "latest"
with_items: "{{ kibana_service_manifest.changed }}"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c48413bd0e8..b9c875be686 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -1,10 +1,10 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
+apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
@@ -26,25 +26,24 @@ spec:
limits:
cpu: {{ kibana_cpu_limit }}
{% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
- mem: {{ kibana_mem_limit }}
+ memory: "{{ kibana_mem_limit }}"
{% endif %}
requests:
cpu: {{ kibana_cpu_requests }}
{% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
- mem: {{ kibana_mem_requests }}
+ memory: "{{ kibana_mem_requests }}"
{% endif %}
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
-{% if kibana_base_url is defined and kibana_base_url != "" %}
- - name: "KIBANA_BASE_URL"
+ - name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
-{% endif %}
+ - name: XPACK_MONITORING_ENABLED
+ value: "false"
+ - name: XPACK_SECURITY_ENABLED
+ value: "false"
ports:
- containerPort: 5601
name: ui
protocol: TCP
-{% if rbac_enabled %}
serviceAccountName: efk
-{% endif %}
-
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
index 241b896f051..5cff3c62809 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
kind: Service
metadata:
name: kibana-logging
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
new file mode 100644
index 00000000000..5b338a793d5
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
@@ -0,0 +1,78 @@
+CephFS Volume Provisioner for Kubernetes 1.5+
+=============================================
+
+[![Docker Repository on Quay](https://quay.io/repository/external_storage/cephfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/cephfs-provisioner)
+
+Using Ceph volume client
+
+Development
+-----------
+
+Compile the provisioner
+
+``` console
+make
+```
+
+Make the container image and push to the registry
+
+``` console
+make push
+```
+
+Test instruction
+----------------
+
+- Start Kubernetes local cluster
+
+See https://kubernetes.io/.
+
+- Create a Ceph admin secret
+
+``` bash
+ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3}' |xargs echo -n > /tmp/secret
+kubectl create ns cephfs
+kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs
+```
+
+- Start CephFS provisioner
+
+The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity.
+
+``` bash
+docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=cephfs-provisioner-1
+```
+
+Alternatively, deploy it in Kubernetes; see [deployment](deploy/README.md).
+
+- Create a CephFS Storage Class
+
+Replace the Ceph monitor IP in example/class.yaml with your own, then create the storage class:
+
+``` bash
+kubectl create -f example/class.yaml
+```
+
+- Create a claim
+
+``` bash
+kubectl create -f example/claim.yaml
+```
+
+- Create a Pod using the claim
+
+``` bash
+kubectl create -f example/test-pod.yaml
+```
+
+Known limitations
+-----------------
+
+- Kernel CephFS doesn't work with SELinux; setting an SELinux label in a Pod's securityContext will not work.
+- Kernel CephFS doesn't support quota or capacity limits; the capacity requested by a PVC is not enforced or validated.
+- Currently, each Ceph user created by the provisioner has the `allow r` MDS cap to permit CephFS mounts.
+
+Acknowledgement
+---------------
+
+Inspired by CephFS Manila provisioner and conversation with John Spray
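
For readers following the test instructions above, a minimal sketch of what example/class.yaml in the upstream provisioner repository typically contains; the monitor address is a placeholder, and only the `ceph.com/cephfs` provisioner name is confirmed by this change:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 10.0.0.1:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: cephfs
```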
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
new file mode 100644
index 00000000000..577fbff1ed9
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+cephfs_provisioner_namespace: "cephfs-provisioner"
+cephfs_provisioner_cluster: ceph
+cephfs_provisioner_monitors: ~
+cephfs_provisioner_admin_id: admin
+cephfs_provisioner_secret: secret
+cephfs_provisioner_storage_class: cephfs
+cephfs_provisioner_reclaim_policy: Delete
+cephfs_provisioner_claim_root: /volumes
+cephfs_provisioner_deterministic_names: true
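
A sketch of the overrides a deployment would typically provide for this role; the monitor addresses and admin key below are placeholders, not real credentials, and the enable flag is an assumed Kubespray addon toggle rather than something defined in this file:

```yaml
# e.g. inventory/sample/group_vars/k8s-cluster.yml (illustrative)
cephfs_provisioner_enabled: true           # assumed addon toggle
cephfs_provisioner_namespace: cephfs-provisioner
cephfs_provisioner_monitors: "10.0.0.1:6789,10.0.0.2:6789,10.0.0.3:6789"
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: "AQAnwihbexamplekeyonlyAAAAAAAAAAAAAAAA=="
cephfs_provisioner_storage_class: cephfs
```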
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
new file mode 100644
index 00000000000..c93ecfde79b
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+
+- name: CephFS Provisioner | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: CephFS Provisioner | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: CephFS Provisioner | Remove legacy storageclass
+ shell: |
+ {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: CephFS Provisioner | Create addon dir
+ file:
+ path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: CephFS Provisioner | Templates list
+ set_fact:
+ cephfs_provisioner_templates:
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
+ - { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
+ - { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
+ - { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
+ - { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
+ - { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
+ - { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: deploy }
+ - { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
+ cephfs_provisioner_templates_for_psp:
+ - { name: psp-cephfs-provisioner, file: psp-cephfs-provisioner.yml, type: psp }
+
+- name: CephFS Provisioner | Append extra templates to CephFS Provisioner Templates list for PodSecurityPolicy
+ set_fact:
+ cephfs_provisioner_templates: "{{ cephfs_provisioner_templates_for_psp + cephfs_provisioner_templates }}"
+ when:
+ - podsecuritypolicy_enabled
+ - cephfs_provisioner_namespace != "kube-system"
+
+- name: CephFS Provisioner | Create manifests
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
+ with_items: "{{ cephfs_provisioner_templates }}"
+ register: cephfs_provisioner_manifests
+ when: inventory_hostname == groups['kube-master'][0]
+
+- name: CephFS Provisioner | Apply manifests
+ kube:
+ name: "{{ item.item.name }}"
+ namespace: "{{ cephfs_provisioner_namespace }}"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item.item.type }}"
+ filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
+ state: "latest"
+ with_items: "{{ cephfs_provisioner_manifests.results }}"
+ when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2
new file mode 100644
index 00000000000..2a2a67cf6e7
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ cephfs_provisioner_namespace }}
+ labels:
+ name: {{ cephfs_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2
similarity index 72%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2
index 272db0f704c..4c92ea68e58 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrole-cephfs-provisioner.yml.j2
@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cephfs-provisioner
- namespace: {{ system_namespace }}
+ namespace: {{ cephfs_provisioner_namespace }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
@@ -16,7 +16,11 @@ rules:
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
- verbs: ["list", "watch", "create", "update", "patch"]
+ verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "delete"]
+ - apiGroups: ["policy"]
+ resourceNames: ["cephfs-provisioner"]
+ resources: ["podsecuritypolicies"]
+ verbs: ["use"]
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2
similarity index 69%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2
index f84ed32baef..cc5d5ff5b5f 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2
@@ -1,13 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
metadata:
name: cephfs-provisioner
- namespace: {{ cephfs_provisioner_namespace }}
subjects:
- kind: ServiceAccount
name: cephfs-provisioner
+ namespace: {{ cephfs_provisioner_namespace }}
roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
+ kind: ClusterRole
name: cephfs-provisioner
+ apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
similarity index 65%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
index bfe2117548f..197cc8dee22 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
@@ -1,21 +1,29 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
+ labels:
+ app: cephfs-provisioner
+ version: {{ cephfs_provisioner_image_tag }}
spec:
replicas: 1
- strategy:
- type: Recreate
+ selector:
+ matchLabels:
+ app: cephfs-provisioner
+ version: {{ cephfs_provisioner_image_tag }}
template:
metadata:
labels:
app: cephfs-provisioner
+ version: {{ cephfs_provisioner_image_tag }}
spec:
+ serviceAccount: cephfs-provisioner
containers:
- name: cephfs-provisioner
image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }}
+ imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: PROVISIONER_NAME
value: ceph.com/cephfs
@@ -23,4 +31,3 @@ spec:
- "/usr/local/bin/cephfs-provisioner"
args:
- "-id=cephfs-provisioner-1"
- serviceAccount: cephfs-provisioner
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2
new file mode 100644
index 00000000000..b1e9b0ac117
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/psp-cephfs-provisioner.yml.j2
@@ -0,0 +1,45 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: cephfs-provisioner
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2
similarity index 67%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2
index fb18127f22c..1fb80a13a42 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2
@@ -8,3 +8,6 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "get", "delete"]
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2
similarity index 87%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2
index 83325f1f869..01ab87b7d01 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2
@@ -1,6 +1,6 @@
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
metadata:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
@@ -9,6 +9,6 @@ subjects:
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
roleRef:
- kind: ClusterRole
- name: cephfs-provisioner
apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: cephfs-provisioner
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
similarity index 52%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
index 6ada523cb1e..dd0e37eb532 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sc-cephfs-provisioner.yml.j2
@@ -4,9 +4,12 @@ kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
+reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
- monitors: {{ cephfs_provisioner_monitors | join(',') }}
+ monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
- adminSecretName: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
+ adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
+ claimRoot: {{ cephfs_provisioner_claim_root }}
+ deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"
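For reference, a minimal sketch of a claim that consumes the rendered StorageClass. The class name assumes the default value of `cephfs_provisioner_storage_class`; both it and the claim name are illustrative only:

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-claim              # hypothetical claim name
spec:
  storageClassName: cephfs        # assumes the cephfs_provisioner_storage_class default
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```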
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
similarity index 70%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
index 796e30b814d..6d73c0c154a 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2
@@ -2,7 +2,7 @@
kind: Secret
apiVersion: v1
metadata:
- name: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
+ name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
type: Opaque
data:
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
index 458a483cb7d..900694795de 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
@@ -46,18 +46,20 @@ to limit the quota of persistent volumes.
### Simple directories
-``` bash
-for vol in vol6 vol7 vol8; do
-mkdir /mnt/disks/$vol
-done
-```
-
-This is also acceptable in a development environment, but there is no capacity
+In a development environment, using `mount --bind` also works, but there is no capacity
management.
+### Block volumeMode PVs
+
+Create a symbolic link under the discovery directory pointing to the block device on the node. To use
+raw block devices in pods, the BlockVolume feature gate must be enabled.
+
Usage notes
-----------
+The beta PV.NodeAffinity field is used by default. If running against an older K8s
+version, the useAlphaAPI flag must be set in the configMap.
+
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
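As a rough illustration of the Block volumeMode support described above (not part of this change), a claim for one of the symlinked devices could look like the sketch below; the class name assumes the role default `local_volume_provisioner_storage_class: local-storage` and the claim name is hypothetical:

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-claim           # hypothetical name
spec:
  storageClassName: local-storage
  volumeMode: Block               # requires the BlockVolume feature gate
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```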
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
index dd2e8a14744..4b18546d32d 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
@@ -1,8 +1,5 @@
---
-local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
-local_volume_provisioner_image_tag: v2.0.0
-
-local_volume_provisioner_namespace: "{{ system_namespace }}"
+local_volume_provisioner_namespace: "kube-system"
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
local_volume_provisioner_storage_class: local-storage
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index b83e45a2023..070f4c00cd0 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -19,17 +19,32 @@
group: root
mode: 0755
+- name: Local Volume Provisioner | Templates list
+ set_fact:
+ local_volume_provisioner_templates:
+ - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
+ - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
+ - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
+ - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm }
+ - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds }
+ - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc }
+ local_volume_provisioner_templates_for_psp_not_system_ns:
+ - { name: local-volume-provisioner-psp, file: local-volume-provisioner-psp.yml, type: psp }
+ - { name: local-volume-provisioner-psp-role, file: local-volume-provisioner-psp-role.yml, type: role }
+ - { name: local-volume-provisioner-psp-rb, file: local-volume-provisioner-psp-rb.yml, type: rolebinding }
+
+- name: Local Volume Provisioner | Insert extra templates to Local Volume Provisioner templates list for PodSecurityPolicy
+ set_fact:
+ local_volume_provisioner_templates: "{{ local_volume_provisioner_templates[:2] + local_volume_provisioner_templates_for_psp_not_system_ns + local_volume_provisioner_templates[2:] }}"
+ when:
+ - podsecuritypolicy_enabled
+ - local_volume_provisioner_namespace != "kube-system"
+
- name: Local Volume Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
- with_items:
- - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
- - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
- - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type, clusterrolebinding }
- - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type, cm }
- - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type, ds }
- - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type, sc }
+ with_items: "{{ local_volume_provisioner_templates }}"
register: local_volume_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
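The PSP templates above are only spliced into the list when both conditions of the `when` clause hold. A hedged inventory sketch that would trigger them (the namespace value is illustrative):

```yaml
# group_vars/k8s-cluster.yml (illustrative values)
podsecuritypolicy_enabled: true
# any namespace other than kube-system causes the psp/role/rolebinding templates to render
local_volume_provisioner_namespace: local-volume-provisioner
```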
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
index 80a74f5f10f..cc73e073d05 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
@@ -19,6 +19,9 @@ spec:
version: {{ local_volume_provisioner_image_tag }}
spec:
serviceAccountName: local-volume-provisioner
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
containers:
- name: provisioner
image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
@@ -30,12 +33,17 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
+ - name: MY_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
volumeMounts:
- name: local-volume-provisioner
mountPath: /etc/provisioner/config
readOnly: true
- name: local-volume-provisioner-hostpath-mnt-disks
mountPath: {{ local_volume_provisioner_mount_dir }}
+ mountPropagation: "HostToContainer"
volumes:
- name: local-volume-provisioner
configMap:
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2
index 68faacfbc6c..04a791010e2 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2
@@ -3,3 +3,5 @@ apiVersion: v1
kind: Namespace
metadata:
name: {{ local_volume_provisioner_namespace }}
+ labels:
+ name: {{ local_volume_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2
new file mode 100644
index 00000000000..a9dcc23ae58
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2
@@ -0,0 +1,14 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:local-volume-provisioner
+ namespace: {{ local_volume_provisioner_namespace }}
+rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - local-volume-provisioner
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2
new file mode 100644
index 00000000000..45ca822a3be
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2
@@ -0,0 +1,13 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:local-volume-provisioner
+ namespace: {{ local_volume_provisioner_namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: local-volume-provisioner
+ namespace: {{ local_volume_provisioner_namespace }}
+roleRef:
+ kind: ClusterRole
+ name: psp:local-volume-provisioner
+ apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2
new file mode 100644
index 00000000000..40a530972e8
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2
@@ -0,0 +1,15 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:local-volume-provisioner
+ namespace: {{ local_volume_provisioner_namespace }}
+rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - local-volume-provisioner
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2
new file mode 100644
index 00000000000..9daf694fa36
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2
@@ -0,0 +1,44 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: local-volume-provisioner
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'secret'
+ - 'downwardAPI'
+ - 'hostPath'
+ allowedHostPaths:
+ - pathPrefix: "{{ local_volume_provisioner_base_dir }}"
+ readOnly: false
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml
index 3daa461d8ef..b520922d6a7 100644
--- a/roles/kubernetes-apps/external_provisioner/meta/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml
@@ -6,3 +6,10 @@ dependencies:
- apps
- local-volume-provisioner
- external-provisioner
+
+ - role: kubernetes-apps/external_provisioner/cephfs_provisioner
+ when: cephfs_provisioner_enabled
+ tags:
+ - apps
+ - cephfs-provisioner
+ - external-provisioner
diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml
index 0bc22739cc9..5c6396b57ba 100644
--- a/roles/kubernetes-apps/helm/defaults/main.yml
+++ b/roles/kubernetes-apps/helm/defaults/main.yml
@@ -10,11 +10,35 @@ helm_deployment_type: host
# Do not download the local repository cache on helm init
helm_skip_refresh: false
+# Secure Tiller installation with TLS
+tiller_enable_tls: false
+helm_config_dir: "{{ kube_config_dir }}/helm"
+helm_script_dir: "{{ bin_dir }}/helm-scripts"
+
+# Store tiller release information as Secret instead of a ConfigMap
+tiller_secure_release_info: false
+
+# Where the TLS certs and private root key will be stored
+helm_tiller_cert_dir: "{{ helm_config_dir }}/ssl"
+tiller_tls_cert: "{{ helm_tiller_cert_dir }}/tiller.pem"
+tiller_tls_key: "{{ helm_tiller_cert_dir }}/tiller-key.pem"
+tiller_tls_ca_cert: "{{ helm_tiller_cert_dir }}/ca.pem"
+
+# Owner and group for the helm client certs. These should match the ownership of helm_home_dir
+helm_cert_group: root
+helm_cert_owner: root
+
# Set URL for stable repository
# helm_stable_repo_url: "https://kubernetes-charts.storage.googleapis.com"
+# Namespace for the Tiller Deployment.
+tiller_namespace: kube-system
+
# Set node selector options for Tiller Deployment manifest.
# tiller_node_selectors: "key1=val1,key2=val2"
# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"
+
+# Limit the maximum number of revisions saved per release. Use 0 for no limit.
+# tiller_max_history: 0
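A hedged example of overriding these defaults in inventory to get a TLS-secured Tiller that stores release data in Secrets; `helm_enabled` is assumed to be the usual enable flag defined elsewhere, and all values are illustrative:

```yaml
# group_vars/k8s-cluster.yml (illustrative values)
helm_enabled: true               # assumed enable flag, not defined in this file
tiller_enable_tls: true
tiller_secure_release_info: true
tiller_namespace: kube-system
tiller_max_history: 20
```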
diff --git a/roles/kubernetes-apps/helm/files/helm-make-ssl.sh b/roles/kubernetes-apps/helm/files/helm-make-ssl.sh
new file mode 100644
index 00000000000..c4c9b060a69
--- /dev/null
+++ b/roles/kubernetes-apps/helm/files/helm-make-ssl.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+
+usage()
+{
+ cat << EOF
+Create self-signed certificates
+
+Usage : $(basename $0) -e <helm home dir> [-d <ssl dir>]
+ -h | --help : Show this message
+ -e | --helm-home : Helm home directory
+ -d | --ssldir : Directory where the certificates will be installed
+EOF
+}
+
+# Options parsing
+while (($#)); do
+ case "$1" in
+ -h | --help) usage; exit 0;;
+ -e | --helm-home) HELM_HOME="${2}"; shift 2;;
+ -d | --ssldir) SSLDIR="${2}"; shift 2;;
+ *)
+ usage
+ echo "ERROR : Unknown option"
+ exit 3
+ ;;
+ esac
+done
+
+if [ -z ${SSLDIR} ]; then
+ SSLDIR="/etc/kubernetes/helm/ssl"
+fi
+
+tmpdir=$(mktemp -d /tmp/helm_cacert.XXXXXX)
+trap 'rm -rf "${tmpdir}"' EXIT
+cd "${tmpdir}"
+
+mkdir -p "${SSLDIR}"
+
+# Root CA
+if [ -e "$SSLDIR/ca-key.pem" ]; then
+ # Reuse existing CA
+ cp $SSLDIR/{ca.pem,ca-key.pem} .
+else
+ openssl genrsa -out ca-key.pem 4096 > /dev/null 2>&1
+ openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=tiller-ca" > /dev/null 2>&1
+fi
+
+gen_key_and_cert() {
+ local name=$1
+ local subject=$2
+ openssl genrsa -out ${name}-key.pem 4096 > /dev/null 2>&1
+ openssl req -new -key ${name}-key.pem -sha256 -out ${name}.csr -subj "${subject}" > /dev/null 2>&1
+ openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 > /dev/null 2>&1
+}
+
+#Generate cert and key for Tiller if they don't exist
+if ! [ -e "$SSLDIR/tiller.pem" ]; then
+ gen_key_and_cert "tiller" "/CN=tiller-server"
+fi
+
+#Generate cert and key for Helm client if they don't exist
+if ! [ -e "$SSLDIR/helm.pem" ]; then
+ gen_key_and_cert "helm" "/CN=helm-client"
+fi
+
+# Store the generated certs in the cert directory on the first master
+mv *.pem ${SSLDIR}/
+
+# Install Helm client certs to first master
+# Copy using Helm default names for convenience
+cp ${SSLDIR}/ca.pem ${HELM_HOME}/ca.pem
+cp ${SSLDIR}/helm.pem ${HELM_HOME}/cert.pem
+cp ${SSLDIR}/helm-key.pem ${HELM_HOME}/key.pem
diff --git a/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
new file mode 100644
index 00000000000..86a0c74515c
--- /dev/null
+++ b/roles/kubernetes-apps/helm/tasks/gen_helm_tiller_certs.yml
@@ -0,0 +1,107 @@
+---
+- name: "Gen_helm_tiller_certs | Create helm config directory (on {{groups['kube-master'][0]}})"
+ run_once: yes
+ delegate_to: "{{groups['kube-master'][0]}}"
+ file:
+ path: "{{ helm_config_dir }}"
+ state: directory
+ owner: kube
+
+- name: "Gen_helm_tiller_certs | Create helm script directory (on {{groups['kube-master'][0]}})"
+ run_once: yes
+ delegate_to: "{{groups['kube-master'][0]}}"
+ file:
+ path: "{{ helm_script_dir }}"
+ state: directory
+ owner: kube
+
+- name: Gen_helm_tiller_certs | Copy certs generation script
+ run_once: yes
+ delegate_to: "{{groups['kube-master'][0]}}"
+ copy:
+ src: "helm-make-ssl.sh"
+ dest: "{{ helm_script_dir }}/helm-make-ssl.sh"
+ mode: 0700
+
+- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{groups['kube-master'][0]}})"
+ find:
+ paths: "{{ helm_home_dir }}"
+ patterns: "*.pem"
+ get_checksum: true
+ delegate_to: "{{groups['kube-master'][0]}}"
+ register: helmcert_master
+ run_once: true
+
+- name: Gen_helm_tiller_certs | run cert generation script
+ run_once: yes
+ delegate_to: "{{groups['kube-master'][0]}}"
+ command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"
+
+- set_fact:
+ helm_client_certs: ['ca.pem', 'cert.pem', 'key.pem']
+
+- name: "Check_helm_client_certs | check if a cert already exists on master node"
+ find:
+ paths: "{{ helm_home_dir }}"
+ patterns: "*.pem"
+ get_checksum: true
+ register: helmcert_node
+ when: inventory_hostname != groups['kube-master'][0]
+
+- name: "Check_helm_client_certs | Set 'sync_helm_certs' to true on masters"
+ set_fact:
+ sync_helm_certs: true
+ when: inventory_hostname != groups['kube-master'][0] and
+ (not item in helmcert_node.files | map(attribute='path') | map("basename") | list or
+ helmcert_node.files | selectattr("path", "equalto", "{{ helm_home_dir }}/{{ item }}") | map(attribute="checksum")|first|default('') != helmcert_master.files | selectattr("path", "equalto", "{{ helm_home_dir }}/{{ item }}") | map(attribute="checksum")|first|default(''))
+ with_items:
+ - "{{ helm_client_certs }}"
+
+- name: Gen_helm_tiller_certs | Gather helm client certs
+ shell: "tar cfz - -C {{ helm_home_dir }} -T /dev/stdin <<< {{ helm_client_certs|join(' ') }} | base64 --wrap=0"
+ args:
+ executable: /bin/bash
+ no_log: true
+ register: helm_client_cert_data
+ check_mode: no
+ delegate_to: "{{groups['kube-master'][0]}}"
+ when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_helm_tiller_certs | Use tempfile for unpacking certs on masters
+ tempfile:
+ state: file
+ path: /tmp
+ prefix: helmcertsXXXXX
+ suffix: tar.gz
+ register: helm_cert_tempfile
+ when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_helm_tiller_certs | Write helm client certs to tempfile
+ copy:
+ content: "{{helm_client_cert_data.stdout}}"
+ dest: "{{helm_cert_tempfile.path}}"
+ owner: root
+ mode: "0600"
+ when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_helm_tiller_certs | Unpack helm certs on masters
+ shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}"
+ no_log: true
+ changed_when: false
+ check_mode: no
+ when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_helm_tiller_certs | Cleanup tempfile on masters
+ file:
+ path: "{{helm_cert_tempfile.path}}"
+ state: absent
+ when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
+
+- name: Gen_helm_tiller_certs | Check certificate permissions
+ file:
+ path: "{{ helm_home_dir }}"
+ group: "{{ helm_cert_group }}"
+ state: directory
+ owner: "{{ helm_cert_owner }}"
+ mode: "u=rwX,g-rwx,o-rwx"
+ recurse: yes
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 06e97aff242..bae058f2648 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -7,34 +7,62 @@
- name: Helm | Lay Down Helm Manifests (RBAC)
template:
- src: "{{item.file}}"
+ src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
+ - {name: tiller, file: tiller-namespace.yml, type: namespace}
- {name: tiller, file: tiller-sa.yml, type: sa}
- {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
register: manifests
- when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
+ when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- name: Helm | Apply Helm Manifests (RBAC)
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "{{ tiller_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ manifests.results }}"
- when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
+ when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
+
+# Generate necessary certs for securing Helm and Tiller connection with TLS
+- name: Helm | Set up TLS
+ include_tasks: "gen_helm_tiller_certs.yml"
+ when: tiller_enable_tls
- name: Helm | Install/upgrade helm
command: >
- {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
+ {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }}
+ {% if helm_skip_refresh %} --skip-refresh{% endif %}
+ {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
+ {% if rbac_enabled %} --service-account=tiller{% endif %}
+ {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
+ {% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
+ {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
+ {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
+ {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
+ register: install_helm
+ changed_when: false
+ environment: "{{proxy_env}}"
+
+# FIXME: https://github.com/helm/helm/issues/4063
+- name: Helm | Force apply tiller overrides if necessary
+ shell: >
+ {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }}
{% if helm_skip_refresh %} --skip-refresh{% endif %}
{% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
{% if rbac_enabled %} --service-account=tiller{% endif %}
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
- when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
+ {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
+ {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
+ {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
+ | kubectl apply -f -
+ changed_when: false
+ when: tiller_override is defined
+ environment: "{{proxy_env}}"
- name: Helm | Set up bash completion
shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
diff --git a/roles/kubernetes-apps/helm/templates/helm-container.j2 b/roles/kubernetes-apps/helm/templates/helm-container.j2
index 1fe260566e0..3afb2b2a9dc 100644
--- a/roles/kubernetes-apps/helm/templates/helm-container.j2
+++ b/roles/kubernetes-apps/helm/templates/helm-container.j2
@@ -2,6 +2,7 @@
{{ docker_bin_dir }}/docker run --rm \
--net=host \
--name=helm \
+ -v /root/.kube:/root/.kube:ro \
-v /etc/ssl:/etc/ssl:ro \
-v {{ helm_home_dir }}:{{ helm_home_dir }}:rw \
{% for dir in ssl_ca_dirs -%}
diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml.j2
new file mode 100644
index 00000000000..9bdfdde034c
--- /dev/null
+++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml.j2
@@ -0,0 +1,29 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: tiller
+ namespace: {{ tiller_namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: tiller
+ namespace: {{ tiller_namespace }}
+roleRef:
+ kind: ClusterRole
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+{% if podsecuritypolicy_enabled %}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: psp:tiller
+subjects:
+ - kind: ServiceAccount
+ name: tiller
+ namespace: {{ tiller_namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+{% endif %}
diff --git a/roles/kubernetes-apps/helm/templates/tiller-namespace.yml.j2 b/roles/kubernetes-apps/helm/templates/tiller-namespace.yml.j2
new file mode 100644
index 00000000000..455742185c2
--- /dev/null
+++ b/roles/kubernetes-apps/helm/templates/tiller-namespace.yml.j2
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: "{{ tiller_namespace}}"
diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml.j2
similarity index 76%
rename from roles/kubernetes-apps/helm/templates/tiller-sa.yml
rename to roles/kubernetes-apps/helm/templates/tiller-sa.yml.j2
index 26e575fb6a3..09b8157250a 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml
+++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
- namespace: {{ system_namespace }}
+ namespace: {{ tiller_namespace }}
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
new file mode 100644
index 00000000000..b0f008676a1
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
@@ -0,0 +1,17 @@
+Deployment files
+================
+
+This directory contains example deployment manifests for cert-manager that can
+be used in place of the official Helm chart.
+
+This is useful if you are deploying cert-manager into an environment without
+Helm, or want to inspect a 'bare minimum' deployment.
+
+Where do these come from?
+-------------------------
+
+The manifests in these subdirectories are generated from the Helm chart
+automatically. The `values.yaml` files used to configure cert-manager can be
+found in [`hack/deploy`](../../hack/deploy/).
+
+They are automatically generated by running `./hack/update-deploy-gen.sh`.
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
new file mode 100644
index 00000000000..58c09e6a9d5
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+cert_manager_namespace: "cert-manager"
+cert_manager_user: 1001
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
new file mode 100644
index 00000000000..d8ca7ad1735
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+
+- name: Cert Manager | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/cert_manager"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: Cert Manager | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: Cert Manager | Create addon dir
+ file:
+ path: "{{ kube_config_dir }}/addons/cert_manager"
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Cert Manager | Create manifests
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
+ with_items:
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: sa-cert-manager, file: sa-cert-manager.yml, type: sa }
+ - { name: crd-certificate, file: crd-certificate.yml, type: crd }
+ - { name: crd-clusterissuer, file: crd-clusterissuer.yml, type: crd }
+ - { name: crd-issuer, file: crd-issuer.yml, type: crd }
+ - { name: clusterrole-cert-manager, file: clusterrole-cert-manager.yml, type: clusterrole }
+ - { name: clusterrolebinding-cert-manager, file: clusterrolebinding-cert-manager.yml, type: clusterrolebinding }
+ - { name: deploy-cert-manager, file: deploy-cert-manager.yml, type: deploy }
+ register: cert_manager_manifests
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Cert Manager | Apply manifests
+ kube:
+ name: "{{ item.item.name }}"
+ namespace: "{{ cert_manager_namespace }}"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item.item.type }}"
+ filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}"
+ state: "latest"
+ with_items: "{{ cert_manager_manifests.results }}"
+ when:
+ - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
new file mode 100644
index 00000000000..7cf3a282dc1
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ cert_manager_namespace }}
+ labels:
+ name: {{ cert_manager_namespace }}
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
new file mode 100644
index 00000000000..0ce11fb9b39
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: cert-manager
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+rules:
+ - apiGroups: ["certmanager.k8s.io"]
+ resources: ["certificates", "issuers", "clusterissuers"]
+ verbs: ["*"]
+ - apiGroups: [""]
+ # TODO: remove endpoints once 0.4 is released. We include it here in case
+ # users use the 'master' version of the Helm chart with a 0.2.x release of
+ # cert-manager that still performs leader election with Endpoint resources.
+ # We advise users don't do this, but some will anyway and this will reduce
+ # friction.
+ resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+ verbs: ["*"]
+ - apiGroups: ["extensions"]
+ resources: ["ingresses"]
+ verbs: ["*"]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
new file mode 100644
index 00000000000..7dd567fd988
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: cert-manager
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cert-manager
+subjects:
+ - name: cert-manager
+ namespace: {{ cert_manager_namespace }}
+ kind: ServiceAccount
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
new file mode 100644
index 00000000000..a1663c64d58
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
@@ -0,0 +1,21 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: certificates.certmanager.k8s.io
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+spec:
+ group: certmanager.k8s.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: Certificate
+ plural: certificates
+ shortNames:
+ - cert
+ - certs
+
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
new file mode 100644
index 00000000000..869d4d2600a
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterissuers.certmanager.k8s.io
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+spec:
+ group: certmanager.k8s.io
+ version: v1alpha1
+ names:
+ kind: ClusterIssuer
+ plural: clusterissuers
+ scope: Cluster
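Once these CRDs are applied, issuers can be created against the `certmanager.k8s.io/v1alpha1` API they register. A minimal ACME ClusterIssuer sketch, assuming the Let's Encrypt staging endpoint, a placeholder e-mail, and the cert-manager v0.4 schema:

```yaml
---
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging               # hypothetical issuer name
spec:
  acme:
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: admin@example.com              # placeholder address
    privateKeySecretRef:
      name: letsencrypt-staging-account   # Secret holding the ACME account key
    http01: {}
```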
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
new file mode 100644
index 00000000000..1946b81bf1f
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: issuers.certmanager.k8s.io
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+spec:
+ group: certmanager.k8s.io
+ version: v1alpha1
+ names:
+ kind: Issuer
+ plural: issuers
+ scope: Namespaced
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
new file mode 100644
index 00000000000..2bcf5c701e9
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
@@ -0,0 +1,43 @@
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ name: cert-manager
+ namespace: {{ cert_manager_namespace }}
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: cert-manager
+ release: cert-manager
+ template:
+ metadata:
+ labels:
+ app: cert-manager
+ release: cert-manager
+ annotations:
+ spec:
+ serviceAccountName: cert-manager
+ containers:
+ - name: cert-manager
+ image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}
+ imagePullPolicy: {{ k8s_image_pull_policy }}
+ args:
+ - --cluster-resource-namespace=$(POD_NAMESPACE)
+ - --leader-election-namespace=$(POD_NAMESPACE)
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ resources:
+ requests:
+ cpu: 10m
+ memory: 32Mi
+ securityContext:
+ runAsUser: {{ cert_manager_user }}
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
new file mode 100644
index 00000000000..c5270e88baa
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
@@ -0,0 +1,11 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cert-manager
+ namespace: {{ cert_manager_namespace }}
+ labels:
+ app: cert-manager
+ chart: cert-manager-v0.4.1
+ release: cert-manager
+ heritage: Tiller
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
index dce234f6c8c..8acee53eb64 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
@@ -1,11 +1,8 @@
---
-ingress_nginx_default_backend_image_repo: gcr.io/google_containers/defaultbackend
-ingress_nginx_default_backend_image_tag: 1.4
-
-ingress_nginx_controller_image_repo: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
-ingress_nginx_controller_image_tag: 0.11.0
-
ingress_nginx_namespace: "ingress-nginx"
+ingress_nginx_host_network: false
+ingress_nginx_nodeselector:
+ node-role.kubernetes.io/master: "true"
ingress_nginx_insecure_port: 80
ingress_nginx_secure_port: 443
ingress_nginx_configmap: {}
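A hedged sketch of how the new knobs could be set in inventory; `ingress_nginx_enabled` is assumed to be the usual enable flag defined elsewhere, and all other values are illustrative:

```yaml
# group_vars/k8s-cluster.yml (illustrative values)
ingress_nginx_enabled: true                # assumed enable flag, not defined in this file
ingress_nginx_host_network: true           # bind ports 80/443 directly on the nodes
ingress_nginx_nodeselector:
  node-role.kubernetes.io/node: "true"     # run on regular nodes instead of masters
ingress_nginx_configmap:
  map-hash-bucket-size: "128"
  ssl-protocols: "TLSv1.2"
```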
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
index 0a37e94cdd7..8db7d297215 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
@@ -1,5 +1,23 @@
---
+- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
+ file:
+ path: "{{ kube_config_dir }}/addons/ingress_nginx"
+ state: absent
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
+- name: NGINX Ingress Controller | Remove legacy namespace
+ shell: |
+ {{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}
+ ignore_errors: yes
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ tags:
+ - upgrade
+
- name: NGINX Ingress Controller | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/ingress_nginx"
@@ -7,24 +25,37 @@
owner: root
group: root
mode: 0755
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: NGINX Ingress Controller | Templates list
+ set_fact:
+ ingress_nginx_templates:
+ - { name: 00-namespace, file: 00-namespace.yml, type: ns }
+ - { name: deploy-default-backend, file: deploy-default-backend.yml, type: deploy }
+ - { name: svc-default-backend, file: svc-default-backend.yml, type: svc }
+ - { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm }
+ - { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm }
+ - { name: cm-udp-services, file: cm-udp-services.yml, type: cm }
+ - { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa }
+ - { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole }
+ - { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding }
+ - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role }
+ - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding }
+ - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds }
+ ingress_nginx_templates_for_psp:
+ - { name: psp-ingress-nginx, file: psp-ingress-nginx.yml, type: podsecuritypolicy }
+
+- name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Templates list for PodSecurityPolicy
+ set_fact:
+ ingress_nginx_templates: "{{ ingress_nginx_templates_for_psp + ingress_nginx_templates }}"
+ when: podsecuritypolicy_enabled
- name: NGINX Ingress Controller | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}"
- with_items:
- - { name: ingress-nginx-ns, file: ingress-nginx-ns.yml, type: ns }
- - { name: ingress-nginx-sa, file: ingress-nginx-sa.yml, type: sa }
- - { name: ingress-nginx-role, file: ingress-nginx-role.yml, type: role }
- - { name: ingress-nginx-rolebinding, file: ingress-nginx-rolebinding.yml, type: rolebinding }
- - { name: ingress-nginx-clusterrole, file: ingress-nginx-clusterrole.yml, type: clusterrole }
- - { name: ingress-nginx-clusterrolebinding, file: ingress-nginx-clusterrolebinding.yml, type: clusterrolebinding }
- - { name: ingress-nginx-cm, file: ingress-nginx-cm.yml, type: cm }
- - { name: ingress-nginx-tcp-servicecs-cm, file: ingress-nginx-tcp-servicecs-cm.yml, type: cm }
- - { name: ingress-nginx-udp-servicecs-cm, file: ingress-nginx-udp-servicecs-cm.yml, type: cm }
- - { name: ingress-nginx-default-backend-svc, file: ingress-nginx-default-backend-svc.yml, type: svc }
- - { name: ingress-nginx-default-backend-rs, file: ingress-nginx-default-backend-rs.yml, type: rs }
- - { name: ingress-nginx-controller-ds, file: ingress-nginx-controller-ds.yml, type: ds }
+ with_items: "{{ ingress_nginx_templates }}"
register: ingress_nginx_manifests
when:
- inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
similarity index 86%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
index e6c36ef3069..7cc6870e5c1 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrole-ingress-nginx.yml.j2
@@ -3,7 +3,9 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: ingress-nginx
- namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups: [""]
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2
similarity index 77%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2
index 8d14af4b7d7..67aa97f8b90 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/clusterrolebinding-ingress-nginx.yml.j2
@@ -4,11 +4,14 @@ kind: ClusterRoleBinding
metadata:
name: ingress-nginx
namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: {{ ingress_nginx_namespace }}
-roleRef:
- kind: ClusterRole
- name: ingress-nginx
- apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2
new file mode 100644
index 00000000000..9f1e3bb3672
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ingress-nginx
+ namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+{% if ingress_nginx_configmap %}
+data:
+ {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
+{%- endif %}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2
new file mode 100644
index 00000000000..97520816c46
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: tcp-services
+ namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+{% if ingress_nginx_configmap_tcp_services %}
+data:
+ {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
+{%- endif %}
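The data for this ConfigMap comes from `ingress_nginx_configmap_tcp_services`, which follows the upstream tcp-services convention of mapping an exposed port to `namespace/service:port`. A hedged example with an illustrative service:

```yaml
# group_vars/k8s-cluster.yml (illustrative values)
ingress_nginx_configmap_tcp_services:
  "9000": "default/example-go:8080"        # expose TCP 9000 -> example-go service port 8080
```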
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2
new file mode 100644
index 00000000000..a3f6613a4ff
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: udp-services
+ namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+{% if ingress_nginx_configmap_udp_services %}
+data:
+ {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
+{%- endif %}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
similarity index 52%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
index c0bed920b25..0578844f9aa 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
@@ -1,27 +1,27 @@
---
apiVersion: apps/v1
-kind: ReplicaSet
+kind: Deployment
metadata:
- name: ingress-nginx-default-backend-v{{ ingress_nginx_default_backend_image_tag }}
+ name: default-backend
namespace: {{ ingress_nginx_namespace }}
labels:
- k8s-app: ingress-nginx-default-backend
- version: v{{ ingress_nginx_default_backend_image_tag }}
+ app.kubernetes.io/name: default-backend
+ app.kubernetes.io/part-of: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
- k8s-app: ingress-nginx-default-backend
- version: v{{ ingress_nginx_default_backend_image_tag }}
+ app.kubernetes.io/name: default-backend
+ app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
- k8s-app: ingress-nginx-default-backend
- version: v{{ ingress_nginx_default_backend_image_tag }}
+ app.kubernetes.io/name: default-backend
+ app.kubernetes.io/part-of: ingress-nginx
spec:
terminationGracePeriodSeconds: 60
containers:
- - name: ingress-nginx-default-backend
+ - name: default-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
@@ -35,3 +35,13 @@ spec:
timeoutSeconds: 5
ports:
- containerPort: 8080
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ # Without this nodeSelector the pod could get scheduled onto Windows nodes in a mixed cluster
+ nodeSelector:
+ beta.kubernetes.io/os: linux
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
similarity index 63%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
index 7fd3a946cbc..1031798af5c 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
@@ -5,33 +5,49 @@ metadata:
name: ingress-nginx-controller
namespace: {{ ingress_nginx_namespace }}
labels:
- k8s-app: ingress-nginx
- version: v{{ ingress_nginx_controller_image_tag }}
- annotations:
- prometheus.io/port: '10254'
- prometheus.io/scrape: 'true'
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
spec:
selector:
matchLabels:
- k8s-app: ingress-nginx
- version: v{{ ingress_nginx_controller_image_tag }}
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
- k8s-app: ingress-nginx
- version: v{{ ingress_nginx_controller_image_tag }}
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
+ annotations:
+ prometheus.io/port: '10254'
+ prometheus.io/scrape: 'true'
spec:
+ serviceAccountName: ingress-nginx
+{% if ingress_nginx_host_network %}
+ hostNetwork: true
+{% endif %}
+{% if ingress_nginx_nodeselector %}
+ nodeSelector:
+ {{ ingress_nginx_nodeselector | to_nice_yaml }}
+{%- endif %}
containers:
- name: ingress-nginx-controller
image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- /nginx-ingress-controller
- - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend
+ - --default-backend-service=$(POD_NAMESPACE)/default-backend
- --configmap=$(POD_NAMESPACE)/ingress-nginx
- - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services
- - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services
+ - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
+ - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ # www-data -> 33
+ runAsUser: 33
env:
- name: POD_NAME
valueFrom:
@@ -67,6 +83,3 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
-{% if rbac_enabled %}
- serviceAccountName: ingress-nginx
-{% endif %}
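The DaemonSet template now renders two optional knobs, `ingress_nginx_host_network` and `ingress_nginx_nodeselector`, and always sets `serviceAccountName` instead of gating it on `rbac_enabled`. A hedged example of driving both knobs from inventory variables (the file location and the node label are only illustrative):

```yaml
# group_vars/k8s-cluster.yml (illustrative location)
ingress_nginx_enabled: true          # assumed enable flag for the role
ingress_nginx_host_network: true     # renders hostNetwork: true in the pod spec
ingress_nginx_nodeselector:          # rendered via to_nice_yaml under nodeSelector
  node-role.kubernetes.io/ingress: "true"   # hypothetical label; use whatever your ingress nodes carry
```

With hostNetwork enabled, the new securityContext is what makes binding ports 80 and 443 work: all capabilities are dropped except NET_BIND_SERVICE, and the controller runs as the unprivileged www-data user (UID 33).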
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
deleted file mode 100644
index 79b9e17e737..00000000000
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: ingress-nginx
- namespace: {{ ingress_nginx_namespace }}
- labels:
- k8s-app: ingress-nginx
-data:
- {{ ingress_nginx_configmap | to_nice_yaml }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
deleted file mode 100644
index 5fb87594081..00000000000
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: ingress-nginx-tcp-services
- namespace: {{ ingress_nginx_namespace }}
- labels:
- k8s-app: ingress-nginx
-data:
- {{ ingress_nginx_configmap_tcp_services | to_nice_yaml }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
deleted file mode 100644
index bcb004bc9a2..00000000000
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: ingress-nginx-udp-services
- namespace: {{ ingress_nginx_namespace }}
- labels:
- k8s-app: ingress-nginx
-data:
- {{ ingress_nginx_configmap_udp_services | to_nice_yaml }}
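With the templated `ingress-nginx-*-services` ConfigMaps removed, the controller arguments above now point at ConfigMaps named simply `tcp-services` and `udp-services`. If you expose raw TCP or UDP services through the controller, the data follows the upstream ingress-nginx convention of `external-port: namespace/service:port`; a hedged sketch (the example-app Service is hypothetical):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx   # must match ingress_nginx_namespace
data:
  "9000": "default/example-app:8080"   # expose default/example-app port 8080 on controller port 9000
```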
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2
new file mode 100644
index 00000000000..0eac6aa2c9d
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2
@@ -0,0 +1,48 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: ingress-nginx
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: false
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - NET_BIND_SERVICE
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: {{ ingress_nginx_host_network|bool }}
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'MustRunAsNonRoot'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
similarity index 77%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
index 9254e035a26..3148002da7e 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/role-ingress-nginx.yml.j2
@@ -4,6 +4,9 @@ kind: Role
metadata:
name: ingress-nginx
namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups: [""]
resources: ["configmaps", "pods", "secrets", "namespaces"]
@@ -22,3 +25,7 @@ rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
+ - apiGroups: ["policy"]
+ resourceNames: ["ingress-nginx"]
+ resources: ["podsecuritypolicies"]
+ verbs: ["use"]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2
similarity index 77%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2
index a6a8dec4ba4..4357a2d7742 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/rolebinding-ingress-nginx.yml.j2
@@ -4,11 +4,14 @@ kind: RoleBinding
metadata:
name: ingress-nginx
namespace: {{ ingress_nginx_namespace }}
-subjects:
- - kind: ServiceAccount
- name: ingress-nginx
- namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
+subjects:
+ - kind: ServiceAccount
+ name: ingress-nginx
+ namespace: {{ ingress_nginx_namespace }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2
similarity index 54%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2
index 55d6d65181f..305d553f040 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2
@@ -4,3 +4,6 @@ kind: ServiceAccount
metadata:
name: ingress-nginx
namespace: {{ ingress_nginx_namespace }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/part-of: ingress-nginx
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
similarity index 50%
rename from roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
rename to roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
index ab23f379959..8d4ad59916a 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/svc-default-backend.yml.j2
@@ -2,13 +2,14 @@
apiVersion: v1
kind: Service
metadata:
- name: ingress-nginx-default-backend
+ name: default-backend
namespace: {{ ingress_nginx_namespace }}
labels:
- k8s-app: ingress-nginx-default-backend
+ app.kubernetes.io/name: default-backend
+ app.kubernetes.io/part-of: ingress-nginx
spec:
ports:
- port: 80
targetPort: 8080
selector:
- k8s-app: ingress-nginx-default-backend
+ app.kubernetes.io/name: default-backend
diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml
index da2e03ecc0d..617e9d9a7cc 100644
--- a/roles/kubernetes-apps/ingress_controller/meta/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml
@@ -6,3 +6,10 @@ dependencies:
- apps
- ingress-nginx
- ingress-controller
+
+ - role: kubernetes-apps/ingress_controller/cert_manager
+ when: cert_manager_enabled
+ tags:
+ - apps
+ - cert-manager
+ - ingress-controller
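The ingress-controller meta role now also pulls in `kubernetes-apps/ingress_controller/cert_manager`, gated on a single flag. Enabling it is a one-line inventory change (location is illustrative):

```yaml
# group_vars/k8s-cluster.yml (illustrative location)
cert_manager_enabled: true   # deploys cert-manager alongside the nginx ingress controller
```

The role can also be targeted on its own via the cert-manager tag added above.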
diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml
deleted file mode 100644
index dc51ea7d67f..00000000000
--- a/roles/kubernetes-apps/istio/defaults/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-istio_enabled: false
-
-istio_namespace: istio-system
-istio_version: "0.2.6"
-
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-
-istio_proxy_image_repo: docker.io/istio/proxy
-istio_proxy_image_tag: "{{ istio_version }}"
-
-istio_proxy_init_image_repo: docker.io/istio/proxy_init
-istio_proxy_init_image_tag: "{{ istio_version }}"
-
-istio_ca_image_repo: docker.io/istio/istio-ca
-istio_ca_image_tag: "{{ istio_version }}"
-
-istio_mixer_image_repo: docker.io/istio/mixer
-istio_mixer_image_tag: "{{ istio_version }}"
-
-istio_pilot_image_repo: docker.io/istio/pilot
-istio_pilot_image_tag: "{{ istio_version }}"
-
-istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
-istio_proxy_debug_image_tag: "{{ istio_version }}"
-
-istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
-istio_sidecar_initializer_image_tag: "{{ istio_version }}"
-
-istio_statsd_image_repo: prom/statsd-exporter
-istio_statsd_image_tag: latest
diff --git a/roles/kubernetes-apps/istio/tasks/main.yml b/roles/kubernetes-apps/istio/tasks/main.yml
deleted file mode 100644
index 5e36a56cc72..00000000000
--- a/roles/kubernetes-apps/istio/tasks/main.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: istio | Create addon dir
- file:
- path: "{{ kube_config_dir }}/addons/istio"
- owner: root
- group: root
- mode: 0755
- recurse: yes
-
-- name: istio | Lay out manifests
- template:
- src: "{{item.file}}.j2"
- dest: "{{kube_config_dir}}/addons/istio/{{item.file}}"
- with_items:
- - {name: istio-mixer, file: istio.yml, type: deployment }
- - {name: istio-initializer, file: istio-initializer.yml, type: deployment }
- register: manifests
- when: inventory_hostname == groups['kube-master'][0]
-
-- name: istio | Copy istioctl binary from download dir
- command: rsync -piu "{{ local_release_dir }}/istio/istioctl" "{{ bin_dir }}/istioctl"
- changed_when: false
-
-- name: istio | Set up bash completion
- shell: "{{ bin_dir }}/istioctl completion >/etc/bash_completion.d/istioctl.sh"
- when: ansible_os_family in ["Debian","RedHat"]
-
-- name: istio | Set bash completion file
- file:
- path: /etc/bash_completion.d/istioctl.sh
- owner: root
- group: root
- mode: 0755
- when: ansible_os_family in ["Debian","RedHat"]
-
-- name: istio | apply manifests
- kube:
- name: "{{item.item.name}}"
- namespace: "{{ istio_namespace }}"
- kubectl: "{{bin_dir}}/kubectl"
- resource: "{{item.item.type}}"
- filename: "{{kube_config_dir}}/addons/istio/{{item.item.file}}"
- state: "latest"
- with_items: "{{ manifests.results }}"
- when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2 b/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2
deleted file mode 100644
index 84f957ed1b8..00000000000
--- a/roles/kubernetes-apps/istio/templates/istio-initializer.yml.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-# GENERATED FILE. Use with Kubernetes 1.7+
-# TO UPDATE, modify files in install/kubernetes/templates and run install/updateVersion.sh
-################################
-# Istio initializer
-################################
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio-inject
- namespace: {{ istio_namespace }}
-data:
- config: |-
- policy: "enabled"
- namespaces: [""] # everything, aka v1.NamepsaceAll, aka cluster-wide
- initializerName: "sidecar.initializer.istio.io"
- params:
- initImage: {{ istio_proxy_init_image_repo }}:{{ istio_proxy_init_image_tag }}
- proxyImage: {{ istio_proxy_image_repo }}:{{ istio_proxy_image_tag }}
- verbosity: 2
- version: 0.2.6
- meshConfigMapName: istio
- imagePullPolicy: IfNotPresent
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-initializer-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
- name: istio-initializer
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
- initializers:
- pending: []
- labels:
- istio: istio-initializer
-spec:
- replicas: 1
- template:
- metadata:
- name: istio-initializer
- labels:
- istio: initializer
- annotations:
- sidecar.istio.io/inject: "false"
- spec:
- serviceAccountName: istio-initializer-service-account
- containers:
- - name: initializer
- image: {{ istio_sidecar_initializer_image_repo }}:{{ istio_sidecar_initializer_image_tag }}
- imagePullPolicy: IfNotPresent
- args:
- - --port=8083
- - --namespace={{ istio_namespace }}
- - -v=2
- volumeMounts:
- - name: config-volume
- mountPath: /etc/istio/config
- volumes:
- - name: config-volume
- configMap:
- name: istio
----
-apiVersion: admissionregistration.k8s.io/v1alpha1
-kind: InitializerConfiguration
-metadata:
- name: istio-sidecar
-initializers:
- - name: sidecar.initializer.istio.io
- rules:
- - apiGroups:
- - "*"
- apiVersions:
- - "*"
- resources:
- - deployments
- - statefulsets
- - jobs
- - daemonsets
----
diff --git a/roles/kubernetes-apps/istio/templates/istio.yml.j2 b/roles/kubernetes-apps/istio/templates/istio.yml.j2
deleted file mode 100644
index bd0b93a7f5b..00000000000
--- a/roles/kubernetes-apps/istio/templates/istio.yml.j2
+++ /dev/null
@@ -1,1285 +0,0 @@
-# GENERATED FILE. Use with Kubernetes 1.7+
-# TO UPDATE, modify files in install/kubernetes/templates and run install/updateVersion.sh
-################################
-# Istio system namespace
-################################
-apiVersion: v1
-kind: Namespace
-metadata:
- name: {{ istio_namespace }}
----
-################################
-# Istio RBAC
-################################
-# Permissions and roles for istio
-# To debug: start the cluster with -vmodule=rbac,3 to enable verbose logging on RBAC DENY
-# Also helps to enable logging on apiserver 'wrap' to see the URLs.
-# Each RBAC deny needs to be mapped into a rule for the role.
-# If using minikube, start with '--extra-config=apiserver.Authorization.Mode=RBAC'
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-pilot-istio-system
-rules:
-- apiGroups: ["config.istio.io"]
- resources: ["*"]
- verbs: ["*"]
-- apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["*"]
-- apiGroups: ["istio.io"]
- resources: ["istioconfigs", "istioconfigs.istio.io"]
- verbs: ["*"]
-- apiGroups: ["extensions"]
- resources: ["thirdpartyresources", "thirdpartyresources.extensions", "ingresses", "ingresses/status"]
- verbs: ["*"]
-- apiGroups: [""]
- resources: ["configmaps", "endpoints", "pods", "services"]
- verbs: ["*"]
-- apiGroups: [""]
- resources: ["namespaces", "nodes", "secrets"]
- verbs: ["get", "list", "watch"]
-- apiGroups: ["admissionregistration.k8s.io"]
- resources: ["externaladmissionhookconfigurations"]
- verbs: ["create", "update", "delete"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-initializer-istio-system
-rules:
-- apiGroups: ["*"]
- resources: ["deployments", "statefulsets", "jobs", "cronjobs", "daemonsets", "replicasets", "replicationcontrollers"]
- verbs: ["initialize", "patch", "watch", "list"]
-- apiGroups: ["*"]
- resources: ["configmaps"]
- verbs: ["get", "list", "watch"]
----
-# Mixer CRD needs to watch and list CRDs
-# It also uses discovery API to discover Kinds of config.istio.io
-# K8s adapter needs to list pods, services etc.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-mixer-istio-system
-rules:
-- apiGroups: ["config.istio.io"] # Istio CRD watcher
- resources: ["*"]
- verbs: ["get", "list", "watch"]
-- apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["get", "list", "watch"]
-- apiGroups: [""]
- resources: ["configmaps", "endpoints", "pods", "services", "namespaces", "secrets"]
- verbs: ["get", "list", "watch"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ca-istio-system
-rules:
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["create", "get", "watch", "list", "update"]
-- apiGroups: [""]
- resources: ["serviceaccounts"]
- verbs: ["get", "watch", "list"]
----
-# Permissions for the sidecar proxy.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-sidecar-istio-system
-rules:
-- apiGroups: ["istio.io"]
- resources: ["istioconfigs"]
- verbs: ["get", "watch", "list"]
-- apiGroups: ["extensions"]
- resources: ["thirdpartyresources", "ingresses"]
- verbs: ["get", "watch", "list", "update"]
-- apiGroups: [""]
- resources: ["configmaps", "pods", "endpoints", "services"]
- verbs: ["get", "watch", "list"]
----
-# Grant permissions to the Pilot/discovery.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-pilot-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-pilot-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Sidecar initializer
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-initializer-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-initializer-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-initializer-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the CA.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ca-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-ca-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-ca-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Ingress controller.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-ingress-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-ingress-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the Egress controller.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-egress-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-egress-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-pilot-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to the sidecar.
-# TEMPORARY: the istioctl should generate a separate service account for the proxy, and permission
-# granted only to that account !
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-sidecar-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: default
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-sidecar-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Grant permissions to Mixer.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: istio-mixer-admin-role-binding-istio-system
-subjects:
-- kind: ServiceAccount
- name: istio-mixer-service-account
- namespace: {{ istio_namespace }}
-roleRef:
- kind: ClusterRole
- name: istio-mixer-istio-system
- apiGroup: rbac.authorization.k8s.io
----
-# Mixer
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
-data:
- mapping.conf: |-
----
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
- labels:
- istio: mixer
-spec:
- ports:
- - name: tcp
- port: 9091
- - name: http-health
- port: 9093
- - name: configapi
- port: 9094
- - name: statsd-prom
- port: 9102
- - name: statsd-udp
- port: 9125
- protocol: UDP
- - name: prometheus
- port: 42422
- selector:
- istio: mixer
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-mixer-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-mixer
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: mixer
- spec:
- serviceAccountName: istio-mixer-service-account
- containers:
- - name: statsd-to-prometheus
- image: {{ istio_statsd_image_repo }}:{{ istio_statsd_image_tag }}
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 9102
- - containerPort: 9125
- protocol: UDP
- args:
- - '-statsd.mapping-config=/etc/statsd/mapping.conf'
- volumeMounts:
- - name: config-volume
- mountPath: /etc/statsd
- - name: mixer
- image: {{ istio_mixer_image_repo }}:{{ istio_mixer_image_tag }}
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 9091
- - containerPort: 9094
- - containerPort: 42422
- args:
- - --configStoreURL=fs:///etc/opt/mixer/configroot
- - --configStore2URL=k8s://
- - --configDefaultNamespace=istio-system
- - --traceOutput=http://zipkin:9411/api/v1/spans
- - --logtostderr
- - -v
- - "2"
- volumes:
- - name: config-volume
- configMap:
- name: istio-mixer
----
-# Mixer CRD definitions are generated using
-# mixs crd all
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: rules.config.istio.io
- labels:
- package: istio.io.mixer
- istio: core
-spec:
- group: config.istio.io
- names:
- kind: rule
- plural: rules
- singular: rule
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: attributemanifests.config.istio.io
- labels:
- package: istio.io.mixer
- istio: core
-spec:
- group: config.istio.io
- names:
- kind: attributemanifest
- plural: attributemanifests
- singular: attributemanifest
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: deniers.config.istio.io
- labels:
- package: denier
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: denier
- plural: deniers
- singular: denier
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: listcheckers.config.istio.io
- labels:
- package: listchecker
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: listchecker
- plural: listcheckers
- singular: listchecker
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: memquotas.config.istio.io
- labels:
- package: memquota
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: memquota
- plural: memquotas
- singular: memquota
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: noops.config.istio.io
- labels:
- package: noop
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: noop
- plural: noops
- singular: noop
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: prometheuses.config.istio.io
- labels:
- package: prometheus
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: prometheus
- plural: prometheuses
- singular: prometheus
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: stackdrivers.config.istio.io
- labels:
- package: stackdriver
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: stackdriver
- plural: stackdrivers
- singular: stackdriver
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: statsds.config.istio.io
- labels:
- package: statsd
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: statsd
- plural: statsds
- singular: statsd
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: stdios.config.istio.io
- labels:
- package: stdio
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: stdio
- plural: stdios
- singular: stdio
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: svcctrls.config.istio.io
- labels:
- package: svcctrl
- istio: mixer-adapter
-spec:
- group: config.istio.io
- names:
- kind: svcctrl
- plural: svcctrls
- singular: svcctrl
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: checknothings.config.istio.io
- labels:
- package: checknothing
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: checknothing
- plural: checknothings
- singular: checknothing
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: listentries.config.istio.io
- labels:
- package: listentry
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: listentry
- plural: listentries
- singular: listentry
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: logentries.config.istio.io
- labels:
- package: logentry
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: logentry
- plural: logentries
- singular: logentry
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: metrics.config.istio.io
- labels:
- package: metric
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: metric
- plural: metrics
- singular: metric
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: quotas.config.istio.io
- labels:
- package: quota
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: quota
- plural: quotas
- singular: quota
- scope: Namespaced
- version: v1alpha2
----
-
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: reportnothings.config.istio.io
- labels:
- package: reportnothing
- istio: mixer-instance
-spec:
- group: config.istio.io
- names:
- kind: reportnothing
- plural: reportnothings
- singular: reportnothing
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: attributemanifest
-metadata:
- name: istioproxy
- namespace: {{ istio_namespace }}
-spec:
- attributes:
- origin.ip:
- valueType: IP_ADDRESS
- origin.uid:
- valueType: STRING
- origin.user:
- valueType: STRING
- request.headers:
- valueType: STRING_MAP
- request.id:
- valueType: STRING
- request.host:
- valueType: STRING
- request.method:
- valueType: STRING
- request.path:
- valueType: STRING
- request.reason:
- valueType: STRING
- request.referer:
- valueType: STRING
- request.scheme:
- valueType: STRING
- request.size:
- valueType: INT64
- request.time:
- valueType: TIMESTAMP
- request.useragent:
- valueType: STRING
- response.code:
- valueType: INT64
- response.duration:
- valueType: DURATION
- response.headers:
- valueType: STRING_MAP
- response.size:
- valueType: INT64
- response.time:
- valueType: TIMESTAMP
- source.uid:
- valueType: STRING
- source.user:
- valueType: STRING
- destination.uid:
- valueType: STRING
- connection.id:
- valueType: STRING
- connection.received.bytes:
- valueType: INT64
- connection.received.bytes_total:
- valueType: INT64
- connection.sent.bytes:
- valueType: INT64
- connection.sent.bytes_total:
- valueType: INT64
- connection.duration:
- valueType: DURATION
- context.protocol:
- valueType: STRING
- context.timestamp:
- valueType: TIMESTAMP
- context.time:
- valueType: TIMESTAMP
-
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: attributemanifest
-metadata:
- name: kubernetes
- namespace: {{ istio_namespace }}
-spec:
- attributes:
- source.ip:
- valueType: IP_ADDRESS
- source.labels:
- valueType: STRING_MAP
- source.name:
- valueType: STRING
- source.namespace:
- valueType: STRING
- source.service:
- valueType: STRING
- source.serviceAccount:
- valueType: STRING
- destination.ip:
- valueType: IP_ADDRESS
- destination.labels:
- valueType: STRING_MAP
- destination.name:
- valueType: STRING
- destination.namespace:
- valueType: STRING
- destination.service:
- valueType: STRING
- destination.serviceAccount:
- valueType: STRING
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: stdio
-metadata:
- name: handler
- namespace: {{ istio_namespace }}
-spec:
- outputAsJson: true
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: logentry
-metadata:
- name: accesslog
- namespace: {{ istio_namespace }}
-spec:
- severity: '"Default"'
- timestamp: request.time
- variables:
- sourceIp: source.ip | ip("0.0.0.0")
- destinationIp: destination.ip | ip("0.0.0.0")
- sourceUser: source.user | ""
- method: request.method | ""
- url: request.path | ""
- protocol: request.scheme | "http"
- responseCode: response.code | 0
- responseSize: response.size | 0
- requestSize: request.size | 0
- latency: response.duration | "0ms"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: stdio
- namespace: {{ istio_namespace }}
-spec:
- match: "true" # If omitted match is true.
- actions:
- - handler: handler.stdio
- instances:
- - accesslog.logentry
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestcount
- namespace: {{ istio_namespace }}
-spec:
- value: "1"
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestduration
- namespace: {{ istio_namespace }}
-spec:
- value: response.duration | "0ms"
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: requestsize
- namespace: {{ istio_namespace }}
-spec:
- value: request.size | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: responsesize
- namespace: {{ istio_namespace }}
-spec:
- value: response.size | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- response_code: response.code | 200
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: tcpbytesent
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only generate when context.protocol == tcp
-spec:
- value: connection.sent.bytes | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: metric
-metadata:
- name: tcpbytereceived
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only generate when context.protocol == tcp
-spec:
- value: connection.received.bytes | 0
- dimensions:
- source_service: source.service | "unknown"
- source_version: source.labels["version"] | "unknown"
- destination_service: destination.service | "unknown"
- destination_version: destination.labels["version"] | "unknown"
- monitored_resource_type: '"UNSPECIFIED"'
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: prometheus
-metadata:
- name: handler
- namespace: {{ istio_namespace }}
-spec:
- metrics:
- - name: request_count
- instance_name: requestcount.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- - name: request_duration
- instance_name: requestduration.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- explicit_buckets:
- bounds: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
- - name: request_size
- instance_name: requestsize.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- exponentialBuckets:
- numFiniteBuckets: 8
- scale: 1
- growthFactor: 10
- - name: response_size
- instance_name: responsesize.metric.istio-system
- kind: DISTRIBUTION
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - response_code
- buckets:
- exponentialBuckets:
- numFiniteBuckets: 8
- scale: 1
- growthFactor: 10
- - name: tcp_bytes_sent
- instance_name: tcpbytesent.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
- - name: tcp_bytes_received
- instance_name: tcpbytereceived.metric.istio-system
- kind: COUNTER
- label_names:
- - source_service
- - source_version
- - destination_service
- - destination_version
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: promhttp
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: http
-spec:
- actions:
- - handler: handler.prometheus
- instances:
- - requestcount.metric
- - requestduration.metric
- - requestsize.metric
- - responsesize.metric
----
-apiVersion: "config.istio.io/v1alpha2"
-kind: rule
-metadata:
- name: promtcp
- namespace: {{ istio_namespace }}
- labels:
- istio-protocol: tcp # needed so that mixer will only execute when context.protocol == TCP
-spec:
- actions:
- - handler: handler.prometheus
- instances:
- - tcpbytesent.metric
- - tcpbytereceived.metric
----
-################################
-# Istio configMap cluster-wide
-################################
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: istio
- namespace: {{ istio_namespace }}
-data:
- mesh: |-
- # Uncomment the following line to enable mutual TLS between proxies
- # authPolicy: MUTUAL_TLS
- #
- # Set the following variable to true to disable policy checks by the Mixer.
- # Note that metrics will still be reported to the Mixer.
- disablePolicyChecks: false
- # Set enableTracing to false to disable request tracing.
- enableTracing: true
- #
- # To disable the mixer completely (including metrics), comment out
- # the following line
- mixerAddress: istio-mixer.istio-system:9091
- # This is the ingress service name, update if you used a different name
- ingressService: istio-ingress
- egressProxyAddress: istio-egress.istio-system:80
- #
- # Along with discoveryRefreshDelay, this setting determines how
- # frequently should Envoy fetch and update its internal configuration
- # from Istio Pilot. Lower refresh delay results in higher CPU
- # utilization and potential performance loss in exchange for faster
- # convergence. Tweak this value according to your setup.
- rdsRefreshDelay: 1s
- #
- defaultConfig:
- # See rdsRefreshDelay for explanation about this setting.
- discoveryRefreshDelay: 1s
- #
- # TCP connection timeout between Envoy & the application, and between Envoys.
- connectTimeout: 10s
- #
- ### ADVANCED SETTINGS #############
- # Where should envoy's configuration be stored in the istio-proxy container
- configPath: "/etc/istio/proxy"
- binaryPath: "/usr/local/bin/envoy"
- # The pseudo service name used for Envoy.
- serviceCluster: istio-proxy
- # These settings that determine how long an old Envoy
- # process should be kept alive after an occasional reload.
- drainDuration: 45s
- parentShutdownDuration: 1m0s
- #
- # Port where Envoy listens (on local host) for admin commands
- # You can exec into the istio-proxy container in a pod and
- # curl the admin port (curl http://localhost:15000/) to obtain
- # diagnostic information from Envoy. See
- # https://lyft.github.io/envoy/docs/operations/admin.html
- # for more details
- proxyAdminPort: 15000
- #
- # Address where Istio Pilot service is running
- discoveryAddress: istio-pilot.istio-system:8080
- #
- # Zipkin trace collector
- zipkinAddress: zipkin.istio-system:9411
- #
- # Statsd metrics collector. Istio mixer exposes a UDP endpoint
- # to collect and convert statsd metrics into Prometheus metrics.
- statsdUdpAddress: istio-mixer.istio-system:9125
----
-################################
-# Pilot
-################################
-# Pilot CRDs
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: destinationpolicies.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: DestinationPolicy
- listKind: DestinationPolicyList
- plural: destinationpolicies
- singular: destinationpolicy
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: egressrules.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: EgressRule
- listKind: EgressRuleList
- plural: egressrules
- singular: egressrule
- scope: Namespaced
- version: v1alpha2
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: routerules.config.istio.io
-spec:
- group: config.istio.io
- names:
- kind: RouteRule
- listKind: RouteRuleList
- plural: routerules
- singular: routerule
- scope: Namespaced
- version: v1alpha2
----
-# Pilot service for discovery
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-pilot
- namespace: {{ istio_namespace }}
- labels:
- istio: pilot
-spec:
- ports:
- - port: 8080
- name: http-discovery
- - port: 443
- name: http-admission-webhook
- selector:
- istio: pilot
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-pilot-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-pilot
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: pilot
- spec:
- serviceAccountName: istio-pilot-service-account
- containers:
- - name: discovery
- image: {{ istio_pilot_image_repo }}:{{ istio_pilot_image_tag }}
- imagePullPolicy: IfNotPresent
- args: ["discovery", "-v", "2", "--admission-service", "istio-pilot-external"]
- ports:
- - containerPort: 8080
- - containerPort: 443
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: config-volume
- mountPath: /etc/istio/config
- volumes:
- - name: config-volume
- configMap:
- name: istio
----
-################################
-# Istio ingress
-################################
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-ingress
- namespace: {{ istio_namespace }}
- labels:
- istio: ingress
-spec:
- type: LoadBalancer
- ports:
- - port: 80
-# nodePort: 32000
- name: http
- - port: 443
- name: https
- selector:
- istio: ingress
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-ingress-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-ingress
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: ingress
- spec:
- serviceAccountName: istio-ingress-service-account
- containers:
- - name: istio-ingress
- image: {{ istio_proxy_debug_image_repo }}:{{ istio_proxy_debug_image_tag }}
- args:
- - proxy
- - ingress
- - -v
- - "2"
- - --discoveryAddress
- - istio-pilot:8080
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 80
- - containerPort: 443
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: istio-certs
- mountPath: /etc/certs
- readOnly: true
- - name: ingress-certs
- mountPath: /etc/istio/ingress-certs
- readOnly: true
- volumes:
- - name: istio-certs
- secret:
- secretName: istio.default
- optional: true
- - name: ingress-certs
- secret:
- secretName: istio-ingress-certs
- optional: true
----
-################################
-# Istio egress
-################################
-apiVersion: v1
-kind: Service
-metadata:
- name: istio-egress
- namespace: {{ istio_namespace }}
-spec:
- ports:
- - port: 80
- selector:
- istio: egress
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-egress-service-account
- namespace: {{ istio_namespace }}
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: istio-egress
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: egress
- spec:
- serviceAccountName: istio-egress-service-account
- containers:
- - name: proxy
- image: {{ istio_proxy_debug_image_repo }}:{{ istio_proxy_debug_image_tag }}
- imagePullPolicy: IfNotPresent
- args:
- - proxy
- - egress
- - -v
- - "2"
- - --discoveryAddress
- - istio-pilot:8080
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- volumeMounts:
- - name: istio-certs
- mountPath: /etc/certs
- readOnly: true
- volumes:
- - name: istio-certs
- secret:
- secretName: istio.default
- optional: true
----
-################################
-# Istio-CA cluster-wide
-################################
-# Service account CA
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: istio-ca-service-account
- namespace: {{ istio_namespace }}
----
-# Istio CA watching all namespaces
-apiVersion: v1
-kind: Deployment
-apiVersion: extensions/v1beta1
-metadata:
- name: istio-ca
- namespace: {{ istio_namespace }}
- annotations:
- sidecar.istio.io/inject: "false"
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- istio: istio-ca
- spec:
- serviceAccountName: istio-ca-service-account
- containers:
- - name: istio-ca
- image: {{ istio_ca_image_repo }}:{{ istio_ca_image_tag }}
- imagePullPolicy: IfNotPresent
----
-
diff --git a/roles/kubernetes-apps/kpm/library/kpm.py b/roles/kubernetes-apps/kpm/library/kpm.py
deleted file mode 100644
index aa44d4cc8fc..00000000000
--- a/roles/kubernetes-apps/kpm/library/kpm.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-import kpm.deploy
-from ansible.module_utils.basic import *
-
-DOCUMENTATION = """
----
-module: kpm
-short_description: Application deployment on kubernetes with kpm registry
-description:
- - Create, remove, and update resources within a Kubernetes Cluster
-version_added: "2.0"
-options:
- name:
- required: true
- default: null
- description:
- - The name of the kpm package
- namespace:
- required: false
- default: 'default'
- description:
- - The namespace to deploy package. It will be created if doesn't exist
- force:
- required: false
- default: false
- description:
- - A flag to indicate to force delete, replace.
- registry:
- required: false
- default: 'https://api.kpm.sh'
- description:
- - The registry url to fetch packages
- version:
- required: false
- default: 'None'
- description:
- - The package version
- variables:
- required: false
- default: 'None'
- description:
- - Set package variables
- state:
- required: false
- choices: ['present', 'absent']
- default: present
- description:
- - present handles checking existence or creating resources,
- absent handles deleting resource(s).
-requirements:
- - kubectl
- - kpm
-author: "Antoine Legrand (ant31_2t@msn.com)"
-"""
-
-EXAMPLES = """
-- name: check presence or install ghost
- kpm: name=ghost/ghost state=present
-
-- name: check absence or remove rocketchat
- kpm: name=ant31/rocketchat state=absent
-"""
-
-RETURN = """
-"""
-
-
-def check_changed(result, state='present'):
- no_change = ["ok", 'protected', 'absent']
- for r in result:
- if r['status'] not in no_change:
- return True
- return False
-
-
-def main():
- module = AnsibleModule(
- supports_check_mode=True,
- argument_spec = dict(
- version = dict(default=None, required=False),
- state = dict(default='present', choices=['present', 'absent']),
- name = dict(required=True),
- force = dict(required=False, default=False, type='bool'),
- variables = dict(required=False, default=None, type='dict'),
- registry = dict(required=False, default="https://api.kpm.sh"),
- namespace=dict(default='default', required=False)))
-
- params = {"version": module.params.get("version"),
- "namespace": module.params.get('namespace'),
- "variables": module.params.get('variables'),
- "endpoint": module.params.get('registry'),
- "dry": module.check_mode,
- "proxy": None,
- "fmt": "json"}
- state = module.params.get("state")
- try:
- if state == 'present':
- r = kpm.deploy.deploy(module.params.get('name'), **params)
- elif state == 'absent':
- r = kpm.deploy.delete(module.params.get('name'), **params)
- except Exception as e:
- module.fail_json(msg=e.message)
- res = {}
- res['kpm'] = r
- res['changed'] = check_changed(r, state)
- module.exit_json(**res)
-
-if __name__ == '__main__':
- main()
diff --git a/roles/kubernetes-apps/kpm/tasks/main.yml b/roles/kubernetes-apps/kpm/tasks/main.yml
deleted file mode 100644
index 9aadc07eab7..00000000000
--- a/roles/kubernetes-apps/kpm/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: install kpm
- pip:
- name: "kpm"
- state: "present"
- version: "0.16.1"
- when: kpm_packages|default([])| length > 0
-
-- name: manage kubernetes applications
- kpm:
- namespace: "{{item.namespace | default(kpm_namespace | default('default'))}}"
- registry: "{{item.registry | default(kpm_registry | default('https://api.kpm.sh'))}}"
- state: "{{item.state | default(omit)}}"
- version: "{{item.version | default(omit)}}"
- variables: "{{item.variables | default(omit)}}"
- name: "{{item.name}}"
- with_items: "{{kpm_packages|default([])}}"
- register: kpmresults
- environment:
- PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index fa3b1f1a6f7..63b76ae30cc 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -4,11 +4,6 @@ dependencies:
tags:
- apps
- - role: kubernetes-apps/kpm
- tags:
- - apps
- - kpm
-
- role: kubernetes-apps/efk
when: efk_enabled
tags:
@@ -27,23 +22,19 @@ dependencies:
- apps
- registry
- - role: kubernetes-apps/cephfs_provisioner
- when: cephfs_provisioner_enabled
+ - role: kubernetes-apps/persistent_volumes
+ when: persistent_volumes_enabled
tags:
- apps
- - cephfs_provisioner
- - storage
+ - persistent_volumes
- # istio role should be last because it takes a long time to initialize and
- # will cause timeouts trying to start other addons.
- - role: kubernetes-apps/istio
- when: istio_enabled
+ - role: kubernetes-apps/container_engine_accelerator
+ when: nvidia_accelerator_enabled
tags:
- apps
- - istio
+ - container_engine_accelerator
- - role: kubernetes-apps/persistent_volumes
- when: persistent_volumes_enabled
+ - role: kubernetes-apps/cloud_controller/oci
+ when: cloud_provider is defined and cloud_provider == "oci"
tags:
- - apps
- - persistent_volumes
+ - oci
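Net effect of this meta change: the kpm and istio roles are gone, while persistent volumes, NVIDIA accelerators, and the OCI cloud controller become optional dependencies, each behind its own variable. A sketch of the inventory flags that drive them (all names taken from the conditions above):

```yaml
persistent_volumes_enabled: true    # kubernetes-apps/persistent_volumes (see the OpenStack StorageClass templates below)
nvidia_accelerator_enabled: true    # kubernetes-apps/container_engine_accelerator
cloud_provider: oci                 # pulls in kubernetes-apps/cloud_controller/oci
```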
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index f17e45c7abd..e4215ed6201 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -2,10 +2,19 @@
- name: Start Calico resources
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
- with_items: "{{ calico_node_manifests.results }}"
- when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+ with_items:
+ - "{{ calico_node_manifests.results }}"
+ when:
+ - inventory_hostname == groups['kube-master'][0] and not item|skipped
+
+- name: "calico upgrade complete"
+ shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ - calico_upgrade_enabled|default(True)
+ - calico_upgrade_needed|default(False)
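The new "calico upgrade complete" task finalises the etcdv2 to etcdv3 datastore migration with the calico-upgrade binary, using the two API config files referenced in the command. It runs only on the first master, and only when a `calico_upgrade_needed` fact has been set elsewhere (it defaults to false); the whole behaviour can be switched off from inventory:

```yaml
# Skip the automatic Calico datastore migration (sketch)
calico_upgrade_enabled: false
```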
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index cbe4f0ac7e1..3640fe762ee 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Canal | Start Resources
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 2359fe2d496..5d90bdb0182 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Cilium | Start Resources
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -11,7 +11,7 @@
when: inventory_hostname == groups['kube-master'][0] and not item|skipped
- name: Cilium | Wait for pods to run
- command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
+ command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
register: pods_not_ready
until: pods_not_ready.stdout.find("cilium")==-1
retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
index 330acc1cd04..5289296dc65 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -3,7 +3,7 @@
- name: Contiv | Create Kubernetes resources
kube:
name: "{{ item.item.name }}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 09603a79430..bdf954bf99d 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Flannel | Start Resources
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index 66d900d55e0..44babf34321 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -1,21 +1,21 @@
---
+
- name: Weave | Start Resources
kube:
name: "weave-net"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/weave-net.yml"
resource: "ds"
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
state: "latest"
when: inventory_hostname == groups['kube-master'][0]
-- name: "Weave | wait for weave to become available"
+- name: Weave | Wait for Weave to become available
uri:
url: http://127.0.0.1:6784/status
return_content: yes
register: weave_status
retries: 180
delay: 5
- until: "{{ weave_status.status == 200 and
- 'Status: ready' in weave_status.content }}"
+ until: "{{ weave_status.status == 200 and 'Status: ready' in weave_status.content }}"
when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
index 8553ec5e264..05a3d944e97 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
@@ -1,2 +1,7 @@
---
persistent_volumes_enabled: false
+storage_classes:
+ - name: standard
+ is_default: true
+ parameters:
+ availability: nova
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index e4d1b138c43..80d5fdd29f0 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -1,21 +1,19 @@
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
template:
- src: "{{item.file}}"
- dest: "{{kube_config_dir}}/{{item.file}}"
- with_items:
- - {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
+ src: "openstack-storage-class.yml.j2"
+ dest: "{{kube_config_dir}}/openstack-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
- name: "{{item.item.name}}"
+ name: storage-class
kubectl: "{{bin_dir}}/kubectl"
- resource: "{{item.item.type}}"
- filename: "{{kube_config_dir}}/{{item.item.file}}"
+ resource: StorageClass
+ filename: "{{kube_config_dir}}/openstack-storage-class.yml"
state: "latest"
- with_items: "{{ manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
+ - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
deleted file mode 100644
index 02d39dd9760..00000000000
--- a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
-provisioner: kubernetes.io/cinder
-parameters:
- availability: nova
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
new file mode 100644
index 00000000000..629c1f0a305
--- /dev/null
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
@@ -0,0 +1,14 @@
+{% for class in storage_classes %}
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: "{{ class.name }}"
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
+provisioner: kubernetes.io/cinder
+parameters:
+{% for key, value in (class.parameters | default({})).items() %}
+ "{{ key }}": "{{ value }}"
+{% endfor %}
+{% endfor %}
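For reference, rendering the template above with the shipped defaults (a single default `standard` class with `availability: nova`) should produce roughly:

```yaml
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "standard"
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  "availability": "nova"
```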
diff --git a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml
index 0e66359cc18..93d12c90135 100644
--- a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml
@@ -8,8 +8,3 @@ calico_policy_controller_memory_requests: 64M
# SSL
calico_cert_dir: "/etc/calico/certs"
canal_cert_dir: "/etc/canal/certs"
-
-rbac_resources:
- - sa
- - clusterrole
- - clusterrolebinding
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index ba11627992c..7e9377da4ad 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -2,20 +2,12 @@
- name: Set cert dir
set_fact:
calico_cert_dir: "{{ canal_cert_dir }}"
- when: kube_network_plugin == 'canal'
+ when:
+ - kube_network_plugin == 'canal'
tags:
- facts
- canal
-- name: Delete the old calico-policy-controller if it exist
- kube:
- name: calico-policy-controller
- kubectl: "{{bin_dir}}/kubectl"
- resource: rs
- namespace: "{{ system_namespace }}"
- state: absent
- run_once: true
-
- name: Create calico-kube-controllers manifests
template:
src: "{{item.file}}.j2"
@@ -27,15 +19,19 @@
- {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding}
register: calico_kube_manifests
when:
+ - inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
- name: Start of Calico kube controllers
kube:
name: "{{item.item.name}}"
- namespace: "{{ system_namespace }}"
+ namespace: "kube-system"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
- with_items: "{{ calico_kube_manifests.results }}"
- when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+ with_items:
+ - "{{ calico_kube_manifests.results }}"
+ when:
+ - inventory_hostname == groups['kube-master'][0]
+ - not item|skipped
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
index 7e1311b9286..d99466248d2 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
@@ -2,12 +2,16 @@ apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: calico-kube-controllers
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: calico-kube-controllers
kubernetes.io/cluster-service: "true"
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
replicas: 1
+ strategy:
+ type: Recreate
selector:
matchLabels:
kubernetes.io/cluster-service: "true"
@@ -15,15 +19,13 @@ spec:
template:
metadata:
name: calico-kube-controllers
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-kube-controllers
spec:
hostNetwork: true
-{% if rbac_enabled %}
serviceAccountName: calico-kube-controllers
-{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
@@ -47,17 +49,6 @@ spec:
value: "{{ calico_cert_dir }}/cert.crt"
- name: ETCD_KEY_FILE
value: "{{ calico_cert_dir }}/key.pem"
- # Location of the Kubernetes API - this shouldn't need to be
- # changed so long as it is used in conjunction with
- # CONFIGURE_ETC_HOSTS="true".
- - name: K8S_API
- value: "https://kubernetes.default"
- # Configure /etc/hosts within the container to resolve
- # the kubernetes.default Service to the correct clusterIP
- # using the environment provided by the kubelet.
- # This removes the need for KubeDNS to resolve the Service.
- - name: CONFIGURE_ETC_HOSTS
- value: "true"
volumeMounts:
- mountPath: {{ calico_cert_dir }}
name: etcd-certs
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
index 82c2f3e44a9..bf582c76983 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups:
- ""
@@ -12,6 +12,14 @@ rules:
- pods
- namespaces
- networkpolicies
+ - nodes
verbs:
- watch
- list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
\ No newline at end of file
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
index 38853a41357..2e51184811e 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
index bf8958976d8..e42e89d1894 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md
index 59542355eda..c320f2bd49c 100644
--- a/roles/kubernetes-apps/registry/README.md
+++ b/roles/kubernetes-apps/registry/README.md
@@ -1,36 +1,39 @@
-# Private Docker Registry in Kubernetes
+Private Docker Registry in Kubernetes
+=====================================
Kubernetes offers an optional private Docker registry addon, which you can turn
-on when you bring up a cluster or install later. This gives you a place to
+on when you bring up a cluster or install later. This gives you a place to
store truly private Docker images for your cluster.
-## How it works
+How it works
+------------
-The private registry runs as a `Pod` in your cluster. It does not currently
+The private registry runs as a `Pod` in your cluster. It does not currently
support SSL or authentication, which triggers Docker's "insecure registry"
-logic. To work around this, we run a proxy on each node in the cluster,
+logic. To work around this, we run a proxy on each node in the cluster,
exposing a port onto the node (via a hostPort), which Docker accepts as
"secure", since it is accessed by `localhost`.
-## Turning it on
+Turning it on
+-------------
-Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
+Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
-whether the registry is run or not. To set this flag, you can specify
-`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
-does not include this flag, the following steps should work. Note that some of
+whether the registry is run or not. To set this flag, you can specify
+`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
+does not include this flag, the following steps should work. Note that some of
this is cloud-provider specific, so you may have to customize it a bit.
### Make some storage
-The primary job of the registry is to store data. To do that we have to decide
-where to store it. For cloud environments that have networked storage, we can
-use Kubernetes's `PersistentVolume` abstraction. The following template is
+The primary job of the registry is to store data. To do that we have to decide
+where to store it. For cloud environments that have networked storage, we can
+use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
-```yaml
+``` yaml
kind: PersistentVolume
apiVersion: v1
metadata:
@@ -64,14 +67,15 @@ just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.
-## Claim the storage
+Claim the storage
+-----------------
Now that the Kubernetes cluster knows that some storage exists, you can put a
-claim on that storage. As with the `PersistentVolume` above, you can start
+claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
-```yaml
+``` yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
@@ -90,33 +94,34 @@ spec:
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
-`PersistentVolumes` in which case those might get bound instead). This claim
+`PersistentVolumes` in which case those might get bound instead). This claim
gives you the right to use this storage until you release the claim.
-## Run the registry
+Run the registry
+----------------
Now we can run a Docker registry:
-```yaml
+``` yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
template:
metadata:
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
@@ -146,24 +151,25 @@ spec:
```
-## Expose the registry in the cluster
+Expose the registry in the cluster
+----------------------------------
Now that we have a registry `Pod` running, we can expose it as a Service:
-```yaml
+``` yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
ports:
- name: registry
port: 5000
@@ -171,14 +177,15 @@ spec:
```
-## Expose the registry on each node
+Expose the registry on each node
+--------------------------------
Now that we have a running `Service`, we need to expose it onto each Kubernetes
-`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
+`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following daemonset.
-```yaml
+``` yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
@@ -217,7 +224,7 @@ spec:
When modifying replication-controller, service and daemon-set definitions, take
-care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set.
+care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
Failing to do so will register the localhost proxy daemon-sets to the
upstream service. As a result they will then try to proxy themselves, which
will, for obvious reasons, not work.
@@ -226,30 +233,31 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
-```console
+``` console
$ curl localhost:5000
404 page not found
```
-## Using the registry
+Using the registry
+------------------
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
-```yaml
+``` yaml
image: localhost:5000/user/container
```
Before you can use the registry, you have to be able to get images into it,
-though. If you are building an image on your Kubernetes `Node`, you can spell
-out `localhost:5000` when you build and push. More likely, though, you are
+though. If you are building an image on your Kubernetes `Node`, you can spell
+out `localhost:5000` when you build and push. More likely, though, you are
building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
-```console
-$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
+``` console
+$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')
@@ -260,15 +268,14 @@ Now you can build and push images on your local computer as
`localhost:5000/yourname/container` and those images will be available inside
your kubernetes cluster with the same name.
-# More Extensions
+More Extensions
+===============
-- [Use GCS as storage backend](gcs/README.md)
-- [Enable TLS/SSL](tls/README.md)
-- [Enable Authentication](auth/README.md)
+- [Use GCS as storage backend](gcs/README.md)
+- [Enable TLS/SSL](tls/README.md)
+- [Enable Authentication](auth/README.md)
-## Future improvements
+Future improvements
+-------------------
-* Allow port-forwarding to a Service rather than a pod (#15180)
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/README.md?pixel)]()
+- Allow port-forwarding to a Service rather than a pod (\#15180)
diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml
index d13290b3b82..aa52347bc3a 100644
--- a/roles/kubernetes-apps/registry/defaults/main.yml
+++ b/roles/kubernetes-apps/registry/defaults/main.yml
@@ -1,5 +1,4 @@
---
-registry_image_repo: registry
-registry_image_tag: 2.6
-registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
-registry_proxy_image_tag: 0.4
+registry_namespace: "kube-system"
+registry_storage_class: ""
+registry_disk_size: "10Gi"
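The registry role is now parameterised by namespace and storage: `registry_storage_class` and `registry_disk_size` gate the PVC tasks and the `persistentVolumeClaim` volume in the ReplicaSet template further down. A minimal inventory override sketch, where the class name and size are assumptions:

```yaml
# group_vars override (illustrative) - back the registry with persistent storage
registry_namespace: "kube-system"
registry_storage_class: "standard"   # name of an existing StorageClass (assumed)
registry_disk_size: "20Gi"
```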
diff --git a/roles/kubernetes-apps/registry/files/images/Dockerfile b/roles/kubernetes-apps/registry/files/images/Dockerfile
deleted file mode 100644
index 4223025a8c4..00000000000
--- a/roles/kubernetes-apps/registry/files/images/Dockerfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nginx:1.12
-
-RUN apt-get update \
- && apt-get install -y \
- curl \
- --no-install-recommends \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
-
-COPY rootfs /
-
-CMD ["/bin/boot"]
diff --git a/roles/kubernetes-apps/registry/files/images/Makefile b/roles/kubernetes-apps/registry/files/images/Makefile
deleted file mode 100644
index c1b64de1c20..00000000000
--- a/roles/kubernetes-apps/registry/files/images/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-.PHONY: build push vet test clean
-
-TAG = 0.4
-REPO = gcr.io/google_containers/kube-registry-proxy
-
-build:
- docker build --pull -t $(REPO):$(TAG) .
-
-push:
- gcloud docker -- push $(REPO):$(TAG)
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot b/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot
deleted file mode 100755
index 04262b4642e..00000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-# fail if no hostname is provided
-REGISTRY_HOST=${REGISTRY_HOST:?no host}
-REGISTRY_PORT=${REGISTRY_PORT:-5000}
-
-# we are always listening on port 80
-# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
-PORT=80
-
-sed -e "s/%HOST%/$REGISTRY_HOST/g" \
- -e "s/%PORT%/$REGISTRY_PORT/g" \
- -e "s/%BIND_PORT%/$PORT/g" \
- /etc/nginx/conf.d/default.conf
-
-# wait for registry to come online
-while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
- printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
- sleep 1
-done
-
-printf "starting proxy...\n"
-exec nginx -g "daemon off;" "$@"
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in
deleted file mode 100644
index ecd95fd2fe1..00000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in
+++ /dev/null
@@ -1,28 +0,0 @@
-# Docker registry proxy for api version 2
-
-upstream docker-registry {
- server %HOST%:%PORT%;
-}
-
-# No client auth or TLS
-# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
-server {
- listen %BIND_PORT%;
- server_name localhost;
-
- # disable any limits to avoid HTTP 413 for large image uploads
- client_max_body_size 0;
-
- # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
- chunked_transfer_encoding on;
-
- location / {
- # Do not allow connections from docker 1.5 and earlier
- # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
- if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
- return 404;
- }
-
- include docker-registry.conf;
- }
-}
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf
deleted file mode 100644
index 7dc8cfff266..00000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-proxy_pass http://docker-registry;
-proxy_set_header Host $http_host; # required for docker client's sake
-proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
-proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-proxy_set_header X-Forwarded-Proto $scheme;
-proxy_read_timeout 900;
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf
deleted file mode 100644
index 54ecc888e55..00000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-user nginx;
-worker_processes auto;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-events {
- worker_connections 1024;
-}
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
-
- keepalive_timeout 65;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml
index a236d273cac..fd8cb82e33d 100644
--- a/roles/kubernetes-apps/registry/tasks/main.yml
+++ b/roles/kubernetes-apps/registry/tasks/main.yml
@@ -3,29 +3,76 @@
- name: Registry | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/registry"
+ state: directory
owner: root
group: root
mode: 0755
- recurse: true
+
+- name: Registry | Templates list
+ set_fact:
+ registry_templates:
+ - { name: registry-ns, file: registry-ns.yml, type: ns }
+ - { name: registry-sa, file: registry-sa.yml, type: sa }
+ - { name: registry-proxy-sa, file: registry-proxy-sa.yml, type: sa }
+ - { name: registry-svc, file: registry-svc.yml, type: svc }
+ - { name: registry-rs, file: registry-rs.yml, type: rs }
+ - { name: registry-proxy-ds, file: registry-proxy-ds.yml, type: ds }
+ registry_templates_for_psp:
+ - { name: registry-psp, file: registry-psp.yml, type: psp }
+ - { name: registry-cr, file: registry-cr.yml, type: clusterrole }
+ - { name: registry-crb, file: registry-crb.yml, type: rolebinding }
+ - { name: registry-proxy-psp, file: registry-proxy-psp.yml, type: psp }
+ - { name: registry-proxy-cr, file: registry-proxy-cr.yml, type: clusterrole }
+ - { name: registry-proxy-crb, file: registry-proxy-crb.yml, type: rolebinding }
+
+- name: Registry | Append extra templates to Registry Templates list for PodSecurityPolicy
+ set_fact:
+ registry_templates: "{{ registry_templates[:3] + registry_templates_for_psp + registry_templates[3:] }}"
+ when:
+ - podsecuritypolicy_enabled
+ - registry_namespace != "kube-system"
- name: Registry | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
- with_items:
- - { name: registry-svc, file: registry-svc.yml, type: service }
- - { name: registry-rc, file: registry-rc.yml, type: replicationcontroller }
- - { name: registry-ds, file: registry-ds.yml, type: daemonset }
+ with_items: "{{ registry_templates }}"
register: registry_manifests
when: inventory_hostname == groups['kube-master'][0]
- name: Registry | Apply manifests
kube:
name: "{{ item.item.name }}"
- namespace: "{{ system_namespace }}"
+ namespace: "{{ registry_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
state: "latest"
with_items: "{{ registry_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
+
+- name: Registry | Create PVC manifests
+ template:
+ src: "{{ item.file }}.j2"
+ dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
+ with_items:
+ - { name: registry-pvc, file: registry-pvc.yml, type: pvc }
+ register: registry_manifests
+ when:
+ - registry_storage_class != none
+ - registry_disk_size != none
+ - inventory_hostname == groups['kube-master'][0]
+
+- name: Registry | Apply PVC manifests
+ kube:
+ name: "{{ item.item.name }}"
+ namespace: "{{ registry_namespace }}"
+ kubectl: "{{ bin_dir }}/kubectl"
+ resource: "{{ item.item.type }}"
+ filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
+ state: "latest"
+ with_items: "{{ registry_manifests.results }}"
+ when:
+ - registry_storage_class != none
+ - registry_disk_size != none
+ - inventory_hostname == groups['kube-master'][0]
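The splice `registry_templates[:3] + registry_templates_for_psp + registry_templates[3:]` keeps the namespace and service accounts ahead of the workload manifests when PodSecurityPolicy is enabled outside `kube-system`. The effective list it produces is:

```yaml
registry_templates:
  - { name: registry-ns,        file: registry-ns.yml,        type: ns }
  - { name: registry-sa,        file: registry-sa.yml,        type: sa }
  - { name: registry-proxy-sa,  file: registry-proxy-sa.yml,  type: sa }
  - { name: registry-psp,       file: registry-psp.yml,       type: psp }
  - { name: registry-cr,        file: registry-cr.yml,        type: clusterrole }
  - { name: registry-crb,       file: registry-crb.yml,       type: rolebinding }
  - { name: registry-proxy-psp, file: registry-proxy-psp.yml, type: psp }
  - { name: registry-proxy-cr,  file: registry-proxy-cr.yml,  type: clusterrole }
  - { name: registry-proxy-crb, file: registry-proxy-crb.yml, type: rolebinding }
  - { name: registry-svc,       file: registry-svc.yml,       type: svc }
  - { name: registry-rs,        file: registry-rs.yml,        type: rs }
  - { name: registry-proxy-ds,  file: registry-proxy-ds.yml,  type: ds }
```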
diff --git a/roles/kubernetes-apps/registry/templates/auth/README.md b/roles/kubernetes-apps/registry/templates/auth/README.md
deleted file mode 100644
index 040c54bcb8d..00000000000
--- a/roles/kubernetes-apps/registry/templates/auth/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Enable Authentication with Htpasswd for Kube-Registry
-
-Docker registry support a few authentication providers. Full list of supported provider can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable authentication with htpasswd for kube-registry.
-
-### Prepare Htpasswd Secret
-
-Please generate your own htpasswd file. Assuming the file you generated is `htpasswd`.
-Creating secret to hold htpasswd...
-```console
-$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
-```
-
-### Run Registry
-
-Please be noted that this sample rc is using emptyDir as storage backend for simplicity.
-
-
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
- value: /var/lib/registry
- - name: REGISTRY_AUTH_HTPASSWD_REALM
- value: basic_realm
- - name: REGISTRY_AUTH_HTPASSWD_PATH
- value: /auth/htpasswd
- volumeMounts:
- - name: image-store
- mountPath: /var/lib/registry
- - name: auth-dir
- mountPath: /auth
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumes:
- - name: image-store
- emptyDir: {}
- - name: auth-dir
- secret:
- secretName: registry-auth-secret
-```
-
-
-No changes are needed for other components (kube-registry service and proxy).
-
-### To Verify
-
-Setup proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with kube-registry and see if it works.
-
-### Configure Nodes to Authenticate with Kube-Registry
-
-By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, more documentation can be found [Here](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).
-
-
-
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/auth/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2
deleted file mode 100644
index 1af623d0909..00000000000
--- a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2
+++ /dev/null
@@ -1,56 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
- value: /var/lib/registry
- - name: REGISTRY_AUTH_HTPASSWD_REALM
- value: basic_realm
- - name: REGISTRY_AUTH_HTPASSWD_PATH
- value: /auth/htpasswd
- volumeMounts:
- - name: image-store
- mountPath: /var/lib/registry
- - name: auth-dir
- mountPath: /auth
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumes:
- - name: image-store
- emptyDir: {}
- - name: auth-dir
- secret:
- secretName: registry-auth-secret
\ No newline at end of file
diff --git a/roles/kubernetes-apps/registry/templates/gcs/README.md b/roles/kubernetes-apps/registry/templates/gcs/README.md
deleted file mode 100644
index 5706a848f8d..00000000000
--- a/roles/kubernetes-apps/registry/templates/gcs/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Kube-Registry with GCS storage backend
-
-Besides local file system, docker registry also supports a number of cloud storage backends. Full list of supported backend can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS for kube-registry as storage backend.
-
-A few preparation steps are needed.
- 1. Create a bucket named kube-registry in GCS.
- 1. Create a service account for GCS access and create key file in json format. Detail instruction can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
-
-
-### Pack Keyfile into a Secret
-
-Assuming you have downloaded the keyfile as `keyfile.json`. Create secret with the `keyfile.json`...
-```console
-$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
-```
-
-
-### Run Registry
-
-
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE
- value: gcs
- - name: REGISTRY_STORAGE_GCS_BUCKET
- value: kube-registry
- - name: REGISTRY_STORAGE_GCS_KEYFILE
- value: /gcs/keyfile
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumeMounts:
- - name: gcs-key
- mountPath: /gcs
- volumes:
- - name: gcs-key
- secret:
- secretName: gcs-key-secret
-```
-
-
-
-No changes are needed for other components (kube-registry service and proxy).
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/gcs/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2
deleted file mode 100644
index e6974033564..00000000000
--- a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE
- value: gcs
- - name: REGISTRY_STORAGE_GCS_BUCKET
- value: kube-registry
- - name: REGISTRY_STORAGE_GCS_KEYFILE
- value: /gcs/keyfile
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumeMounts:
- - name: gcs-key
- mountPath: /gcs
- volumes:
- - name: gcs-key
- secret:
- secretName: gcs-key-secret
diff --git a/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2
new file mode 100644
index 00000000000..27b2f316ede
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-cr.yml.j2
@@ -0,0 +1,15 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: psp:registry
+ namespace: {{ registry_namespace }}
+rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - registry
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2
similarity index 50%
rename from roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
rename to roles/kubernetes-apps/registry/templates/registry-crb.yml.j2
index 0c8db4c78fe..c9d14813f63 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/registry/templates/registry-crb.yml.j2
@@ -1,14 +1,13 @@
----
-kind: ClusterRoleBinding
+kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
- name: tiller
- namespace: {{ system_namespace }}
+ name: psp:registry
+ namespace: {{ registry_namespace }}
subjects:
- kind: ServiceAccount
- name: tiller
- namespace: {{ system_namespace }}
+ name: registry
+ namespace: {{ registry_namespace }}
roleRef:
kind: ClusterRole
- name: cluster-admin
+ name: psp:registry
apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2
new file mode 100644
index 00000000000..c224337af23
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ registry_namespace }}
+ labels:
+ name: {{ registry_namespace }}
diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-cr.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-cr.yml.j2
new file mode 100644
index 00000000000..5a28b076dd5
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-cr.yml.j2
@@ -0,0 +1,15 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: psp:registry-proxy
+ namespace: {{ registry_namespace }}
+rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - registry-proxy
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-crb.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-crb.yml.j2
new file mode 100644
index 00000000000..db6ce0c95b5
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-crb.yml.j2
@@ -0,0 +1,13 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: psp:registry-proxy
+ namespace: {{ registry_namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: registry-proxy
+ namespace: {{ registry_namespace }}
+roleRef:
+ kind: ClusterRole
+ name: psp:registry-proxy
+ apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
similarity index 54%
rename from roles/kubernetes-apps/registry/templates/registry-ds.yml.j2
rename to roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
index 4d6a7800bd5..0a04c40d134 100644
--- a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
@@ -1,28 +1,34 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: kube-registry-proxy
- namespace: {{ system_namespace }}
+ name: registry-proxy
+ namespace: {{ registry_namespace }}
labels:
- k8s-app: kube-registry-proxy
+ k8s-app: registry-proxy
kubernetes.io/cluster-service: "true"
version: v{{ registry_proxy_image_tag }}
spec:
+ selector:
+ matchLabels:
+ k8s-app: registry-proxy
+ version: v{{ registry_proxy_image_tag }}
template:
metadata:
labels:
- k8s-app: kube-registry-proxy
- kubernetes.io/name: "kube-registry-proxy"
+ k8s-app: registry-proxy
+ kubernetes.io/name: "registry-proxy"
kubernetes.io/cluster-service: "true"
version: v{{ registry_proxy_image_tag }}
spec:
+ serviceAccountName: registry-proxy
containers:
- - name: kube-registry-proxy
+ - name: registry-proxy
image: {{ registry_proxy_image_repo }}:{{ registry_proxy_image_tag }}
+ imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: REGISTRY_HOST
- value: kube-registry.kube-system.svc.cluster.local
+ value: registry.{{ registry_namespace }}.svc.{{ cluster_name }}
- name: REGISTRY_PORT
value: "5000"
ports:
diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-psp.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-psp.yml.j2
new file mode 100644
index 00000000000..e73711a9578
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-psp.yml.j2
@@ -0,0 +1,48 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: registry-proxy
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: true
+ hostPorts:
+ - min: 5000
+ max: 5000
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-sa.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-sa.yml.j2
new file mode 100644
index 00000000000..0c18fa22769
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-sa.yml.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: registry-proxy
+ namespace: {{ registry_namespace }}
+ labels:
+ kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2
new file mode 100644
index 00000000000..512f8a4e7be
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-psp.yml.j2
@@ -0,0 +1,45 @@
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: registry
+ annotations:
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+ labels:
+ kubernetes.io/cluster-service: 'true'
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
diff --git a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2
deleted file mode 100644
index 196efa928da..00000000000
--- a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-kind: PersistentVolume
-apiVersion: v1
-metadata:
- name: kube-system-kube-registry-pv
- labels:
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
-spec:
-{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
- capacity:
- storage: {{ pillar['cluster_registry_disk_size'] }}
- accessModes:
- - ReadWriteOnce
- gcePersistentDisk:
- pdName: "{{ pillar['cluster_registry_disk_name'] }}"
- fsType: "ext4"
-{% endif %}
diff --git a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
index 35c78717713..0db26db96c9 100644
--- a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
@@ -1,14 +1,16 @@
-kind: PersistentVolumeClaim
+---
apiVersion: v1
+kind: PersistentVolumeClaim
metadata:
- name: kube-registry-pvc
- namespace: kube-system
+ name: registry-pvc
+ namespace: {{ registry_namespace }}
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
accessModes:
- ReadWriteOnce
+ storageClassName: {{ registry_storage_class }}
resources:
requests:
- storage: {{ pillar['cluster_registry_disk_size'] }}
+ storage: {{ registry_disk_size }}
diff --git a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
similarity index 62%
rename from roles/kubernetes-apps/registry/templates/registry-rc.yml.j2
rename to roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
index 90c01c4aa37..57e8db66894 100644
--- a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
@@ -1,41 +1,49 @@
---
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1
+kind: ReplicaSet
metadata:
- name: kube-registry-v{{ registry_image_tag }}
- namespace: {{ system_namespace }}
+ name: registry
+ namespace: {{ registry_namespace }}
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v{{ registry_image_tag }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
- k8s-app: kube-registry-upstream
- version: v{{ registry_image_tag }}
+ matchLabels:
+ k8s-app: registry
+ version: v{{ registry_image_tag }}
template:
metadata:
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
version: v{{ registry_image_tag }}
kubernetes.io/cluster-service: "true"
spec:
+ serviceAccountName: registry
containers:
- name: registry
image: {{ registry_image_repo }}:{{ registry_image_tag }}
+ imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- - name: image-store
+ - name: registry-pvc
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- - name: image-store
+ - name: registry-pvc
+{% if registry_storage_class != none %}
+ persistentVolumeClaim:
+ claimName: registry-pvc
+{% else %}
emptyDir: {}
+{% endif %}
diff --git a/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2
new file mode 100644
index 00000000000..b9e48b8e139
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-sa.yml.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: registry
+ namespace: {{ registry_namespace }}
+ labels:
+ kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
index 5669624690b..58d101d298d 100644
--- a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
@@ -2,17 +2,17 @@
apiVersion: v1
kind: Service
metadata:
- name: kube-registry
- namespace: {{ system_namespace }}
+ name: registry
+ namespace: {{ registry_namespace }}
labels:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeRegistry"
spec:
selector:
- k8s-app: kube-registry-upstream
+ k8s-app: registry
ports:
- - name: registry
- port: 5000
- protocol: TCP
+ - name: registry
+ port: 5000
+ protocol: TCP
diff --git a/roles/kubernetes-apps/registry/templates/tls/README.md b/roles/kubernetes-apps/registry/templates/tls/README.md
deleted file mode 100644
index 7ba5cc628b3..00000000000
--- a/roles/kubernetes-apps/registry/templates/tls/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Enable TLS for Kube-Registry
-
-This document describes how to enable TLS for kube-registry. Before you start, please check if you have all the prerequisite:
-
-- A domain for kube-registry. Assuming it is ` myregistrydomain.com`.
-- Domain certificate and key. Assuming they are `domain.crt` and `domain.key`
-
-### Pack domain.crt and domain.key into a Secret
-
-```console
-$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
-```
-
-### Run Registry
-
-Please be noted that this sample rc is using emptyDir as storage backend for simplicity.
-
-
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
- value: /var/lib/registry
- - name: REGISTRY_HTTP_TLS_CERTIFICATE
- value: /certs/domain.crt
- - name: REGISTRY_HTTP_TLS_KEY
- value: /certs/domain.key
- volumeMounts:
- - name: image-store
- mountPath: /var/lib/registry
- - name: cert-dir
- mountPath: /certs
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumes:
- - name: image-store
- emptyDir: {}
- - name: cert-dir
- secret:
- secretName: registry-tls-secret
-```
-
-
-### Expose External IP for Kube-Registry
-
-Modify the default kube-registry service to `LoadBalancer` type and point the DNS record of `myregistrydomain.com` to the service external ip.
-
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: kube-registry
- namespace: kube-system
- labels:
- k8s-app: kube-registry
-# kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "KubeRegistry"
-spec:
- selector:
- k8s-app: kube-registry
- type: LoadBalancer
- ports:
- - name: registry
- port: 5000
- protocol: TCP
-```
-
-
-### To Verify
-
-Now you should be able to access your kube-registry from another docker host.
-```console
-docker pull busybox
-docker tag busybox myregistrydomain.com:5000/busybox
-docker push myregistrydomain.com:5000/busybox
-docker pull myregistrydomain.com:5000/busybox
-```
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/tls/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2
deleted file mode 100644
index c2411c05246..00000000000
--- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-registry-v0
- namespace: kube-system
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-registry
- version: v0
- template:
- metadata:
- labels:
- k8s-app: kube-registry
- version: v0
-# kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: registry
- image: registry:2
- resources:
- # keep request = limit to keep this container in guaranteed class
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 100m
- memory: 100Mi
- env:
- - name: REGISTRY_HTTP_ADDR
- value: :5000
- - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
- value: /var/lib/registry
- - name: REGISTRY_HTTP_TLS_CERTIFICATE
- value: /certs/domain.crt
- - name: REGISTRY_HTTP_TLS_KEY
- value: /certs/domain.key
- volumeMounts:
- - name: image-store
- mountPath: /var/lib/registry
- - name: cert-dir
- mountPath: /certs
- ports:
- - containerPort: 5000
- name: registry
- protocol: TCP
- volumes:
- - name: image-store
- emptyDir: {}
- - name: cert-dir
- secret:
- secretName: registry-tls-secret
-
diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2
deleted file mode 100644
index a9d59f117d4..00000000000
--- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: kube-registry
- namespace: kube-system
- labels:
- k8s-app: kube-registry
-# kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "KubeRegistry"
-spec:
- selector:
- k8s-app: kube-registry
- type: LoadBalancer
- ports:
- - name: registry
- port: 5000
- protocol: TCP
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 52101ae16c1..2589b3610f3 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -34,7 +34,7 @@
{{ bin_dir }}/kubectl get secrets --all-namespaces
-o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
| grep kubernetes.io/service-account-token
- | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller'
+ | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
register: tokens_to_delete
when: needs_rotation
@@ -44,5 +44,5 @@
when: needs_rotation
- name: Rotate Tokens | Delete pods in system namespace
- command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all"
+ command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
when: needs_rotation
diff --git a/roles/kubernetes/client/defaults/main.yml b/roles/kubernetes/client/defaults/main.yml
index 5864e991f52..32870df016a 100644
--- a/roles/kubernetes/client/defaults/main.yml
+++ b/roles/kubernetes/client/defaults/main.yml
@@ -1,7 +1,7 @@
---
kubeconfig_localhost: false
kubectl_localhost: false
-artifacts_dir: "./artifacts"
+artifacts_dir: "{{ inventory_dir }}/artifacts"
kube_config_dir: "/etc/kubernetes"
kube_apiserver_port: "6443"
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 3b66c5e1c6c..4da9ad30a34 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -5,7 +5,7 @@
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
{%- else -%}
- https://{{ kube_apiserver_address }}:{{ kube_apiserver_port }}
+ https://{{ kube_apiserver_access_address }}:{{ kube_apiserver_port }}
{%- endif -%}
tags:
- facts
@@ -40,7 +40,7 @@
src: "{{ kube_config_dir }}/admin.conf"
dest: "/root/.kube/config"
remote_src: yes
- mode: "0700"
+ mode: "0600"
backup: yes
- name: Copy admin kubeconfig to ansible host
@@ -61,3 +61,15 @@
become: no
run_once: yes
when: kubectl_localhost|default(false)
+
+- name: create helper script kubectl.sh on ansible host
+ copy:
+ content: |
+ #!/bin/bash
+ kubectl --kubeconfig=admin.conf $@
+ dest: "{{ artifacts_dir }}/kubectl.sh"
+ mode: 0755
+ become: no
+ run_once: yes
+ delegate_to: localhost
+ when: kubectl_localhost|default(false) and kubeconfig_localhost|default(false)
diff --git a/roles/kubernetes/kubeadm/defaults/main.yml b/roles/kubernetes/kubeadm/defaults/main.yml
new file mode 100644
index 00000000000..d9ed537c274
--- /dev/null
+++ b/roles/kubernetes/kubeadm/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# discovery_timeout modifies the discovery timeout
+discovery_timeout: 5m0s
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 2b6e739dbae..5697076e898 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -2,12 +2,11 @@
- name: Set kubeadm_discovery_address
set_fact:
kubeadm_discovery_address: >-
- {%- if "127.0.0.1" or "localhost" in kube_apiserver_endpoint -%}
+ {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint }}
{%- endif %}
- when: not is_kube_master
tags:
- facts
@@ -28,23 +27,36 @@
register: temp_token
delegate_to: "{{ groups['kube-master'][0] }}"
+- name: gets the kubeadm version
+ command: "{{ bin_dir }}/kubeadm version -o short"
+ register: kubeadm_output
+
+- name: sets kubeadm api version to v1alpha1
+ set_fact:
+ kubeadmConfig_api_version: v1alpha1
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+
+- name: defaults kubeadm api version to v1alpha2
+ set_fact:
+ kubeadmConfig_api_version: v1alpha2
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')
+
- name: Create kubeadm client config
template:
- src: kubeadm-client.conf.j2
- dest: "{{ kube_config_dir }}/kubeadm-client.conf"
+ src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
+ dest: "{{ kube_config_dir }}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf"
backup: yes
when: not is_kube_master
vars:
kubeadm_token: "{{ temp_token.stdout }}"
- register: kubeadm_client_conf
- name: Join to cluster if needed
command: >-
{{ bin_dir }}/kubeadm join
- --config {{ kube_config_dir}}/kubeadm-client.conf
+ --config {{ kube_config_dir}}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf
--ignore-preflight-errors=all
register: kubeadm_join
- when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists)
+ when: not is_kube_master and (not kubelet_conf.stat.exists)
- name: Wait for kubelet bootstrap to create config
wait_for:
@@ -53,19 +65,43 @@
timeout: 60
- name: Update server field in kubelet kubeconfig
- replace:
- path: "{{ kube_config_dir }}/kubelet.conf"
- regexp: '(\s+)https://{{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
- replace: '\1{{ kube_apiserver_endpoint }}\2'
+ lineinfile:
+ dest: "{{ kube_config_dir }}/kubelet.conf"
+ regexp: 'server:'
+ line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
- when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
+ when:
+ - kubeadm_config_api_fqdn is not defined
+ - not is_kube_master
+ - kubeadm_discovery_address != kube_apiserver_endpoint
notify: restart kubelet
+- name: Update server field in kube-proxy kubeconfig
+ shell: >-
+ {{ bin_dir }}/kubectl get configmap kube-proxy -n kube-system -o yaml
+ | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
+ | {{ bin_dir }}/kubectl replace -f -
+ delegate_to: "{{groups['kube-master']|first}}"
+ run_once: true
+ when:
+ - kubeadm_config_api_fqdn is not defined
+ - is_kube_master
+ - kubeadm_discovery_address != kube_apiserver_endpoint
+
# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes
- name: Symlink kubelet kubeconfig for calico/canal
file:
- src: "{{ kube_config_dir }}//kubelet.conf"
+ src: "{{ kube_config_dir }}/kubelet.conf"
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
state: link
force: yes
when: kube_network_plugin in ['calico','canal']
+
+- name: Restart all kube-proxy pods to ensure that they load the new configmap
+ shell: "{{ bin_dir }}/kubectl delete pod -n kube-system -l k8s-app=kube-proxy"
+ delegate_to: "{{groups['kube-master']|first}}"
+ run_once: true
+ when:
+ - kubeadm_config_api_fqdn is not defined
+ - is_kube_master
+ - kubeadm_discovery_address != kube_apiserver_endpoint
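The pair of set_fact tasks above (and the identical pair that appears again in kubeadm-setup.yml below) pick the kubeadm config API version from the installed binary; a minimal sketch of the same selection collapsed into a single task, assuming the same kubeadm_output register is available:

    - name: Select kubeadm config API version from installed kubeadm
      set_fact:
        kubeadmConfig_api_version: "{{ 'v1alpha1' if kubeadm_output.stdout | version_compare('v1.11.0', '<') else 'v1alpha2' }}"
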
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
similarity index 52%
rename from roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
rename to roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
index 18c6c2af603..fe9f45b2ff2 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
@@ -3,6 +3,10 @@ kind: NodeConfiguration
caCertPath: {{ kube_config_dir }}/ssl/ca.crt
token: {{ kubeadm_token }}
discoveryTokenAPIServers:
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+- {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
- {{ kubeadm_discovery_address | replace("https://", "")}}
-DiscoveryTokenCACertHashes:
+{% endif %}
+discoveryTokenCACertHashes:
- sha256:{{ kubeadm_ca_hash.stdout }}
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
new file mode 100644
index 00000000000..62105fbde82
--- /dev/null
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
@@ -0,0 +1,21 @@
+apiVersion: kubeadm.k8s.io/v1alpha2
+kind: NodeConfiguration
+clusterName: {{ cluster_name }}
+discoveryFile: ""
+caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+discoveryTimeout: {{ discovery_timeout }}
+discoveryToken: {{ kubeadm_token }}
+tlsBootstrapToken: {{ kubeadm_token }}
+token: {{ kubeadm_token }}
+discoveryTokenAPIServers:
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+- {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
+- {{ kubeadm_discovery_address | replace("https://", "")}}
+{% endif %}
+discoveryTokenUnsafeSkipCAVerification: true
+nodeRegistration:
+ name: {{ inventory_hostname }}
+{% if container_manager == 'crio' %}
+ criSocket: /var/run/crio/crio.sock
+{% endif %}
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 64a71fc22c9..4a7269baef6 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -24,6 +24,35 @@ kube_apiserver_storage_backend: etcd3
# By default, force back to etcd2. Set to true to force etcd3 (experimental!)
force_etcd3: false
+# audit support
+kubernetes_audit: false
+# path to audit log file
+audit_log_path: /var/log/audit/kube-apiserver-audit.log
+# number of days to retain audit log files
+audit_log_maxage: 30
+# number of rotated audit log files to retain
+audit_log_maxbackups: 1
+# maximum size in MB of an audit log file before rotation
+audit_log_maxsize: 100
+# policy file
+audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
+# custom audit policy rules (to replace the default ones)
+# audit_policy_custom_rules: |
+# - level: None
+# users: []
+# verbs: []
+# resources: []
+
+# audit log hostpath
+audit_log_name: audit-logs
+audit_log_hostpath: /var/log/kubernetes/audit
+audit_log_mountpath: "{{ audit_log_path | dirname }}"
+
+# audit policy hostpath
+audit_policy_name: audit-policy
+audit_policy_hostpath: "{{ audit_policy_file | dirname }}"
+audit_policy_mountpath: "{{ audit_policy_hostpath }}"
+
# Limits for kube components
kube_controller_memory_limit: 512M
kube_controller_cpu_limit: 250m
@@ -41,7 +70,7 @@ kube_apiserver_cpu_limit: 800m
kube_apiserver_memory_requests: 256M
kube_apiserver_cpu_requests: 100m
-# Admission control plug-ins
+# Admission control plug-ins (used on Kubernetes 1.9 and below)
kube_apiserver_admission_control:
- Initializers
- NamespaceLifecycle
@@ -52,10 +81,16 @@ kube_apiserver_admission_control:
{%- if kube_version | version_compare('v1.9', '<') -%}
GenericAdmissionWebhook
{%- else -%}
- ValidatingAdmissionWebhook
+ MutatingAdmissionWebhook,ValidatingAdmissionWebhook
{%- endif -%}
- ResourceQuota
+# Admission plugins to enable (Kubernetes 1.10+)
+kube_apiserver_enable_admission_plugins: []
+
+# Admission plugins to disable (Kubernetes 1.10+)
+kube_apiserver_disable_admission_plugins: []
+
# extra runtime config
kube_api_runtime_config:
- admissionregistration.k8s.io/v1alpha1
@@ -73,11 +108,16 @@ kube_oidc_auth: false
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:
## Variables for custom flags
apiserver_custom_flags: []
+# List of the preferred NodeAddressTypes to use for kubelet connections.
+kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP'
+
controller_mgr_custom_flags: []
scheduler_custom_flags: []
@@ -89,3 +129,12 @@ kube_kubeadm_scheduler_extra_args: {}
## Variable for influencing kube-scheduler behaviour
volume_cross_zone_attachment: false
+
+## Encrypting Secret Data at Rest
+kube_encrypt_secret_data: false
+kube_encrypt_token: "{{ lookup('password', credentials_dir + '/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
+# Must be either: aescbc, secretbox or aesgcm
+kube_encryption_algorithm: "aescbc"
+
+# CA used for the API aggregation front proxy; you may want to use ca.pem instead, depending on how your certificates were generated
+kube_front_proxy_ca: "front-proxy-ca.pem"
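A minimal sketch of how these new defaults could be overridden from inventory group_vars to turn the features on (the file path and values are illustrative; the variable names come from the defaults above):

    # e.g. inventory/mycluster/group_vars/k8s-cluster.yml
    kubernetes_audit: true
    audit_log_maxage: 7                  # keep one week of audit logs
    kube_encrypt_secret_data: true
    kube_encryption_algorithm: "aescbc"  # or secretbox / aesgcm
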
diff --git a/roles/kubernetes/master/tasks/encrypt-at-rest.yml b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
new file mode 100644
index 00000000000..2e569b08bb6
--- /dev/null
+++ b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
@@ -0,0 +1,10 @@
+---
+- name: Write secrets for encrypting secret data at rest
+ template:
+ src: secrets_encryption.yaml.j2
+ dest: "{{ kube_config_dir }}/ssl/secrets_encryption.yaml"
+ owner: root
+ group: "{{ kube_cert_group }}"
+ mode: 0640
+ tags:
+ - kube-apiserver
diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
index a9f9383185e..83bfbb22ad5 100644
--- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
@@ -9,4 +9,10 @@
- {src: apiserver-key.pem, dest: apiserver.key}
- {src: ca.pem, dest: ca.crt}
- {src: ca-key.pem, dest: ca.key}
+ - {src: front-proxy-ca.pem, dest: front-proxy-ca.crt}
+ - {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key}
+ - {src: front-proxy-client.pem, dest: front-proxy-client.crt}
+ - {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
+ - {src: service-account-key.pem, dest: sa.pub}
+ - {src: service-account-key.pem, dest: sa.key}
register: kubeadm_copy_old_certs
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index 3fcd04715e2..827154612d5 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -65,14 +65,46 @@
command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
changed_when: false
+- name: Create audit-policy directory
+ file:
+ path: "{{ audit_policy_file | dirname }}"
+ state: directory
+ when: kubernetes_audit|default(false)
+
+- name: Write api audit policy yaml
+ template:
+ src: apiserver-audit-policy.yaml.j2
+ dest: "{{ audit_policy_file }}"
+ when: kubernetes_audit|default(false)
+
+- name: Get the installed kubeadm version
+ command: "{{ bin_dir }}/kubeadm version -o short"
+ register: kubeadm_output
+
+- name: Set kubeadm config API version to v1alpha1
+ set_fact:
+ kubeadmConfig_api_version: v1alpha1
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+
+- name: Default kubeadm config API version to v1alpha2
+ set_fact:
+ kubeadmConfig_api_version: v1alpha2
+ when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')
+
+# Nginx LB is the default. When a load balancer is configured, kubeadm_config_api_fqdn is set and kubeadm uses it as controlPlaneEndpoint.
+- name: Set kubeadm_config_api_fqdn when an external load balancer is used
+ set_fact:
+ kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
+ when: loadbalancer_apiserver is defined
+
- name: kubeadm | Create kubeadm config
template:
- src: kubeadm-config.yaml.j2
- dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
+ src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
+ dest: "{{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
register: kubeadm_config
- name: kubeadm | Initialize first master
- command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+ command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
register: kubeadm_init
# Retry is because upload config sometimes fails
retries: 3
@@ -82,13 +114,14 @@
- name: kubeadm | Upgrade first master
command: >-
- timeout -k 240s 240s
+ timeout -k 600s 600s
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
- --config={{ kube_config_dir }}/kubeadm-config.yaml
+ --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
--ignore-preflight-errors=all
--allow-experimental-upgrades
--allow-release-candidate-upgrades
+ --force
register: kubeadm_upgrade
# Retry is because upload config sometimes fails
retries: 3
@@ -98,7 +131,7 @@
# FIXME(mattymo): remove when https://github.com/kubernetes/kubeadm/issues/433 is fixed
- name: kubeadm | Enable kube-proxy
- command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.yaml"
+ command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
when: inventory_hostname == groups['kube-master']|first
changed_when: false
@@ -128,14 +161,14 @@
content: "{{ item.content | b64decode }}"
owner: root
group: root
- mode: 0700
+ mode: 0600
no_log: true
register: copy_kubeadm_certs
with_items: "{{ kubeadm_certs.results }}"
when: inventory_hostname != groups['kube-master']|first
- name: kubeadm | Init other uninitialized masters
- command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+ command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
register: kubeadm_init
when: inventory_hostname != groups['kube-master']|first and not kubeadm_ca.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
@@ -143,10 +176,10 @@
- name: kubeadm | Upgrade other masters
command: >-
- timeout -k 240s 240s
+ timeout -k 600s 600s
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
- --config={{ kube_config_dir }}/kubeadm-config.yaml
+ --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
--ignore-preflight-errors=all
--allow-experimental-upgrades
--allow-release-candidate-upgrades
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 04ad307fd53..93da9760bcc 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -3,36 +3,25 @@
tags:
- k8s-pre-upgrade
-# upstream bug: https://github.com/kubernetes/kubeadm/issues/441
-- name: Disable kube_basic_auth until kubeadm/441 is fixed
- set_fact:
- kube_basic_auth: false
- when: kubeadm_enabled|bool|default(false)
-
- import_tasks: users-file.yml
when: kube_basic_auth|default(true)
-- name: Compare host kubectl with hyperkube container
- command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/cmp /hyperkube /systembindir/kubectl"
- register: kubectl_task_compare_result
- until: kubectl_task_compare_result.rc in [0,1,2]
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
+- import_tasks: encrypt-at-rest.yml
+ when: kube_encrypt_secret_data
+
+- name: install | Copy kubectl binary from download dir
+ command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubectl"
changed_when: false
- failed_when: "kubectl_task_compare_result.rc not in [0,1,2]"
tags:
- hyperkube
- kubectl
- upgrade
-- name: Copy kubectl from hyperkube container
- command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp /hyperkube /systembindir/kubectl"
- when: kubectl_task_compare_result.rc != 0
- register: kubectl_task_result
- until: kubectl_task_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- changed_when: false
+- name: install | Set kubectl binary permissions
+ file:
+ path: "{{ bin_dir }}/kubectl"
+ mode: "0755"
+ state: file
tags:
- hyperkube
- kubectl
@@ -40,7 +29,7 @@
- name: Install kubectl bash completion
shell: "{{ bin_dir }}/kubectl completion bash >/etc/bash_completion.d/kubectl.sh"
- when: kubectl_task_compare_result.rc != 0 and ansible_os_family in ["Debian","RedHat"]
+ when: ansible_os_family in ["Debian","RedHat"]
tags:
- kubectl
@@ -55,6 +44,12 @@
- kubectl
- upgrade
+- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
+ set_fact:
+ kube_apiserver_admission_control: "{{ kube_apiserver_admission_control | default([]) | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
+ kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
+ when: podsecuritypolicy_enabled
+
- name: Include kubeadm setup if enabled
import_tasks: kubeadm-setup.yml
when: kubeadm_enabled|bool|default(false)
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 3a9fe64174a..56e57b015d9 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -30,4 +30,7 @@
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
- run_once: true
+ register: remove_master_container
+ retries: 4
+ until: remove_master_container.rc == 0
+ delay: 5
\ No newline at end of file
diff --git a/roles/kubernetes/master/tasks/static-pod-setup.yml b/roles/kubernetes/master/tasks/static-pod-setup.yml
index e8308798f78..4b563828917 100644
--- a/roles/kubernetes/master/tasks/static-pod-setup.yml
+++ b/roles/kubernetes/master/tasks/static-pod-setup.yml
@@ -1,4 +1,21 @@
---
+- name: Create audit-policy directory
+ file:
+ path: "{{ audit_policy_file | dirname }}"
+ state: directory
+ tags:
+ - kube-apiserver
+ when: kubernetes_audit|default(false)
+
+- name: Write api audit policy yaml
+ template:
+ src: apiserver-audit-policy.yaml.j2
+ dest: "{{ audit_policy_file }}"
+ notify: Master | Restart apiserver
+ tags:
+ - kube-apiserver
+ when: kubernetes_audit|default(false)
+
- name: Write kube-apiserver manifest
template:
src: manifests/kube-apiserver.manifest.j2
@@ -13,6 +30,7 @@
template:
src: kube-scheduler-policy.yaml.j2
dest: "{{ kube_config_dir }}/kube-scheduler-policy.yaml"
+ notify: Master | Restart kube-scheduler
tags:
- kube-scheduler
diff --git a/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2 b/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2
new file mode 100644
index 00000000000..861ffda717c
--- /dev/null
+++ b/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2
@@ -0,0 +1,129 @@
+apiVersion: audit.k8s.io/v1beta1
+kind: Policy
+rules:
+{% if audit_policy_custom_rules is defined and audit_policy_custom_rules != "" %}
+{{ audit_policy_custom_rules | indent(2, true) }}
+{% else %}
+ # The following requests were manually identified as high-volume and low-risk,
+ # so drop them.
+ - level: None
+ users: ["system:kube-proxy"]
+ verbs: ["watch"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints", "services", "services/status"]
+ - level: None
+ # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
+ # TODO(#46983): Change this to the ingress controller service account.
+ users: ["system:unsecured"]
+ namespaces: ["kube-system"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["configmaps"]
+ - level: None
+ users: ["kubelet"] # legacy kubelet identity
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ userGroups: ["system:nodes"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ users:
+ - system:kube-controller-manager
+ - system:kube-scheduler
+ - system:serviceaccount:kube-system:endpoint-controller
+ verbs: ["get", "update"]
+ namespaces: ["kube-system"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints"]
+ - level: None
+ users: ["system:apiserver"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+ # Don't log HPA fetching metrics.
+ - level: None
+ users:
+ - system:kube-controller-manager
+ verbs: ["get", "list"]
+ resources:
+ - group: "metrics.k8s.io"
+ # Don't log these read-only URLs.
+ - level: None
+ nonResourceURLs:
+ - /healthz*
+ - /version
+ - /swagger*
+ # Don't log events requests.
+ - level: None
+ resources:
+ - group: "" # core
+ resources: ["events"]
+ # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+ # so only log at the Metadata level.
+ - level: Metadata
+ resources:
+ - group: "" # core
+ resources: ["secrets", "configmaps"]
+ - group: authentication.k8s.io
+ resources: ["tokenreviews"]
+ omitStages:
+ - "RequestReceived"
+ # Get responses can be large; skip them.
+ - level: Request
+ verbs: ["get", "list", "watch"]
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for known APIs
+ - level: RequestResponse
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for all other requests.
+ - level: Metadata
+ omitStages:
+ - "RequestReceived"
+{% endif %}
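The default rules above only apply when audit_policy_custom_rules is left unset; a minimal sketch of replacing them wholesale via that variable (illustrative, records only metadata for every request):

    audit_policy_custom_rules: |
      - level: Metadata
        omitStages:
          - "RequestReceived"
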
diff --git a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
index 6616adc6f64..5a13d7a1ea2 100644
--- a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
+++ b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
@@ -2,17 +2,26 @@
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
- {"name" : "PodFitsHostPorts"},
- {"name" : "PodFitsResources"},
+ {"name" : "MaxEBSVolumeCount"},
+ {"name" : "MaxGCEPDVolumeCount"},
+ {"name" : "MaxAzureDiskVolumeCount"},
+ {"name" : "MatchInterPodAffinity"},
{"name" : "NoDiskConflict"},
- {"name" : "MatchNodeSelector"},
- {"name" : "HostName"}
+ {"name" : "GeneralPredicates"},
+ {"name" : "CheckNodeMemoryPressure"},
+ {"name" : "CheckNodeDiskPressure"},
+ {"name" : "CheckNodeCondition"},
+ {"name" : "PodToleratesNodeTaints"},
+ {"name" : "CheckVolumeBinding"}
],
"priorities" : [
+ {"name" : "SelectorSpreadPriority", "weight" : 1},
+ {"name" : "InterPodAffinityPriority", "weight" : 1},
{"name" : "LeastRequestedPriority", "weight" : 1},
{"name" : "BalancedResourceAllocation", "weight" : 1},
- {"name" : "ServiceSpreadingPriority", "weight" : 1},
- {"name" : "EqualPriority", "weight" : 1}
+ {"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
+ {"name" : "NodeAffinityPriority", "weight" : 1},
+ {"name" : "TaintTolerationPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 10
}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
similarity index 57%
rename from roles/kubernetes/master/templates/kubeadm-config.yaml.j2
rename to roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
index ed1cc7add36..fefc5632ee6 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
@@ -1,8 +1,12 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+ controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
bindPort: {{ kube_apiserver_port }}
+{% endif %}
etcd:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
@@ -16,15 +20,24 @@ networking:
serviceSubnet: {{ kube_service_addresses }}
podSubnet: {{ kube_pods_subnet }}
kubernetesVersion: {{ kube_version }}
-{% if cloud_provider is defined and cloud_provider != "gce" %}
+{% if cloud_provider is defined and cloud_provider not in ["gce", "oci"] %}
cloudProvider: {{ cloud_provider }}
{% endif %}
{% if kube_proxy_mode == 'ipvs' %}
kubeProxy:
config:
+{% if kube_version | version_compare('v1.10', '<') %}
featureGates: SupportIPVSProxyMode=true
+{% endif %}
+{% if kube_version | version_compare('v1.10', '>=') %}
+ featureGates:
+ SupportIPVSProxyMode: true
+{% endif %}
mode: ipvs
{% endif %}
+{% if kube_proxy_nodeport_addresses %}
+ nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}]
+{% endif %}
authorizationModes:
{% for mode in authorization_modes %}
- {{ mode }}
@@ -34,9 +47,25 @@ apiServerExtraArgs:
bind-address: {{ kube_apiserver_bind_address }}
insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
insecure-port: "{{ kube_apiserver_insecure_port }}"
+{% if kube_version | version_compare('v1.10', '<') %}
admission-control: {{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
apiserver-count: "{{ kube_apiserver_count }}"
+{% if kube_version | version_compare('v1.9', '>=') %}
+ endpoint-reconciler-type: lease
+{% endif %}
+{% if etcd_events_cluster_enabled %}
+ etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
+{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
+ kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
{% if kube_basic_auth|default(true) %}
basic-auth-file: {{ kube_users_dir }}/known_users.csv
{% endif %}
@@ -52,6 +81,9 @@ apiServerExtraArgs:
{% if kube_oidc_groups_claim is defined %}
oidc-groups-claim: {{ kube_oidc_groups_claim }}
{% endif %}
+{% endif %}
+{% if kube_encrypt_secret_data %}
+ experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
{% endif %}
storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config is defined %}
@@ -59,8 +91,11 @@ apiServerExtraArgs:
{% endif %}
allow-privileged: "true"
{% for key in kube_kubeadm_apiserver_extra_args %}
- {{ key }}: {{ kube_kubeadm_apiserver_extra_args[key] }}
+ {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
{% endfor %}
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
controllerManagerExtraArgs:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
@@ -68,13 +103,22 @@ controllerManagerExtraArgs:
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+controllerManagerExtraVolumes:
+- name: openstackcacert
+ hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+ mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
- {{ key }}: {{ kube_kubeadm_controller_extra_args[key] }}
+ {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
-{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
schedulerExtraArgs:
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
- {{ key }}: {{ kube_kubeadm_scheduler_extra_args[key] }}
+ {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
{% endif %}
apiServerCertSANs:
@@ -83,3 +127,6 @@ apiServerCertSANs:
{% endfor %}
certificatesDir: {{ kube_config_dir }}/ssl
unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+{% if kube_override_hostname|default('') %}
+nodeName: {{ kube_override_hostname }}
+{% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
new file mode 100644
index 00000000000..09dc520b466
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -0,0 +1,150 @@
+apiVersion: kubeadm.k8s.io/v1alpha2
+kind: MasterConfiguration
+api:
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+ controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
+ advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
+ bindPort: {{ kube_apiserver_port }}
+{% endif %}
+etcd:
+ external:
+ endpoints:
+{% for endpoint in etcd_access_addresses.split(',') %}
+ - {{ endpoint }}
+{% endfor %}
+ caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
+ certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
+ keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+networking:
+ dnsDomain: {{ dns_domain }}
+ serviceSubnet: {{ kube_service_addresses }}
+ podSubnet: {{ kube_pods_subnet }}
+kubernetesVersion: {{ kube_version }}
+{% if cloud_provider is defined and cloud_provider != "gce" %}
+cloudProvider: {{ cloud_provider }}
+{% endif %}
+kubeProxy:
+ config:
+ mode: {{ kube_proxy_mode }}
+ hostnameOverride: {{ inventory_hostname }}
+{% if kube_proxy_nodeport_addresses %}
+ nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}]
+{% endif %}
+authorizationModes:
+{% for mode in authorization_modes %}
+- {{ mode }}
+{% endfor %}
+apiServerExtraArgs:
+ bind-address: {{ kube_apiserver_bind_address }}
+ insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
+ insecure-port: "{{ kube_apiserver_insecure_port }}"
+{% if kube_version | version_compare('v1.10', '<') %}
+ admission-control: {{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
+ apiserver-count: "{{ kube_apiserver_count }}"
+{% if kube_version | version_compare('v1.9', '>=') %}
+ endpoint-reconciler-type: lease
+{% endif %}
+{% if etcd_events_cluster_enabled %}
+ etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
+{% endif %}
+ service-node-port-range: {{ kube_apiserver_node_port_range }}
+ kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
+{% if kube_basic_auth|default(true) %}
+ basic-auth-file: {{ kube_users_dir }}/known_users.csv
+{% endif %}
+{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
+ oidc-issuer-url: {{ kube_oidc_url }}
+ oidc-client-id: {{ kube_oidc_client_id }}
+{% if kube_oidc_ca_file is defined %}
+ oidc-ca-file: {{ kube_oidc_ca_file }}
+{% endif %}
+{% if kube_oidc_username_claim is defined %}
+ oidc-username-claim: {{ kube_oidc_username_claim }}
+{% endif %}
+{% if kube_oidc_groups_claim is defined %}
+ oidc-groups-claim: {{ kube_oidc_groups_claim }}
+{% endif %}
+{% endif %}
+{% if kube_encrypt_secret_data %}
+ experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
+{% endif %}
+ storage-backend: {{ kube_apiserver_storage_backend }}
+{% if kube_api_runtime_config is defined %}
+ runtime-config: {{ kube_api_runtime_config | join(',') }}
+{% endif %}
+ allow-privileged: "true"
+{% if kubernetes_audit %}
+ audit-log-path: "{{ audit_log_path }}"
+ audit-log-maxage: "{{ audit_log_maxage }}"
+ audit-log-maxbackup: "{{ audit_log_maxbackups }}"
+ audit-log-maxsize: "{{ audit_log_maxsize }}"
+ audit-policy-file: {{ audit_policy_file }}
+{% endif %}
+{% for key in kube_kubeadm_apiserver_extra_args %}
+ {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
+{% endfor %}
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+controllerManagerExtraArgs:
+ node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
+ node-monitor-period: {{ kube_controller_node_monitor_period }}
+ pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% for key in kube_kubeadm_controller_extra_args %}
+  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
+{% endfor %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+controllerManagerExtraVolumes:
+- name: openstackcacert
+  hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+  mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
+{% if kubernetes_audit %}
+apiServerExtraVolumes:
+- name: {{ audit_policy_name }}
+  hostPath: {{ audit_policy_hostpath }}
+  mountPath: {{ audit_policy_mountpath }}
+{% if audit_log_path != "-" %}
+- name: {{ audit_log_name }}
+  hostPath: {{ audit_log_hostpath }}
+  mountPath: {{ audit_log_mountpath }}
+  writable: true
+{% endif %}
+{% endif %}
+schedulerExtraArgs:
+{% if kube_feature_gates %}
+ feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
+{% for key in kube_kubeadm_scheduler_extra_args %}
+ {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
+{% endfor %}
+{% endif %}
+apiServerCertSANs:
+{% for san in apiserver_sans.split(' ') | unique %}
+ - {{ san }}
+{% endfor %}
+certificatesDir: {{ kube_config_dir }}/ssl
+unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+nodeRegistration:
+{% if kube_override_hostname|default('') %}
+ name: {{ kube_override_hostname }}
+{% endif %}
+ taints:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+{% if container_manager == 'crio' %}
+ criSocket: /var/run/crio/crio.sock
+{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index bee13b4ec08..765b3d1519e 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver
- namespace: {{system_namespace}}
+ namespace: kube-system
labels:
k8s-app: kube-apiserver
kubespray: v2
@@ -28,9 +28,16 @@ spec:
command:
- /hyperkube
- apiserver
+{% if kubernetes_audit %}
+ - --audit-log-path={{ audit_log_path }}
+ - --audit-log-maxage={{ audit_log_maxage }}
+ - --audit-log-maxbackup={{ audit_log_maxbackups }}
+ - --audit-log-maxsize={{ audit_log_maxsize }}
+ - --audit-policy-file={{ audit_policy_file }}
+{% endif %}
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={{ etcd_access_addresses }}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
- --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
{% endif %}
{% if kube_version | version_compare('v1.9', '<') %}
@@ -42,7 +49,19 @@ spec:
- --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
- --bind-address={{ kube_apiserver_bind_address }}
- --apiserver-count={{ kube_apiserver_count }}
+{% if kube_version | version_compare('v1.9', '>=') %}
+ - --endpoint-reconciler-type=lease
+{% endif %}
+{% if kube_version | version_compare('v1.10', '<') %}
- --admission-control={{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+ - --enable-admission-plugins={{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+ - --disable-admission-plugins={{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
- --service-cluster-ip-range={{ kube_service_addresses }}
- --service-node-port-range={{ kube_apiserver_node_port_range }}
- --client-ca-file={{ kube_cert_dir }}/ca.pem
@@ -51,17 +70,16 @@ spec:
- --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
- --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
- --service-account-lookup=true
+ - --kubelet-preferred-address-types={{ kubelet_preferred_address_types }}
{% if kube_basic_auth|default(true) %}
- --basic-auth-file={{ kube_users_dir }}/known_users.csv
{% endif %}
- --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
- --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
- - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
{% if kube_token_auth|default(true) %}
- --token-auth-file={{ kube_token_dir }}/known_tokens.csv
{% endif %}
- - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
+ - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
- --oidc-issuer-url={{ kube_oidc_url }}
- --oidc-client-id={{ kube_oidc_client_id }}
@@ -71,9 +89,15 @@ spec:
{% if kube_oidc_username_claim is defined %}
- --oidc-username-claim={{ kube_oidc_username_claim }}
{% endif %}
+{% if kube_oidc_username_prefix is defined %}
+ - "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
+{% endif %}
{% if kube_oidc_groups_claim is defined %}
- --oidc-groups-claim={{ kube_oidc_groups_claim }}
{% endif %}
+{% if kube_oidc_groups_prefix is defined %}
+ - "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
+{% endif %}
{% endif %}
- --secure-port={{ kube_apiserver_port }}
- --insecure-port={{ kube_apiserver_insecure_port }}
@@ -102,18 +126,29 @@ spec:
{% if authorization_modes %}
- --authorization-mode={{ authorization_modes|join(',') }}
{% endif %}
+{% if kube_encrypt_secret_data %}
+ - --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml
+{% endif %}
{% if kube_feature_gates %}
- --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_version | version_compare('v1.9', '>=') %}
- - --requestheader-client-ca-file={{ kube_cert_dir }}/ca.pem
+ - --requestheader-client-ca-file={{ kube_cert_dir }}/{{ kube_front_proxy_ca }}
+{# FIXME(mattymo): Vault certs do not work with front-proxy-client #}
+{% if cert_management == "vault" %}
+ - --requestheader-allowed-names=
+{% else %}
- --requestheader-allowed-names=front-proxy-client
+{% endif %}
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --enable-aggregator-routing={{ kube_api_aggregator_routing }}
- --proxy-client-cert-file={{ kube_cert_dir }}/front-proxy-client.pem
- --proxy-client-key-file={{ kube_cert_dir }}/front-proxy-client-key.pem
+{% else %}
+ - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
+ - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
{% endif %}
{% if apiserver_custom_flags is string %}
- {{ apiserver_custom_flags }}
@@ -156,6 +191,15 @@ spec:
- mountPath: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
readOnly: true
+{% endif %}
+{% if kubernetes_audit %}
+{% if audit_log_path != "-" %}
+ - mountPath: {{ audit_log_mountpath }}
+ name: {{ audit_log_name }}
+      readOnly: false
+{% endif %}
+ - mountPath: {{ audit_policy_mountpath }}
+ name: {{ audit_policy_name }}
{% endif %}
volumes:
- hostPath:
@@ -177,3 +221,13 @@ spec:
path: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
{% endif %}
+{% if kubernetes_audit %}
+{% if audit_log_path != "-" %}
+ - hostPath:
+ path: {{ audit_log_hostpath }}
+ name: {{ audit_log_name }}
+{% endif %}
+ - hostPath:
+ path: {{ audit_policy_hostpath }}
+ name: {{ audit_policy_name }}
+{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 2b4282a2e3b..0557c4498ac 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
- namespace: {{system_namespace}}
+ namespace: kube-system
labels:
k8s-app: kube-controller-manager
annotations:
@@ -29,7 +29,7 @@ spec:
- controller-manager
- --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
- --leader-elect=true
- - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+ - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem
- --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
- --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem
@@ -48,6 +48,8 @@ spec:
- --cloud-config={{ kube_config_dir }}/cloud_config
{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
- --cloud-provider={{cloud_provider}}
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+        - --cloud-provider=external
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
- --configure-cloud-routes=true
@@ -94,6 +96,11 @@ spec:
- mountPath: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
readOnly: true
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+ - mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+ name: openstackcacert
+ readOnly: true
{% endif %}
volumes:
- name: ssl-certs-host
@@ -115,3 +122,8 @@ spec:
path: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+ - hostPath:
+ path: "{{ kube_config_dir }}/openstack-cacert.pem"
+ name: openstackcacert
+{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index b13fc7fa32d..fee223eecfc 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: kube-scheduler
annotations:
@@ -29,6 +29,7 @@ spec:
- --leader-elect=true
- --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
{% if volume_cross_zone_attachment %}
+ - --use-legacy-policy-config
- --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
{% endif %}
- --profiling=false
diff --git a/roles/kubernetes/master/templates/secrets_encryption.yaml.j2 b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2
new file mode 100644
index 00000000000..84c6a4ea808
--- /dev/null
+++ b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2
@@ -0,0 +1,11 @@
+kind: EncryptionConfig
+apiVersion: v1
+resources:
+ - resources:
+ - secrets
+ providers:
+ - {{ kube_encryption_algorithm }}:
+ keys:
+ - name: key
+ secret: {{ kube_encrypt_token | b64encode }}
+    - identity: {}
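With the defaults (aescbc and a generated 32-character token), the rendered file looks roughly like this; the secret below is a made-up placeholder, not a real key:

    kind: EncryptionConfig
    apiVersion: v1
    resources:
      - resources:
        - secrets
        providers:
        - aescbc:
            keys:
            - name: key
              secret: cGxhY2Vob2xkZXItbm90LWEtcmVhbC1rZXktMDEyMzQ1Njc=
        - identity: {}
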
diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml
deleted file mode 100644
index a5eba4f2beb..00000000000
--- a/roles/kubernetes/master/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-namespace_kubesystem:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: "{{system_namespace}}"
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 2cbf56e1d7f..143a4192821 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -14,6 +14,9 @@ kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"
+# bind address for kube-proxy health check
+kube_proxy_healthz_bind_address: "127.0.0.1"
+
# Can be ipvs, iptables
kube_proxy_mode: iptables
@@ -28,6 +31,11 @@ kubelet_cgroups_per_qos: true
# Set to empty to avoid cgroup creation
kubelet_enforce_node_allocatable: "\"\""
+# Set runtime cgroups
+kubelet_runtime_cgroups: "/systemd/system.slice"
+# Set kubelet cgroups
+kubelet_kubelet_cgroups: "/systemd/system.slice"
+
# Set false to enable sharing a pid namespace between containers in a pod.
# Note that PID namespace sharing requires docker >= 1.13.1.
kubelet_disable_shared_pid: true
@@ -71,6 +79,10 @@ kube_apiserver_node_port_range: "30000-32767"
kubelet_load_modules: false
+# Maximum number of pods that can run on a single node
+# the default matches the kubelet's built-in default
+kubelet_max_pods: 110
+
## Support custom flags to be passed to kubelet
kubelet_custom_flags: []
@@ -92,3 +104,50 @@ kube_cadvisor_port: 0
# The read-only port for the Kubelet to serve on with no authentication/authorization.
kube_read_only_port: 0
+
+# File that kubespray-managed sysctl settings are written to
+sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+# For the OpenStack integration, kubelet needs credentials to access
+# OpenStack APIs such as Nova and Cinder. By default these values are
+# read from the environment.
+openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
+openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}"
+openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
+openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
+
+# For the vSphere integration, kubelet needs credentials to access
+# vSphere APIs.
+# Documentation regarding these values can be found at
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
+vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
+vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
+vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
+vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
+vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
+vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
+vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
+vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
+
+vsphere_scsi_controller_type: pvscsi
+# vsphere_public_network is the name of the network the VMs are joined to
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
+
+## When Azure is used, you also need to set the following variables.
+## See docs/azure.md for details on how to obtain these values.
+# azure_tenant_id:
+# azure_subscription_id:
+# azure_aad_client_id:
+# azure_aad_client_secret:
+# azure_resource_group:
+# azure_location:
+# azure_subnet_name:
+# azure_security_group_name:
+# azure_vnet_name:
+# azure_route_table_name:
diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml
similarity index 89%
rename from roles/kubernetes/preinstall/tasks/azure-credential-check.yml
rename to roles/kubernetes/node/tasks/azure-credential-check.yml
index fa2d82fd221..68cbaa16055 100644
--- a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/azure-credential-check.yml
@@ -44,6 +44,11 @@
msg: "azure_vnet_name is missing"
when: azure_vnet_name is not defined or azure_vnet_name == ""
+- name: check azure_vnet_resource_group value
+ fail:
+ msg: "azure_vnet_resource_group is missing"
+ when: azure_vnet_resource_group is not defined or azure_vnet_resource_group == ""
+
- name: check azure_route_table_name value
fail:
msg: "azure_route_table_name is missing"
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index 63a529aceba..fe4b6c9c808 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -1,19 +1,4 @@
---
-- name: install | Set SSL CA directories
- set_fact:
- ssl_ca_dirs: "[
- {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
- '/usr/share/ca-certificates',
- {% elif ansible_os_family == 'RedHat' -%}
- '/etc/pki/tls',
- '/etc/pki/ca-trust',
- {% elif ansible_os_family == 'Debian' -%}
- '/usr/share/ca-certificates',
- {% endif -%}
- ]"
- tags:
- - facts
-
- name: Set kubelet deployment to host if kubeadm is enabled
set_fact:
kubelet_deployment_type: host
diff --git a/roles/kubernetes/node/tasks/install_host.yml b/roles/kubernetes/node/tasks/install_host.yml
index 7fcb4a01d0d..3ca92384805 100644
--- a/roles/kubernetes/node/tasks/install_host.yml
+++ b/roles/kubernetes/node/tasks/install_host.yml
@@ -1,23 +1,17 @@
---
-- name: install | Compare host kubelet with hyperkube container
- command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/cmp /hyperkube /systembindir/kubelet"
- register: kubelet_task_compare_result
- until: kubelet_task_compare_result.rc in [0,1,2]
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
+
+- name: install | Copy kubelet binary from download dir
+ command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubelet"
changed_when: false
- failed_when: "kubelet_task_compare_result.rc not in [0,1,2]"
tags:
- hyperkube
- upgrade
-- name: install | Copy kubelet from hyperkube container
- command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -f /hyperkube /systembindir/kubelet"
- when: kubelet_task_compare_result.rc != 0
- register: kubelet_task_result
- until: kubelet_task_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
+- name: install | Set kubelet binary permissions
+ file:
+ path: "{{ bin_dir }}/kubelet"
+ mode: "0755"
+ state: file
tags:
- hyperkube
- upgrade
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 4d5fa5df541..5b633e76b1a 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -61,6 +61,7 @@
name: net.ipv4.ip_local_reserved_ports
value: "{{ kube_apiserver_node_port_range }}"
sysctl_set: yes
+ sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes
when: kube_apiserver_node_port_range is defined
@@ -96,6 +97,7 @@
sysctl:
name: "{{ item }}"
state: present
+ sysctl_file: "{{ sysctl_file_path }}"
value: 1
reload: yes
when: sysctl_bridge_nf_call_iptables.rc == 0
@@ -108,13 +110,26 @@
modprobe:
name: "{{ item }}"
state: present
- when: kube_proxy_mode == 'ipvs'
with_items:
- ip_vs
- ip_vs_rr
- ip_vs_wrr
- ip_vs_sh
- nf_conntrack_ipv4
+ when: kube_proxy_mode == 'ipvs'
+ tags:
+ - kube-proxy
+
+- name: Persist ip_vs modules
+ copy:
+ dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+ content: |
+ ip_vs
+ ip_vs_rr
+ ip_vs_wrr
+ ip_vs_sh
+ nf_conntrack_ipv4
+ when: kube_proxy_mode == 'ipvs'
tags:
- kube-proxy
@@ -134,6 +149,41 @@
tags:
- kube-proxy
+- include_tasks: "{{ cloud_provider }}-credential-check.yml"
+ when:
+ - cloud_provider is defined
+ - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+ tags:
+ - cloud-provider
+ - facts
+
+- name: Write cacert file
+ copy:
+ src: "{{ openstack_cacert }}"
+ dest: "{{ kube_config_dir }}/openstack-cacert.pem"
+ group: "{{ kube_cert_group }}"
+ mode: 0640
+ when:
+ - inventory_hostname in groups['k8s-cluster']
+ - cloud_provider is defined
+ - cloud_provider == 'openstack'
+ - openstack_cacert is defined
+ tags:
+ - cloud-provider
+
+- name: Write cloud-config
+ template:
+ src: "{{ cloud_provider }}-cloud-config.j2"
+ dest: "{{ kube_config_dir }}/cloud_config"
+ group: "{{ kube_cert_group }}"
+ mode: 0640
+ when:
+ - cloud_provider is defined
+ - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+ notify: restart kubelet
+ tags:
+ - cloud-provider
+
# reload-systemd
- meta: flush_handlers
diff --git a/roles/kubernetes/preinstall/tasks/openstack-credential-check.yml b/roles/kubernetes/node/tasks/openstack-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/openstack-credential-check.yml
rename to roles/kubernetes/node/tasks/openstack-credential-check.yml
diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/vsphere-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml
rename to roles/kubernetes/node/tasks/vsphere-credential-check.yml
diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/node/templates/azure-cloud-config.j2
similarity index 88%
rename from roles/kubernetes/preinstall/templates/azure-cloud-config.j2
rename to roles/kubernetes/node/templates/azure-cloud-config.j2
index 139a06cc1f4..d33c044b235 100644
--- a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2
+++ b/roles/kubernetes/node/templates/azure-cloud-config.j2
@@ -8,5 +8,6 @@
"subnetName": "{{ azure_subnet_name }}",
"securityGroupName": "{{ azure_security_group_name }}",
"vnetName": "{{ azure_vnet_name }}",
+ "vnetResourceGroup": "{{ azure_vnet_resource_group }}",
"routeTableName": "{{ azure_route_table_name }}"
-}
\ No newline at end of file
+}
diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2
index 28a109ec15f..58e54560da9 100644
--- a/roles/kubernetes/node/templates/kubelet-container.j2
+++ b/roles/kubernetes/node/templates/kubelet-container.j2
@@ -5,8 +5,8 @@
--privileged \
--name=kubelet \
--restart=on-failure:5 \
- --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
- --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
+ --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
+ --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '') }} \
-v /dev:/dev:rw \
-v /etc/cni:/etc/cni:ro \
-v /opt/cni:/opt/cni:ro \
@@ -22,13 +22,20 @@
-v {{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw \
-v /var/log:/var/log:rw \
-v /var/lib/kubelet:/var/lib/kubelet:shared \
+ -v /var/lib/calico:/var/lib/calico:shared \
-v /var/lib/cni:/var/lib/cni:shared \
-v /var/run:/var/run:rw \
+ {# we can run into issues with double mounting /var/lib/kubelet #}
+  {# TODO: find a cleaner way to avoid the double mount #}
+ {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
+ -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
+ {% endif -%}
+ {% if local_volume_provisioner_enabled -%}
+ -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:rw \
+ -v {{ local_volume_provisioner_mount_dir }}:{{ local_volume_provisioner_mount_dir }}:rw \
+ {% endif %}
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \
-{% if local_volume_provisioner_enabled == true %}
- -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:shared \
-{% endif %}
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
./hyperkube kubelet \
"$@"
diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2
index fdbdb89692c..c20cf797fab 100644
--- a/roles/kubernetes/node/templates/kubelet.docker.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2
@@ -5,6 +5,7 @@ After=docker.service
Wants=docker.socket
[Service]
+User=root
EnvironmentFile={{kube_config_dir}}/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
@@ -22,9 +23,7 @@ ExecStart={{ bin_dir }}/kubelet \
Restart=always
RestartSec=10s
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
-{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
ExecReload={{ docker_bin_dir }}/docker restart kubelet
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index 78ba51f70f7..3584cfcf51e 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -5,10 +5,9 @@ After=docker.service
Wants=docker.socket
[Service]
+User=root
EnvironmentFile=-{{kube_config_dir}}/kubelet.env
-{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index c8cf40e7b51..7597fd9ae44 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -1,4 +1,4 @@
-### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/
### All upstream values should be present in this file
# logging to stderr means we get it in the systemd journal
@@ -23,16 +23,24 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% if kubelet_authentication_token_webhook %}
--authentication-token-webhook \
{% endif %}
+{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
+{% endif %}
--client-ca-file={{ kube_cert_dir }}/ca.crt \
--pod-manifest-path={{ kube_manifest_dir }} \
--cadvisor-port={{ kube_cadvisor_port }} \
{# end kubeadm specific settings #}
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
---kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
+--max-pods={{ kubelet_max_pods }} \
+{% if container_manager == 'docker' %}
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
+{% endif %}
+{% if container_manager == 'crio' %}
+--container-runtime=remote \
+--container-runtime-endpoint=/var/run/crio/crio.sock \
+{% endif %}
--anonymous-auth=false \
--read-only-port={{ kube_read_only_port }} \
{% if kube_version | version_compare('v1.8', '<') %}
@@ -40,6 +48,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% else %}
--fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% endif %}
+--runtime-cgroups={{ kubelet_runtime_cgroups }} --kubelet-cgroups={{ kubelet_kubelet_cgroups }} \
{% endset %}
{# Node reserved CPU/memory #}
@@ -50,8 +59,10 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# DNS settings for kubelet #}
-{% if dns_mode == 'kubedns' %}
+{% if dns_mode in ['kubedns', 'coredns'] %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
+{% elif dns_mode == 'coredns_dual' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
{% elif dns_mode == 'manual' %}
@@ -61,8 +72,25 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
+{# Kubelet node labels #}
+{% set role_node_labels = [] %}
+{% if inventory_hostname in groups['kube-master'] %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
+{% if not standalone_kubelet|bool %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% endif %}
+{% else %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% endif %}
+{% set inventory_node_labels = [] %}
+{% if node_labels is defined %}
+{% for labelname, labelvalue in node_labels.iteritems() %}
+{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
+{% endfor %}
+{% endif %}
+{% set all_node_labels = role_node_labels + inventory_node_labels %}
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
@@ -72,8 +100,10 @@ KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kuben
KUBE_ALLOW_PRIV="--allow-privileged=true"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider == "aws" %}
+{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider=external"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index 80825fab33f..7f28f87b622 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -4,6 +4,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Wants=network.target
[Service]
+User=root
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
@@ -11,16 +12,12 @@ LimitNOFILE=40000
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
-
-{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
{% if kubelet_load_modules == true %}
- --volume modprobe,kind=host,source=/usr/sbin/modprobe \
--volume lib-modules,kind=host,source=/lib/modules \
{% endif %}
--volume os-release,kind=host,source=/etc/os-release,readOnly=true \
@@ -36,23 +33,34 @@ ExecStart=/usr/bin/rkt run \
--volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
--volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
--volume var-log,kind=host,source=/var/log \
-{% if local_volume_provisioner_enabled == true %}
- --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false,recursive=true \
-{% endif %}
{% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"] %}
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
-{% if kubelet_flexvolumes_plugins_dir is defined %}
+{% endif %}
+{% if kube_network_plugin in ["calico", "canal"] %}
+ --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=false \
+{% endif %}
+{# we can run into issues with double mounting /var/lib/kubelet #}
+{# surely there's a better way to do this #}
+{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
+{% endif -%}
+{% if local_volume_provisioner_enabled %}
+ --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \
+{# Not pretty, but needed to avoid double mount #}
+{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
+ --volume local-volume-provisioner-mount-dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \
+{% endif %}
{% endif %}
{% if kubelet_load_modules == true %}
- --mount volume=modprobe,target=/usr/sbin/modprobe \
--mount volume=lib-modules,target=/lib/modules \
{% endif %}
--mount volume=etc-cni,target=/etc/cni \
--mount volume=opt-cni,target=/opt/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
+{% if kube_network_plugin in ["calico", "canal"] %}
+ --mount volume=var-lib-calico,target=/var/lib/calico \
{% endif %}
--mount volume=os-release,target=/etc/os-release \
--mount volume=dns,target=/etc/resolv.conf \
@@ -67,11 +75,17 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \
-{% if local_volume_provisioner_enabled == true %}
+{# we can run into issues with double mounting /var/lib/kubelet #}
+{# surely there's a better way to do this #}
+{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
+ --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
+{% endif -%}
+{% if local_volume_provisioner_enabled %}
--mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \
+{# Not pretty, but needed to avoid double mount #}
+{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
+ --mount volume=local-volume-provisioner-mount-dir,target={{ local_volume_provisioner_mount_dir }} \
{% endif %}
-{% if kubelet_flexvolumes_plugins_dir is defined %}
- --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
{% endif %}
--stage1-from-dir=stage1-fly.aci \
{% if kube_hyperkube_image_repo == "docker" %}
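Editor's note: the double-mount guard above is only a substring containment test — if the flexvolume plugin directory already sits under the recursively mounted /var/lib/kubelet tree, the extra rkt volume is skipped. A small sketch of that test with an assumed plugin path:

```yaml
# Hypothetical playbook: the same containment check the rkt unit template uses
- hosts: localhost
  gather_facts: false
  vars:
    kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volume-plugins   # assumed value
  tasks:
    - name: Decide whether a separate flexvolumes mount is needed
      debug:
        msg: "separate mount needed: {{ '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir }}"
```

The local volume provisioner base and mount directories get the same treatment, which is why the template checks containment in both directions before adding the second volume.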
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index 8e05e025349..96049200667 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -15,7 +15,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
--cadvisor-port={{ kube_cadvisor_port }} \
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
+{% if container_manager == 'docker' %}
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
+{% endif %}
--client-ca-file={{ kube_cert_dir }}/ca.pem \
--tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
--tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
@@ -26,8 +28,13 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% if kube_version | version_compare('v1.7', '<') %}
--enable-cri={{ kubelet_enable_cri }} \
{% endif %}
+{% if container_manager == 'crio' %}
+--container-runtime=remote \
+--container-runtime-endpoint=/var/run/crio/crio.sock \
+{% endif %}
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
--cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
+--max-pods={{ kubelet_max_pods }} \
{% if kube_version | version_compare('v1.8', '<') %}
--experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% else %}
@@ -39,11 +46,16 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
+{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+--cgroup-driver=systemd \
+{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
{# DNS settings for kubelet #}
-{% if dns_mode == 'kubedns' %}
+{% if dns_mode in ['kubedns', 'coredns'] %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
+{% elif dns_mode == 'coredns_dual' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
{% elif dns_mode == 'manual' %}
@@ -79,16 +91,37 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# Kubelet node labels #}
+{% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% if not standalone_kubelet|bool %}
-{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% else %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
+{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% endif %}
+{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
+{% if inventory_hostname in nvidia_gpu_nodes %}
+{% set dummy = role_node_labels.append('nvidia.com/gpu=true') %}
+{% endif %}
+{% endif %}
+{% set inventory_node_labels = [] %}
+{% if node_labels is defined %}
+{% for labelname, labelvalue in node_labels.iteritems() %}
+{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
+{% endfor %}
{% endif %}
+{% set all_node_labels = role_node_labels + inventory_node_labels %}
+
+{# Kubelet node taints for gpu #}
+{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
+{% if inventory_hostname in nvidia_gpu_nodes %}
+{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=nvidia.com/gpu=:NoSchedule{% endset %}
+{% endif %}
+{% endif %}
+
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
@@ -97,16 +130,18 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
-{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
-{% endif %}
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider == "aws" %}
+{% elif cloud_provider is defined and cloud_provider in ["azure"] %}
+KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config --azure-container-registry-config={{ kube_config_dir }}/cloud_config"
+{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider=external"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 7c8e0062d22..ece9be10cde 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
- namespace: {{system_namespace}}
+ namespace: kube-system
labels:
k8s-app: kube-proxy
annotations:
@@ -12,6 +12,9 @@ spec:
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirst
{% endif %}
+ # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
containers:
- name: kube-proxy
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
@@ -42,13 +45,17 @@ spec:
- --cluster-cidr={{ kube_pods_subnet }}
- --proxy-mode={{ kube_proxy_mode }}
- --oom-score-adj=-998
- - --healthz-bind-address=127.0.0.1
+ - --healthz-bind-address={{ kube_proxy_healthz_bind_address }}
+{% if kube_proxy_nodeport_addresses %}
+ - --nodeport-addresses={{ kube_proxy_nodeport_addresses_cidr }}
+{% endif %}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
- --masquerade-all
{% elif kube_proxy_mode == 'ipvs' %}
- --masquerade-all
+{% if kube_version | version_compare('v1.10', '<') %}
- --feature-gates=SupportIPVSProxyMode=true
- - --proxy-mode=ipvs
+{% endif %}
- --ipvs-min-sync-period=5s
- --ipvs-sync-period=5s
- --ipvs-scheduler=rr
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
index 2d566cad10a..756eba7ee9c 100644
--- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -2,11 +2,14 @@ apiVersion: v1
kind: Pod
metadata:
name: nginx-proxy
- namespace: {{system_namespace}}
+ namespace: kube-system
labels:
k8s-app: kube-nginx
spec:
hostNetwork: true
+ # Without this nodeSelector the pod could be scheduled onto Windows nodes when the cluster has any
+ nodeSelector:
+ beta.kubernetes.io/os: linux
containers:
- name: nginx-proxy
image: {{ nginx_image_repo }}:{{ nginx_image_tag }}
diff --git a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2
similarity index 83%
rename from roles/kubernetes/preinstall/templates/openstack-cloud-config.j2
rename to roles/kubernetes/node/templates/openstack-cloud-config.j2
index d9934be210b..b6814b51b73 100644
--- a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2
+++ b/roles/kubernetes/node/templates/openstack-cloud-config.j2
@@ -4,11 +4,17 @@ username="{{ openstack_username }}"
password="{{ openstack_password }}"
region="{{ openstack_region }}"
tenant-id="{{ openstack_tenant_id }}"
+{% if openstack_tenant_name is defined and openstack_tenant_name != "" %}
+tenant-name="{{ openstack_tenant_name }}"
+{% endif %}
{% if openstack_domain_name is defined and openstack_domain_name != "" %}
domain-name="{{ openstack_domain_name }}"
{% elif openstack_domain_id is defined and openstack_domain_id != "" %}
domain-id ="{{ openstack_domain_id }}"
{% endif %}
+{% if openstack_cacert is defined and openstack_cacert != "" %}
+ca-file="{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
{% if openstack_blockstorage_version is defined %}
[BlockStorage]
diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
similarity index 91%
rename from roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
rename to roles/kubernetes/node/templates/vsphere-cloud-config.j2
index d82d72bf82f..1383f78bbbc 100644
--- a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
+++ b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
@@ -14,6 +14,9 @@ server = "{{ vsphere_vcenter_ip }}"
{% if vsphere_vm_uuid is defined and vsphere_vm_uuid != "" %}
vm-uuid = "{{ vsphere_vm_uuid }}"
{% endif %}
+{% if vsphere_vm_name is defined and vsphere_vm_name != "" %}
+vm-name = "{{ vsphere_vm_name }}"
+{% endif %}
{% endif %}
{% if kube_version | version_compare('v1.9.2', '>=') %}
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 295f101789d..30ad182b399 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -8,7 +8,7 @@ epel_enabled: false
common_required_pkgs:
- python-httplib2
- - openssl
+ - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1', 'openssl') }}"
- curl
- rsync
- bash-completion
@@ -23,35 +23,6 @@ disable_ipv6_dns: false
kube_cert_group: kube-cert
kube_config_dir: /etc/kubernetes
-# For the openstack integration kubelet will need credentials to access
-# openstack apis like nova and cinder. Per default this values will be
-# read from the environment.
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
-openstack_username: "{{ lookup('env','OS_USERNAME') }}"
-openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
-openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
-openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"
-openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
-
-# For the vsphere integration, kubelet will need credentials to access
-# vsphere apis
-# Documentation regarding these values can be found
-# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
-vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
-vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
-vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
-vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
-vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
-vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
-vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
-vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
-vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
-
-vsphere_scsi_controller_type: pvscsi
-# vsphere_public_network is name of the network the VMs are joined to
-vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
-
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
@@ -60,3 +31,18 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
populate_inventory_to_hosts_file: true
preinstall_selinux_state: permissive
+
+sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+etc_hosts_localhost_entries:
+ 127.0.0.1:
+ expected:
+ - localhost
+ - localhost.localdomain
+ ::1:
+ expected:
+ - localhost6
+ - localhost6.localdomain
+ unexpected:
+ - localhost
+ - localhost.localdomain
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
new file mode 100644
index 00000000000..345e7582518
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -0,0 +1,10 @@
+---
+- name: Remove swapfile from /etc/fstab
+ mount:
+ name: swap
+ fstype: swap
+ state: absent
+
+- name: Disable swap
+ command: swapoff -a
+ when: ansible_swaptotal_mb > 0
diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
similarity index 61%
rename from roles/kubernetes/preinstall/tasks/verify-settings.yml
rename to roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 8f0a2e85473..8c3cec92e3d 100644
--- a/roles/kubernetes/preinstall/tasks/verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -12,18 +12,18 @@
- name: Stop if unknown OS
assert:
- that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS']
+ that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed']
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if unknown network plugin
assert:
- that: network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud']
- when: network_plugin is defined
+ that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'contiv']
+ when: kube_network_plugin is defined
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if incompatible network plugin and cloudprovider
assert:
- that: network_plugin != 'calico'
+ that: kube_network_plugin != 'calico'
msg: "Azure and Calico are not compatible. See https://github.com/projectcalico/calicoctl/issues/949 for details."
when: cloud_provider is defined and cloud_provider == 'azure'
ignore_errors: "{{ ignore_assert_errors }}"
@@ -61,6 +61,20 @@
ignore_errors: "{{ ignore_assert_errors }}"
when: inventory_hostname in groups['kube-node']
+# This assertion errs on the safe side: a node can in fact run more pods than its
+# CIDR range has addresses for, because pods in the host network namespace need none.
+# Their number cannot be known at provisioning time, so they are left out of the
+# calculation to keep the guarantee.
+# NOTICE: the check ignores the IPv6 case entirely
+- name: Guarantee that enough network address space is available for all pods
+ assert:
+ that: "{{ kubelet_max_pods | default(110) <= (2 ** (32 - kube_network_node_prefix)) - 2 }}"
+ msg: "Do not schedule more pods on a node than inet addresses are available."
+ ignore_errors: "{{ ignore_assert_errors }}"
+ when:
+ - inventory_hostname in groups['kube-node']
+ - kube_network_node_prefix is defined
+
- name: Stop if ip var does not match local ips
assert:
that: ip in ansible_all_ipv4_addresses
@@ -72,16 +86,16 @@
when: access_ip is defined
ignore_errors: "{{ ignore_assert_errors }}"
-- name: Stop if swap enabled
+- name: Stop if RBAC is not enabled when dashboard is enabled
assert:
- that: ansible_swaptotal_mb == 0
- when: kubelet_fail_swap_on|default(true)
+ that: rbac_enabled
+ when: dashboard_enabled
ignore_errors: "{{ ignore_assert_errors }}"
-- name: Stop if RBAC is not enabled when dashboard is enabled
+- name: Stop if RBAC is not enabled when OCI cloud controller is enabled
assert:
that: rbac_enabled
- when: dashboard_enabled
+ when: cloud_provider is defined and cloud_provider == "oci"
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if RBAC and anonymous-auth are not enabled when insecure port is disabled
@@ -94,4 +108,22 @@
assert:
that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=')
when: kube_network_plugin == 'cilium'
- ignore_errors: "{{ ignore_assert_errors }}"
\ No newline at end of file
+ ignore_errors: "{{ ignore_assert_errors }}"
+
+- name: Stop if bad hostname
+ assert:
+ that: inventory_hostname | match("[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
+ msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
+ ignore_errors: "{{ ignore_assert_errors }}"
+
+- name: check cloud_provider value
+ assert:
+ that: cloud_provider in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external']
+ msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', or external"
+ when:
+ - cloud_provider is defined
+
+ ignore_errors: "{{ ignore_assert_errors }}"
+ tags:
+ - cloud-provider
+ - facts
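Editor's note: the new max-pods assertion bounds the kubelet limit by the usable addresses in a node's pod CIDR, 2^(32 - prefix) - 2. A standalone sketch with assumed values — a /24 per-node prefix leaves 254 addresses, so the default of 110 pods passes:

```yaml
# Hypothetical playbook: the arithmetic behind the max-pods assertion
- hosts: localhost
  gather_facts: false
  vars:
    kube_network_node_prefix: 24   # /24 per node -> 2^(32-24) - 2 = 254 usable addresses
    kubelet_max_pods: 110
  tasks:
    - name: Guarantee that enough network address space is available for all pods
      assert:
        that: kubelet_max_pods | int <= (2 ** (32 - kube_network_node_prefix | int)) - 2
```

Raising kubelet_max_pods past 254 would then require widening the per-node prefix, for example kube_network_node_prefix: 23.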
diff --git a/roles/kubernetes/preinstall/tasks/0030-pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/0030-pre_upgrade.yml
new file mode 100644
index 00000000000..9bace42dc6c
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0030-pre_upgrade.yml
@@ -0,0 +1,25 @@
+---
+- name: "Pre-upgrade | check if old credential dir exists"
+ stat:
+ path: "{{ inventory_dir }}/../credentials"
+ delegate_to: localhost
+ register: old_credential_dir
+ become: no
+
+- name: "Pre-upgrade | check if new credential dir exists"
+ stat:
+ path: "{{ inventory_dir }}/credentials"
+ delegate_to: localhost
+ register: new_credential_dir
+ become: no
+ when: old_credential_dir.stat.exists
+
+- name: "Pre-upgrade | move data from old credential dir to new"
+ command: mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
+ args:
+ creates: "{{ inventory_dir }}/credentials"
+ delegate_to: localhost
+ become: no
+ when:
+ - old_credential_dir.stat.exists
+ - not new_credential_dir.stat.exists
diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
similarity index 69%
rename from roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
rename to roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index fdc46125e97..1fddb7de508 100644
--- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -1,4 +1,37 @@
---
+- set_fact:
+ architecture_groups:
+ x86_64: amd64
+ aarch64: arm64
+
+- name: ansible_architecture_rename
+ set_fact:
+ host_architecture: >-
+ {%- if ansible_architecture in architecture_groups -%}
+ {{architecture_groups[ansible_architecture]}}
+ {%- else -%}
+ {{ansible_architecture}}
+ {% endif %}
+
+- name: Force binaries directory for Container Linux by CoreOS
+ set_fact:
+ bin_dir: "/opt/bin"
+ when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+ tags:
+ - facts
+
+- name: check if atomic host
+ stat:
+ path: /run/ostree-booted
+ register: ostree
+
+- set_fact:
+ is_atomic: "{{ ostree.stat.exists }}"
+
+- set_fact:
+ kube_cert_group: "kube"
+ when: is_atomic
+
- name: check resolvconf
shell: which resolvconf
register: resolvconf
@@ -93,10 +126,12 @@
- name: pick dnsmasq cluster IP or default resolver
set_fact:
dnsmasq_server: |-
- {%- if dns_mode == 'kubedns' and not dns_early|bool -%}
+ {%- if dns_mode in ['kubedns', 'coredns'] and not dns_early|bool -%}
{{ [ skydns_server ] + upstream_dns_servers|default([]) }}
+ {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%}
+ {{ [ skydns_server ] + [ skydns_server_secondary ] + upstream_dns_servers|default([]) }}
{%- elif dns_mode == 'manual' and not dns_early|bool -%}
- {{ [ manual_dns_server ] + upstream_dns_servers|default([]) }}
+ {{ ( manual_dns_server.split(',') | list) + upstream_dns_servers|default([]) }}
{%- elif dns_early|bool -%}
{{ upstream_dns_servers|default([]) }}
{%- else -%}
@@ -109,3 +144,17 @@
nameserver {{( dnsmasq_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ')}}
supersede_nameserver:
supersede domain-name-servers {{( dnsmasq_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
+
+- name: gather os specific variables
+ include_vars: "{{ item }}"
+ with_first_found:
+ - files:
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
+ - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
+ - "{{ ansible_distribution|lower }}.yml"
+ - "{{ ansible_os_family|lower }}.yml"
+ - defaults.yml
+ paths:
+ - ../vars
+ skip: true
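Editor's note: the architecture rename above maps uname-style values to the names Kubernetes downloads use, falling back to the raw value for anything unlisted. A localhost sketch of that lookup (written with a default filter rather than the if/else block in the task):

```yaml
# Hypothetical playbook: preview the host_architecture fact the role would set
- hosts: localhost
  gather_facts: true
  vars:
    architecture_groups:
      x86_64: amd64
      aarch64: arm64
  tasks:
    - name: Show the mapped architecture
      debug:
        msg: "host_architecture={{ architecture_groups[ansible_architecture] | default(ansible_architecture) }}"
```

The same file now also owns the OS-specific include_vars, so on Ubuntu 16.04, for instance, the first existing file among ubuntu-16.04.yml, ubuntu-xenial.yml, ubuntu-16.yml, ubuntu.yml, debian.yml and defaults.yml is loaded.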
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
new file mode 100644
index 00000000000..11f8e00d453
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -0,0 +1,59 @@
+---
+- name: Create kubernetes directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: kube
+ when: inventory_hostname in groups['k8s-cluster']
+ become: true
+ tags:
+ - kubelet
+ - k8s-secrets
+ - kube-controller-manager
+ - kube-apiserver
+ - bootstrap-os
+ - apps
+ - network
+ - master
+ - node
+ with_items:
+ - "{{bin_dir}}"
+ - "{{ kube_config_dir }}"
+ - "{{ kube_config_dir }}/ssl"
+ - "{{ kube_manifest_dir }}"
+ - "{{ kube_script_dir }}"
+
+- name: Create cni directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: kube
+ with_items:
+ - "/etc/cni/net.d"
+ - "/opt/cni/bin"
+ - "/var/lib/calico"
+ when:
+ - kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"]
+ - inventory_hostname in groups['k8s-cluster']
+ tags:
+ - network
+ - cilium
+ - calico
+ - weave
+ - canal
+ - contiv
+ - bootstrap-os
+
+- name: Create local volume provisioner directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: kube
+ with_items:
+ - "{{ local_volume_provisioner_base_dir }}"
+ - "{{ local_volume_provisioner_mount_dir }}"
+ when:
+ - inventory_hostname in groups['k8s-cluster']
+ - local_volume_provisioner_enabled
+ tags:
+ - persistent_volumes
diff --git a/roles/kubernetes/preinstall/tasks/resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/resolvconf.yml
rename to roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
new file mode 100644
index 00000000000..2d1137cbfd5
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -0,0 +1,95 @@
+---
+- name: Update package management cache (YUM)
+ yum:
+ update_cache: yes
+ name: '*'
+ register: yum_task_result
+ until: yum_task_result|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - ansible_pkg_mgr == 'yum'
+ - ansible_distribution != 'RedHat'
+ - not is_atomic
+
+- name: Expire management cache (YUM) before update - RedHat
+ shell: yum clean expire-cache
+ register: expire_cache_output
+ until: expire_cache_output|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - ansible_pkg_mgr == 'yum'
+ - ansible_distribution == 'RedHat'
+ - not is_atomic
+ tags: bootstrap-os
+
+- name: Update package management cache (YUM) - Redhat
+ shell: yum makecache
+ register: make_cache_output
+ until: make_cache_output|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - ansible_pkg_mgr == 'yum'
+ - ansible_distribution == 'RedHat'
+ - expire_cache_output.rc == 0
+ - not is_atomic
+ tags: bootstrap-os
+
+- name: Update package management cache (zypper) - SUSE
+ shell: zypper -n --gpg-auto-import-keys ref
+ register: make_cache_output
+ until: make_cache_output|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - ansible_pkg_mgr == 'zypper'
+ tags: bootstrap-os
+
+- name: Update package management cache (APT)
+ apt:
+ update_cache: yes
+ cache_valid_time: 3600
+ when: ansible_os_family == "Debian"
+ tags:
+ - bootstrap-os
+
+- name: Install python-dnf for latest RedHat versions
+ command: dnf install -y python-dnf yum
+ register: dnf_task_result
+ until: dnf_task_result|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when:
+ - ansible_distribution == "Fedora"
+ - ansible_distribution_major_version|int > 21
+ - not is_atomic
+ changed_when: False
+ tags:
+ - bootstrap-os
+
+- name: Install epel-release on RedHat/CentOS
+ yum:
+ name: epel-release
+ state: present
+ when:
+ - ansible_distribution in ["CentOS","RedHat"]
+ - not is_atomic
+ - epel_enabled|bool
+ tags:
+ - bootstrap-os
+
+- name: Install packages requirements
+ action:
+ module: "{{ ansible_pkg_mgr }}"
+ name: "{{ item }}"
+ state: latest
+ register: pkgs_task_result
+ until: pkgs_task_result|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
+ when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
+ tags:
+ - bootstrap-os
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
new file mode 100644
index 00000000000..cc74e624ae8
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -0,0 +1,54 @@
+---
+# Todo : selinux configuration
+- name: Confirm selinux deployed
+ stat:
+ path: /etc/selinux/config
+ when: ansible_os_family == "RedHat"
+ register: slc
+
+- name: Set selinux policy
+ selinux:
+ policy: targeted
+ state: "{{ preinstall_selinux_state }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - slc.stat.exists == True
+ changed_when: False
+ tags:
+ - bootstrap-os
+
+- name: Disable IPv6 DNS lookup
+ lineinfile:
+ dest: /etc/gai.conf
+ line: "precedence ::ffff:0:0/96 100"
+ state: present
+ backup: yes
+ when:
+ - disable_ipv6_dns
+ - not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+ tags:
+ - bootstrap-os
+
+- name: Stat sysctl file configuration
+ stat:
+ path: "{{sysctl_file_path}}"
+ register: sysctl_file_stat
+ tags:
+ - bootstrap-os
+
+- name: Change sysctl file path to link source if linked
+ set_fact:
+ sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
+ when:
+ - sysctl_file_stat.stat.islnk is defined
+ - sysctl_file_stat.stat.islnk
+ tags:
+ - bootstrap-os
+
+- name: Enable ip forwarding
+ sysctl:
+ sysctl_file: "{{sysctl_file_path}}"
+ name: net.ipv4.ip_forward
+ value: 1
+ state: present
+ reload: yes
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
new file mode 100644
index 00000000000..f7d3e6023fd
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -0,0 +1,54 @@
+---
+- name: Hosts | populate inventory into hosts file
+ blockinfile:
+ dest: /etc/hosts
+ block: |-
+ {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}{% if (item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
+ {% endfor %}
+ state: present
+ create: yes
+ backup: yes
+ marker: "# Ansible inventory hosts {mark}"
+ when: populate_inventory_to_hosts_file
+
+- name: Hosts | populate kubernetes loadbalancer address into hosts file
+ lineinfile:
+ dest: /etc/hosts
+ regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
+ line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
+ state: present
+ backup: yes
+ when:
+ - loadbalancer_apiserver is defined
+ - loadbalancer_apiserver.address is defined
+
+- name: Hosts | Retrieve hosts file content
+ slurp:
+ src: /etc/hosts
+ register: etc_hosts_content
+
+- name: Hosts | Extract existing entries for localhost from hosts file
+ set_fact:
+ etc_hosts_localhosts_dict: >-
+ {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%}
+ {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }}
+ with_items: "{{ (etc_hosts_content['content'] | b64decode).split('\n') }}"
+ when:
+ - etc_hosts_content.content is defined
+ - (item|match('^::1 .*') or item|match('^127.0.0.1 .*'))
+
+- name: Hosts | Update target hosts file entries dict with required entries
+ set_fact:
+ etc_hosts_localhosts_dict_target: >-
+ {%- set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%}
+ {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }}
+ with_dict: "{{ etc_hosts_localhost_entries }}"
+
+- name: Hosts | Update (if necessary) hosts file
+ lineinfile:
+ dest: /etc/hosts
+ line: "{{ item.key }} {{ item.value|join(' ') }}"
+ regexp: "^{{ item.key }}.*$"
+ state: present
+ backup: yes
+ with_dict: "{{ etc_hosts_localhosts_dict_target }}"
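Editor's note: the three tasks above turn the old fixed localhost lines into a merge — aliases already present are kept, those listed as unexpected are dropped, and the expected ones are appended. A compressed sketch of that merge for the ::1 entry, with the pre-existing aliases assumed:

```yaml
# Hypothetical playbook: the alias merge applied to one /etc/hosts entry
- hosts: localhost
  gather_facts: false
  vars:
    existing_aliases: ['ip6-localhost', 'localhost']   # assumed aliases parsed from the current ::1 line
    entry:
      expected: ['localhost6', 'localhost6.localdomain']
      unexpected: ['localhost', 'localhost.localdomain']
  tasks:
    - name: Show the rewritten ::1 line
      debug:
        msg: "::1 {{ ((existing_aliases | difference(entry.unexpected)) + entry.expected) | unique | join(' ') }}"
```

Here the stray localhost alias is removed while ip6-localhost survives, giving `::1 ip6-localhost localhost6 localhost6.localdomain`.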
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
similarity index 87%
rename from roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
rename to roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 8c0a5f5991a..0ab2c9b07dc 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -15,7 +15,7 @@
notify: Preinstall | restart network
when: dhclientconffile is defined
-- name: Configue dhclient hooks for resolv.conf (non-RH)
+- name: Configure dhclient hooks for resolv.conf (non-RH)
template:
src: dhclient_dnsupdate.sh.j2
dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
notify: Preinstall | restart network
when: ansible_os_family != "RedHat"
-- name: Configue dhclient hooks for resolv.conf (RH-only)
+- name: Configure dhclient hooks for resolv.conf (RH-only)
template:
src: dhclient_dnsupdate_rh.sh.j2
dest: "{{ dhclienthookfile }}"
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
rename to roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
diff --git a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
similarity index 83%
rename from roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
rename to roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index 2df6962e826..3e737fea3b4 100644
--- a/roles/kubernetes/preinstall/tasks/growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -12,6 +12,8 @@
failed_when: False
changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
register: growpart_needed
+ environment:
+ LC_ALL: C
- name: check fs type
command: file -Ls /dev/sda1
@@ -21,7 +23,9 @@
- name: run growpart
command: growpart /dev/sda 1
when: growpart_needed.changed
+ environment:
+ LC_ALL: C
- name: run xfs_growfs
command: xfs_growfs /dev/sda1
- when: growpart_needed.changed and 'XFS' in fs_type.stdout
\ No newline at end of file
+ when: growpart_needed.changed and 'XFS' in fs_type.stdout
diff --git a/roles/kubernetes/preinstall/tasks/etchosts.yml b/roles/kubernetes/preinstall/tasks/etchosts.yml
deleted file mode 100644
index 80456f3547e..00000000000
--- a/roles/kubernetes/preinstall/tasks/etchosts.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Hosts | populate inventory into hosts file
- blockinfile:
- dest: /etc/hosts
- block: |-
- {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}{% if (item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
- {% endfor %}
- state: present
- create: yes
- backup: yes
- marker: "# Ansible inventory hosts {mark}"
- when: populate_inventory_to_hosts_file
-
-- name: Hosts | populate kubernetes loadbalancer address into hosts file
- lineinfile:
- dest: /etc/hosts
- regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
- line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
- state: present
- backup: yes
- when:
- - loadbalancer_apiserver is defined
- - loadbalancer_apiserver.address is defined
-
-- name: Hosts | localhost ipv4 in hosts file
- lineinfile:
- dest: /etc/hosts
- line: "127.0.0.1 localhost localhost.localdomain"
- regexp: '^127.0.0.1.*$'
- state: present
- backup: yes
-
-- name: Hosts | localhost ipv6 in hosts file
- lineinfile:
- dest: /etc/hosts
- line: "::1 localhost6 localhost6.localdomain"
- regexp: '^::1.*$'
- state: present
- backup: yes
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 24e83980633..96cde3bb24f 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -1,106 +1,26 @@
---
-- import_tasks: verify-settings.yml
- tags:
- - asserts
-
-- name: Force binaries directory for Container Linux by CoreOS
- set_fact:
- bin_dir: "/opt/bin"
- when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- tags:
- - facts
+# Disable swap
+- import_tasks: 0010-swapoff.yml
+ when: disable_swap
-- name: check bin dir exists
- file:
- path: "{{bin_dir}}"
- state: directory
- owner: root
- become: true
+- import_tasks: 0020-verify-settings.yml
tags:
- - bootstrap-os
-
-- import_tasks: set_facts.yml
- tags:
- - facts
-
-- name: gather os specific variables
- include_vars: "{{ item }}"
- with_first_found:
- - files:
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- - "{{ ansible_distribution|lower }}.yml"
- - "{{ ansible_os_family|lower }}.yml"
- - defaults.yml
- paths:
- - ../vars
- skip: true
- tags:
- - facts
-
-- name: Create kubernetes directories
- file:
- path: "{{ item }}"
- state: directory
- owner: kube
- when: inventory_hostname in groups['k8s-cluster']
- tags:
- - kubelet
- - k8s-secrets
- - kube-controller-manager
- - kube-apiserver
- - bootstrap-os
- - apps
- - network
- - master
- - node
- with_items:
- - "{{ kube_config_dir }}"
- - "{{ kube_config_dir }}/ssl"
- - "{{ kube_manifest_dir }}"
- - "{{ kube_script_dir }}"
- - "{{ local_volume_provisioner_base_dir }}"
+ - asserts
-- name: check cloud_provider value
- fail:
- msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', or external"
- when:
- - cloud_provider is defined
- - cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'external']
+# This is run before bin_dir is pinned because these tasks are run on localhost
+- import_tasks: 0030-pre_upgrade.yml
+ run_once: true
tags:
- - cloud-provider
- - facts
+ - upgrade
-- include_tasks: "{{ cloud_provider }}-credential-check.yml"
- when:
- - cloud_provider is defined
- - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+- import_tasks: 0040-set_facts.yml
tags:
- - cloud-provider
+ - resolvconf
- facts
-- name: Create cni directories
- file:
- path: "{{ item }}"
- state: directory
- owner: kube
- with_items:
- - "/etc/cni/net.d"
- - "/opt/cni/bin"
- when:
- - kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"]
- - inventory_hostname in groups['k8s-cluster']
- tags:
- - network
- - cilium
- - calico
- - weave
- - canal
- - contiv
- - bootstrap-os
+- import_tasks: 0050-create_directories.yml
-- import_tasks: resolvconf.yml
+- import_tasks: 0060-resolvconf.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -108,174 +28,20 @@
- bootstrap-os
- resolvconf
-- name: Update package management cache (YUM)
- yum:
- update_cache: yes
- name: '*'
- register: yum_task_result
- until: yum_task_result|succeeded
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when:
- - ansible_pkg_mgr == 'yum'
- - ansible_distribution != 'RedHat'
- - not is_atomic
- tags: bootstrap-os
-
-- name: Expire management cache (YUM) for Updation - Redhat
- shell: yum clean expire-cache
- register: expire_cache_output
- until: expire_cache_output|succeeded
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when:
- - ansible_pkg_mgr == 'yum'
- - ansible_distribution == 'RedHat'
- - not is_atomic
- tags: bootstrap-os
-
-- name: Update package management cache (YUM) - Redhat
- shell: yum makecache
- register: make_cache_output
- until: make_cache_output|succeeded
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when:
- - ansible_pkg_mgr == 'yum'
- - ansible_distribution == 'RedHat'
- - expire_cache_output.rc == 0
- - not is_atomic
- tags: bootstrap-os
-
-
-- name: Update package management cache (APT)
- apt:
- update_cache: yes
- cache_valid_time: 3600
- when: ansible_os_family == "Debian"
+- import_tasks: 0070-system-packages.yml
tags:
- bootstrap-os
-- name: Install python-dnf for latest RedHat versions
- command: dnf install -y python-dnf yum
- register: dnf_task_result
- until: dnf_task_result|succeeded
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when:
- - ansible_distribution == "Fedora"
- - ansible_distribution_major_version > 21
- - not is_atomic
- changed_when: False
+- import_tasks: 0080-system-configurations.yml
tags:
- bootstrap-os
-- name: Install epel-release on RedHat/CentOS
- yum:
- name: epel-release
- state: present
- when:
- - ansible_distribution in ["CentOS","RedHat"]
- - not is_atomic
- - epel_enabled|bool
- tags:
- - bootstrap-os
-
-- name: Install packages requirements
- action:
- module: "{{ ansible_pkg_mgr }}"
- name: "{{ item }}"
- state: latest
- register: pkgs_task_result
- until: pkgs_task_result|succeeded
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
- when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
- tags:
- - bootstrap-os
-
-# Todo : selinux configuration
-- name: Confirm selinux deployed
- stat:
- path: /etc/selinux/config
- when: ansible_os_family == "RedHat"
- register: slc
-
-- name: Set selinux policy
- selinux:
- policy: targeted
- state: "{{ preinstall_selinux_state }}"
- when:
- - ansible_os_family == "RedHat"
- - slc.stat.exists == True
- changed_when: False
- tags:
- - bootstrap-os
-
-- name: Disable IPv6 DNS lookup
- lineinfile:
- dest: /etc/gai.conf
- line: "precedence ::ffff:0:0/96 100"
- state: present
- backup: yes
- when:
- - disable_ipv6_dns
- - not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- tags:
- - bootstrap-os
-
-- name: set default sysctl file path
- set_fact:
- sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
- tags:
- - bootstrap-os
-
-- name: Stat sysctl file configuration
- stat:
- path: "{{sysctl_file_path}}"
- register: sysctl_file_stat
- tags:
- - bootstrap-os
-
-- name: Change sysctl file path to link source if linked
- set_fact:
- sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
- when:
- - sysctl_file_stat.stat.islnk is defined
- - sysctl_file_stat.stat.islnk
- tags:
- - bootstrap-os
-
-- name: Enable ip forwarding
- sysctl:
- sysctl_file: "{{sysctl_file_path}}"
- name: net.ipv4.ip_forward
- value: 1
- state: present
- reload: yes
- tags:
- - bootstrap-os
-
-- name: Write cloud-config
- template:
- src: "{{ cloud_provider }}-cloud-config.j2"
- dest: "{{ kube_config_dir }}/cloud_config"
- group: "{{ kube_cert_group }}"
- mode: 0640
- when:
- - inventory_hostname in groups['k8s-cluster']
- - cloud_provider is defined
- - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
- tags:
- - cloud-provider
-
-- import_tasks: etchosts.yml
+- import_tasks: 0090-etchosts.yml
tags:
- bootstrap-os
- etchosts
-- import_tasks: dhclient-hooks.yml
+- import_tasks: 0100-dhclient-hooks.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -284,7 +50,7 @@
- bootstrap-os
- resolvconf
-- import_tasks: dhclient-hooks-undo.yml
+- import_tasks: 0110-dhclient-hooks-undo.yml
when:
- dns_mode != 'none'
- resolvconf_mode != 'host_resolvconf'
@@ -300,7 +66,7 @@
tags:
- bootstrap-os
-- import_tasks: growpart-azure-centos-7.yml
+- import_tasks: 0120-growpart-azure-centos-7.yml
when:
- azure_check.stat.exists
- ansible_distribution in ["CentOS","RedHat"]
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
deleted file mode 100644
index a945e715e0d..00000000000
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: check if atomic host
- stat:
- path: /run/ostree-booted
- register: ostree
-
-- set_fact:
- is_atomic: "{{ ostree.stat.exists }}"
-
-- set_fact:
- kube_cert_group: "kube"
- when: is_atomic
-
-- import_tasks: set_resolv_facts.yml
- tags:
- - bootstrap-os
- - resolvconf
- - facts
diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml
index bacfb96b5cb..67fbfe085da 100644
--- a/roles/kubernetes/preinstall/vars/centos.yml
+++ b/roles/kubernetes/preinstall/vars/centos.yml
@@ -3,3 +3,4 @@ required_pkgs:
- libselinux-python
- device-mapper-libs
- ebtables
+ - nss
diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml
index bacfb96b5cb..67fbfe085da 100644
--- a/roles/kubernetes/preinstall/vars/redhat.yml
+++ b/roles/kubernetes/preinstall/vars/redhat.yml
@@ -3,3 +3,4 @@ required_pkgs:
- libselinux-python
- device-mapper-libs
- ebtables
+ - nss
diff --git a/roles/kubernetes/preinstall/vars/suse.yml b/roles/kubernetes/preinstall/vars/suse.yml
new file mode 100644
index 00000000000..3f4f9aee9a1
--- /dev/null
+++ b/roles/kubernetes/preinstall/vars/suse.yml
@@ -0,0 +1,4 @@
+---
+required_pkgs:
+ - device-mapper
+ - ebtables
diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml
index f0d10711d30..34c42bc204b 100644
--- a/roles/kubernetes/secrets/defaults/main.yml
+++ b/roles/kubernetes/secrets/defaults/main.yml
@@ -1,3 +1,3 @@
---
kube_cert_group: kube-cert
-kube_vault_mount_path: kube
+kube_vault_mount_path: "/kube"
diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh
index 750e9c4fe79..2a4b930ea2b 100755
--- a/roles/kubernetes/secrets/files/make-ssl.sh
+++ b/roles/kubernetes/secrets/files/make-ssl.sh
@@ -69,7 +69,16 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then
cp $SSLDIR/{ca.pem,ca-key.pem} .
else
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
- openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+ openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+fi
+
+# Front proxy client CA
+if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
+ # Reuse existing front proxy CA
+ cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
+else
+ openssl genrsa -out front-proxy-ca-key.pem 2048 > /dev/null 2>&1
+ openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days 36500 -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
fi
gen_key_and_cert() {
@@ -77,11 +86,30 @@ gen_key_and_cert() {
local subject=$2
openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
- openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+ openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+}
+
+gen_key_and_cert_front_proxy() {
+ local name=$1
+ local subject=$2
+ openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
+ openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
+ openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
# Admins
if [ -n "$MASTERS" ]; then
+
+ # service-account
+ # If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions
+ if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
+ cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem
+ fi
+ # Generate dedicated service account signing key if one doesn't exist
+ if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
+ openssl genrsa -out service-account-key.pem 2048 > /dev/null 2>&1
+ fi
+
# kube-apiserver
# Generate only if we don't have existing ca and apiserver certs
if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then
@@ -94,7 +122,7 @@ if [ -n "$MASTERS" ]; then
# kube-controller-manager
gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
# metrics aggregator
- gen_key_and_cert "front-proxy-client" "/CN=front-proxy-client"
+ gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
for host in $MASTERS; do
cn="${host%%.*}"
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 6278897710c..63b7e7db2da 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -14,27 +14,6 @@
gen_certs: false
secret_changed: false
-- name: "Check certs | check if a cert already exists on node"
- stat:
- path: "{{ kube_cert_dir }}/{{ item }}"
- register: kubecert_node
- with_items:
- - ca.pem
- - apiserver.pem
- - apiserver-key.pem
- - kube-scheduler.pem
- - kube-scheduler-key.pem
- - kube-controller-manager.pem
- - kube-controller-manager-key.pem
- - front-proxy-client.pem
- - front-proxy-client-key.pem
- - admin-{{ inventory_hostname }}.pem
- - admin-{{ inventory_hostname }}-key.pem
- - node-{{ inventory_hostname }}.pem
- - node-{{ inventory_hostname }}-key.pem
- - kube-proxy-{{ inventory_hostname }}.pem
- - kube-proxy-{{ inventory_hostname }}-key.pem
-
- name: "Check_certs | Set 'gen_certs' to true"
set_fact:
gen_certs: true
@@ -48,17 +27,20 @@
'{{ kube_cert_dir }}/kube-scheduler-key.pem',
'{{ kube_cert_dir }}/kube-controller-manager.pem',
'{{ kube_cert_dir }}/kube-controller-manager-key.pem',
+ '{{ kube_cert_dir }}/front-proxy-ca.pem',
+ '{{ kube_cert_dir }}/front-proxy-ca-key.pem',
'{{ kube_cert_dir }}/front-proxy-client.pem',
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
+ '{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
- '{{ kube_cert_dir }}/admin-{{ host }}.pem'
+ '{{ kube_cert_dir }}/admin-{{ host }}.pem',
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
- {% endfor %}]
+ {% endfor %},
{% for host in groups['k8s-cluster'] %}
- '{{ kube_cert_dir }}/node-{{ host }}.pem'
- '{{ kube_cert_dir }}/node-{{ host }}-key.pem'
- '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem'
+ '{{ kube_cert_dir }}/node-{{ host }}.pem',
+ '{{ kube_cert_dir }}/node-{{ host }}-key.pem',
+ '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
@@ -71,7 +53,9 @@
{% for cert in ['apiserver.pem', 'apiserver-key.pem',
'kube-scheduler.pem','kube-scheduler-key.pem',
'kube-controller-manager.pem','kube-controller-manager-key.pem',
- 'front-proxy-client.pem','front-proxy-client-key.pem'] -%}
+ 'front-proxy-ca.pem','front-proxy-ca-key.pem',
+ 'front-proxy-client.pem','front-proxy-client-key.pem',
+ 'service-account-key.pem'] -%}
{% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
{% if not cert_file in existing_certs -%}
{%- set gen = True -%}
@@ -80,7 +64,6 @@
{{ gen }}
run_once: true
-
- name: "Check_certs | Set 'gen_node_certs' to true"
set_fact:
gen_node_certs: |-
@@ -97,17 +80,3 @@
{% endfor %}
}
run_once: true
-
-- name: "Check_certs | Set 'sync_certs' to true"
- set_fact:
- sync_certs: true
- when: |-
- {%- set certs = {'sync': False} -%}
- {% if gen_node_certs[inventory_hostname] or
- (not kubecert_node.results[0].stat.exists|default(False)) or
- (not kubecert_node.results[10].stat.exists|default(False)) or
- (not kubecert_node.results[7].stat.exists|default(False)) or
- (kubecert_node.results[10].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[10].stat.path)|map(attribute="checksum")|first|default('')) -%}
- {%- set _ = certs.update({'sync': True}) -%}
- {% endif %}
- {{ certs.sync }}
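The rewritten master-side check now boils down to "does every expected PEM already exist on the first master", including the new front-proxy CA pair and the service-account key. A rough manual equivalent, assuming the default kube_cert_dir of /etc/kubernetes/ssl, looks like:

    cd /etc/kubernetes/ssl
    for f in apiserver kube-scheduler kube-controller-manager front-proxy-ca front-proxy-client; do
      ls "${f}.pem" "${f}-key.pem" || echo "missing ${f} pair, certs will be regenerated"
    done
    ls service-account-key.pem || echo "missing service-account signing key"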
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index 01157535801..b77275b4932 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -12,7 +12,6 @@
- k8s-secrets
- kube-controller-manager
- kube-apiserver
- - bootstrap-os
- apps
- network
- master
@@ -28,7 +27,6 @@
when: gen_certs|default(false)
tags:
- k8s-secrets
- - bootstrap-os
- name: Gen_certs | write openssl config
template:
@@ -73,8 +71,11 @@
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem',
+ 'front-proxy-ca.pem',
+ 'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
+ 'service-account-key.pem',
{% for node in groups['kube-master'] %}
'admin-{{ node }}.pem',
'admin-{{ node }}-key.pem',
@@ -84,8 +85,11 @@
'admin-{{ inventory_hostname }}-key.pem',
'apiserver.pem',
'apiserver-key.pem',
+ 'front-proxy-ca.pem',
+ 'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
+ 'service-account-key.pem',
'kube-scheduler.pem',
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
@@ -105,6 +109,34 @@
tags:
- facts
+- name: "Check certs | check if a cert already exists on node"
+ find:
+ paths: "{{ kube_cert_dir }}"
+ patterns: "*.pem"
+ get_checksum: true
+ register: kubecert_node
+ when: inventory_hostname != groups['kube-master'][0]
+
+- name: "Check_certs | Set 'sync_certs' to true on masters"
+ set_fact:
+ sync_certs: true
+ when: inventory_hostname in groups['kube-master'] and
+ inventory_hostname != groups['kube-master'][0] and
+ (not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
+ kubecert_node.files | selectattr("path", "equalto", "{{ kube_cert_dir }}/{{ item }}") | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", "{{ kube_cert_dir }}/{{ item }}") | map(attribute="checksum")|first|default(''))
+ with_items:
+ - "{{ my_master_certs + all_node_certs }}"
+
+- name: "Check_certs | Set 'sync_certs' to true on nodes"
+ set_fact:
+ sync_certs: true
+ when: inventory_hostname in groups['kube-node'] and
+ inventory_hostname != groups['kube-master'][0] and
+ (not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
+ kubecert_node.files | selectattr("path", "equalto", "{{ kube_cert_dir }}/{{ item }}") | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", "{{ kube_cert_dir }}/{{ item }}") | map(attribute="checksum")|first|default(''))
+ with_items:
+ - "{{ my_node_certs }}"
+
- name: Gen_certs | Gather master certs
shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
args:
@@ -132,7 +164,7 @@
# char limit when using shell command
# FIXME(mattymo): Use tempfile module in ansible 2.3
-- name: Gen_certs | Prepare tempfile for unpacking certs
+- name: Gen_certs | Prepare tempfile for unpacking certs on masters
command: mktemp /tmp/certsXXXXX.tar.gz
register: cert_tempfile
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
@@ -156,7 +188,7 @@
inventory_hostname != groups['kube-master'][0]
notify: set secret_changed
-- name: Gen_certs | Cleanup tempfile
+- name: Gen_certs | Cleanup tempfile on masters
file:
path: "{{cert_tempfile.stdout}}"
state: absent
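The sync path above still ships certificates between hosts as a base64-encoded tarball carried through Ansible, it just gains per-group file lists and the new checksum-driven sync_certs flags. The transfer itself can be reproduced by hand between two masters (hypothetical file names, default kube_cert_dir):

    # pack on the first master
    tar cfz - -C /etc/kubernetes/ssl admin-node1.pem admin-node1-key.pem | base64 --wrap=0 > /tmp/certs.b64
    # unpack on the receiving master
    base64 -d /tmp/certs.b64 | tar xzf - -C /etc/kubernetes/ssl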
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
index cc16b749bed..8a847b0025e 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
@@ -25,7 +25,7 @@
kube_cert_alt_names: >-
{{
groups['kube-master'] +
- ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
+ ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
['localhost']
}}
run_once: true
@@ -44,6 +44,7 @@
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_run_once: true
issue_cert_ip_sans: >-
[
{%- for host in groups['kube-master'] -%}
@@ -52,6 +53,11 @@
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
+ {%- if supplementary_addresses_in_ssl_keys is defined -%}
+ {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
+ "{{ ip_item }}",
+ {%- endfor -%}
+ {%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
issue_cert_path: "{{ item }}"
@@ -98,6 +104,8 @@
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "front-proxy-client"
+ issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
+ issue_cert_ca_filename: front-proxy-ca.pem
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
@@ -110,6 +118,11 @@
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
+ {%- if supplementary_addresses_in_ssl_keys is defined -%}
+ {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
+ "{{ ip_item }}",
+ {%- endfor -%}
+ {%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
issue_cert_path: "{{ item }}"
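With supplementary_addresses_in_ssl_keys now honoured by the vault-issued certificates as well, any extra VIPs or bastion addresses listed there should appear as IP SANs in the apiserver certificate. A quick way to confirm, assuming the default cert location:

    openssl x509 -in /etc/kubernetes/ssl/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'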
diff --git a/roles/kubernetes/secrets/tasks/gen_tokens.yml b/roles/kubernetes/secrets/tasks/gen_tokens.yml
index a4cc0f69bfd..df47d157dae 100644
--- a/roles/kubernetes/secrets/tasks/gen_tokens.yml
+++ b/roles/kubernetes/secrets/tasks/gen_tokens.yml
@@ -55,4 +55,4 @@
- name: Gen_tokens | Copy tokens on masters
shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
- inventory_hostname != groups['kube-master'][0]
+ inventory_hostname != groups['kube-master'][0] and tokens_data.stdout != ''
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index 5d20a755a0b..d36c3a05728 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -2,11 +2,13 @@
- import_tasks: check-certs.yml
tags:
- k8s-secrets
+ - k8s-gen-certs
- facts
- import_tasks: check-tokens.yml
tags:
- k8s-secrets
+ - k8s-gen-tokens
- facts
- name: Make sure the certificate directory exists
@@ -41,7 +43,6 @@
- k8s-secrets
- kube-controller-manager
- kube-apiserver
- - bootstrap-os
- apps
- network
- master
@@ -57,7 +58,6 @@
when: gen_certs|default(false) or gen_tokens|default(false)
tags:
- k8s-secrets
- - bootstrap-os
- name: "Get_tokens | Make sure the tokens directory exits (on {{groups['kube-master'][0]}})"
file:
@@ -72,10 +72,12 @@
- include_tasks: "gen_certs_{{ cert_management }}.yml"
tags:
- k8s-secrets
+ - k8s-gen-certs
- import_tasks: upd_ca_trust.yml
tags:
- k8s-secrets
+ - k8s-gen-certs
- name: "Gen_certs | Get certificate serials on kube masters"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
@@ -87,6 +89,10 @@
- "kube-controller-manager.pem"
- "kube-scheduler.pem"
when: inventory_hostname in groups['kube-master']
+ tags:
+ - master
+ - kubelet
+ - node
- name: "Gen_certs | set kube master certificate serial facts"
set_fact:
@@ -95,6 +101,10 @@
controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}"
scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}"
when: inventory_hostname in groups['kube-master']
+ tags:
+ - master
+ - kubelet
+ - node
- name: "Gen_certs | Get certificate serials on kube nodes"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
@@ -110,7 +120,11 @@
kubelet_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}"
when: inventory_hostname in groups['k8s-cluster']
+ tags:
+ - kubelet
+ - node
- import_tasks: gen_tokens.yml
tags:
- k8s-secrets
+ - k8s-gen-tokens
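The new k8s-gen-certs and k8s-gen-tokens tags make it possible to re-run only the secrets handling, for example after adding entries to supplementary_addresses_in_ssl_keys. A hypothetical invocation, assuming an inventory at inventory/mycluster/hosts.ini:

    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b --tags k8s-gen-certs,k8s-gen-tokens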
diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
index d747044484b..50e1a01e784 100644
--- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
+++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
@@ -32,7 +32,7 @@
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_is_cert: true
sync_file_owner: kube
- with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"]
+ with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
- name: sync_kube_master_certs | Set facts for kube master components sync_file results
set_fact:
@@ -44,6 +44,18 @@
set_fact:
sync_file_results: []
+- include_tasks: ../../../vault/tasks/shared/sync_file.yml
+ vars:
+ sync_file: front-proxy-ca.pem
+ sync_file_dir: "{{ kube_cert_dir }}"
+ sync_file_group: "{{ kube_cert_group }}"
+ sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_owner: kube
+
+- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
+ set_fact:
+ sync_file_results: []
+
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
index eec44987f51..cdd5f48fa07 100644
--- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
+++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
@@ -8,6 +8,8 @@
/etc/pki/ca-trust/source/anchors/kube-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/kube-ca.pem
+ {%- elif ansible_os_family == "Suse" -%}
+ /etc/pki/trust/anchors/kube-ca.pem
{%- endif %}
tags:
- facts
@@ -19,9 +21,9 @@
remote_src: true
register: kube_ca_cert
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates
- when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+ when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
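On SUSE hosts the cluster CA now lands in the p11-kit anchors directory and is refreshed with the same update-ca-certificates command used on Debian-family systems; done by hand the equivalent is:

    cp /etc/kubernetes/ssl/ca.pem /etc/pki/trust/anchors/kube-ca.pem
    update-ca-certificates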
diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2
index a214739c9a6..38902aeef20 100644
--- a/roles/kubernetes/secrets/templates/openssl.conf.j2
+++ b/roles/kubernetes/secrets/templates/openssl.conf.j2
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
@@ -13,30 +13,30 @@ DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.{{ dns_domain }}
DNS.5 = localhost
{% for host in groups['kube-master'] %}
-DNS.{{ 5 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['kube-master'] | length | int + 5 + 1 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
{% endif %}
{% for host in groups['kube-master'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
{% endfor %}
-{% set idx = groups['kube-master'] | length | int * 2 + 1 %}
-IP.{{ idx }} = {{ kube_apiserver_ip }}
-{% if loadbalancer_apiserver is defined %}
-IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }}
-{% set idx = idx + 1 %}
+{% if kube_apiserver_ip is defined %}
+IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }}
+{% endif %}
+{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %}
+IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }}
{% endif %}
-IP.{{ idx + 1 }} = 127.0.0.1
{% if supplementary_addresses_in_ssl_keys is defined %}
-{% set is = idx + 1 %}
{% for addr in supplementary_addresses_in_ssl_keys %}
{% if addr | ipaddr %}
-IP.{{ is + loop.index }} = {{ addr }}
+IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }}
{% else %}
-DNS.{{ is + loop.index }} = {{ addr }}
+DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }}
{% endif %}
{% endfor %}
{% endif %}
+IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 61f11e97f0d..e405c7a3f9e 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -5,20 +5,27 @@ bootstrap_os: none
# Use proxycommand if bastion host is in group all
# This change obsoletes editing the ansible.cfg file depending on bastion existence
-ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
+ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
+# Optionally disable swap
+disable_swap: true
## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.9.3
+kube_version: v1.11.3
## Kube Proxy mode One of ['iptables','ipvs']
kube_proxy_mode: iptables
+# Kube-proxy nodeport addresses:
+# CIDR to bind nodeport services to; sets the --nodeport-addresses flag in the kube-proxy manifest
+kube_proxy_nodeport_addresses: false
+# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
+
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
@@ -49,6 +56,7 @@ resolvconf_mode: docker_dns
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
@@ -60,7 +68,6 @@ dns_domain: "{{ cluster_name }}"
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -93,6 +100,9 @@ kube_network_plugin: calico
# Determines if calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
+# Set to false to disable calico-upgrade
+calico_upgrade_enabled: true
+
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
@@ -127,20 +137,69 @@ kube_apiserver_insecure_bind_address: 127.0.0.1
kube_apiserver_insecure_port: 8080
# Aggregator
-kube_api_aggregator_routing: true
+kube_api_aggregator_routing: false
+
+# Container runtime
+container_manager: docker
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+# docker_storage_options: -s overlay2
+
+## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RHEL 7.
+docker_container_storage_setup: false
+
+## A disk path must be defined for docker_container_storage_setup_devs,
+## otherwise docker-storage-setup will be executed incorrectly.
+# docker_container_storage_setup_devs: /dev/vdb
+
+## Set to false if you have more than 3 nameservers; only the first 3 will be used.
+docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
+## Controls the docker daemon's --iptables option (disabled by default)
+docker_iptables_enabled: "false"
+
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
+## An obvious use case is allowing insecure-registry access to self-hosted registries.
+## Entries can be IP addresses or domain names,
+## for example 172.19.16.11 or mirror.registry.io
+# docker_insecure_registries:
+# - mirror.registry.io
+# - 172.19.16.11
+
+## Add other registry mirrors, for example a China registry mirror.
+# docker_registry_mirrors:
+# - https://registry.docker-cn.com
+# - https://mirror.aliyuncs.com
+
+## If non-empty, will override the default systemd MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which controls whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for the system default.
+# docker_mount_flags:
+
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_options: >-
+ {%- if docker_insecure_registries is defined -%}
+ {{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
+ {%- endif %}
+ {% if docker_registry_mirrors is defined -%}
+ {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
+ {%- endif %}
+ --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
+ {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+ --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+ --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+ --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+ {%- endif -%}
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
@@ -152,11 +211,14 @@ helm_deployment_type: host
# Enable kubeadm deployment (experimental)
kubeadm_enabled: false
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
kubectl_localhost: false
+# Define credentials_dir here so it can be overridden
+credentials_dir: "{{ inventory_dir }}/credentials"
+
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
@@ -167,13 +229,13 @@ dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
-istio_enabled: false
registry_enabled: false
-enable_network_policy: false
+enable_network_policy: true
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
+cert_manager_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
@@ -198,7 +260,7 @@ authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
-kubelet_authentication_token_webhook: false
+kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
@@ -206,7 +268,6 @@ kubelet_authorization_mode_webhook: false
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates:
- - "Initializers={{ istio_enabled | string }}"
- "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
- "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
- "MountPropagation={{ local_volume_provisioner_enabled | string }}"
@@ -218,6 +279,10 @@ vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
+# Local volume provisioner dirs
+local_volume_provisioner_base_dir: /mnt/disks
+local_volume_provisioner_mount_dir: /mnt/disks
+
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
@@ -240,6 +305,7 @@ weave_peers: uninitialized
## Set no_proxy to all assigned cluster IPs and hostnames
no_proxy: >-
+ {%- if http_proxy is defined or https_proxy is defined %}
{%- if loadbalancer_apiserver is defined -%}
{{ apiserver_loadbalancer_domain_name| default('') }},
{{ loadbalancer_apiserver.address | default('') }},
@@ -253,11 +319,24 @@ no_proxy: >-
{{ item }},{{ item }}.{{ dns_domain }},
{%- endfor -%}
127.0.0.1,localhost
+ {%- endif %}
proxy_env:
http_proxy: "{{ http_proxy| default ('') }}"
https_proxy: "{{ https_proxy| default ('') }}"
- no_proxy: "{{ no_proxy }}"
+ no_proxy: "{{ no_proxy| default ('') }}"
+
+ssl_ca_dirs: >-
+ [
+ {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
+ '/usr/share/ca-certificates',
+ {% elif ansible_os_family == 'RedHat' -%}
+ '/etc/pki/tls',
+ '/etc/pki/ca-trust',
+ {% elif ansible_os_family == 'Debian' -%}
+ '/usr/share/ca-certificates',
+ {% endif -%}
+ ]
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
@@ -296,16 +375,17 @@ kube_apiserver_client_key: |-
{%- endif %}
# Set to true to deploy etcd-events cluster
-etcd_events_cluster_setup: false
+etcd_events_cluster_enabled: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
-etcd_events_peer_url: "https://{{ etcd_access_address }}:2382"
-etcd_events_client_url: "https://{{ etcd_access_address }}:2381"
+etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
+etcd_events_client_url: "https://{{ etcd_events_access_address }}:2381"
etcd_access_addresses: |-
{% for item in groups['etcd'] -%}
https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
@@ -314,15 +394,21 @@ etcd_events_access_addresses: |-
{% for item in groups['etcd'] -%}
https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %}
{%- endfor %}
+# Users should set etcd_member_name in inventory/mycluster/hosts.ini
etcd_member_name: |-
{% for host in groups['etcd'] %}
- {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
+ {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %}
{% endfor %}
etcd_peer_addresses: |-
{% for item in groups['etcd'] -%}
- {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
+ {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_peer_addresses: |-
{% for item in groups['etcd'] -%}
- {{ "etcd"+loop.index|string }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
+ {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
{%- endfor %}
+
+podsecuritypolicy_enabled: false
+etcd_heartbeat_interval: "250"
+etcd_election_timeout: "5000"
+etcd_snapshot_count: "10000"
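The docker_options block above now assembles the daemon flags from optional lists instead of one hard-coded string. As a sketch, with the hypothetical inventory values docker_insecure_registries: [mirror.registry.io, 172.19.16.11] and docker_registry_mirrors: [https://registry.docker-cn.com], it renders roughly to:

    --insecure-registry=mirror.registry.io --insecure-registry=172.19.16.11 \
    --registry-mirror=https://registry.docker-cn.com \
    --graph=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5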
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index a44b3d31513..a1c6f1d2231 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -2,9 +2,12 @@
# Enables Internet connectivity from containers
nat_outgoing: true
+# Default calico IP pool name
+calico_pool_name: "default-pool"
+
# Use IP-over-IP encapsulation across hosts
ipip: true
-ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
+ipip_mode: Always # change to "CrossSubnet" if you only want ipip encapsulation on traffic going across subnets
# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.
@@ -37,11 +40,21 @@ calico_felix_prometheusmetricsport: 9091
calico_felix_prometheusgometricsenabled: "true"
calico_felix_prometheusprocessmetricsenabled: "true"
+### Check for the latest version at https://github.com/projectcalico/calico-upgrade/releases
+calico_upgrade_enabled: true
+calico_upgrade_version: v1.0.5
+
# Should calico ignore kernel's RPF check setting,
# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
calico_node_ignorelooserpf: false
-rbac_resources:
- - sa
- - clusterrole
- - clusterrolebinding
+# If you want to use a non-default IP_AUTODETECTION_METHOD for calico-node, set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
+# calico_ip_auto_method: "interface=eth.*"
+
+calico_baremetal_nodename: "{{ inventory_hostname }}"
+
+### Do not enable this manually; it is detected by the tasks and this is only a default value
+calico_upgrade_needed: false
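Note that ipip_mode now uses the capitalised calico v3 spellings (Always, CrossSubnet, Never) and the pool carries an explicit name. After deployment the pool can be inspected through the calicoctl wrapper installed by this role, for example:

    calicoctl get ippool default-pool -o yaml | grep -E 'cidr|ipipMode|natOutgoing'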
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
new file mode 100644
index 00000000000..cfacf12a18c
--- /dev/null
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -0,0 +1,37 @@
+---
+- name: "Check vars defined correctly"
+ assert:
+ that:
+ - "calico_pool_name is defined"
+ - "calico_pool_name | match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
+ - "ipip_mode is defined"
+ - "ipip_mode in ['Always', 'CrossSubnet', 'Never']"
+ msg: "Check variable definitions seems something is wrong"
+ run_once: yes
+
+- name: "Get current version of calico cluster version"
+ shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version' | awk '{ print $3}'"
+ register: calico_version_on_server
+ run_once: yes
+ delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: "Determine if calico upgrade is needed"
+ block:
+ - name: "Check that calico version is enought for upgrade"
+ assert:
+ that:
+ - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+ msg: "Your version of calico is not fresh enough for upgrade"
+
+ - name: "Set upgrade flag when version needs to be updated"
+ set_fact:
+ calico_upgrade_needed: True
+ when:
+ - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+ - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+
+ when:
+ - 'calico_version_on_server.stdout is defined'
+ - 'calico_version_on_server.stdout != ""'
+ - inventory_hostname == groups['kube-master'][0]
+ run_once: yes
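The version probe above keys off the "Cluster Version" line of calicoctl output; on a pre-3.0 cluster the command it wraps behaves roughly like this:

    calicoctl version | grep 'Cluster Version' | awk '{ print $3 }'
    # e.g. prints v2.6.8, which is >= v2.6.5 and < v3.0.0, so calico_upgrade_needed gets set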
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
new file mode 100644
index 00000000000..4e1f1dc5e95
--- /dev/null
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -0,0 +1,230 @@
+---
+
+- name: Calico | Write Calico cni config
+ template:
+ src: "cni-calico.conflist.j2"
+ dest: "/etc/cni/net.d/10-calico.conflist"
+ owner: kube
+
+- name: Calico | Create calico certs directory
+ file:
+ dest: "{{ calico_cert_dir }}"
+ state: directory
+ mode: 0750
+ owner: root
+ group: root
+
+- name: Calico | Link etcd certificates for calico-node
+ file:
+ src: "{{ etcd_cert_dir }}/{{ item.s }}"
+ dest: "{{ calico_cert_dir }}/{{ item.d }}"
+ state: hard
+ force: yes
+ with_items:
+ - {s: "ca.pem", d: "ca_cert.crt"}
+ - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
+ - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+
+- name: Calico | Install calicoctl container script
+ template:
+ src: calicoctl-container.j2
+ dest: "{{ bin_dir }}/calicoctl"
+ mode: 0755
+ owner: root
+ group: root
+ changed_when: false
+
+- name: Calico | Copy cni plugins from hyperkube
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ register: cni_task_result
+ until: cni_task_result.rc == 0
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ changed_when: false
+ tags:
+ - hyperkube
+ - upgrade
+
+- name: Calico | Copy cni plugins from calico/cni container
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
+ register: cni_task_result
+ until: cni_task_result.rc == 0
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ changed_when: false
+ when:
+ - "overwrite_hyperkube_cni|bool"
+ tags:
+ - hyperkube
+ - upgrade
+
+- name: Calico | Set cni directory permissions
+ file:
+ path: /opt/cni/bin
+ state: directory
+ owner: kube
+ recurse: true
+ mode: 0755
+
+- name: Calico | wait for etcd
+ uri:
+ url: "{{ etcd_access_addresses.split(',') | first }}/health"
+ validate_certs: no
+ client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+ client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ register: result
+ until: result.status == 200 or result.status == 401
+ retries: 10
+ delay: 5
+ run_once: true
+
+- name: Calico | Check if calico network pool has already been configured
+ shell: >
+ {{ bin_dir }}/calicoctl get ippool | grep -w "{{ kube_pods_subnet }}" | wc -l
+ register: calico_conf
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ delegate_to: "{{ groups['kube-master'][0] }}"
+ run_once: true
+
+- name: Calico | Configure calico network pool
+ shell: >
+ echo "
+ { "kind": "IPPool",
+ "apiVersion": "projectcalico.org/v3",
+ "metadata": {
+ "name": "{{ calico_pool_name }}",
+ },
+ "spec": {
+ "cidr": "{{ kube_pods_subnet }}",
+ "ipipMode": "{{ ipip_mode }}",
+ "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} " | {{ bin_dir }}/calicoctl create -f -
+ run_once: true
+ delegate_to: "{{ groups['kube-master'][0] }}"
+ when:
+ - 'calico_conf.stdout == "0"'
+
+- name: "Determine nodeToNodeMesh needed state"
+ set_fact:
+ nodeToNodeMeshEnabled: "false"
+ when:
+ - peer_with_router|default(false) or peer_with_calico_rr|default(false)
+ - inventory_hostname in groups['k8s-cluster']
+ run_once: yes
+
+
+- name: Calico | Set global as_num
+ shell: >
+ echo '
+ { "kind": "BGPConfiguration",
+ "apiVersion": "projectcalico.org/v3",
+ "metadata": {
+ "name": "default",
+ },
+ "spec": {
+ "logSeverityScreen": "Info",
+ "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
+ "asNumber": {{ global_as_num }} }} ' | {{ bin_dir }}/calicoctl --skip-exists create -f -
+ run_once: true
+ delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Calico | Configure peering with router(s)
+ shell: >
+ echo '{
+ "apiVersion": "projectcalico.org/v3",
+ "kind": "bgpPeer",
+ "metadata": {
+ "name": "{{ inventory_hostname }}-bgp"
+ },
+ "spec": {
+ "asNumber": "{{ item.as }}",
+ "node": "{{ inventory_hostname }}",
+ "scope": "node",
+ "peerIP": "{{ item.router_id }}"
+ }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items:
+ - "{{ peers|default([]) }}"
+ when:
+ - calico_version_on_server.stdout|version_compare('v3.0.0', '>') or calico_upgrade_enabled
+ - peer_with_router|default(false)
+ - inventory_hostname in groups['k8s-cluster']
+
+- name: Calico | Configure peering with router(s) (legacy)
+ shell: >
+ echo '{
+ "kind": "bgpPeer",
+ "spec": {"asNumber": "{{ item.as }}"},
+ "apiVersion": "v1",
+ "metadata": {"node": "{{ inventory_hostname }}", "scope": "node", "peerIP": "{{ item.router_id }}"}
+ }'
+ | {{ bin_dir }}/calicoctl create --skip-exists -f -
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items: "{{ peers|default([]) }}"
+ when:
+ - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+ - not calico_upgrade_enabled
+ - peer_with_router|default(false)
+ - inventory_hostname in groups['k8s-cluster']
+
+- name: Calico | Configure peering with route reflectors
+ shell: >
+ echo '{
+ "apiVersion": "projectcalico.org/v3",
+ "kind": "bgpPeer",
+ "metadata": {
+ "name": "{{ inventory_hostname }}"
+ },
+ "spec": {
+ "asNumber": "{{ local_as | default(global_as_num)}}",
+ "scope": "node",
+ "node": "{{ inventory_hostname }}",
+ "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"
+ }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items:
+ - "{{ groups['calico-rr'] | default([]) }}"
+ when:
+ - calico_version_on_server.stdout|version_compare('v3.0.0', '>') or calico_upgrade_enabled
+ - peer_with_calico_rr|default(false)
+ - inventory_hostname in groups['k8s-cluster']
+ - hostvars[item]['cluster_id'] == cluster_id
+
+- name: Calico | Configure peering with route reflectors (legacy)
+ shell: >
+ echo '{
+ "kind": "bgpPeer",
+ "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
+ "apiVersion": "v1",
+ "metadata": {"node": "{{ inventory_hostname }}",
+ "scope": "node",
+ "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"}
+ }'
+ | {{ bin_dir }}/calicoctl create --skip-exists -f -
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ with_items: "{{ groups['calico-rr'] | default([]) }}"
+ when:
+ - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+ - not calico_upgrade_enabled
+ - peer_with_calico_rr|default(false)
+ - hostvars[item]['cluster_id'] == cluster_id
+
+
+- name: Calico | Create calico manifests
+ template:
+ src: "{{item.file}}.j2"
+ dest: "{{kube_config_dir}}/{{item.file}}"
+ with_items:
+ - {name: calico-config, file: calico-config.yml, type: cm}
+ - {name: calico-node, file: calico-node.yml, type: ds}
+ - {name: calico, file: calico-node-sa.yml, type: sa}
+ - {name: calico, file: calico-cr.yml, type: clusterrole}
+ - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
+ register: calico_node_manifests
+ when:
+ - inventory_hostname in groups['kube-master']
+ - rbac_enabled or item.type not in rbac_resources
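The pool configuration task feeds a calico v3 IPPool document to calicoctl. A hand-run sketch of the same step, assuming the default pool name and the stock kube_pods_subnet of 10.233.64.0/18:

    cat <<EOF | calicoctl create -f -
    apiVersion: projectcalico.org/v3
    kind: IPPool
    metadata:
      name: default-pool
    spec:
      cidr: 10.233.64.0/18
      ipipMode: Always
      natOutgoing: true
    EOF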
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 05e7b96111f..fefd8a7ff4c 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -1,194 +1,13 @@
---
-- name: Calico | Disable calico-node service if it exists
- service:
- name: calico-node
- state: stopped
- enabled: yes
- failed_when: false
+- include_tasks: check.yml
-- name: Calico | Get kubelet hostname
- shell: >-
- {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
- | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
- register: calico_kubelet_name
- delegate_to: "{{ groups['kube-master'][0] }}"
- when: cloud_provider is defined
-
-- name: Calico | Write Calico cni config
- template:
- src: "cni-calico.conflist.j2"
- dest: "/etc/cni/net.d/10-calico.conflist"
- owner: kube
-
-- name: Calico | Create calico certs directory
- file:
- dest: "{{ calico_cert_dir }}"
- state: directory
- mode: 0750
- owner: root
- group: root
-
-- name: Calico | Link etcd certificates for calico-node
- file:
- src: "{{ etcd_cert_dir }}/{{ item.s }}"
- dest: "{{ calico_cert_dir }}/{{ item.d }}"
- state: hard
- force: yes
- with_items:
- - {s: "ca.pem", d: "ca_cert.crt"}
- - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
- - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
-
-- name: Calico | Install calicoctl container script
- template:
- src: calicoctl-container.j2
- dest: "{{ bin_dir }}/calicoctl"
- mode: 0755
- owner: root
- group: root
- changed_when: false
-
-- name: Calico | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
- register: cni_task_result
- until: cni_task_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- changed_when: false
- tags:
- - hyperkube
- - upgrade
-
-- name: Calico | Copy cni plugins from calico/cni container
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
- register: cni_task_result
- until: cni_task_result.rc == 0
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- changed_when: false
- when: overwrite_hyperkube_cni|bool
- tags:
- - hyperkube
- - upgrade
-
-- name: Calico | Set cni directory permissions
- file:
- path: /opt/cni/bin
- state: directory
- owner: kube
- recurse: true
- mode: 0755
-
-- name: Calico | wait for etcd
- uri:
- url: "{{ etcd_access_addresses.split(',') | first }}/health"
- validate_certs: no
- client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
- client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
- register: result
- until: result.status == 200 or result.status == 401
- retries: 10
- delay: 5
- run_once: true
-
-- name: Calico | Check if calico network pool has already been configured
- command: |-
- curl \
- --cacert {{ etcd_cert_dir }}/ca.pem \
- --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
- --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
- {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
- register: calico_conf
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- run_once: true
- changed_when: false
+- include_tasks: pre.yml
-- name: Calico | Configure calico network pool
- shell: >
- echo '{
- "kind": "ipPool",
- "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
- "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
- "apiVersion": "v1",
- "metadata": {"cidr": "{{ kube_pods_subnet }}"}
- }'
- | {{ bin_dir }}/calicoctl create -f -
- environment:
- NO_DEFAULT_POOLS: true
- run_once: true
- when: ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout)
-
-- name: Calico | Get calico configuration from etcd
- command: |-
- curl \
- --cacert {{ etcd_cert_dir }}/ca.pem \
- --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
- --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
- {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
- register: calico_pools_raw
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- run_once: true
-
-- set_fact:
- calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
- run_once: true
-
-- name: Calico | Set global as_num
- command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
- run_once: true
-
-- name: Calico | Disable node mesh
- shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false))
- and inventory_hostname in groups['k8s-cluster'])
- run_once: true
-
-- name: Calico | Configure peering with router(s)
- shell: >
- echo '{
- "kind": "bgpPeer",
- "spec": {"asNumber": "{{ item.as }}"},
- "apiVersion": "v1",
- "metadata": {"node": "{{ inventory_hostname }}", "scope": "node", "peerIP": "{{ item.router_id }}"}
- }'
- | {{ bin_dir }}/calicoctl create --skip-exists -f -
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- with_items: "{{ peers|default([]) }}"
- when: peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']
-
-- name: Calico | Configure peering with route reflectors
- shell: >
- echo '{
- "kind": "bgpPeer",
- "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
- "apiVersion": "v1",
- "metadata": {"node": "{{ inventory_hostname }}",
- "scope": "node",
- "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"}
- }'
- | {{ bin_dir }}/calicoctl create --skip-exists -f -
- retries: 4
- delay: "{{ retry_stagger | random + 3 }}"
- with_items: "{{ groups['calico-rr'] | default([]) }}"
- when: (peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
- and hostvars[item]['cluster_id'] == cluster_id)
-
-- name: Calico | Create calico manifests
- template:
- src: "{{item.file}}.j2"
- dest: "{{kube_config_dir}}/{{item.file}}"
- with_items:
- - {name: calico-config, file: calico-config.yml, type: cm}
- - {name: calico-node, file: calico-node.yml, type: ds}
- - {name: calico, file: calico-node-sa.yml, type: sa}
- - {name: calico, file: calico-cr.yml, type: clusterrole}
- - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
- register: calico_node_manifests
+- include_tasks: upgrade.yml
when:
- - inventory_hostname in groups['kube-master']
- - rbac_enabled or item.type not in rbac_resources
+ - calico_upgrade_enabled
+ - calico_upgrade_needed
+ run_once: yes
+ delegate_to: "{{ groups['kube-master'][0] }}"
+
+- include_tasks: install.yml
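With main.yml reduced to includes, the data migration only runs when both calico_upgrade_enabled and the detected calico_upgrade_needed are true, once, delegated to the first master. To skip the migration outright (for example on a fresh cluster) the flag can be overridden at run time; a hypothetical invocation:

    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -e calico_upgrade_enabled=false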
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
new file mode 100644
index 00000000000..4781541bdcf
--- /dev/null
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -0,0 +1,16 @@
+---
+- name: Calico | Disable calico-node service if it exists
+ service:
+ name: calico-node
+ state: stopped
+ enabled: no
+ failed_when: false
+
+- name: Calico | Get kubelet hostname
+ shell: >-
+ {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
+ | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
+ register: calico_kubelet_name
+ delegate_to: "{{ groups['kube-master'][0] }}"
+ when:
+ - "cloud_provider is defined"
\ No newline at end of file
diff --git a/roles/network_plugin/calico/tasks/upgrade.yml b/roles/network_plugin/calico/tasks/upgrade.yml
new file mode 100644
index 00000000000..443362c98a4
--- /dev/null
+++ b/roles/network_plugin/calico/tasks/upgrade.yml
@@ -0,0 +1,26 @@
+---
+- name: "Download calico-upgrade tool (force version)"
+ get_url:
+ url: "https://github.com/projectcalico/calico-upgrade/releases/download/{{ calico_upgrade_version }}/calico-upgrade"
+ dest: "{{ bin_dir }}/calico-upgrade"
+ mode: 0755
+ owner: root
+ group: root
+ force: yes
+
+- name: "Create etcdv2 and etcdv3 calicoApiConfig"
+ template:
+ src: "{{ item }}-store.yml.j2"
+ dest: "/etc/calico/{{ item }}.yml"
+ with_items:
+ - "etcdv2"
+ - "etcdv3"
+
+- name: "Tests data migration (dry-run)"
+ shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
+ register: calico_upgrade_test_data
+ failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'
+
+- name: "If test migration is success continue with calico data real migration"
+ shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
+ register: calico_upgrade_migration_data
\ No newline at end of file
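The same migration can be exercised manually on the first master once the two apiconfig files have been templated out; the flags below mirror the tasks above exactly:

    calico-upgrade dry-run --output-dir=/tmp \
      --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml
    # only if the dry run reports success:
    calico-upgrade start --no-prompts --output-dir=/tmp/calico_upgrade \
      --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml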
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index 92d2f1f0a81..3be65deaa49 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -2,7 +2,7 @@ kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
- namespace: {{ system_namespace }}
+ namespace: kube-system
data:
etcd_endpoints: "{{ etcd_access_addresses }}"
etcd_ca: "/calico-secrets/ca_cert.crt"
diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2
index 47d6266593b..41d4f2b0610 100644
--- a/roles/network_plugin/calico/templates/calico-cr.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups: [""]
resources:
@@ -11,3 +11,11 @@ rules:
- nodes
verbs:
- get
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2
index 2e132a0dc52..1b4e8fe0097 100644
--- a/roles/network_plugin/calico/templates/calico-crb.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: calico-node
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
index 5cce2979378..68b1c286f9b 100644
--- a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 3a01648f763..09eeb661886 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -6,7 +6,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: calico-node
spec:
@@ -22,12 +22,13 @@ spec:
kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
spec:
hostNetwork: true
-{% if rbac_enabled %}
serviceAccountName: calico-node
-{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
@@ -53,6 +54,11 @@ spec:
configMapKeyRef:
name: calico-config
key: cluster_type
+ # Set noderef for node controller.
+ - name: CALICO_K8S_NODE_REF
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
@@ -71,12 +77,6 @@ spec:
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- # Disable autocreation of pools
- - name: CALICO_NO_DEFAULT_POOLS
- value: "true"
- # Enable libnetwork
- - name: CALICO_LIBNETWORK_ENABLED
- value: "true"
# Set MTU for tunnel device used if ipip is enabled
{% if calico_mtu is defined %}
- name: FELIX_IPINIPMTU
@@ -108,10 +108,15 @@ spec:
configMapKeyRef:
name: calico-config
key: etcd_cert
+{% if calico_ip_auto_method is defined %}
+ - name: IP_AUTODETECTION_METHOD
+ value: "{{ calico_ip_auto_method }}"
+{% else %}
- name: IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
+{% endif %}
- name: NODENAME
valueFrom:
fieldRef:
@@ -131,6 +136,7 @@ spec:
memory: {{ calico_node_memory_requests }}
livenessProbe:
httpGet:
+ host: 127.0.0.1
path: /liveness
port: 9099
periodSeconds: 10
@@ -138,6 +144,7 @@ spec:
failureThreshold: 6
readinessProbe:
httpGet:
+ host: 127.0.0.1
path: /readiness
port: 9099
periodSeconds: 10
@@ -147,6 +154,8 @@ spec:
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
@@ -158,6 +167,9 @@ spec:
- name: var-run-calico
hostPath:
path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
@@ -172,5 +184,4 @@ spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
- type: RollingUpdate
-
+ type: RollingUpdate
\ No newline at end of file
diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
index 6dd51e91298..e84ab7d61aa 100644
--- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2
+++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
@@ -3,10 +3,10 @@
"cniVersion":"0.3.1",
"plugins":[
{
- {% if cloud_provider is defined %}
+{% if cloud_provider is defined %}
"nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
- "nodename": "{{ inventory_hostname }}",
+ "nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
"type": "calico",
"etcd_endpoints": "{{ etcd_access_addresses }}",
@@ -19,14 +19,14 @@
"assign_ipv4": "true",
"ipv4_pools": ["{{ kube_pods_subnet }}"]
},
- {% if enable_network_policy %}
+{% if enable_network_policy %}
"policy": {
"type": "k8s"
},
- {%- endif %}
- {% if calico_mtu is defined and calico_mtu is number %}
+{%- endif %}
+{% if calico_mtu is defined and calico_mtu is number %}
"mtu": {{ calico_mtu }},
- {%- endif %}
+{%- endif %}
"kubernetes": {
"kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
}
diff --git a/roles/network_plugin/calico/templates/etcdv2-store.yml.j2 b/roles/network_plugin/calico/templates/etcdv2-store.yml.j2
new file mode 100644
index 00000000000..c65728838a6
--- /dev/null
+++ b/roles/network_plugin/calico/templates/etcdv2-store.yml.j2
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+ datastoreType: "etcdv2"
+ etcdEndpoints: "{{ etcd_access_addresses }}"
+ etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+ etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
diff --git a/roles/network_plugin/calico/templates/etcdv3-store.yml.j2 b/roles/network_plugin/calico/templates/etcdv3-store.yml.j2
new file mode 100644
index 00000000000..3dc566d0ffb
--- /dev/null
+++ b/roles/network_plugin/calico/templates/etcdv3-store.yml.j2
@@ -0,0 +1,9 @@
+apiVersion: projectcalico.org/v3
+kind: CalicoAPIConfig
+metadata:
+spec:
+ datastoreType: "etcdv3"
+ etcdEndpoints: "{{ etcd_access_addresses }}"
+ etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+ etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+ etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
\ No newline at end of file
diff --git a/roles/network_plugin/canal/defaults/main.yml b/roles/network_plugin/canal/defaults/main.yml
index bf74653c78b..38696b87a1a 100644
--- a/roles/network_plugin/canal/defaults/main.yml
+++ b/roles/network_plugin/canal/defaults/main.yml
@@ -31,8 +31,3 @@ calicoctl_memory_limit: 170M
calicoctl_cpu_limit: 100m
calicoctl_memory_requests: 32M
calicoctl_cpu_requests: 25m
-
-rbac_resources:
- - sa
- - clusterrole
- - clusterrolebinding
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index a42c2cfa715..aedb47070ed 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -53,10 +53,9 @@
register: canal_manifests
when:
- inventory_hostname in groups['kube-master']
- - rbac_enabled or item.type not in rbac_resources
- name: Canal | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
index e3b048c640f..0b5ebf094a0 100644
--- a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups: [""]
resources:
@@ -78,3 +78,11 @@ rules:
verbs:
- get
- list
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
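The added rule grants the canal service account `use` on a PodSecurityPolicy named `privileged`, which is assumed to already exist in the cluster. For orientation only, a minimal sketch of such a policy — the real one is created elsewhere in the deployment, not by this template:

apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities: ["*"]
  volumes: ["*"]
  hostNetwork: true
  hostPID: true
  hostIPC: true
  hostPorts:
    - min: 0
      max: 65535
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny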
diff --git a/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2
index 0be8e938cb9..45f6fcb5024 100644
--- a/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2
@@ -24,3 +24,11 @@ rules:
- nodes/status
verbs:
- patch
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
index e1c1f5050a5..016e5193e23 100644
--- a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: canal
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
index 3b00017b13e..097b1538e4c 100644
--- a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: canal
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
index d5b9a6e971a..aa168d15c14 100644
--- a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index 07754c089cb..e1fec660bc5 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal-node
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: canal-node
spec:
@@ -19,9 +19,7 @@ spec:
k8s-app: canal-node
spec:
hostNetwork: true
-{% if rbac_enabled %}
serviceAccountName: canal
-{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
@@ -30,6 +28,9 @@ spec:
- name: lib-modules
hostPath:
path: /lib/modules
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
- name: var-run-calico
hostPath:
path: /var/run/calico
@@ -50,6 +51,10 @@ spec:
- name: "canal-certs"
hostPath:
path: "{{ canal_cert_dir }}"
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
containers:
# Runs the flannel daemon to enable vxlan networking between
# container hosts.
@@ -127,6 +132,9 @@ spec:
- name: "canal-certs"
mountPath: "{{ canal_cert_dir }}"
readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and local routes on each
# host.
@@ -148,14 +156,21 @@ spec:
name: canal-config
key: etcd_endpoints
# Disable Calico BGP. Calico is simply enforcing policy.
- - name: CALICO_NETWORKING
- value: "false"
+ - name: CALICO_NETWORKING_BACKEND
+ value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kubespray,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
+ # Set noderef for node controller.
+ - name: CALICO_K8S_NODE_REF
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: FELIX_HEALTHENABLED
+ value: "true"
# Etcd SSL vars
- name: ETCD_CA_CERT_FILE
valueFrom:
@@ -178,6 +193,18 @@ spec:
fieldPath: spec.nodeName
securityContext:
privileged: true
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9099
+ periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
@@ -185,6 +212,9 @@ spec:
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
- name: "canal-certs"
mountPath: "{{ canal_cert_dir }}"
readOnly: true
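The calico/node container now enables Felix health reporting (FELIX_HEALTHENABLED) and the kubelet probes it on port 9099. A minimal sketch of hitting the same endpoint from an ad-hoc check task; the accepted status codes are an assumption:

- name: Canal | Check Felix readiness on this node
  uri:
    url: "http://localhost:9099/readiness"
    status_code: [200, 204]
  register: felix_ready
  until: felix_ready|succeeded
  retries: 5
  delay: 10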
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index 389fe5bd6dc..f6a836f953b 100755
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -12,14 +12,9 @@ cilium_policy_dir: /etc/kubernetes/policy
# Limits for apps
cilium_memory_limit: 500M
-cilium_cpu_limit: 200m
+cilium_cpu_limit: 500m
cilium_memory_requests: 64M
-cilium_cpu_requests: 50m
+cilium_cpu_requests: 100m
# Optional features
cilium_enable_prometheus: false
-
-rbac_resources:
- - sa
- - clusterrole
- - clusterrolebinding
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 12408a00add..6c55be6633d 100755
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -38,7 +38,6 @@
register: cilium_node_manifests
when:
- inventory_hostname in groups['kube-master']
- - rbac_enabled or item.type not in rbac_resources
- name: Cilium | Set CNI directory permissions
file:
diff --git a/roles/network_plugin/cilium/tasks/reset.yml b/roles/network_plugin/cilium/tasks/reset.yml
new file mode 100644
index 00000000000..65b6e9a7880
--- /dev/null
+++ b/roles/network_plugin/cilium/tasks/reset.yml
@@ -0,0 +1,9 @@
+---
+- name: reset | check and remove devices if still present
+ include_tasks: reset_iface.yml
+ vars:
+ iface: "{{ item }}"
+ with_items:
+ - cilium_host
+ - cilium_net
+ - cilium_vxlan
diff --git a/roles/network_plugin/cilium/tasks/reset_iface.yml b/roles/network_plugin/cilium/tasks/reset_iface.yml
new file mode 100644
index 00000000000..d4a5aad90f5
--- /dev/null
+++ b/roles/network_plugin/cilium/tasks/reset_iface.yml
@@ -0,0 +1,9 @@
+---
+- name: "reset | check if network device {{ iface }} is present"
+ stat:
+ path: "/sys/class/net/{{ iface }}"
+ register: device_remains
+
+- name: "reset | remove network device {{ iface }}"
+ command: "ip link del {{ iface }}"
+ when: device_remains.stat.exists
diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
index a96bb853138..cf5758465dd 100755
--- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
@@ -1,29 +1,49 @@
-kind: ConfigMap
+---
apiVersion: v1
+kind: ConfigMap
metadata:
name: cilium-config
- namespace: {{ system_namespace }}
+ namespace: kube-system
data:
# This etcd-config contains the etcd endpoints of your cluster. If you use
- # TLS please make sure you uncomment the ca-file line and add the respective
- # certificate has a k8s secret, see explanation bellow in the comment labeled
- # "ETCD-CERT"
+ # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
etcd-config: |-
---
- endpoints:
+ endpoints:
{% for ip_addr in etcd_access_addresses.split(',') %}
- - {{ ip_addr }}
+ - {{ ip_addr }}
{% endfor %}
- #
- # In case you want to use TLS in etcd, uncomment the following line
- # and add the certificate as explained in the comment labeled "ETCD-CERT"
+
+ # In case you want to use TLS in etcd, uncomment the 'ca-file' line
+ # and create a kubernetes secret by following the tutorial in
+ # https://cilium.link/etcd-config
ca-file: "{{ cilium_cert_dir }}/ca_cert.crt"
- #
+
# In case you want client to server authentication, uncomment the following
- # lines and add the certificate and key in cilium-etcd-secrets bellow
+ # lines and create a kubernetes secret by following the tutorial in
+ # https://cilium.link/etcd-config
key-file: "{{ cilium_cert_dir }}/key.pem"
cert-file: "{{ cilium_cert_dir }}/cert.crt"
# If you want to run cilium in debug mode change this value to true
debug: "{{ cilium_debug }}"
disable-ipv4: "{{ cilium_disable_ipv4 }}"
+  # If you want to clean cilium state, change this value to true
+ clean-cilium-state: "false"
+ legacy-host-allows-world: "false"
+
+ # If you want cilium monitor to aggregate tracing for packets, set this level
+ # to "low", "medium", or "maximum". The higher the level, the less packets
+ # that will be seen in monitor output.
+ monitor-aggregation-level: "none"
+
+ # Regular expression matching compatible Istio sidecar istio-proxy
+ # container image names
+ sidecar-istio-proxy-image: "cilium/istio_proxy"
+
+ # Encapsulation mode for communication between nodes
+ # Possible values:
+ # - disabled
+ # - vxlan (default)
+ # - geneve
+ tunnel: "vxlan"
diff --git a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
index 8eae0e8edb6..60cdb5b6e4c 100755
--- a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
@@ -1,62 +1,74 @@
---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cilium
rules:
-- apiGroups:
- - "networking.k8s.io"
- resources:
- - networkpolicies
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - namespaces
- - services
- - nodes
- - endpoints
- - componentstatuses
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- verbs:
- - get
- - list
- - watch
- - update
-- apiGroups:
- - extensions
- resources:
- - networkpolicies #FIXME remove this when we drop support for k8s NP-beta GH-1202
- - thirdpartyresources
- - ingresses
- verbs:
- - create
- - get
- - list
- - watch
-- apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - get
- - list
- - watch
-- apiGroups:
- - cilium.io
- resources:
- - ciliumnetworkpolicies
- verbs:
- - "*"
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - services
+ - nodes
+ - endpoints
+ - componentstatuses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies # FIXME remove this when we drop support for k8s NP-beta GH-1202
+ - thirdpartyresources
+ - ingresses
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "apiextensions.k8s.io"
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - cilium.io
+ resources:
+ - ciliumnetworkpolicies
+ - ciliumnetworkpolicies/status
+ - ciliumendpoints
+ - ciliumendpoints/status
+ verbs:
+ - "*"
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
index dcfe4d47122..35994bc684d 100755
--- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
@@ -1,6 +1,6 @@
---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cilium
roleRef:
@@ -8,8 +8,8 @@ roleRef:
kind: ClusterRole
name: cilium
subjects:
-- kind: ServiceAccount
- name: cilium
- namespace: {{ system_namespace }}
-- kind: Group
- name: system:nodes
+ - kind: ServiceAccount
+ name: cilium
+ namespace: kube-system
+ - kind: Group
+ name: system:nodes
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index 9f48a62db20..7fff7ac0eac 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -1,10 +1,21 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
- namespace: {{ system_namespace }}
+ namespace: kube-system
spec:
+ updateStrategy:
+ type: "RollingUpdate"
+ rollingUpdate:
+ # Specifies the maximum number of Pods that can be unavailable during the update process.
+      # The current default value is 1 or 100% for daemonsets; an explicit value is set here
+ # to avoid confusion, as the default value is specific to the type (daemonset/deployment).
+ maxUnavailable: "100%"
+ selector:
+ matchLabels:
+ k8s-app: cilium
+ kubernetes.io/cluster-service: "true"
template:
metadata:
labels:
@@ -23,141 +34,186 @@ spec:
prometheus.io/port: "9090"
{% endif %}
spec:
-{% if rbac_enabled %}
serviceAccountName: cilium
-{% endif %}
+ initContainers:
+ - name: clean-cilium-state
+ image: docker.io/library/busybox:1.28.4
+ imagePullPolicy: IfNotPresent
+ command: ['sh', '-c', 'if [ "${CLEAN_CILIUM_STATE}" = "true" ]; then rm -rf /var/run/cilium/state; rm -rf /sys/fs/bpf/tc/globals/cilium_*; fi']
+ volumeMounts:
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
+ - name: cilium-run
+ mountPath: /var/run/cilium
+ env:
+ - name: "CLEAN_CILIUM_STATE"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ optional: true
+ key: clean-cilium-state
containers:
- - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
- imagePullPolicy: Always
- name: cilium-agent
- command: [ "cilium-agent" ]
- args:
- - "--debug=$(CILIUM_DEBUG)"
- - "-t"
- - "vxlan"
- - "--kvstore"
- - "etcd"
- - "--kvstore-opt"
- - "etcd.config=/var/lib/etcd-config/etcd.config"
- - "--disable-ipv4=$(DISABLE_IPV4)"
+ - image: {{ cilium_image_repo }}:{{ cilium_image_tag }}
+ imagePullPolicy: Always
+ name: cilium-agent
+ command: ["cilium-agent"]
+ args:
+ - "--debug=$(CILIUM_DEBUG)"
+ - "--kvstore=etcd"
+ - "--kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config"
+ - "--disable-ipv4=$(DISABLE_IPV4)"
+{% if cilium_enable_prometheus %}
+ ports:
+ - name: prometheus
+ containerPort: 9090
+{% endif %}
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/cni-install.sh"
+ preStop:
+ exec:
+ command:
+ - "/cni-uninstall.sh"
+ env:
+ - name: "K8S_NODE_NAME"
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: "CILIUM_DEBUG"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: debug
+ - name: "DISABLE_IPV4"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: disable-ipv4
{% if cilium_enable_prometheus %}
- ports:
- - name: prometheus
- containerPort: 9090
+ # Note: this variable is a no-op if not defined, and is used in the
+ # prometheus examples.
+ - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-metrics-config
+ optional: true
+ key: prometheus-serve-addr
{% endif %}
- lifecycle:
- postStart:
+ - name: "CILIUM_LEGACY_HOST_ALLOWS_WORLD"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ optional: true
+ key: legacy-host-allows-world
+ - name: "CILIUM_SIDECAR_ISTIO_PROXY_IMAGE"
+ valueFrom:
+ configMapKeyRef:
+ name: cilium-config
+ key: sidecar-istio-proxy-image
+ optional: true
+ - name: "CILIUM_TUNNEL"
+ valueFrom:
+ configMapKeyRef:
+ key: tunnel
+ name: cilium-config
+ optional: true
+ - name: "CILIUM_MONITOR_AGGREGATION_LEVEL"
+ valueFrom:
+ configMapKeyRef:
+ key: monitor-aggregation-level
+ name: cilium-config
+ optional: true
+ resources:
+ limits:
+ cpu: {{ cilium_cpu_limit }}
+ memory: {{ cilium_memory_limit }}
+ requests:
+ cpu: {{ cilium_cpu_requests }}
+ memory: {{ cilium_memory_requests }}
+ livenessProbe:
exec:
command:
- - "/cni-install.sh"
- preStop:
+ - cilium
+ - status
+ # The initial delay for the liveness probe is intentionally large to
+          # avoid an endless kill & restart cycle in the event that the initial
+ # bootstrapping takes longer than expected.
+ initialDelaySeconds: 120
+ failureThreshold: 10
+ periodSeconds: 10
+ readinessProbe:
exec:
command:
- - "/cni-uninstall.sh"
- env:
- - name: "K8S_NODE_NAME"
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: "CILIUM_DEBUG"
- valueFrom:
- configMapKeyRef:
- name: cilium-config
- key: debug
- - name: "DISABLE_IPV4"
- valueFrom:
- configMapKeyRef:
- name: cilium-config
- key: disable-ipv4
-{% if cilium_enable_prometheus %}
- # Note: this variable is a no-op if not defined, and is used in the
- # prometheus examples.
- - name: "CILIUM_PROMETHEUS_SERVE_ADDR"
- valueFrom:
- configMapKeyRef:
- name: cilium-metrics-config
- optional: true
- key: prometheus-serve-addr
-{% endif %}
- livenessProbe:
- exec:
- command:
- - cilium
- - status
- # The initial delay for the liveness probe is intentionally large to
- # avoid an endless kill & restart cycle if in the event that the initial
- # bootstrapping takes longer than expected.
- initialDelaySeconds: 120
- failureThreshold: 10
- periodSeconds: 10
- readinessProbe:
- exec:
- command:
- - cilium
- - status
- initialDelaySeconds: 5
- periodSeconds: 5
- volumeMounts:
- - name: bpf-maps
- mountPath: /sys/fs/bpf
- - name: cilium-run
- mountPath: /var/run/cilium
- - name: cni-path
- mountPath: /host/opt/cni/bin
- - name: etc-cni-netd
- mountPath: /host/etc/cni/net.d
- - name: docker-socket
- mountPath: /var/run/docker.sock
- readOnly: true
- - name: etcd-config-path
- mountPath: /var/lib/etcd-config
- readOnly: true
- - name: cilium-certs
- mountPath: {{ cilium_cert_dir }}
- readOnly: true
- securityContext:
- capabilities:
- add:
- - "NET_ADMIN"
- privileged: true
+ - cilium
+ - status
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ volumeMounts:
+ - name: bpf-maps
+ mountPath: /sys/fs/bpf
+ - name: cilium-run
+ mountPath: /var/run/cilium
+ - name: cni-path
+ mountPath: /host/opt/cni/bin
+ - name: etc-cni-netd
+ mountPath: /host/etc/cni/net.d
+ - name: docker-socket
+ mountPath: /var/run/docker.sock
+ readOnly: true
+ - name: etcd-config-path
+ mountPath: /var/lib/etcd-config
+ readOnly: true
+ - name: cilium-certs
+ mountPath: {{ cilium_cert_dir }}
+ readOnly: true
+ securityContext:
+ capabilities:
+ add:
+ - "NET_ADMIN"
+ privileged: true
hostNetwork: true
volumes:
- # To keep state between restarts / upgrades
+ # To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
- # To keep state between restarts / upgrades
+ # To keep state between restarts / upgrades
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
- # To read docker events from the node
+ # To read docker events from the node
- name: docker-socket
hostPath:
path: /var/run/docker.sock
- # To install cilium cni plugin in the host
+ # To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
- # To install cilium cni configuration in the host
+ # To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
- path: /etc/cni/net.d
- - name: cilium-certs
- hostPath:
- path: {{ cilium_cert_dir }}
- # To read the etcd config stored in config maps
+ path: /etc/cni/net.d
+ # To read the etcd config stored in config maps
- name: etcd-config-path
configMap:
name: cilium-config
items:
- - key: etcd-config
- path: etcd.config
+ - key: etcd-config
+ path: etcd.config
+ # To read the k8s etcd secrets in case the user might want to use TLS
+ - name: cilium-certs
+ hostPath:
+ path: {{ cilium_cert_dir }}
+
+ restartPolicy: Always
tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- - effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
- value: "true"
- # Mark cilium's pod as critical for rescheduling
- - key: CriticalAddonsOnly
- operator: "Exists"
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ # Mark cilium's pod as critical for rescheduling
+ - key: CriticalAddonsOnly
+ operator: "Exists"
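The DaemonSet now carries a busybox init container that wipes /var/run/cilium/state and the BPF maps whenever the clean-cilium-state key of the cilium-config ConfigMap is "true". A minimal sketch of flipping that key from a task so the next agent restart performs the cleanup; resetting it back to "false" afterwards is left to the operator, and the task itself is illustrative:

- name: Cilium | Request state cleanup on next agent restart
  command: >-
    {{ bin_dir }}/kubectl --namespace kube-system patch configmap cilium-config
    --type merge -p '{"data":{"clean-cilium-state":"true"}}'
  run_once: true
  delegate_to: "{{ groups['kube-master'] | first }}"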
diff --git a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
index d6ef2a4314d..c03ac59b49b 100755
--- a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
@@ -3,4 +3,4 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/cloud/tasks/main.yml b/roles/network_plugin/cloud/tasks/main.yml
index 7b66503720f..59750770be4 100644
--- a/roles/network_plugin/cloud/tasks/main.yml
+++ b/roles/network_plugin/cloud/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Cloud | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/contiv/files/generate-certificate.sh b/roles/network_plugin/contiv/files/generate-certificate.sh
index e794dbb6966..0235b2664bc 100644
--- a/roles/network_plugin/contiv/files/generate-certificate.sh
+++ b/roles/network_plugin/contiv/files/generate-certificate.sh
@@ -17,7 +17,7 @@ rm -f $KEY_PATH
rm -f $CERT_PATH
openssl genrsa -out $KEY_PATH 2048 >/dev/null 2>&1
-openssl req -new -x509 -sha256 -days 3650 \
+openssl req -new -x509 -sha256 -days 36500 \
-key $KEY_PATH \
-out $CERT_PATH \
-subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index d9b372480b8..bc9dcd3c0a4 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -97,7 +97,7 @@
and contiv_enable_api_proxy and contiv_generate_certificate"
- name: Contiv | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -a /opt/cni/bin/* /cnibindir/'"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index 140379b13f4..cea0efe5118 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: contiv-api-proxy
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-api-proxy
spec:
@@ -12,7 +12,7 @@ spec:
template:
metadata:
name: contiv-api-proxy
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-api-proxy
annotations:
@@ -27,9 +27,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
-{% if rbac_enabled %}
serviceAccountName: contiv-netmaster
-{% endif %}
containers:
- name: contiv-api-proxy
image: {{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}
diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
index 0505cd1f1e9..249d9d88ebb 100644
--- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -5,7 +5,7 @@ kind: ConfigMap
apiVersion: v1
metadata:
name: contiv-config
- namespace: {{ system_namespace }}
+ namespace: kube-system
data:
# The location of your cluster store. This is set to the
# avdertise-client value below from the contiv-etcd service.
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index a9690cc2fa2..75946d82191 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: contiv-etcd-proxy
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-etcd-proxy
spec:
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
index 8060f4c01e6..9d750285751 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: contiv-etcd
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-etcd
spec:
@@ -26,7 +26,7 @@ spec:
effect: NoSchedule
initContainers:
- name: contiv-etcd-init
- image: ferest/etcd-initer:latest
+ image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}
imagePullPolicy: Always
env:
- name: ETCD_INIT_ARGSFILE
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
index 82ca0043753..92b4f588d4d 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: contiv-netmaster
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups:
- ""
@@ -16,3 +16,11 @@ rules:
- watch
- list
- update
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
index 74c5e3145be..73d636775a1 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: contiv-netmaster
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
index 0c1bfb3e58f..758ea449336 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netmaster
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 56be2d93d6a..787fe5c279e 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: contiv-netmaster
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-netmaster
spec:
@@ -12,7 +12,7 @@ spec:
template:
metadata:
name: contiv-netmaster
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-netmaster
annotations:
@@ -27,9 +27,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
-{% if rbac_enabled %}
serviceAccountName: contiv-netmaster
-{% endif %}
containers:
- name: contiv-netmaster
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
index c26e094edb3..e01fbef5d50 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: contiv-netplugin
- namespace: {{ system_namespace }}
+ namespace: kube-system
rules:
- apiGroups:
- ""
@@ -19,3 +19,11 @@ rules:
- list
- update
- get
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
index 0c989008a65..6cac217fc74 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: contiv-netplugin
- namespace: {{ system_namespace }}
+ namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
index edfac8bb34a..8d00ec8cb43 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netplugin
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index 9c2c0a036ad..b7927f51c58 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -5,7 +5,7 @@ kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: contiv-netplugin
- namespace: {{ system_namespace }}
+ namespace: kube-system
labels:
k8s-app: contiv-netplugin
spec:
@@ -26,9 +26,7 @@ spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
-{% if rbac_enabled %}
serviceAccountName: contiv-netplugin
-{% endif %}
containers:
# Runs netplugin container on each Kubernetes node. This
# container programs network policy and routes on each
diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml
index 08f4ac145f1..e48a9475a73 100644
--- a/roles/network_plugin/flannel/defaults/main.yml
+++ b/roles/network_plugin/flannel/defaults/main.yml
@@ -5,9 +5,15 @@
# flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}"
## interface that should be used for flannel operations
-## This is actually an inventory node-level item
+## This is actually an inventory cluster-level item
# flannel_interface:
+## Select interface that should be used for flannel operations by regexp on Name or IP
+## This is actually an inventory cluster-level item
+## example: select interface with ip from net 10.0.0.0/23
+## single quote and escape backslashes
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
# You can choose what type of flannel backend to use
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
flannel_backend_type: "vxlan"
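Using the new selector is a one-line inventory override; the subnet below is illustrative and mirrors the example in the comment above:

# group_vars/k8s-cluster.yml — pick the interface whose IP falls in 10.0.0.0/23
flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'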
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
index 29b79b11d12..c0c3aee3ebd 100644
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ b/roles/network_plugin/flannel/tasks/main.yml
@@ -11,4 +11,3 @@
register: flannel_node_manifests
when:
- inventory_hostname in groups['kube-master']
- - rbac_enabled or item.type not in rbac_resources
diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml
new file mode 100644
index 00000000000..b2777b4efee
--- /dev/null
+++ b/roles/network_plugin/flannel/tasks/reset.yml
@@ -0,0 +1,18 @@
+---
+- name: reset | check cni network device
+ stat:
+ path: /sys/class/net/cni0
+ register: cni
+
+- name: reset | remove the network device created by the flannel
+ command: ip link del cni0
+ when: cni.stat.exists
+
+- name: reset | check flannel network device
+ stat:
+ path: /sys/class/net/flannel.1
+ register: flannel
+
+- name: reset | remove the network device created by the flannel
+ command: ip link del flannel.1
+ when: flannel.stat.exists
diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
index aafe2a0f525..873d0e3f5a8 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -29,6 +29,14 @@ rules:
- nodes/status
verbs:
- patch
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -41,4 +49,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: flannel
- namespace: "{{system_namespace}}"
\ No newline at end of file
+ namespace: "kube-system"
\ No newline at end of file
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 6c69dcaa84a..de9be8d9e17 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -3,7 +3,7 @@ kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
labels:
tier: node
app: flannel
@@ -41,7 +41,7 @@ apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel
- namespace: "{{system_namespace}}"
+ namespace: "kube-system"
labels:
tier: node
k8s-app: flannel
@@ -52,9 +52,10 @@ spec:
tier: node
k8s-app: flannel
spec:
-{% if rbac_enabled %}
serviceAccountName: flannel
-{% endif %}
+      # Without this nodeSelector, this pod could be scheduled onto Windows nodes in a mixed cluster
+ nodeSelector:
+ beta.kubernetes.io/os: linux
containers:
- name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag }}
@@ -66,7 +67,7 @@ spec:
requests:
cpu: {{ flannel_cpu_requests }}
memory: {{ flannel_memory_requests }}
- command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ]
+ command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ]
securityContext:
privileged: true
env:
diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml
index b59f0ab6341..ee636e56f42 100644
--- a/roles/network_plugin/weave/defaults/main.yml
+++ b/roles/network_plugin/weave/defaults/main.yml
@@ -1,29 +1,58 @@
---
-# Limits
-weave_memory_limit: 400M
-weave_cpu_limit: 30m
-weave_memory_requests: 64M
-weave_cpu_requests: 10m
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: unset
-# weave_peers: unset
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
+
+# Weave's network password for encryption, if null then no network encryption.
+weave_password: ~
+
+# Set to true to disable checking for new Weave Net versions (the check is
+# enabled by default)
+weave_checkpoint_disable: false
+
+# Soft limit on the number of connections between peers. Defaults to 100.
+weave_conn_limit: 100
+
+# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
+# for containers attached. If you need to disable hairpin, e.g. your kernel is
+# one of those that can panic if hairpin is enabled, then you can disable it by
+# setting `HAIRPIN_MODE=false`.
+weave_hairpin_mode: true
+
+# The range of IP addresses used by Weave Net and the subnet they are placed in
+# (CIDR format; default 10.32.0.0/12)
+weave_ipalloc_range: "{{ kube_pods_subnet }}"
+
+# Set to 0 to disable Network Policy Controller (default is on)
+weave_expect_npc: "{{ enable_network_policy }}"
+
+# List of addresses of peers in the Kubernetes cluster (default is to fetch the
+# list from the api-server)
+weave_kube_peers: ~
+
+# Set the initialization mode of the IP Address Manager (defaults to consensus
+# amongst the KUBE_PEERS)
+weave_ipalloc_init: ~
+
+# Set the IP address used as a gateway from the Weave network to the host
+# network - this is useful if you are configuring the addon as a static pod.
+weave_expose_ip: ~
+
+# Address and port that the Weave Net daemon will serve Prometheus-style
+# metrics on (defaults to 0.0.0.0:6782)
+weave_metrics_addr: ~
+
+# Address and port that the Weave Net daemon will serve status requests on
+# (defaults to disabled)
+weave_status_addr: ~
+
+# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
+# underlying network has a tighter limit, or set a larger size for better
+# performance if your network supports jumbo frames (e.g. 8916)
weave_mtu: 1376
-# this variable is use in seed mode
-weave_ip_current_cluster: "{% for host in groups['k8s-cluster'] %}{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{% if not loop.last %} {% endif %}{% endfor %}"
+# Set to true to preserve the client source IP address when accessing Services
+# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
+# only with Weave IPAM (default).
+weave_no_masq_local: true
+
+# Extra variables passed to launch.sh, useful for enabling seed mode, see
+# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
+weave_extra_args: ~
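With the seed-mode plumbing removed, tuning Weave now comes down to overriding these defaults in the inventory. A minimal sketch of a group_vars override — the values are illustrative:

# group_vars/k8s-cluster.yml (illustrative values)
weave_password: "ChangeMeToALongRandomString"
weave_mtu: 8916
weave_metrics_addr: "0.0.0.0:6782"
weave_checkpoint_disable: true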
diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index f3f1da6ac3a..318b6a3693a 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -1,9 +1,7 @@
---
-- import_tasks: seed.yml
- when: weave_mode_seed
- name: Weave | Copy cni plugins from hyperkube
- command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
+ command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
register: cni_task_result
until: cni_task_result.rc == 0
retries: 4
@@ -13,9 +11,12 @@
- hyperkube
- upgrade
-- name: Weave | Create weave-net manifest
+- name: Weave | Create manifest
template:
src: weave-net.yml.j2
dest: "{{ kube_config_dir }}/weave-net.yml"
- mode: 0640
- register: weave_manifest
+
+- name: Weave | Fix nodePort for Weave
+ template:
+ src: 00-weave.conflist.j2
+ dest: /etc/cni/net.d/00-weave.conflist
diff --git a/roles/network_plugin/weave/tasks/seed.yml b/roles/network_plugin/weave/tasks/seed.yml
deleted file mode 100644
index 2765267e58f..00000000000
--- a/roles/network_plugin/weave/tasks/seed.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-- name: Weave seed | Set seed if first time
- set_fact:
- seed: '{% for host in groups["k8s-cluster"] %}{{ hostvars[host]["ansible_default_ipv4"]["macaddress"] }}{% if not loop.last %},{% endif %}{% endfor %}'
- when: "weave_seed == 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set seed if not first time
- set_fact:
- seed: '{{ weave_seed }}'
- when: "weave_seed != 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set peers if fist time
- set_fact:
- peers: '{{ weave_ip_current_cluster }}'
- when: "weave_peers == 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Set peers if existing peers
- set_fact:
- peers: '{{ weave_peers }}{% for ip in weave_ip_current_cluster.split(" ") %}{% if ip not in weave_peers.split(" ") %} {{ ip }}{% endif %}{% endfor %}'
- when: "weave_peers != 'uninitialized'"
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Save seed
- lineinfile:
- dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
- state: present
- regexp: '^weave_seed:'
- line: 'weave_seed: {{ seed }}'
- become: no
- delegate_to: 127.0.0.1
- run_once: true
- tags:
- - confweave
-
-- name: Weave seed | Save peers
- lineinfile:
- dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
- state: present
- regexp: '^weave_peers:'
- line: 'weave_peers: {{ peers }}'
- become: no
- delegate_to: 127.0.0.1
- run_once: true
- tags:
- - confweave
diff --git a/roles/network_plugin/weave/templates/00-weave.conflist.j2 b/roles/network_plugin/weave/templates/00-weave.conflist.j2
new file mode 100644
index 00000000000..45ae0b9676d
--- /dev/null
+++ b/roles/network_plugin/weave/templates/00-weave.conflist.j2
@@ -0,0 +1,16 @@
+{
+ "cniVersion": "0.3.0",
+ "name": "mynet",
+ "plugins": [
+ {
+ "name": "weave",
+ "type": "weave-net",
+ "hairpinMode": true
+ },
+ {
+ "type": "portmap",
+ "capabilities": {"portMappings": true},
+ "snat": true
+ }
+ ]
+}
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index b292339b568..09e5fbb7c7c 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -8,8 +8,8 @@ items:
name: weave-net
labels:
name: weave-net
- namespace: {{ system_namespace }}
- - apiVersion: rbac.authorization.k8s.io/v1
+ namespace: kube-system
+ - apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
@@ -27,7 +27,7 @@ items:
- list
- watch
- apiGroups:
- - extensions
+ - networking.k8s.io
resources:
- networkpolicies
verbs:
@@ -35,27 +35,48 @@ items:
- list
- watch
- apiGroups:
- - 'networking.k8s.io'
+ - ''
resources:
- - networkpolicies
+ - nodes/status
verbs:
- - get
- - list
- - watch
+ - patch
+ - update
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ - apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: weave-net
+ labels:
+ name: weave-net
+ roleRef:
+ kind: ClusterRole
+ name: weave-net
+ apiGroup: rbac.authorization.k8s.io
+ subjects:
+ - kind: ServiceAccount
+ name: weave-net
+ namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
- namespace: kube-system
labels:
name: weave-net
+ namespace: kube-system
rules:
- apiGroups:
- ''
- resources:
- - configmaps
resourceNames:
- weave-net
+ resources:
+ - configmaps
verbs:
- get
- update
@@ -65,14 +86,15 @@ items:
- configmaps
verbs:
- create
- - apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
+ - apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
metadata:
name: weave-net
labels:
name: weave-net
+ namespace: kube-system
roleRef:
- kind: ClusterRole
+ kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
@@ -85,9 +107,9 @@ items:
name: weave-net
labels:
name: weave-net
- version: {{ weave_version }}
- namespace: {{ system_namespace }}
+ namespace: kube-system
spec:
+ minReadySeconds: 5
template:
metadata:
labels:
@@ -96,33 +118,58 @@ items:
containers:
- name: weave
command:
-{% if weave_mode_seed == true %}
- - /bin/sh
- - -c
- - export EXTRA_ARGS=--name=$(cat /sys/class/net/{{ ansible_default_ipv4['interface'] }}/address) && /home/weave/launch.sh
-{% else %}
- /home/weave/launch.sh
-{% endif %}
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- - name: WEAVE_MTU
- value: "{{ weave_mtu }}"
+ - name: WEAVE_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: weave-net
+ key: WEAVE_PASSWORD
+ - name: CHECKPOINT_DISABLE
+ value: "{{ weave_checkpoint_disable | bool | int }}"
+ - name: CONN_LIMIT
+ value: "{{ weave_conn_limit | int }}"
+ - name: HAIRPIN_MODE
+ value: "{{ weave_hairpin_mode | bool }}"
- name: IPALLOC_RANGE
- value: {{ kube_pods_subnet }}
-{% if weave_mode_seed == true %}
+ value: "{{ weave_ipalloc_range }}"
+ - name: EXPECT_NPC
+ value: "{{ weave_expect_npc | bool | int }}"
+{% if weave_kube_peers %}
- name: KUBE_PEERS
- value: {{ peers }}
+ value: "{{ weave_kube_peers }}"
+{% endif %}
+{% if weave_ipalloc_init %}
- name: IPALLOC_INIT
- value: seed={{ seed }}
+ value: "{{ weave_ipalloc_init }}"
+{% endif %}
+{% if weave_expose_ip %}
+ - name: WEAVE_EXPOSE_IP
+ value: "{{ weave_expose_ip }}"
+{% endif %}
+{% if weave_metrics_addr %}
+ - name: WEAVE_METRICS_ADDR
+ value: "{{ weave_metrics_addr }}"
+{% endif %}
+{% if weave_status_addr %}
+ - name: WEAVE_STATUS_ADDR
+ value: "{{ weave_status_addr }}"
+{% endif %}
+ - name: WEAVE_MTU
+ value: "{{ weave_mtu | int }}"
+ - name: NO_MASQ_LOCAL
+ value: "{{ weave_no_masq_local | bool | int }}"
+{% if weave_extra_args %}
+ - name: EXTRA_ARGS
+ value: "{{ weave_extra_args }}"
{% endif %}
- - name: WEAVE_PASSWORD
- value: {{ weave_password }}
image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }}
- imagePullPolicy: Always
+ imagePullPolicy: {{ k8s_image_pull_policy }}
livenessProbe:
httpGet:
host: 127.0.0.1
@@ -149,19 +196,24 @@ items:
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- readOnly: false
- name: weave-npc
+ args: []
+ env:
+ - name: HOSTNAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }}
- imagePullPolicy: Always
+ imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
requests:
- cpu: {{ weave_cpu_requests }}
- memory: {{ weave_memory_requests }}
- limits:
- cpu: {{ weave_cpu_limit }}
- memory: {{ weave_memory_limit }}
+ cpu: 10m
securityContext:
privileged: true
+ volumeMounts:
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
hostNetwork: true
hostPID: true
restartPolicy: Always
@@ -193,7 +245,15 @@ items:
- name: xtables-lock
hostPath:
path: /run/xtables.lock
+ type: FileOrCreate
updateStrategy:
rollingUpdate:
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: weave-net
+ namespace: kube-system
+ data:
+ WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}"
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 395f9986bae..b820bff0998 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,8 +1,9 @@
---
- name: Delete node
- command: kubectl delete node {{ item }}
+ command: "{{ bin_dir}}/kubectl delete node {{ item }}"
with_items:
- - "{{ groups['kube-node'] }}"
- delegate_to: "{{ groups['kube-master'][0] }}"
+ - "{{ node.split(',') | default(groups['kube-node']) }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 12091917ac7..5db5fa13a89 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -9,7 +9,8 @@
--timeout {{ drain_timeout }}
--delete-local-data {{ item }}
with_items:
- - "{{ groups['kube-node'] }}"
+ - "{{ node.split(',') | default(groups['kube-node']) }}"
failed_when: false
- delegate_to: "{{ groups['kube-master'][0] }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
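Both remove-node task files now honour an optional `node` variable, a comma-separated list of nodes to drain and delete, falling back to the whole kube-node group when it is unset. A minimal sketch of an extra-vars file for removing two nodes; the file name is illustrative and would be passed with -e @remove.yml:

# remove.yml — illustrative extra-vars file
node: "node4,node5"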
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 9ae683df343..88dec8d7a5a 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -60,8 +60,18 @@
tags:
- docker
+- name: reset | remove all cri-o containers
+ shell: "crictl ps -aq | xargs -r crictl rm"
+ register: remove_all_crio_containers
+ retries: 4
+ until: remove_all_crio_containers.rc == 0
+ delay: 5
+ tags:
+ - crio
+ when: container_manager == 'crio'
+
- name: reset | gather mounted kubelet dirs
- shell: mount | grep /var/lib/kubelet | awk '{print $3}' | tac
+ shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
check_mode: no
register: mounted_dirs
tags:
@@ -136,6 +146,7 @@
- "{{ bin_dir }}/kubeadm"
- "{{ bin_dir }}/helm"
- "{{ bin_dir }}/calicoctl"
+ - "{{ bin_dir }}/calico-upgrade"
- "{{ bin_dir }}/weave"
- /var/lib/rkt
- /etc/vault
@@ -167,33 +178,12 @@
- files
- dns
-- name: reset | check cni network device
- stat:
- path: /sys/class/net/cni0
- register: cni
- when: kube_network_plugin == 'flannel'
- tags:
- - flannel
-
-- name: reset | remove the network device created by the flannel
- command: ip link del cni0
- when: kube_network_plugin == 'flannel' and cni.stat.exists
+- name: reset | include file with reset tasks specific to the network_plugin if exists
+ include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
+ when:
+ - kube_network_plugin in ['flannel', 'cilium']
tags:
- - flannel
-
-- name: reset | check flannel network device
- stat:
- path: /sys/class/net/flannel.1
- register: flannel
- when: kube_network_plugin == 'flannel'
- tags:
- - flannel
-
-- name: reset | remove the network device created by the flannel
- command: ip link del flannel.1
- when: kube_network_plugin == 'flannel' and flannel.stat.exists
- tags:
- - flannel
+ - network
- name: reset | Restart network
service:
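The new cri-o branch in this file removes containers only; pod sandboxes can be cleaned up with the matching crictl subcommands. A minimal sketch following the same pattern — whether it is needed depends on the crictl version in use:

- name: reset | remove all cri-o pod sandboxes
  shell: "crictl pods -q | xargs -r crictl rmp"
  when: container_manager == 'crio'
  tags:
    - crio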
diff --git a/roles/rkt/files/rkt-gc.sh b/roles/rkt/files/rkt-gc.sh
new file mode 100644
index 00000000000..e260668cf53
--- /dev/null
+++ b/roles/rkt/files/rkt-gc.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+rkt gc
diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml
index 599f9e50e2b..f881a81fe40 100644
--- a/roles/rkt/tasks/install.yml
+++ b/roles/rkt/tasks/install.yml
@@ -34,3 +34,13 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "RedHat"
+
+- name: install rkt pkg on openSUSE
+ zypper:
+ name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
+ state: present
+ register: rkt_task_result
+ until: rkt_task_result|succeeded
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ when: ansible_os_family == "Suse"
diff --git a/roles/rkt/tasks/main.yml b/roles/rkt/tasks/main.yml
index ab9571b1309..00f9e79c4c9 100644
--- a/roles/rkt/tasks/main.yml
+++ b/roles/rkt/tasks/main.yml
@@ -1,4 +1,13 @@
---
-
- name: Install rkt
import_tasks: install.yml
+ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+
+- name: Set up cron job to do garbage cleanup
+ copy:
+ src: rkt-gc.sh
+ dest: /etc/cron.hourly/rkt-gc.sh
+ owner: root
+ group: root
+ mode: 0750
+ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
diff --git a/roles/rkt/vars/suse.yml b/roles/rkt/vars/suse.yml
new file mode 100644
index 00000000000..13149e8fbfe
--- /dev/null
+++ b/roles/rkt/vars/suse.yml
@@ -0,0 +1,2 @@
+---
+rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index 4eb055f7e13..b39c052d669 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -16,14 +16,19 @@ vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
+vault_lib_dir: "/var/lib/vault"
vault_log_dir: "/var/log/vault"
-vault_version: 0.8.1
-vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
-vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
+vault_version: 0.10.1
+vault_binary_checksum: 66f0f1b0b221d664dd5913f8697409d7401df4bb2a19c7277e8fbad152063fae
+vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
+
+# Arch of Docker images and needed packages
+image_arch: "{{host_architecture}}"
+
vault_download_vars:
container: "{{ vault_deployment_type != 'host' }}"
- dest: "vault/vault_{{ vault_version }}_linux_amd64.zip"
+ dest: "vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
enabled: true
mode: "0755"
owner: "vault"
@@ -64,10 +69,10 @@ vault_config:
etcd:
address: "{{ vault_etcd_url }}"
ha_enabled: "true"
- redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}"
- tls_ca_file: "{{ vault_etcd_cert_dir }}/ca.pem"
- tls_cert_file: "{{ vault_etcd_cert_dir}}/node-{{ inventory_hostname }}.pem"
- tls_key_file: "{{ vault_etcd_cert_dir}}/node-{{ inventory_hostname }}-key.pem"
+ redirect_addr: "https://{{ inventory_hostname }}:{{ vault_port }}"
+ tls_ca_file: "{{ etcd_cert_dir }}/ca.pem"
+ tls_cert_file: "{{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem"
+ tls_key_file: "{{ etcd_cert_dir}}/node-{{ inventory_hostname }}-key.pem"
cluster_name: "kubernetes-vault"
default_lease_ttl: "{{ vault_default_lease_ttl }}"
max_lease_ttl: "{{ vault_max_lease_ttl }}"
@@ -80,13 +85,15 @@ vault_config:
vault_secret_shares: 1
vault_secret_threshold: 1
+vault_successful_http_codes: ["200", "429", "500", "501", "503"]
+
vault_ca_options:
vault:
common_name: vault
format: pem
ttl: "{{ vault_max_lease_ttl }}"
exclude_cn_from_sans: true
- alt_names: "vault.{{ system_namespace }}.svc.{{ dns_domain }},vault.{{ system_namespace }}.svc,vault.{{ system_namespace }},vault"
+ alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault"
etcd:
common_name: etcd
format: pem
@@ -102,10 +109,24 @@ vault_client_headers:
Accept: "application/json"
Content-Type: "application/json"
-vault_etcd_cert_dir: /etc/ssl/etcd/ssl
-vault_kube_cert_dir: /etc/kubernetes/ssl
+etcd_cert_dir: /etc/ssl/etcd/ssl
+kube_cert_dir: /etc/kubernetes/ssl
vault_pki_mounts:
+ userpass:
+ name: userpass
+ default_lease_ttl: "{{ vault_default_lease_ttl }}"
+ max_lease_ttl: "{{ vault_max_lease_ttl }}"
+ description: "Userpass"
+ cert_dir: "{{ vault_cert_dir }}"
+ roles:
+ - name: userpass
+ group: userpass
+ password: "{{ lookup('password', credentials_dir + '/vault/userpass.creds length=15') }}"
+ policy_rules: default
+ role_options:
+ allow_any_name: true
+
vault:
name: vault
default_lease_ttl: "{{ vault_default_lease_ttl }}"
@@ -115,19 +136,20 @@ vault_pki_mounts:
roles:
- name: vault
group: vault
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/vault.creds length=15') }}"
policy_rules: default
- role_options: default
+ role_options:
+ allow_any_name: true
etcd:
name: etcd
default_lease_ttl: "{{ vault_default_lease_ttl }}"
max_lease_ttl: "{{ vault_max_lease_ttl }}"
description: "Etcd Root CA"
- cert_dir: "{{ vault_etcd_cert_dir }}"
+ cert_dir: "{{ etcd_cert_dir }}"
roles:
- name: etcd
group: etcd
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/etcd.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
@@ -138,37 +160,37 @@ vault_pki_mounts:
default_lease_ttl: "{{ vault_default_lease_ttl }}"
max_lease_ttl: "{{ vault_max_lease_ttl }}"
description: "Kubernetes Root CA"
- cert_dir: "{{ vault_kube_cert_dir }}"
+ cert_dir: "{{ kube_cert_dir }}"
roles:
- name: kube-master
group: kube-master
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
organization: "system:masters"
- - name: kube-node
- group: k8s-cluster
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}"
+ - name: front-proxy-client
+ group: kube-master
+ password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
- organization: "system:nodes"
- - name: kube-proxy
+ organization: "system:front-proxy-client"
+ - name: kube-node
group: k8s-cluster
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/kube-node.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
- organization: "system:node-proxier"
- - name: front-proxy-client
+ organization: "system:nodes"
+ - name: kube-proxy
group: k8s-cluster
- password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
- organization: "system:front-proxy"
+ organization: "system:node-proxier"
diff --git a/roles/vault/handlers/main.yml b/roles/vault/handlers/main.yml
new file mode 100644
index 00000000000..3aeb750412b
--- /dev/null
+++ b/roles/vault/handlers/main.yml
@@ -0,0 +1,50 @@
+---
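+# "restart vault" is a no-op command that fans out, via notify, to the
+# service restart, health-wait and unseal handlers defined below.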
+- name: restart vault
+ command: /bin/true
+ notify:
+ - restart vault service
+ - wait for vault up
+ - unseal vault
+
+- name: wait for vault up
+ uri:
+ url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
+ headers: "{{ vault_client_headers }}"
+ status_code: "{{ vault_successful_http_codes | join(',') }}"
+ register: vault_health_check
+ until: vault_health_check|succeeded
+ retries: 10
+ delay: "{{ retry_stagger | random + 3 }}"
+ run_once: yes
+ notify: set facts about local Vault health
+
+- name: wait for vault up nowait
+ uri:
+ url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
+ headers: "{{ vault_client_headers }}"
+ status_code: "{{ vault_successful_http_codes | join(',') }}"
+ register: vault_health_check
+ run_once: yes
+ failed_when: false
+ notify: set facts about local Vault health
+
+- name: set facts about local Vault health
+ set_fact:
+ vault_is_running: "{{ vault_health_check.get('status', '-1') in vault_successful_http_codes }}"
+ vault_cluster_is_initialized: "{{ vault_health_check.get('json', {}).get('initialized', false) }}"
+ vault_is_sealed: "{{ vault_health_check.get('json', {}).get('sealed', true) }}"
+
+- name: restart vault service
+ systemd:
+ daemon_reload: true
+ enabled: yes
+ name: vault
+ state: restarted
+
+- name: unseal vault
+ hashivault_unseal:
+ url: "{{ vault_leader_url | default('https://localhost:8200') }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ keys: "{{ item }}"
+ with_items: "{{ vault_unseal_keys|default([]) }}"
diff --git a/roles/vault/tasks/bootstrap/create_mounts.yml b/roles/vault/tasks/bootstrap/create_mounts.yml
index 0c82990e690..ad0fab9157a 100644
--- a/roles/vault/tasks/bootstrap/create_mounts.yml
+++ b/roles/vault/tasks/bootstrap/create_mounts.yml
@@ -1,12 +1,13 @@
---
- include_tasks: ../shared/create_mount.yml
vars:
- create_mount_path: "{{ item.name }}"
+ create_mount_path: "/{{ item.name }}"
create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}"
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
create_mount_description: "{{ item.description }}"
create_mount_cert_dir: "{{ item.cert_dir }}"
create_mount_config_ca_needed: "{{ item.config_ca }}"
with_items:
+ - "{{ vault_pki_mounts.userpass|combine({'config_ca': not vault_ca_cert_needed}) }}"
- "{{ vault_pki_mounts.vault|combine({'config_ca': not vault_ca_cert_needed}) }}"
- "{{ vault_pki_mounts.etcd|combine({'config_ca': not vault_etcd_ca_cert_needed}) }}"
diff --git a/roles/vault/tasks/bootstrap/create_roles.yml b/roles/vault/tasks/bootstrap/create_roles.yml
index 8b252aaf527..c9cdad393a5 100644
--- a/roles/vault/tasks/bootstrap/create_roles.yml
+++ b/roles/vault/tasks/bootstrap/create_roles.yml
@@ -6,5 +6,5 @@
create_role_policy_rules: "{{ item.policy_rules }}"
create_role_password: "{{ item.password }}"
create_role_options: "{{ item.role_options }}"
- create_role_mount_path: "{{ mount.name }}"
+ create_role_mount_path: "/{{ mount.name }}"
with_items: "{{ mount.roles }}"
diff --git a/roles/vault/tasks/bootstrap/gen_vault_certs.yml b/roles/vault/tasks/bootstrap/gen_vault_certs.yml
index f982986cb1b..2857163dcec 100644
--- a/roles/vault/tasks/bootstrap/gen_vault_certs.yml
+++ b/roles/vault/tasks/bootstrap/gen_vault_certs.yml
@@ -14,7 +14,7 @@
{%- endfor -%}
"127.0.0.1","::1"
]
- issue_cert_mount_path: "{{ vault_pki_mounts.vault.name }}"
+ issue_cert_mount_path: "/{{ vault_pki_mounts.vault.name }}"
issue_cert_path: "{{ vault_cert_dir }}/api.pem"
issue_cert_role: "{{ vault_pki_mounts.vault.roles[0].name }}"
issue_cert_url: "{{ vault_leader_url }}"
diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml
index fdecbdd2afc..e4e67d11fcd 100644
--- a/roles/vault/tasks/bootstrap/main.yml
+++ b/roles/vault/tasks/bootstrap/main.yml
@@ -1,4 +1,9 @@
---
+- import_tasks: ../shared/check_etcd.yml
+ vars:
+ vault_etcd_needed: no
+ when: inventory_hostname in groups.vault
+
- import_tasks: ../shared/check_vault.yml
when: inventory_hostname in groups.vault
@@ -23,14 +28,14 @@
when: not vault_cluster_is_initialized
- import_tasks: create_mounts.yml
- when: inventory_hostname == groups.vault|first
+ when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
- include_tasks: ../shared/auth_backend.yml
vars:
auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates
auth_backend_path: userpass
auth_backend_type: userpass
- when: inventory_hostname == groups.vault|first
+ when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
- include_tasks: create_roles.yml
with_items:
@@ -38,14 +43,15 @@
- "{{ vault_pki_mounts.etcd }}"
loop_control:
loop_var: mount
- when: inventory_hostname in groups.vault
+ when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
- include_tasks: ../shared/gen_ca.yml
vars:
gen_ca_cert_dir: "{{ vault_pki_mounts.vault.cert_dir }}"
- gen_ca_mount_path: "{{ vault_pki_mounts.vault.name }}"
+ gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.vault }}"
+ gen_ca_copy_group: "kube-master"
when: >-
inventory_hostname in groups.vault
and not vault_cluster_is_initialized
@@ -54,12 +60,13 @@
- include_tasks: ../shared/gen_ca.yml
vars:
gen_ca_cert_dir: "{{ vault_pki_mounts.etcd.cert_dir }}"
- gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}"
+ gen_ca_mount_path: "/{{ vault_pki_mounts.etcd.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.etcd }}"
- when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed
+ gen_ca_copy_group: "etcd"
+ when: inventory_hostname in groups.etcd and not vault_cluster_is_initialized and vault_etcd_ca_cert_needed
- import_tasks: gen_vault_certs.yml
- when: inventory_hostname in groups.vault and vault_api_cert_needed
+ when: inventory_hostname in groups.vault and not vault_cluster_is_initialized and vault_api_cert_needed
- import_tasks: ca_trust.yml
diff --git a/roles/vault/tasks/bootstrap/start_vault_temp.yml b/roles/vault/tasks/bootstrap/start_vault_temp.yml
index 1048b8c3ec1..3720d9beb23 100644
--- a/roles/vault/tasks/bootstrap/start_vault_temp.yml
+++ b/roles/vault/tasks/bootstrap/start_vault_temp.yml
@@ -12,19 +12,14 @@
-v /etc/vault:/etc/vault
{{ vault_image_repo }}:{{ vault_version }} server
-# FIXME(mattymo): Crashes on first start with aufs docker storage. See hashicorp/docker-vault#19
- name: bootstrap/start_vault_temp | Start again single node Vault with file backend
command: docker start {{ vault_temp_container_name }}
- name: bootstrap/start_vault_temp | Initialize vault-temp
- uri:
- url: "http://localhost:{{ vault_port }}/v1/sys/init"
- headers: "{{ vault_client_headers }}"
- method: PUT
- body_format: json
- body:
- secret_shares: 1
- secret_threshold: 1
+ hashivault_init:
+ url: "http://localhost:{{ vault_port }}/"
+ secret_shares: 1
+ secret_threshold: 1
until: "vault_temp_init|succeeded"
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
@@ -34,16 +29,14 @@
- name: bootstrap/start_vault_temp | Set needed vault facts
set_fact:
vault_leader_url: "http://{{ inventory_hostname }}:{{ vault_port }}"
- vault_temp_unseal_keys: "{{ vault_temp_init.json['keys'] }}"
- vault_temp_root_token: "{{ vault_temp_init.json.root_token }}"
- vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_temp_init.json.root_token}) }}"
+ vault_temp_unseal_keys: "{{ vault_temp_init.keys_base64 }}"
+ vault_root_token: "{{ vault_temp_init.root_token }}"
+ vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_temp_init.root_token}) }}"
- name: bootstrap/start_vault_temp | Unseal vault-temp
- uri:
- url: "http://localhost:{{ vault_port }}/v1/sys/unseal"
- headers: "{{ vault_headers }}"
- method: POST
- body_format: json
- body:
- key: "{{ item }}"
+ hashivault_unseal:
+ url: "http://localhost:{{ vault_port }}/"
+ token: "{{ vault_root_token }}"
+ keys: "{{ item }}"
with_items: "{{ vault_temp_unseal_keys|default([]) }}"
+ no_log: true
diff --git a/roles/vault/tasks/bootstrap/sync_etcd_certs.yml b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml
index f29b650225a..1d6eabb5c68 100644
--- a/roles/vault/tasks/bootstrap/sync_etcd_certs.yml
+++ b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml
@@ -3,7 +3,7 @@
- include_tasks: ../shared/sync_file.yml
vars:
sync_file: "ca.pem"
- sync_file_dir: "{{ vault_etcd_cert_dir }}"
+ sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: "{{ groups.etcd }}"
sync_file_is_cert: true
diff --git a/roles/vault/tasks/bootstrap/sync_secrets.yml b/roles/vault/tasks/bootstrap/sync_secrets.yml
index cbbb581a7ef..8c1ae39290d 100644
--- a/roles/vault/tasks/bootstrap/sync_secrets.yml
+++ b/roles/vault/tasks/bootstrap/sync_secrets.yml
@@ -29,19 +29,23 @@
- name: bootstrap/sync_secrets | Cat root_token from a vault host
command: "cat {{ vault_secrets_dir }}/root_token"
register: vault_root_token_cat
- when: vault_secrets_available and inventory_hostname == groups.vault|first
+ run_once: yes
+ when: vault_secrets_available
- name: bootstrap/sync_secrets | Cat unseal_keys from a vault host
command: "cat {{ vault_secrets_dir }}/unseal_keys"
register: vault_unseal_keys_cat
- when: vault_secrets_available and inventory_hostname == groups.vault|first
+ run_once: yes
+ when: vault_secrets_available
- name: bootstrap/sync_secrets | Set needed facts for Vault API interaction when Vault is already running
set_fact:
- vault_root_token: "{{ hostvars[groups.vault|first]['vault_root_token_cat']['stdout'] }}"
- vault_unseal_keys: "{{ hostvars[groups.vault|first]['vault_unseal_keys_cat']['stdout_lines'] }}"
+ vault_root_token: "{{ vault_root_token_cat.stdout }}"
+ vault_unseal_keys: "{{ vault_unseal_keys_cat.stdout_lines }}"
+ run_once: yes
when: vault_secrets_available
+# FIXME: Remove all uri calls
- name: bootstrap/sync_secrets | Update vault_headers if we have the root_token
set_fact:
vault_headers: "{{ vault_client_headers | combine({'X-Vault-Token': vault_root_token}) }}"
diff --git a/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/roles/vault/tasks/bootstrap/sync_vault_certs.yml
index cb35ff66c84..cf499099a3b 100644
--- a/roles/vault/tasks/bootstrap/sync_vault_certs.yml
+++ b/roles/vault/tasks/bootstrap/sync_vault_certs.yml
@@ -4,6 +4,8 @@
sync_file: "ca.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups.vault }}"
+ sync_file_owner: vault
+ sync_file_group: root
sync_file_is_cert: true
- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
@@ -14,11 +16,32 @@
set_fact:
sync_file_results: []
+# FIXME: Distribute ca.pem alone in a better way
+- include_tasks: ../shared/sync_file.yml
+ vars:
+ sync_file: "ca.pem"
+ sync_file_dir: "{{ vault_cert_dir }}"
+ sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_owner: vault
+ sync_file_group: root
+ sync_file_is_cert: false
+
+- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
+ set_fact:
+ vault_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"
+
+- name: bootstrap/sync_vault_certs | Unset sync_file_results after ca.pem sync
+ set_fact:
+ sync_file_results: []
+
+
- include_tasks: ../shared/sync_file.yml
vars:
sync_file: "api.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups.vault }}"
+ sync_file_owner: vault
+ sync_file_group: root
sync_file_is_cert: true
- name: bootstrap/sync_vault_certs | Set fact if Vault's API cert is needed
diff --git a/roles/vault/tasks/cluster/binary.yml b/roles/vault/tasks/cluster/binary.yml
index 41024dd5432..5a055cbae7f 100644
--- a/roles/vault/tasks/cluster/binary.yml
+++ b/roles/vault/tasks/cluster/binary.yml
@@ -3,7 +3,7 @@
- name: cluster/binary | Copy vault binary from downloaddir
copy:
src: "{{ local_release_dir }}/vault/vault"
- dest: "/usr/bin/vault"
+ dest: "{{ bin_dir }}/vault"
remote_src: true
mode: "0755"
owner: vault
diff --git a/roles/vault/tasks/cluster/configure.yml b/roles/vault/tasks/cluster/configure.yml
index 7ac8f5f9e3f..fd20b9646ab 100644
--- a/roles/vault/tasks/cluster/configure.yml
+++ b/roles/vault/tasks/cluster/configure.yml
@@ -1,10 +1,19 @@
---
-
-- name: cluster/configure | Ensure the vault/config directory exists
+- name: cluster/configure | Ensure the vault directories exist
file:
- dest: "{{ vault_config_dir }}"
+ dest: "{{ item }}"
+ owner: vault
mode: 0750
state: directory
+ recurse: true
+ with_items:
+ - "{{ vault_base_dir }}"
+ - "{{ vault_cert_dir }}"
+ - "{{ vault_config_dir }}"
+ - "{{ vault_roles_dir }}"
+ - "{{ vault_secrets_dir }}"
+ - "{{ vault_log_dir }}"
+ - "{{ vault_lib_dir }}"
- name: cluster/configure | Lay down the configuration file
copy:
diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml
index c6e075698db..f5dd9609b15 100644
--- a/roles/vault/tasks/cluster/create_mounts.yml
+++ b/roles/vault/tasks/cluster/create_mounts.yml
@@ -1,7 +1,7 @@
---
- include_tasks: ../shared/create_mount.yml
vars:
- create_mount_path: "{{ item.name }}"
+ create_mount_path: "/{{ item.name }}"
create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}"
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
create_mount_description: "{{ item.description }}"
diff --git a/roles/vault/tasks/cluster/create_roles.yml b/roles/vault/tasks/cluster/create_roles.yml
index 7b9d0b4f79d..7a0e8498213 100644
--- a/roles/vault/tasks/cluster/create_roles.yml
+++ b/roles/vault/tasks/cluster/create_roles.yml
@@ -6,5 +6,5 @@
create_role_password: "{{ item.password }}"
create_role_policy_rules: "{{ item.policy_rules }}"
create_role_options: "{{ item.role_options }}"
- create_role_mount_path: "{{ mount.name }}"
+ create_role_mount_path: "/{{ mount.name }}"
with_items: "{{ mount.roles }}"
diff --git a/roles/vault/tasks/cluster/init.yml b/roles/vault/tasks/cluster/init.yml
index 60aaf9591af..fea670df2f5 100644
--- a/roles/vault/tasks/cluster/init.yml
+++ b/roles/vault/tasks/cluster/init.yml
@@ -1,36 +1,27 @@
---
+- name: cluster/init | wait for vault
+ command: /bin/true
+ notify: wait for vault up
+
+- meta: flush_handlers
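+# flush_handlers forces the notified "wait for vault up" handler to run now,
+# so the health facts are set before attempting initialization.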
- name: cluster/init | Initialize Vault
- uri:
- url: "https://{{ groups.vault|first }}:{{ vault_port }}/v1/sys/init"
- headers: "{{ vault_client_headers }}"
- method: POST
- body_format: json
- body:
- secret_shares: "{{ vault_secret_shares }}"
- secret_threshold: "{{ vault_secret_threshold }}"
- validate_certs: false
+ hashivault_init:
+ url: "https://localhost:{{ vault_port }}/"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ secret_shares: "{{ vault_secret_shares }}"
+ secret_threshold: "{{ vault_secret_threshold }}"
+ run_once: true
register: vault_init_result
- when: not vault_cluster_is_initialized and inventory_hostname == groups.vault|first
+ when: not vault_cluster_is_initialized
- name: cluster/init | Set facts on the results of the initialization
set_fact:
- vault_unseal_keys: "{{ vault_init_result.json['keys'] }}"
- vault_root_token: "{{ vault_init_result.json.root_token }}"
- vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_init_result.json.root_token}) }}"
- when: not vault_cluster_is_initialized and inventory_hostname == groups.vault|first
-
-- name: cluster/init | Ensure all hosts have these facts
- set_fact:
- vault_unseal_keys: "{{ hostvars[groups.vault|first]['vault_unseal_keys'] }}"
- vault_root_token: "{{ hostvars[groups.vault|first]['vault_root_token'] }}"
- when: not vault_cluster_is_initialized and inventory_hostname != groups.vault|first
-
-- name: cluster/init | Ensure the vault_secrets_dir exists
- file:
- mode: 0750
- path: "{{ vault_secrets_dir }}"
- state: directory
+ vault_unseal_keys: "{{ vault_init_result.keys_base64 }}"
+ vault_root_token: "{{ vault_init_result.root_token }}"
+ vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_init_result.root_token}) }}"
+ run_once: true
+ when: not vault_cluster_is_initialized
- name: cluster/init | Ensure all in groups.vault have the unseal_keys locally
copy:
@@ -48,5 +39,5 @@
- name: cluster/init | Ensure vault_headers and vault statuses are updated
set_fact:
- vault_headers: "{{ vault_client_headers | combine({'X-Vault-Token': vault_root_token})}}"
vault_cluster_is_initialized: true
+ run_once: true
diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml
index d904c2398b4..3ed23b2cc2d 100644
--- a/roles/vault/tasks/cluster/main.yml
+++ b/roles/vault/tasks/cluster/main.yml
@@ -1,8 +1,8 @@
---
-- import_tasks: ../shared/check_vault.yml
+- import_tasks: ../shared/check_etcd.yml
when: inventory_hostname in groups.vault
-- import_tasks: ../shared/check_etcd.yml
+- import_tasks: ../shared/check_vault.yml
when: inventory_hostname in groups.vault
- import_tasks: configure.yml
@@ -14,6 +14,9 @@
- import_tasks: systemd.yml
when: inventory_hostname in groups.vault
+- import_tasks: ../shared/find_leader.yml
+ when: inventory_hostname in groups.vault
+
- import_tasks: init.yml
when: inventory_hostname in groups.vault
@@ -29,9 +32,10 @@
- include_tasks: ../shared/gen_ca.yml
vars:
gen_ca_cert_dir: "{{ vault_pki_mounts.kube.cert_dir }}"
- gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}"
+ gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.kube }}"
+ gen_ca_copy_group: "kube-master"
when: inventory_hostname in groups.vault
- include_tasks: ../shared/auth_backend.yml
diff --git a/roles/vault/tasks/cluster/systemd.yml b/roles/vault/tasks/cluster/systemd.yml
index 8df52f98255..f09cb8ed75d 100644
--- a/roles/vault/tasks/cluster/systemd.yml
+++ b/roles/vault/tasks/cluster/systemd.yml
@@ -1,32 +1,11 @@
---
-
-- name: cluster/systemd | Ensure mount points exist prior to vault.service startup
- file:
- mode: 0750
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ vault_config_dir }}"
- - "{{ vault_log_dir }}"
- - "{{ vault_secrets_dir }}"
- - /var/lib/vault/
-
-- name: cluster/systemd | Ensure the vault user has access to needed directories
- file:
- owner: vault
- path: "{{ item }}"
- recurse: true
- with_items:
- - "{{ vault_base_dir }}"
- - "{{ vault_log_dir }}"
- - /var/lib/vault
-
- name: cluster/systemd | Copy down vault.service systemd file
template:
src: "{{ vault_deployment_type }}.service.j2"
dest: /etc/systemd/system/vault.service
backup: yes
register: vault_systemd_placement
+ notify: restart vault
- name: Create vault service systemd directory
file:
@@ -39,6 +18,7 @@
dest: /etc/systemd/system/vault.service.d/http-proxy.conf
backup: yes
when: http_proxy is defined or https_proxy is defined
+ notify: restart vault
- name: cluster/systemd | Enable vault.service
systemd:
@@ -46,12 +26,4 @@
enabled: yes
name: vault
state: started
-
-- name: cluster/systemd | Query local vault until service is up
- uri:
- url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://localhost:{{ vault_port }}/v1/sys/health"
- headers: "{{ vault_client_headers }}"
- status_code: 200,429,500,501
- register: vault_health_check
- until: vault_health_check|succeeded
- retries: 10
+ notify: wait for vault up
diff --git a/roles/vault/tasks/cluster/unseal.yml b/roles/vault/tasks/cluster/unseal.yml
index b9257bf49cc..6d0414d0df1 100644
--- a/roles/vault/tasks/cluster/unseal.yml
+++ b/roles/vault/tasks/cluster/unseal.yml
@@ -1,25 +1,16 @@
---
- name: cluster/unseal | Current sealed state
- debug: " Sealed? {{vault_is_sealed}}"
+ debug:
+ msg: "Sealed? {{ vault_is_sealed }}"
- name: cluster/unseal | Unseal Vault
- uri:
- url: "https://localhost:{{ vault_port }}/v1/sys/unseal"
- headers: "{{ vault_headers }}"
- method: POST
- body_format: json
- body:
- key: "{{ item }}"
+ hashivault_unseal:
+ url: "https://localhost:{{ vault_port }}/"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ keys: "{{ item }}"
+ no_log: true
with_items: "{{ vault_unseal_keys|default([]) }}"
+ notify: wait for vault up
when: vault_is_sealed
-
-- name: cluster/unseal | Wait until server is ready
- uri:
- url: "https://localhost:{{ vault_port }}/v1/sys/health"
- headers: "{{ vault_headers }}"
- method: HEAD
- status_code: 200, 429
- register: vault_node_ready
- until: vault_node_ready|succeeded
- retries: 5
diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml
index 7a10def8afb..ae59132a37e 100644
--- a/roles/vault/tasks/main.yml
+++ b/roles/vault/tasks/main.yml
@@ -10,6 +10,11 @@
# Vault cluster using Etcd as the backend. The same Root CA is mounted as
# used during step 1, allowing all certs to have the same chain of trust.
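+# The hashivault_* modules that replace the raw uri calls below rely on the
+# hvac Python client, so make sure it is present on the target hosts.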
+- name: install hvac
+ pip:
+ name: "hvac"
+ state: "present"
+
## Bootstrap
- include_tasks: bootstrap/main.yml
when: cert_management == 'vault' and vault_bootstrap | d()
diff --git a/roles/vault/tasks/shared/auth_backend.yml b/roles/vault/tasks/shared/auth_backend.yml
index 82a4c94fbf2..fa5d8686589 100644
--- a/roles/vault/tasks/shared/auth_backend.yml
+++ b/roles/vault/tasks/shared/auth_backend.yml
@@ -1,20 +1,10 @@
---
-- name: shared/auth_backend | Test if the auth backend exists
- uri:
- url: "{{ vault_leader_url }}/v1/sys/auth/{{ auth_backend_path }}/tune"
- headers: "{{ vault_headers }}"
- validate_certs: false
- ignore_errors: true
- register: vault_auth_backend_check
-
-- name: shared/auth_backend | Add the cert auth backend if needed
- uri:
- url: "{{ vault_leader_url }}/v1/sys/auth/{{ auth_backend_path }}"
- headers: "{{ vault_headers }}"
- method: POST
- body_format: json
- body:
- description: "{{ auth_backend_description|d('') }}"
- type: "{{ auth_backend_type }}"
- status_code: 204
- when: vault_auth_backend_check|failed
+- name: shared/auth_backend | Enable auth backend {{ auth_backend_path }}
+ hashivault_auth_enable:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ name: "{{ auth_backend_type }}"
+ mount_point: "{{ auth_backend_path }}"
+ description: "{{ auth_backend_description|d('') }}"
+ register: result
diff --git a/roles/vault/tasks/shared/cert_auth_mount.yml b/roles/vault/tasks/shared/cert_auth_mount.yml
index d9af9c310f4..b94f531306c 100644
--- a/roles/vault/tasks/shared/cert_auth_mount.yml
+++ b/roles/vault/tasks/shared/cert_auth_mount.yml
@@ -10,11 +10,10 @@
max_lease_ttl: "{{ vault_max_lease_ttl }}"
- name: shared/auth_mount | Create a dummy role for issuing certs from auth-pki
- uri:
- url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth-pki/roles/dummy"
- headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
- method: POST
- body_format: json
- body:
- {'allow_any_name': true}
- status_code: 204
+ hashivault_approle_role_create:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ name: "auth-pki/roles/dummy"
+ policies:
+ allow_any_name: true
diff --git a/roles/vault/tasks/shared/check_etcd.yml b/roles/vault/tasks/shared/check_etcd.yml
index 6158a312637..9ebed2bf150 100644
--- a/roles/vault/tasks/shared/check_etcd.yml
+++ b/roles/vault/tasks/shared/check_etcd.yml
@@ -2,11 +2,14 @@
- name: check_etcd | Check if etcd is up and reachable
uri:
- url: "{{ vault_etcd_url }}/health"
+ url: "{{ vault_etcd_url.split(',') | first }}/health"
validate_certs: no
+ client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+ client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+
return_content: yes
until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
- retries: 10
+ retries: 3
delay: 2
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
@@ -23,5 +26,5 @@
fail:
msg: >
Unable to start Vault cluster! Etcd is not available at
- {{ vault_etcd_url }} however it is needed by Vault as a backend.
+ {{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
when: vault_etcd_needed|d() and not vault_etcd_available
diff --git a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml
index 83328768ad5..999a36f32ba 100644
--- a/roles/vault/tasks/shared/check_vault.yml
+++ b/roles/vault/tasks/shared/check_vault.yml
@@ -8,24 +8,44 @@
# Check if vault is reachable on the localhost
- name: check_vault | Attempt to pull local https Vault health
- uri:
- url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://localhost:{{ vault_port }}/v1/sys/health"
- headers: "{{ vault_client_headers }}"
- status_code: 200,429,500,501,503
- validate_certs: no
- ignore_errors: true
- register: vault_local_service_health
+ command: /bin/true
+ notify:
+ - wait for vault up nowait
+ - set facts about local Vault health
+
+- meta: flush_handlers
+
+- name: check_vault | Set facts about local Vault health
+ set_fact:
+ vault_is_running: "{{ vault_health_check.get('status', '-1') in vault_successful_http_codes }}"
- name: check_vault | Set facts about local Vault health
set_fact:
- vault_is_running: "{{ vault_local_service_health|succeeded }}"
- vault_is_initialized: "{{ vault_local_service_health.get('json', {}).get('initialized', false) }}"
- vault_is_sealed: "{{ vault_local_service_health.get('json', {}).get('sealed', true) }}"
- # vault_in_standby: "{{ vault_local_service_health.get('json', {}).get('standby', true) }}"
+ vault_is_initialized: "{{ vault_health_check.get('json', {}).get('initialized', false) }}"
+ vault_is_sealed: "{{ vault_health_check.get('json', {}).get('sealed', true) }}"
+ # vault_in_standby: "{{ vault_health_check.get('json', {}).get('standby', true) }}"
# vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}"
+- name: check_vault | Check if vault is initialized in etcd when vault is not running
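+ # etcd's v3 JSON gateway expects base64-encoded keys; if the
+ # /vault/core/seal-config key exists, a previous Vault init already wrote its
+ # seal configuration to etcd.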
+ command: |-
+ curl \
+ --cacert {{ etcd_cert_dir }}/ca.pem \
+ --cert {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem \
+ --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
+ -X POST -d '{"key": "{{ "/vault/core/seal-config" | b64encode }}"}' \
+ {{ etcd_access_addresses.split(',') | first }}/v3alpha/kv/range
+ register: vault_etcd_exists
+ retries: 4
+ delay: "{{ retry_stagger | random + 3 }}"
+ run_once: true
+ when: not vault_is_running and vault_etcd_available
+ changed_when: false
+
- name: check_vault | Set fact about the Vault cluster's initialization state
set_fact:
- vault_cluster_is_initialized: "{{ vault_is_initialized or hostvars[item]['vault_is_initialized'] }}"
+ vault_cluster_is_initialized: >-
+ {{ vault_is_initialized or
+ hostvars[item]['vault_is_initialized'] or
+ ('value' in vault_etcd_exists.stdout|default('')) }}
with_items: "{{ groups.vault }}"
run_once: true
diff --git a/roles/vault/tasks/shared/config_ca.yml b/roles/vault/tasks/shared/config_ca.yml
index 0ef34e7b8aa..f3f00d0beec 100644
--- a/roles/vault/tasks/shared/config_ca.yml
+++ b/roles/vault/tasks/shared/config_ca.yml
@@ -4,26 +4,26 @@
register: vault_ca_cert_cat
- name: config_ca | Pull current CA cert from Vault
- uri:
- url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/ca/pem"
- headers: "{{ vault_headers }}"
- return_content: true
- status_code: 200,204
- validate_certs: no
+ hashivault_read:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ secret: "{{ config_ca_mount_path }}/ca"
+ key: "pem"
register: vault_pull_current_ca
+ failed_when: false
- name: config_ca | Read root CA key for Vault
command: "cat {{ config_ca_ca_key }}"
register: vault_ca_key_cat
- when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.content.strip()
+ when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.get("data","").strip()
- name: config_ca | Configure pki mount to use the found root CA cert and key
- uri:
- url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/config/ca"
- headers: "{{ vault_headers }}"
- method: POST
- body_format: json
- body:
+ hashivault_write:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ secret: "{{ config_ca_mount_path }}/config/ca"
+ data:
pem_bundle: "{{ vault_ca_cert_cat.stdout + '\n' + vault_ca_key_cat.stdout }}"
- status_code: 204
- when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.get("content","").strip()
+ when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.get("data","").strip()
diff --git a/roles/vault/tasks/shared/create_role.yml b/roles/vault/tasks/shared/create_role.yml
index 4d1915a545c..d3aa3e441e9 100644
--- a/roles/vault/tasks/shared/create_role.yml
+++ b/roles/vault/tasks/shared/create_role.yml
@@ -1,42 +1,36 @@
---
-# The JSON inside JSON here is intentional (Vault API wants it)
-- name: create_role | Create a policy for the new role allowing issuing
- uri:
- url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/sys/policy/{{ create_role_name }}"
- headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
- method: PUT
- body_format: json
- body:
- rules: >-
- {%- if create_role_policy_rules|d("default") == "default" -%}
- {{
- { 'path': {
- create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'},
- create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'}
- }} | to_json + '\n'
- }}
- {%- else -%}
- {{ create_role_policy_rules | to_json + '\n' }}
- {%- endif -%}
- status_code: 204
- delegate_to: "{{ groups.vault|first }}"
- run_once: true
+- name: create_role | Create a policy for the new role
+ hashivault_policy_set:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ name: "{{ create_role_name }}"
+ rules: >-
+ {%- if create_role_policy_rules|d("default") == "default" -%}
+ {{
+ { 'path': {
+ create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'},
+ create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'}
+ }} | to_json + '\n'
+ }}
+ {%- else -%}
+ {{ create_role_policy_rules | to_json + '\n' }}
+ {%- endif -%}
- name: create_role | Create {{ create_role_name }} role in the {{ create_role_mount_path }} pki mount
- uri:
- url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/{{ create_role_mount_path }}/roles/{{ create_role_name }}"
- headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
- method: POST
- body_format: json
- body: >-
- {%- if create_role_options|d("default") == "default" -%}
- {'allow_any_name': true}
- {%- else -%}
- {{ create_role_options }}
- {%- endif -%}
- status_code: 204
- delegate_to: "{{ groups.vault|first }}"
- run_once: true
+ hashivault_write:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ secret: "{{ create_role_mount_path }}/roles/{{ create_role_name }}"
+ data: |
+ {%- if create_role_options|d("default") == "default" -%}
+ {
+ allow_any_name: true
+ }
+ {%- else -%}
+ {{ create_role_options | to_json }}
+ {%- endif -%}
## Userpass based auth method
diff --git a/roles/vault/tasks/shared/find_leader.yml b/roles/vault/tasks/shared/find_leader.yml
index 3afee482d17..1c1dcdf3054 100644
--- a/roles/vault/tasks/shared/find_leader.yml
+++ b/roles/vault/tasks/shared/find_leader.yml
@@ -5,16 +5,16 @@
url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://localhost:{{ vault_port }}/v1/sys/health"
headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
method: HEAD
- status_code: 200,429,503
+ status_code: 200,429,501,503
register: vault_leader_check
until: "vault_leader_check|succeeded"
retries: 10
- name: find_leader | Set fact for current http leader
set_fact:
- vault_leader_url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://{{ item }}:{{ vault_port }}"
+ vault_leader_url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://{{ inventory_hostname }}:{{ vault_port }}"
with_items: "{{ groups.vault }}"
- when: "hostvars[item]['vault_leader_check'].get('status') in [200,503]"
+ when: "hostvars[item]['vault_leader_check'].get('status') in [200,501,503]"
# run_once: true
- name: find_leader| show vault_leader_url
diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml
index 654cc3ff3b3..eaf7a61ffc7 100644
--- a/roles/vault/tasks/shared/gen_ca.yml
+++ b/roles/vault/tasks/shared/gen_ca.yml
@@ -1,32 +1,38 @@
---
-- name: "bootstrap/gen_ca | Ensure cert_dir {{ gen_ca_cert_dir }} exists"
+- name: "bootstrap/gen_ca | Ensure cert_dir {{ gen_ca_cert_dir }} exists on necessary hosts"
file:
mode: 0755
path: "{{ gen_ca_cert_dir }}"
state: directory
+ delegate_to: "{{ item }}"
+ with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"
- name: "bootstrap/gen_ca | Generate {{ gen_ca_mount_path }} root CA"
- uri:
- url: "{{ vault_leader_url }}/v1/{{ gen_ca_mount_path }}/root/generate/exported"
- headers: "{{ gen_ca_vault_headers }}"
- method: POST
- body_format: json
- body: "{{ gen_ca_vault_options }}"
- status_code: 200,204
- register: vault_ca_gen
- delegate_to: "{{ groups.vault|first }}"
+ hashivault_write:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ secret: "{{ gen_ca_mount_path }}/root/generate/exported"
+ data: "{{ gen_ca_vault_options }}"
run_once: true
+ no_log: true
+ register: vault_ca_gen
- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA cert locally"
copy:
- content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['certificate'] }}"
+ content: "{{ vault_ca_gen['data']['data']['certificate'] }}"
dest: "{{ gen_ca_cert_dir }}/ca.pem"
mode: 0644
- when: vault_ca_gen.status == 200
+ when: '"data" in vault_ca_gen.keys()'
+ delegate_to: "{{ item }}"
+ with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"
+
-- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally"
+- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts"
copy:
- content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}"
+ content: "{{ vault_ca_gen['data']['data']['private_key']}}"
dest: "{{ gen_ca_cert_dir }}/ca-key.pem"
mode: 0640
- when: vault_ca_gen.status == 200
+ when: '"data" in vault_ca_gen.keys()'
+ delegate_to: "{{ item }}"
+ with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"
diff --git a/roles/vault/tasks/shared/gen_userpass.yml b/roles/vault/tasks/shared/gen_userpass.yml
index 5def39d0e46..a49b443e368 100644
--- a/roles/vault/tasks/shared/gen_userpass.yml
+++ b/roles/vault/tasks/shared/gen_userpass.yml
@@ -1,16 +1,13 @@
---
- name: shared/gen_userpass | Create the Username/Password combo for the role
- uri:
- url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/users/{{ gen_userpass_username }}"
- headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
- method: POST
- body_format: json
- body:
- username: "{{ gen_userpass_username }}"
- password: "{{ gen_userpass_password }}"
- policies: "{{ gen_userpass_role }}"
- status_code: 204
- delegate_to: "{{ groups.vault|first }}"
+ hashivault_userpass_create:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ name: "{{ gen_userpass_username }}"
+ pass: "{{ gen_userpass_password }}"
+ policies:
+ - "{{ gen_userpass_role }}"
run_once: true
- name: shared/gen_userpass | Ensure destination directory exists
diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml
index 1ba90ea77b5..be49f375d64 100644
--- a/roles/vault/tasks/shared/issue_cert.yml
+++ b/roles/vault/tasks/shared/issue_cert.yml
@@ -6,6 +6,7 @@
# issue_cert_alt_name: Requested Subject Alternative Names, in a list.
# issue_cert_common_name: Common Name included in the cert
# issue_cert_copy_ca: Copy issuing CA cert needed
+# issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem)
# issue_cert_dir_mode: Mode of the placed cert directory
# issue_cert_file_group: Group of the placed cert file and directory
# issue_cert_file_mode: Mode of the placed cert file
@@ -38,52 +39,59 @@
delegate_to: "{{ groups.vault|first }}"
run_once: true
-- name: gen_certs_vault | Log into Vault and obtain an token
- uri:
- url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ user_vault_creds.username }}"
- headers:
- Accept: application/json
- Content-Type: application/json
- method: POST
- body_format: json
- body:
- password: "{{ user_vault_creds.password }}"
- register: vault_login_result
- delegate_to: "{{ groups.vault|first }}"
- run_once: true
+- name: gen_certs_vault | Ensure vault cert dir exists
+ file:
+ path: "{{ vault_cert_dir }}"
+ state: directory
+ recurse: yes
+ owner: "vault"
+ group: "root"
+ mode: 0755
-- name: gen_certs_vault | Set fact for vault_client_token
- set_fact:
- vault_client_token: "{{ vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}"
+- name: gen_certs_vault | install hvac
+ pip:
+ name: "hvac"
+ state: "present"
+
+- name: gen_certs_vault | Pull vault CA
+ get_url:
+ url: "{{ issue_cert_url }}/v1/vault/ca/pem"
+ dest: "{{ vault_cert_dir }}/ca.pem"
+ validate_certs: no
+ when: '"https" in issue_cert_url'
+
+- name: gen_certs_vault | Log into Vault and obtain a scoped token
+ hashivault_token_create:
+ url: "{{ issue_cert_url }}"
+ token: "{{ vault_root_token | default(hostvars[groups.vault|first]['vault_root_token']) }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ policies: "{{ user_vault_creds.username }}"
+ display_name: "{{ user_vault_creds.username }}"
+ register: vault_client_token_request
run_once: true
-- name: gen_certs_vault | Set fact for Vault API token
+- name: gen_certs_vault | Pull token from request
set_fact:
- issue_cert_headers:
- Accept: application/json
- Content-Type: application/json
- X-Vault-Token: "{{ vault_client_token }}"
+ vault_client_token: "{{ vault_client_token_request['token']['auth']['client_token'] }}"
run_once: true
- when: vault_client_token != ""
- name: "issue_cert | Generate {{ issue_cert_path }} for {{ issue_cert_role }} role"
- uri:
- url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount_path|d('pki') }}/issue/{{ issue_cert_role }}"
- headers: "{{ issue_cert_headers }}"
- method: POST
- body_format: json
- body:
+ hashivault_write:
+ url: "{{ issue_cert_url }}"
+ token: "{{ vault_client_token }}"
+ ca_cert: "{% if 'https' in issue_cert_url %}{{ vault_cert_dir }}/ca.pem{% endif %}"
+ secret: "{{ issue_cert_mount_path|d('/pki') }}/issue/{{ issue_cert_role }}"
+ data:
alt_names: "{{ issue_cert_alt_names | d([]) | join(',') }}"
common_name: "{{ issue_cert_common_name | d(issue_cert_path.rsplit('/', 1)[1].rsplit('.', 1)[0]) }}"
format: "{{ issue_cert_format | d('pem') }}"
ip_sans: "{{ issue_cert_ip_sans | default([]) | join(',') }}"
register: issue_cert_result
- delegate_to: "{{ issue_cert_hosts|first }}"
- run_once: true
+ run_once: "{{ issue_cert_run_once | d(false) }}"
- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
copy:
- content: "{{ issue_cert_result['json']['data']['certificate'] }}\n"
+ content: "{{ issue_cert_result['data']['data']['certificate'] }}\n"
dest: "{{ issue_cert_path }}"
group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0644') }}"
@@ -91,7 +99,7 @@
- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts"
copy:
- content: "{{ issue_cert_result['json']['data']['private_key'] }}"
+ content: "{{ issue_cert_result['data']['data']['private_key'] }}"
dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}"
group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0640') }}"
@@ -99,8 +107,8 @@
- name: issue_cert | Copy issuing CA cert
copy:
- content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n"
- dest: "{{ issue_cert_path | dirname }}/ca.pem"
+ content: "{{ issue_cert_result['data']['data']['issuing_ca'] }}\n"
+ dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}"
group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0644') }}"
owner: "{{ issue_cert_file_owner | d('root') }}"
@@ -108,7 +116,7 @@
- name: issue_cert | Copy certificate serial to all hosts
copy:
- content: "{{ issue_cert_result['json']['data']['serial_number'] }}"
+ content: "{{ issue_cert_result['data']['data']['serial_number'] }}"
dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial"
group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0640') }}"
diff --git a/roles/vault/tasks/shared/pki_mount.yml b/roles/vault/tasks/shared/pki_mount.yml
index 3df56e0f809..19fa34ab4ff 100644
--- a/roles/vault/tasks/shared/pki_mount.yml
+++ b/roles/vault/tasks/shared/pki_mount.yml
@@ -1,27 +1,12 @@
---
-- name: "shared/mount | Test if {{ pki_mount_path }} PKI mount exists"
- uri:
- url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}/tune"
- headers: "{{ vault_headers }}"
- ignore_errors: true
- register: vault_pki_mount_check
-- name: shared/mount | Set pki mount type
- set_fact:
- mount_options: "{{ pki_mount_options | combine({'type': 'pki'}) }}"
- when: vault_pki_mount_check|failed
-
-- name: shared/mount | Mount {{ pki_mount_path }} PKI mount if needed
- uri:
- url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}"
- headers: "{{ vault_headers }}"
- method: POST
- body_format: json
- body: "{{ mount_options|d() }}"
- status_code: 204
- when: vault_pki_mount_check|failed
-
-- name: shared/mount | Unset mount options
- set_fact:
- mount_options: {}
- when: vault_pki_mount_check|failed
+- name: shared/mount | Enable {{ pki_mount_path }} PKI mount
+ hashivault_secret_enable:
+ url: "{{ vault_leader_url }}"
+ token: "{{ vault_root_token }}"
+ ca_cert: "{{ vault_cert_dir }}/ca.pem"
+ name: "{{ pki_mount_path }}"
+ backend: "pki"
+ config: "{{ pki_mount_options }}"
+ register: secret_enable_result
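+ # A pre-existing mount is fine on re-runs, so only other errors count as failures.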
+ failed_when: 'secret_enable_result.rc !=0 and "existing mount" not in secret_enable_result.msg'
diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml
index 102532f0c85..5bbe1bd5926 100644
--- a/roles/vault/tasks/shared/sync.yml
+++ b/roles/vault/tasks/shared/sync.yml
@@ -1,5 +1,4 @@
---
-
- name: "sync_file | Cat the file"
command: "cat {{ sync_file_path }}"
register: sync_file_cat
diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml
index 2abef1c5ad1..00750df2ffc 100644
--- a/roles/vault/tasks/shared/sync_file.yml
+++ b/roles/vault/tasks/shared/sync_file.yml
@@ -1,7 +1,6 @@
---
# NOTE: This should be a role (or custom module), but currently include_role is too buggy to use
-
- name: "sync_file | Set facts for directory and file when sync_file_path is defined"
set_fact:
sync_file_dir: "{{ sync_file_path | dirname }}"
diff --git a/roles/vault/templates/docker.service.j2 b/roles/vault/templates/docker.service.j2
index f99035c7749..0ee00732b0e 100644
--- a/roles/vault/templates/docker.service.j2
+++ b/roles/vault/templates/docker.service.j2
@@ -21,13 +21,15 @@ ExecStart={{ docker_bin_dir }}/docker run \
--cap-add=IPC_LOCK \
-v {{ vault_cert_dir }}:{{ vault_cert_dir }} \
-v {{ vault_config_dir }}:{{ vault_config_dir }} \
--v {{ vault_etcd_cert_dir }}:{{ vault_etcd_cert_dir }} \
+-v /etc/ssl:/etc/ssl \
+-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }} \
-v {{ vault_log_dir }}:/vault/logs \
-v {{ vault_roles_dir }}:{{ vault_roles_dir }} \
-v {{ vault_secrets_dir }}:{{ vault_secrets_dir }} \
--entrypoint=vault \
{{ vault_image_repo }}:{{ vault_image_tag }} \
-server --config={{ vault_config_dir }}/config.json
+server --config={{ vault_config_dir }}/config.json \
+--log-level=trace
[Install]
WantedBy=multi-user.target
diff --git a/roles/vault/templates/host.service.j2 b/roles/vault/templates/host.service.j2
index 11bce2f29e3..28fac1dbae0 100644
--- a/roles/vault/templates/host.service.j2
+++ b/roles/vault/templates/host.service.j2
@@ -4,7 +4,7 @@ After=network.target
[Service]
AmbientCapabilities=CAP_IPC_LOCK
-ExecStart=/usr/bin/vault server --config={{ vault_config_dir }}/config.json
+ExecStart={{ bin_dir }}/vault server --config={{ vault_config_dir }}/config.json
LimitNOFILE=40000
NotifyAccess=all
Restart=always
diff --git a/roles/vault/templates/rkt.service.j2 b/roles/vault/templates/rkt.service.j2
index 43dd8fc6d28..e92221161c2 100644
--- a/roles/vault/templates/rkt.service.j2
+++ b/roles/vault/templates/rkt.service.j2
@@ -12,26 +12,34 @@ LimitNOFILE=40000
# Container has the following internal mount points:
# /vault/file/ # File backend storage location
# /vault/logs/ # Log files
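+# Clean up any stale pod left from a previous run; the leading "-" lets this step fail silently.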
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
+
ExecStart=/usr/bin/rkt run \
---insecure-options=image \
---volume hosts,kind=host,source=/etc/hosts,readOnly=true \
---mount volume=hosts,target=/etc/hosts \
---volume=volume-vault-file,kind=host,source=/var/lib/vault \
---volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
---volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
---mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
---volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
---mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
---volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
---mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
---volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
---mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
---volume=vault-etcd-cert-dir,kind=host,source={{ vault_etcd_cert_dir }} \
---mount=volume=vault-etcd-cert-dir,target={{ vault_etcd_cert_dir }} \
-docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
---name={{ vault_container_name }} --net=host \
---caps-retain=CAP_IPC_LOCK \
---exec vault -- server --config={{ vault_config_dir }}/config.json
+ --insecure-options=image \
+ --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
+ --mount volume=hosts,target=/etc/hosts \
+ --volume=volume-vault-file,kind=host,source=/var/lib/vault \
+ --volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
+ --volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
+ --mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
+ --volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
+ --mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
+ --volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
+ --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
+ --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
+ --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
+ --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
+ --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
+ docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
+ --uuid-file-save=/var/run/vault.uuid \
+ --name={{ vault_container_name }} \
+ --net=host \
+ --caps-retain=CAP_IPC_LOCK \
+ --exec vault -- \
+ server \
+ --config={{ vault_config_dir }}/config.json
+
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
[Install]
WantedBy=multi-user.target
diff --git a/roles/win_nodes/kubernetes_patch/defaults/main.yml b/roles/win_nodes/kubernetes_patch/defaults/main.yml
new file mode 100644
index 00000000000..587f73ab42b
--- /dev/null
+++ b/roles/win_nodes/kubernetes_patch/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests"
diff --git a/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json b/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json
new file mode 100644
index 00000000000..d718ff4465e
--- /dev/null
+++ b/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json
@@ -0,0 +1 @@
+{"spec":{"template":{"spec":{"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}
\ No newline at end of file
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
new file mode 100644
index 00000000000..8d88818a513
--- /dev/null
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+
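+# Patch the kube-proxy DaemonSet with a "beta.kubernetes.io/os: linux" nodeSelector
+# so its pods are not scheduled onto Windows worker nodes.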
+- name: Ensure that user manifests directory exists
+ file:
+ path: "{{ kubernetes_user_manifests_path }}/kubernetes"
+ state: directory
+ recurse: yes
+ tags: [init, cni]
+
+- name: Apply kube-proxy nodeselector
+ block:
+ - name: Copy kube-proxy daemonset nodeselector patch
+ copy:
+ src: nodeselector-os-linux-patch.json
+ dest: "{{ kubernetes_user_manifests_path }}/nodeselector-os-linux-patch.json"
+
+ # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
+ - name: Check current nodeselector for kube-proxy daemonset
+ shell: kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta\.kubernetes\.io/os}'
+ register: current_kube_proxy_state
+
+ - name: Apply nodeselector patch for kube-proxy daemonset
+ shell: kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p "$(cat nodeselector-os-linux-patch.json)"
+ args:
+ chdir: "{{ kubernetes_user_manifests_path }}"
+ register: patch_kube_proxy_state
+ when: current_kube_proxy_state.stdout | trim | lower != "linux"
+
+ - debug: msg={{ patch_kube_proxy_state.stdout_lines }}
+ when: patch_kube_proxy_state is not skipped
+
+ - debug: msg={{ patch_kube_proxy_state.stderr_lines }}
+ when: patch_kube_proxy_state is not skipped
+ tags: init
diff --git a/scale.yml b/scale.yml
index bcf6c69b00e..c4cd117f00c 100644
--- a/scale.yml
+++ b/scale.yml
@@ -22,17 +22,26 @@
ansible_ssh_pipelining: true
gather_facts: true
+##We need to generate the etcd certificates beforehand
+- hosts: etcd
+ any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+ roles:
+ - { role: kubespray-defaults}
+ - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+
##Target only workers to get kubelet installed and checking in on any new nodes
- hosts: kube-node
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- - { role: docker, tags: docker }
+
+ - { role: docker, tags: docker, when: container_manager == 'docker' }
+ - { role: cri-o, tags: crio, when: container_manager == 'crio' }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
- { role: kubernetes/node, tags: node }
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 1a0e2307b7e..14daf9d19bd 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -114,7 +114,12 @@
with_items: "{{logs}}"
- name: Pack results and logs
- local_action: raw GZIP=-9 tar --remove-files -cvzf {{dir|default(".")}}/logs.tar.gz -C /tmp collect-info
+ archive:
+ path: "/tmp/collect-info"
+ dest: "{{ dir|default('.') }}/logs.tar.gz"
+ remove: true
+ delegate_to: localhost
+ become: false
run_once: true
- name: Clean up collected command outputs
diff --git a/setup.cfg b/setup.cfg
index 2327160ad35..3c0a2959c27 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,21 +21,22 @@ setup-hooks =
[files]
data_files =
- /usr/share/kubespray/playbooks/ =
+ usr/share/kubespray/playbooks/ =
cluster.yml
upgrade-cluster.yml
scale.yml
reset.yml
+ remove-node.yml
extra_playbooks/upgrade-only-k8s.yml
- /usr/share/kubespray/roles = roles/*
- /usr/share/doc/kubespray/ =
+ usr/share/kubespray/roles = roles/*
+ usr/share/doc/kubespray/ =
LICENSE
README.md
- /usr/share/doc/kubespray/inventory/ =
+ usr/share/doc/kubespray/inventory/ =
inventory/sample/hosts.ini
- /etc/kubespray/ =
+ etc/kubespray/ =
ansible.cfg
- /etc/kubespray/inventory/sample/group_vars/ =
+ etc/kubespray/inventory/sample/group_vars/ =
inventory/sample/group_vars/all.yml
inventory/sample/group_vars/k8s-cluster.yml
diff --git a/tests/Makefile b/tests/Makefile
index 8d17e243c99..30442fb25f7 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -1,4 +1,4 @@
-INVENTORY=$(PWD)/../inventory/sample/hosts.ini
+INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
$(HOME)/.ssh/id_rsa:
mkdir -p $(HOME)/.ssh
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
index 9e734403e51..9c405752924 100644
--- a/tests/ansible.cfg
+++ b/tests/ansible.cfg
@@ -10,3 +10,4 @@ fact_caching_connection = /tmp
stdout_callback = skippy
library = ./library:../library
callback_whitelist = profile_tasks
+jinja2_extensions = jinja2.ext.do
diff --git a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml
index b4cd8e17c15..199fa437cd2 100644
--- a/tests/files/gce_centos-weave-kubeadm.yml
+++ b/tests/files/gce_centos-weave-kubeadm.yml
@@ -7,9 +7,8 @@ startup_script: ""
# Deployment settings
kube_network_plugin: weave
-weave_cpu_limit: "100m"
-weave_cpu_requests: "100m"
kubeadm_enabled: true
deploy_netchecker: true
+kubernetes_audit: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_centos7-cilium.yml b/tests/files/gce_centos7-cilium.yml
index ca682f7ed91..ec46a213d65 100644
--- a/tests/files/gce_centos7-cilium.yml
+++ b/tests/files/gce_centos7-cilium.yml
@@ -7,5 +7,6 @@ mode: default
# Deployment settings
kube_network_plugin: cilium
deploy_netchecker: true
+enable_network_policy: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index 0e4346f674d..f6bfb6cb276 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -2,16 +2,19 @@
cloud_image_family: centos-7
cloud_region: us-central1-c
cloud_machine_type: "n1-standard-1"
-mode: default
+mode: ha
# Deployment settings
kube_network_plugin: flannel
helm_enabled: true
-istio_enabled: true
efk_enabled: true
+kubernetes_audit: true
etcd_events_cluster_setup: true
local_volume_provisioner_enabled: true
etcd_deployment_type: host
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce
+kube_encrypt_secret_data: true
+ingress_nginx_enabled: true
+cert_manager_enabled: true
diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml
index dd579c0322f..883a67e2ac0 100644
--- a/tests/files/gce_coreos-alpha-weave-ha.yml
+++ b/tests/files/gce_coreos-alpha-weave-ha.yml
@@ -7,8 +7,6 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
# Deployment settings
kube_network_plugin: weave
-weave_cpu_limit: "100m"
-weave_cpu_requests: "100m"
bootstrap_os: coreos
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true
diff --git a/tests/files/gce_coreos-calico-aio.yml b/tests/files/gce_coreos-calico-aio.yml
index 123556550e5..7f395abf10a 100644
--- a/tests/files/gce_coreos-calico-aio.yml
+++ b/tests/files/gce_coreos-calico-aio.yml
@@ -1,7 +1,7 @@
# Instance settings
cloud_image_family: coreos-stable
cloud_region: us-central1-a
-cloud_machine_type: "n1-standard-2"
+cloud_machine_type: "n1-standard-1"
mode: aio
##user-data to simply turn off coreos upgrades
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
diff --git a/tests/files/gce_coreos-cilium.yml b/tests/files/gce_coreos-cilium.yml
index a0900397004..1778929f09b 100644
--- a/tests/files/gce_coreos-cilium.yml
+++ b/tests/files/gce_coreos-cilium.yml
@@ -9,5 +9,6 @@ kube_network_plugin: cilium
bootstrap_os: coreos
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
deploy_netchecker: true
+enable_network_policy: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_coreos-vault-upgrade.yml b/tests/files/gce_coreos-vault-upgrade.yml
new file mode 100644
index 00000000000..64302718853
--- /dev/null
+++ b/tests/files/gce_coreos-vault-upgrade.yml
@@ -0,0 +1,13 @@
+# Instance settings
+cloud_machine_type: "n1-standard-1"
+cloud_image_family: coreos-stable
+cloud_region: us-central1-b
+mode: aio
+
+# Deployment settings
+bootstrap_os: coreos
+cert_management: vault
+kube_network_plugin: flannel
+deploy_netchecker: true
+kubedns_min_replicas: 1
+cloud_provider: gce
diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml
new file mode 100644
index 00000000000..e5bea621cc6
--- /dev/null
+++ b/tests/files/gce_opensuse-canal.yml
@@ -0,0 +1,11 @@
+# Instance settings
+cloud_image_family: opensuse-leap
+cloud_region: us-central1-c
+mode: default
+
+# Deployment settings
+bootstrap_os: opensuse
+kube_network_plugin: canal
+deploy_netchecker: true
+kubedns_min_replicas: 1
+cloud_provider: gce
diff --git a/tests/files/gce_rhel7-cilium.yml b/tests/files/gce_rhel7-cilium.yml
index d67658a6c34..0994d009996 100644
--- a/tests/files/gce_rhel7-cilium.yml
+++ b/tests/files/gce_rhel7-cilium.yml
@@ -6,5 +6,6 @@ mode: default
# Deployment settings
kube_network_plugin: cilium
deploy_netchecker: true
+enable_network_policy: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml
index df80a556f82..bfff490daae 100644
--- a/tests/files/gce_rhel7-weave.yml
+++ b/tests/files/gce_rhel7-weave.yml
@@ -5,8 +5,6 @@ mode: default
# Deployment settings
kube_network_plugin: weave
-weave_cpu_limit: "100m"
-weave_cpu_requests: "100m"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_ubuntu-cilium-sep.yml b/tests/files/gce_ubuntu-cilium-sep.yml
index e7150a27ec5..0c064774392 100644
--- a/tests/files/gce_ubuntu-cilium-sep.yml
+++ b/tests/files/gce_ubuntu-cilium-sep.yml
@@ -6,6 +6,7 @@ mode: separate
# Deployment settings
kube_network_plugin: cilium
deploy_netchecker: true
+enable_network_policy: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml
index 133bd907af5..4598672d10c 100644
--- a/tests/files/gce_ubuntu-weave-sep.yml
+++ b/tests/files/gce_ubuntu-weave-sep.yml
@@ -6,8 +6,6 @@ mode: separate
# Deployment settings
bootstrap_os: ubuntu
kube_network_plugin: weave
-weave_cpu_limit: "100m"
-weave_cpu_requests: "100m"
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce
diff --git a/tests/files/gce_ubuntu18-flannel-aio.yml b/tests/files/gce_ubuntu18-flannel-aio.yml
new file mode 100644
index 00000000000..595fbf35809
--- /dev/null
+++ b/tests/files/gce_ubuntu18-flannel-aio.yml
@@ -0,0 +1,14 @@
+# Instance settings
+cloud_image_family: ubuntu-1804-lts
+cloud_region: us-central1-a
+cloud_machine_type: "n1-standard-1"
+mode: aio
+
+# Deployment settings
+
+bootstrap_os: ubuntu
+kube_network_plugin: flannel
+
+deploy_netchecker: true
+kubedns_min_replicas: 1
+cloud_provider: gce
diff --git a/tests/support/aws.groovy b/tests/support/aws.groovy
index a5ce89b8f09..bc13b513a98 100644
--- a/tests/support/aws.groovy
+++ b/tests/support/aws.groovy
@@ -1,9 +1,9 @@
def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
- def inventory_path = pwd() + "/inventory/sample/hosts.ini"
+ def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini"
dir('tests') {
wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
try {
- create_vm("${env.JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
+ create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
install_cluster(inventory_path, credentialsId, network_plugin)
test_apiserver(inventory_path, credentialsId)
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index de5e3a84a46..22f9cab6c97 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -6,7 +6,7 @@
uri:
url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
user: kube
- password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+ password: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
validate_certs: no
status_code: 200,401
when: not kubeadm_enabled|default(false)
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 2fa78545f14..531b84c0643 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -14,7 +14,7 @@
- name: Wait for pods to be ready
- shell: "{{bin_dir}}/kubectl get pods"
+ shell: "{{bin_dir}}/kubectl get pods -n test"
register: pods
until:
- '"ContainerCreating" not in pods.stdout'
@@ -25,18 +25,18 @@
no_log: true
- name: Get pod names
- shell: "{{bin_dir}}/kubectl get pods -o json"
+ shell: "{{bin_dir}}/kubectl get pods -n test -o json"
register: pods
no_log: true
- name: Get hostnet pods
- command: "{{bin_dir}}/kubectl get pods -o
+ command: "{{bin_dir}}/kubectl get pods -n test -o
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: hostnet_pods
no_log: true
- name: Get running pods
- command: "{{bin_dir}}/kubectl get pods -o
+ command: "{{bin_dir}}/kubectl get pods -n test -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: running_pods
no_log: true
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 88969436bd4..7d8534d782c 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -26,7 +26,7 @@
setup:
delegate_to: "{{item}}"
delegate_facts: True
- with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}"
+ with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -34,11 +34,12 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- - { role: docker, tags: docker }
+ - { role: docker, tags: docker, when: container_manager == 'docker' }
+ - { role: cri-o, tags: crio, when: container_manager == 'crio' }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- - { role: download, tags: download, skip_downloads: false }
+ - { role: download, tags: download, when: "not skip_downloads" }
environment: "{{proxy_env}}"
- hosts: etcd:k8s-cluster:vault
@@ -78,10 +79,19 @@
- { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- - { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade }
environment: "{{proxy_env}}"
+#Upgrade calico on all masters and nodes
+- hosts: kube-master:kube-node
+ any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+ serial: "{{ serial | default('20%') }}"
+ roles:
+ - { role: kubespray-defaults}
+ - { role: network_plugin, tags: network }
+ - { role: kubernetes-apps/network_plugin, tags: network }
+ - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -90,7 +100,6 @@
- { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node }
- - { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
- { role: kubespray-defaults}
@@ -102,14 +111,6 @@
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
-- hosts: kube-master
- any_errors_fatal: true
- roles:
- - { role: kubespray-defaults}
- - { role: kubernetes-apps/network_plugin, tags: network }
- - { role: kubernetes-apps/policy_controller, tags: policy-controller }
- - { role: kubernetes/client, tags: client }
-
- hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
@@ -127,4 +128,4 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- - { role: kubernetes-apps, tags: apps }
+ - { role: kubernetes-apps, tags: apps }
\ No newline at end of file