From 8669d109c6755a52419a52a0733ec391af616f5b Mon Sep 17 00:00:00 2001
From: Anand Kumar Martund
Date: Sat, 24 Apr 2021 16:37:25 +0530
Subject: [PATCH] Config.yaml settings update for Vagrantfile

---
 kubernetes/centos/README.md       |  11 +-
 kubernetes/centos/Vagrantfile     |  71 ++++++------
 kubernetes/centos/Vagrantfile.bak | 186 ------------------------------
 kubernetes/centos/config.yaml     |  22 ++++
 4 files changed, 56 insertions(+), 234 deletions(-)
 delete mode 100644 kubernetes/centos/Vagrantfile.bak
 create mode 100644 kubernetes/centos/config.yaml

diff --git a/kubernetes/centos/README.md b/kubernetes/centos/README.md
index 2459a7b..80fd064 100644
--- a/kubernetes/centos/README.md
+++ b/kubernetes/centos/README.md
@@ -9,16 +9,7 @@ And any virtual environment, defualt can use [oracle virtualbox](https://www.vir
 To use kubernetes, can install `kubectl` to access cluster from host but can access via `ssh` to vritual machine also.
 
 ## Basic usage
-Very first `cd` to path where `Vagrant` file exists, and open `Vagrant` file to update setting like below
-
-```bash
-API_VERSION = "2"
-IMAGE = "centos/8"
-IP_PART = "192.160.0"
-NODE_COUNT = 2
-USER = "vagrant"
-CLUSTER = { "master" => "master-node", "node" => "worker-node" }
-```
+First `cd` to the directory where the `Vagrantfile` exists, then open the `config.yaml` file and update the settings before spinning up the cluster.
 
 ### Command line
 To start kubernetes cluster please follow below instructions:
diff --git a/kubernetes/centos/Vagrantfile b/kubernetes/centos/Vagrantfile
index f359abc..87642c3 100644
--- a/kubernetes/centos/Vagrantfile
+++ b/kubernetes/centos/Vagrantfile
@@ -1,12 +1,9 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :
 
-API_VERSION = "2"
-IMAGE = "centos/8"
-IP_PART = "192.160.0"
-NODE_COUNT = 2
-USER = "vagrant"
-CLUSTER = { "master" => "master-node", "node" => "worker-node" }
+require 'yaml'
+k8s = YAML.load_file(File.join(File.dirname(__FILE__), 'config.yaml'))
+ENV["LC_ALL"] = "en_US.UTF-8"
 
 $msg = < admin.conf && rm -f \${HOME}/.kube/config 2>/dev/null; mkdir -p \${HOME}/.kube; cp -i admin.conf \${HOME}/.kube/config; rm -f admin.conf'"}
+      trigger_local.run = {inline: "/bin/bash -c 'vagrant ssh --no-tty -c \"cat /etc/kubernetes/admin.conf\" #{k8s['cluster']['master']} > admin.conf && rm -f \${HOME}/.kube/config 2>/dev/null; mkdir -p \${HOME}/.kube; cp -i admin.conf \${HOME}/.kube/config; rm -f admin.conf'"}
     end
   end
 
-  (1..NODE_COUNT).each do |i|
-    config.vm.define "#{CLUSTER['node']}-#{i}" do |subconfig|
-      subconfig.vm.box = IMAGE
+  (1..k8s['resources']['node']['count']).each do |i|
+    config.vm.define "#{k8s['cluster']['node']}-#{i}" do |subconfig|
+      subconfig.vm.box = k8s['image']
 
-      subconfig.vm.hostname = "#{CLUSTER['node']}-#{i}"
+      subconfig.vm.hostname = "#{k8s['cluster']['node']}-#{i}"
 
-      subconfig.vm.network :private_network, ip: "#{IP_PART}.#{i + 10}"
+      subconfig.vm.network :private_network, ip: "#{k8s['ip_part']}.#{i + 10}"
 
       # Hostfile :: Master node
       subconfig.vm.provision "master-hostfile", type: "shell" do |s|
@@ -87,23 +82,23 @@ Vagrant.configure(API_VERSION) do |config|
           firewall-cmd --permanent --add-port=30000-32767/tcp
           firewall-cmd --reload
         SHELL
-        s.args = ["#{IP_PART}.10", "#{CLUSTER['master']}"]
+        s.args = ["#{k8s['ip_part']}.10", "#{k8s['cluster']['master']}"]
       end
       # Hostfile :: Worker node
-      (1..NODE_COUNT).each do |j|
+      (1..k8s['resources']['node']['count']).each do |j|
         if i != j
           subconfig.vm.provision "other-worker-hostfile", type: "shell" do |supdate|
             supdate.inline = <<-SHELL
               echo -e "$1\t$2" | tee -a /etc/hosts
             SHELL
["#{IP_PART}.#{10 + j}", "#{CLUSTER['node']}-#{j}", "#{USER}", "#{i}"] + supdate.args = ["#{k8s['ip_part']}.#{10 + j}", "#{k8s['cluster']['node']}-#{j}", "#{k8s['user']}", "#{i}"] end else subconfig.vm.provision "self-worker-hostfile", type: "shell" do |supdate| supdate.inline = <<-SHELL echo -e "127.0.0.1\t$2" | tee -a /etc/hosts; echo -e "$1\t$2" | tee -a /etc/hosts SHELL - supdate.args = ["#{IP_PART}.#{10 + j}", "#{CLUSTER['node']}-#{j}", "#{USER}", "#{i}"] + supdate.args = ["#{k8s['ip_part']}.#{10 + j}", "#{k8s['cluster']['node']}-#{j}", "#{k8s['user']}", "#{i}"] end end end @@ -114,19 +109,19 @@ Vagrant.configure(API_VERSION) do |config| end subconfig.trigger.after :up do |trigger_local| - trigger_local.run = {inline: "/bin/bash -c 'wpub_key=$(vagrant ssh --no-tty -c \"cat /home/#{USER}/.ssh/id_rsa.pub\" #{CLUSTER['node']}-#{i}) && vagrant ssh --no-tty -c \"echo \${wpub_key} >> /home/#{USER}/.ssh/authorized_keys\" #{CLUSTER['master']}; mpub_key=$(vagrant ssh --no-tty -c \"cat /home/#{USER}/.ssh/id_rsa.pub\" #{CLUSTER['master']}) && vagrant ssh --no-tty -c \"echo \${mpub_key} >> /home/#{USER}/.ssh/authorized_keys\" #{CLUSTER['node']}-#{i}'"} + trigger_local.run = {inline: "/bin/bash -c 'wpub_key=$(vagrant ssh --no-tty -c \"cat /home/#{k8s['user']}/.ssh/id_rsa.pub\" #{k8s['cluster']['node']}-#{i}) && vagrant ssh --no-tty -c \"echo \${wpub_key} >> /home/#{k8s['user']}/.ssh/authorized_keys\" #{k8s['cluster']['master']}; mpub_key=$(vagrant ssh --no-tty -c \"cat /home/#{k8s['user']}/.ssh/id_rsa.pub\" #{k8s['cluster']['master']}) && vagrant ssh --no-tty -c \"echo \${mpub_key} >> /home/#{k8s['user']}/.ssh/authorized_keys\" #{k8s['cluster']['node']}-#{i}'"} end subconfig.trigger.after :up do |trigger_remote| trigger_remote.run_remote = {inline: <<-SHELL - kube_join=\$(echo "ssh #{USER}@#{CLUSTER['master']} -o StrictHostKeyChecking=no '( cat /home/#{USER}/.bash_profile | grep KUBEADM_JOIN)'" | su - #{USER}) + kube_join=\$(echo "ssh #{k8s['user']}@#{k8s['cluster']['master']} -o StrictHostKeyChecking=no '( cat /home/#{k8s['user']}/.bash_profile | grep KUBEADM_JOIN)'" | su - #{k8s['user']}) kube_join=\$(echo ${kube_join} | awk -F'"' '{print \$2}') - echo "sudo $kube_join" | su - #{USER} + echo "sudo $kube_join" | su - #{k8s['user']} - echo "scp -o StrictHostKeyChecking=no #{USER}@#{CLUSTER['master']}:/etc/kubernetes/admin.conf /home/#{USER}/" | su - #{USER} - echo "mkdir -p /home/#{USER}/.kube" | su - #{USER} - echo "cp -i /home/#{USER}/admin.conf /home/#{USER}/.kube/config" | su - #{USER} - echo "sudo chown #{USER}:#{USER} -R /home/#{USER}/.kube" | su - #{USER} + echo "scp -o StrictHostKeyChecking=no #{k8s['user']}@#{k8s['cluster']['master']}:/etc/kubernetes/admin.conf /home/#{k8s['user']}/" | su - #{k8s['user']} + echo "mkdir -p /home/#{k8s['user']}/.kube" | su - #{k8s['user']} + echo "cp -i /home/#{k8s['user']}/admin.conf /home/#{k8s['user']}/.kube/config" | su - #{k8s['user']} + echo "sudo chown #{k8s['user']}:#{k8s['user']} -R /home/#{k8s['user']}/.kube" | su - #{k8s['user']} SHELL } end @@ -143,6 +138,6 @@ Vagrant.configure(API_VERSION) do |config| config.vm.provision "vm-setup", type: "shell" do |vms| vms.path = "script/bootstrap.sh" - vms.args = ["#{USER}"] + vms.args = ["#{k8s['user']}"] end end diff --git a/kubernetes/centos/Vagrantfile.bak b/kubernetes/centos/Vagrantfile.bak deleted file mode 100644 index 0fd3ebd..0000000 --- a/kubernetes/centos/Vagrantfile.bak +++ /dev/null @@ -1,186 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -API_VERSION = "2" -IMAGE = "centos/8" -DOMAIN = 
"k8.io" -IP_PART = "192.160.0" -NODE_COUNT = 2 -USER = "vagrant" - -$msg = <> /home/${1}/.ssh/authorized_keys - done - SHELL - whu.args = ["#{USER}", "#{NODE_COUNT}", "#{IP_PART}"] - end - - subconfig.vm.provider "virtualbox" do |vb| - vb.memory = "2048" - vb.cpus = 2 - end - - subconfig.vm.provision "Restart VM", type: "shell" do |reboot| - reboot.privileged = true - reboot.inline = <<-SHELL - echo "----------------------------------|| Reboot to load all config" - SHELL - reboot.reboot = true - end - - subconfig.vm.provision "master-node-setup", type: "shell" do |mns| - mns.path = "script/bootstrap_master.sh" - mns.args = ["#{USER}", "#{IP_PART}", "10"] - end - end - - (1..NODE_COUNT).each do |i| - config.vm.define "worker-node-#{i}" do |subconfig| - subconfig.vm.box = IMAGE - - subconfig.vm.hostname = "worker-node-#{i}" - - subconfig.vm.network :private_network, ip: "#{IP_PART}.#{i + 10}" - - # Hostfile :: Master node - subconfig.vm.provision "master-hostfile", type: "shell" do |s| - s.inline = <<-SHELL - echo "----------------------------------|| Update Master node hostfile for master" - echo -e "$1\t$2" | tee -a /etc/hosts - SHELL - s.args = ["#{IP_PART}.10", "master-node"] - end - # Hostfile :: Worker node - (1..NODE_COUNT).each do |j| - if i != j - subconfig.vm.provision "other-worker-hostfile", type: "shell" do |supdate| - supdate.inline = <<-SHELL - echo "----------------------------------|| Update Other worker node hostfile update" - echo -e "$1\t$2" | tee -a /etc/hosts - SHELL - supdate.args = ["#{IP_PART}.#{10 + j}", "worker-node-#{j}", "#{USER}", "#{i}"] - end - else - subconfig.vm.provision "self-worker-hostfile", type: "shell" do |supdate| - supdate.inline = <<-SHELL - echo "----------------------------------|| Self Other worker node hostfile update" - echo -e "127.0.0.1\t$2" | tee -a /etc/hosts; echo -e "$1\t$2" | tee -a /etc/hosts - SHELL - supdate.args = ["#{IP_PART}.#{10 + j}", "worker-node-#{j}", "#{USER}", "#{i}"] - end - end - end - - subconfig.vm.provider "virtualbox" do |vb| - vb.memory = "2048" - vb.cpus = 2 - end - - subconfig.vm.provision "shell" do |supdate| - supdate.inline = <<-SHELL - echo "----------------------------------|| Update authorized_keys file" - cat /home/${1}/.ssh/id_rsa.pub >> /home/${1}/.ssh/authorized_keys - sed -i "s/${1}@master-node/${1}@worker-node-${2}/g" /home/${1}/.ssh/id_rsa.pub - SHELL - supdate.args = ["#{USER}", "#{i}"] - end - - subconfig.vm.provision "Restart VM", type: "shell" do |reboot| - reboot.privileged = true - reboot.inline = <<-SHELL - echo "----------------------------------|| Reboot to load all config" - SHELL - reboot.reboot = true - end - - subconfig.vm.provision "Join to Kubernetes Cluster", type: "shell" do |supdate| - supdate.inline = <<-SHELL - firewall-cmd --permanent --add-port=10250/tcp - firewall-cmd --permanent --add-port=30000-32767/tcp - firewall-cmd --reload - - kube_join=\$(echo "ssh ${1}@${2} -o StrictHostKeyChecking=no '( cat /home/${1}/.bash_profile | grep KUBEADM_JOIN)'" | su - ${1}) - kube_join=\$(echo ${kube_join} | awk -F'"' '{print \$2}') - echo "sudo $kube_join" | su - ${1} - - echo "scp -o StrictHostKeyChecking=no ${1}@${2}:/etc/kubernetes/admin.conf /home/${1}/" | su - ${1} - echo "mkdir -p /home/${1}/.kube" | su - ${1} - echo "cp -i /home/${1}/admin.conf /home/${1}/.kube/config" | su - ${1} - echo "sudo chown ${1}:${1} -R /home/${1}/.kube" | su - ${1} - SHELL - supdate.args = ["#{USER}", "master-node"] - end - end - end - - config.vm.provision "vm-setup", type: "shell" do |vms| - vms.path = 
"script/bootstrap.sh" - vms.args = ["#{USER}"] - end - - config.vm.provision "ssh-configure", type: "shell" do |sshc| - ssh_prv_key = "" - ssh_pub_key = "" - if File.file?("ssh/id_rsa") - ssh_prv_key = File.read("ssh/id_rsa") - ssh_pub_key = File.read("ssh/id_rsa.pub") - else - puts "No SSH key found. You will need to remedy this before pushing to the repository." - end - sshc.inline = <<-SHELL - echo "----------------------------------|| Setup ssh" - - if grep -sq "#{ssh_pub_key}" /home/${1}/.ssh/authorized_keys; then - echo "SSH keys already provisioned." - exit 0; - fi - echo "SSH key provisioning." - mkdir -p /home/${1}/.ssh/ - touch /home/${1}/.ssh/authorized_keys - echo #{ssh_pub_key} > /home/${1}/.ssh/id_rsa.pub - chmod 644 /home/vagrant/.ssh/id_rsa.pub - echo "#{ssh_prv_key}" > /home/${1}/.ssh/id_rsa - chmod 600 /home/${1}/.ssh/id_rsa - chown -R ${1}:${1} /home/${1} - exit 0 - SHELL - sshc.args = ["#{USER}", "#{NODE_COUNT}"] - end - -end \ No newline at end of file diff --git a/kubernetes/centos/config.yaml b/kubernetes/centos/config.yaml new file mode 100644 index 0000000..293df8a --- /dev/null +++ b/kubernetes/centos/config.yaml @@ -0,0 +1,22 @@ +--- +api_version: "2" +image: "centos/8" +ip_part: "192.160.0" +# node_count: 2 +user: "vagrant" + +cluster: + master: "master-node" + node: "worker-node" + +resources: + master: + cpus: 1 + memory: 1024 + node: + count: 2 + cpus: 2 + memory: 2048 + +net: + network_type: private_network