diff --git a/app/directory_generators/ansible_generator.py b/app/directory_generators/ansible_generator.py index 832ccc35..e3965501 100644 --- a/app/directory_generators/ansible_generator.py +++ b/app/directory_generators/ansible_generator.py @@ -5,218 +5,812 @@ group_vars_dir = os.path.join(ansible_dir, "group_vars") host_vars_dir = os.path.join(ansible_dir, "host_vars") roles_dir = os.path.join(ansible_dir, "roles") -preinstall_dir = os.path.join(roles_dir, "preinstall") -tasks_dir = os.path.join(preinstall_dir, "tasks") -defaults_dir = os.path.join(preinstall_dir, "defaults") -files_dir = os.path.join(preinstall_dir, "files") -handlers_dir = os.path.join(preinstall_dir, "handlers") -templates_dir = os.path.join(preinstall_dir, "templates") -vars_dir = os.path.join(preinstall_dir, "vars") # Create project directories -os.makedirs(ansible_dir, exist_ok=True) os.makedirs(group_vars_dir, exist_ok=True) os.makedirs(host_vars_dir, exist_ok=True) os.makedirs(roles_dir, exist_ok=True) + +preinstall_dir = os.path.join(roles_dir, "preinstall") +k8s_dir = os.path.join(roles_dir, "k8s") +init_k8s_dir = os.path.join(roles_dir, "init_k8s") +join_master_dir = os.path.join(roles_dir, "join_master") +join_worker_dir = os.path.join(roles_dir, "join_worker") + os.makedirs(preinstall_dir, exist_ok=True) -os.makedirs(tasks_dir, exist_ok=True) -os.makedirs(defaults_dir, exist_ok=True) -os.makedirs(files_dir, exist_ok=True) -os.makedirs(handlers_dir, exist_ok=True) -os.makedirs(templates_dir, exist_ok=True) -os.makedirs(vars_dir, exist_ok=True) +os.makedirs(k8s_dir, exist_ok=True) +os.makedirs(init_k8s_dir, exist_ok=True) +os.makedirs(join_master_dir, exist_ok=True) +os.makedirs(join_worker_dir, exist_ok=True) # Create ansible.cfg -with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as cfg_file: - cfg_file.write("[defaults]\n") - cfg_file.write("host_key_checking=false\n") +with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: + 
ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") # Create group_vars/all -with open(os.path.join(group_vars_dir, "all"), "w") as all_file: - all_file.write("# General\n") - all_file.write('install_ansible_modules: "true"\n') - all_file.write('disable_transparent_huge_pages: "true"\n') - all_file.write('setup_interface: "false"\n') - all_file.write("\n") - all_file.write("# Network Calico see here for more details https://github.com/projectcalico/calico/releases\n") - all_file.write('calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml"\n') - all_file.write('calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml"\n') - all_file.write('pod_network_cidr: "192.168.0.0/16"\n') - all_file.write("\n") - all_file.write("# DNS\n") - all_file.write('resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online\n') - all_file.write("\n") - all_file.write("# Sanction shekan\n") - all_file.write('use_iran: "true" # change it to "false" if you are outside of iran\n') - all_file.write("\n") - all_file.write("# Docker\n") - all_file.write('docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg"\n') - all_file.write('docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg"\n') - all_file.write('docker_apt_repo: "https://download.docker.com/linux/ubuntu"\n') - all_file.write("\n") - all_file.write("# Kubernetes\n") - all_file.write('kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg"\n') - all_file.write('kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key"\n') - all_file.write('kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/"\n') - all_file.write('k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases\n') - all_file.write("\n") - all_file.write("# CRI\n") - all_file.write('cri_socket: 
unix:///var/run/containerd/containerd.sock\n') - all_file.write("\n") - all_file.write("# VRRP and HAProxy\n") - all_file.write('interface_name: "enp0s8"\n') - all_file.write('virtual_ip: "192.168.178.100"\n') - all_file.write('haproxy_frontend_password: "password"\n') - all_file.write("\n") - all_file.write("# Ansible Connection\n") - all_file.write("\n") - all_file.write('ansible_user: root\n') - all_file.write('ansible_port: 22\n') - all_file.write('ansible_python_interpreter: "/usr/bin/python3"\n') - all_file.write('domain="devopsgpt.com"\n') - all_file.write('apiserver_url="devopsgpt.com"\n') +with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: + group_vars_file.write("""# General +install_ansible_modules: "true" +disable_transparent_huge_pages: "true" + +setup_interface: "false" + +# Network Calico see here for more details https://github.com/projectcalico/calico/releases +calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" +calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" +pod_network_cidr: "192.168.0.0/16" + +# DNS +resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + +# Sanction shekan +use_iran: "true" # change it to "false" if you are outside of iran + +# Docker +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +# Kubernetes +kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" +kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" +kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" +k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + +# CRI +cri_socket: unix:///var/run/containerd/containerd.sock + +# Ansible Connection 
+ansible_user: root +ansible_port: 22 +ansible_python_interpreter: "/usr/bin/python3" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" +""") # Create hosts with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: - hosts_file.write("[all]\n") - hosts_file.write("string private_ip=x.x.x.x\n") - hosts_file.write("string private_ip=x.x.x.x\n") - hosts_file.write("string private_ip=x.x.x.x\n") - hosts_file.write("\n") - hosts_file.write("[k8s]\n") - hosts_file.write("string\n") - hosts_file.write("string\n") - hosts_file.write("\n") - hosts_file.write("[k8s_masters]\n") - hosts_file.write("string\n") - hosts_file.write("\n") - hosts_file.write("[k8s_workers]\n") - hosts_file.write("string\n") - hosts_file.write("\n") - hosts_file.write("[lb]\n") - hosts_file.write("string\n") + hosts_file.write("""[all] +string private_ip=x.x.x.x +string private_ip=x.x.x.x + +[k8s] +string +string + +[k8s_masters] +string + +[k8s_workers] +string +""") # Create kubernetes_playbook.yml with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: - playbook_file.write("- hosts: all\n") - playbook_file.write(" roles:\n") - playbook_file.write(" - role: preinstall\n") - playbook_file.write(" gather_facts: yes\n") - playbook_file.write(" any_errors_fatal: true\n") - playbook_file.write(" tags: [preinstall]\n") - -# Create preinstall/tasks/basic.yml -with open(os.path.join(tasks_dir, "basic.yml"), "w") as basic_file: - basic_file.write("- name: Set timezone to UTC\n") - basic_file.write(" timezone:\n") - basic_file.write(" name: Etc/UTC\n") - basic_file.write("\n") - basic_file.write("- name: Set hostname\n") - basic_file.write(" command: hostnamectl set-hostname {{ inventory_hostname }}\n") - basic_file.write("\n") - basic_file.write("- name: Remove symlink resolve.conf\n") - basic_file.write(" file:\n") - basic_file.write(" path: \"/etc/resolv.conf\"\n") - basic_file.write(" state: absent\n") - basic_file.write(" ignore_errors: true\n") - 
basic_file.write(" when: use_iran == \"true\"\n") - basic_file.write("\n") - basic_file.write("- name: Configure resolv.conf\n") - basic_file.write(" template:\n") - basic_file.write(" src: \"resolv.conf.j2\"\n") - basic_file.write(" dest: \"/etc/resolv.conf\"\n") - basic_file.write(" mode: \"0644\"\n") - basic_file.write(" when: use_iran == \"true\"\n") - basic_file.write("\n") - basic_file.write("- name: Add hostname\n") - basic_file.write(" lineinfile:\n") - basic_file.write(" path: /etc/hosts\n") - basic_file.write(" regexp: '^127\\.0\\.0\\.1'\n") - basic_file.write(" line: \"127.0.0.1 {{ inventory_hostname }} localhost\"\n") - basic_file.write(" owner: root\n") - basic_file.write(" group: root\n") - basic_file.write(" mode: 0644\n") - basic_file.write("\n") - basic_file.write("- name: Install necessary tools\n") - basic_file.write(" apt:\n") - basic_file.write(" update_cache: true\n") - basic_file.write(" name:\n") - basic_file.write(" - vim\n") - basic_file.write(" - sudo\n") - basic_file.write(" - wget\n") - basic_file.write(" - curl\n") - basic_file.write(" - telnet\n") - basic_file.write(" - nload\n") - basic_file.write(" - s3cmd\n") - basic_file.write(" - cron\n") - basic_file.write(" - ipset\n") - basic_file.write(" - lvm2\n") - basic_file.write(" - python3\n") - basic_file.write(" - python3-setuptools\n") - basic_file.write(" - python3-pip\n") - basic_file.write(" - python3-apt\n") - basic_file.write(" - intel-microcode\n") - basic_file.write(" - htop\n") - basic_file.write(" - tcpdump\n") - basic_file.write(" - net-tools\n") - basic_file.write(" - screen\n") - basic_file.write(" - tmux\n") - basic_file.write(" - byobu\n") - basic_file.write(" - iftop\n") - basic_file.write(" - bmon\n") - basic_file.write(" - iperf\n") - basic_file.write(" - sysstat\n") - basic_file.write(" - ethtool\n") - basic_file.write(" - plocate\n") - basic_file.write(" - thin-provisioning-tools\n") - basic_file.write(" - conntrack\n") - basic_file.write(" - stress\n") - 
basic_file.write(" - cpufrequtils\n") - basic_file.write(" - rsync\n") - basic_file.write(" - xz-utils\n") - basic_file.write(" - build-essential\n") - basic_file.write(" - apt-transport-https\n") - basic_file.write(" - ca-certificates\n") - basic_file.write(" - software-properties-common\n") - basic_file.write(" - gnupg-agent\n") - basic_file.write(" - iptables-persistent\n") - basic_file.write(" - open-iscsi\n") - basic_file.write(" - nfs-common\n") - basic_file.write(" - tzdata\n") - basic_file.write(" - tree\n") - basic_file.write(" state: latest\n") - basic_file.write("\n") - basic_file.write("- name: Fix broken packages\n") - basic_file.write(" apt:\n") - basic_file.write(" state: fixed\n") - -# Create preinstall/tasks/main.yml -with open(os.path.join(tasks_dir, "main.yml"), "w") as main_file: - main_file.write("---\n") - main_file.write("- name: basic setup\n") - main_file.write(" include_tasks: basic.yml\n") - -# Create preinstall/defaults/main.yml -with open(os.path.join(defaults_dir, "main.yml"), "w") as defaults_file: - defaults_file.write("# Default variables for preinstall role\n") - -# Create preinstall/files/sample.sh -with open(os.path.join(files_dir, "sample.sh"), "w") as sample_file: - sample_file.write("#!/bin/bash\n") - sample_file.write("# Sample script\n") - -# Create preinstall/handlers/main.yml -with open(os.path.join(handlers_dir, "main.yml"), "w") as handlers_file: - handlers_file.write("# Handlers for preinstall role\n") - -# Create preinstall/templates/resolv.conf.j2 -with open(os.path.join(templates_dir, "resolv.conf.j2"), "w") as resolv_file: - resolv_file.write("# Generated resolv.conf\n") - resolv_file.write("nameserver {{ item }}\n") - resolv_file.write("{% for item in resolv_nameservers %}\n") - resolv_file.write(" {{ item }}\n") - resolv_file.write("{% endfor %}\n") - -# Create preinstall/vars/main.yml -with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file: - vars_file.write("# Variable definitions for preinstall 
role\n") \ No newline at end of file + playbook_file.write("""- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] +""") + +# Create preinstall files +preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") +preinstall_files_dir = os.path.join(preinstall_dir, "files") +preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") +preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") +preinstall_templates_dir = os.path.join(preinstall_dir, "templates") +preinstall_vars_dir = os.path.join(preinstall_dir, "vars") + +os.makedirs(preinstall_defaults_dir, exist_ok=True) +os.makedirs(preinstall_files_dir, exist_ok=True) +os.makedirs(preinstall_handlers_dir, exist_ok=True) +os.makedirs(preinstall_tasks_dir, exist_ok=True) +os.makedirs(preinstall_templates_dir, exist_ok=True) +os.makedirs(preinstall_vars_dir, exist_ok=True) + +with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: + defaults_file.write("") + +with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: + files_file.write("") + +with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: + handlers_file.write("") + +with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: + basic_tasks_file.write("""- name: Set timezone to UTC + timezone: + name: Etc/UTC + +- name: Set hostname + command: hostnamectl set-hostname {{ 
inventory_hostname }} + +- name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran == "true" + +- name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + +- name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\\.0\\.0\\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + +- name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + +- name: Fix broken packages + apt: + state: fixed +""") + +with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: + tasks_main_file.write("""--- +- name: basic setup + include_tasks: basic.yml +""") + +# Create k8s files +k8s_defaults_dir = os.path.join(k8s_dir, "defaults") +k8s_files_dir = os.path.join(k8s_dir, "files") +k8s_handlers_dir = os.path.join(k8s_dir, "handlers") +k8s_tasks_dir = os.path.join(k8s_dir, "tasks") +k8s_templates_dir = os.path.join(k8s_dir, "templates") +k8s_vars_dir = os.path.join(k8s_dir, "vars") + +os.makedirs(k8s_defaults_dir, exist_ok=True) +os.makedirs(k8s_files_dir, exist_ok=True) +os.makedirs(k8s_handlers_dir, exist_ok=True) +os.makedirs(k8s_tasks_dir, exist_ok=True) +os.makedirs(k8s_templates_dir, exist_ok=True) +os.makedirs(k8s_vars_dir, exist_ok=True) + +with 
open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: + k8s_defaults_file.write("") + +with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: + k8s_files_file.write("") + +with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: + k8s_handlers_file.write("""--- +# handlers file for k8s + +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + +- name: Restart kubelet + service: + name: kubelet + state: restarted +""") + +with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: + k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' + replace: '# \\1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes 
+ +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] 
{{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull +""") + +with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: + k8s_tasks_main_file.write("""--- +- name: Install kubernetes packages + include_tasks: k8s.yml +""") + +# Create init_k8s files +init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") +init_k8s_files_dir = os.path.join(init_k8s_dir, "files") +init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") +init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") +init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") +init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") + +os.makedirs(init_k8s_defaults_dir, exist_ok=True) +os.makedirs(init_k8s_files_dir, exist_ok=True) +os.makedirs(init_k8s_handlers_dir, exist_ok=True) +os.makedirs(init_k8s_tasks_dir, exist_ok=True) 
+os.makedirs(init_k8s_templates_dir, exist_ok=True) +os.makedirs(init_k8s_vars_dir, exist_ok=True) + +with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: + init_k8s_defaults_file.write("") + +with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: + init_k8s_files_file.write("") + +with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: + init_k8s_handlers_file.write("") + +with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: + init_k8s_tasks_cni_file.write("""- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true +""") + +with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: + init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: 
inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + +- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." 
+""") + +with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: + init_k8s_tasks_main_file.write("""--- +# tasks file for init_k8s + +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml + +- name: Initialize Calico CNI + include_tasks: cni.yml +""") + +# Create join_master files +join_master_defaults_dir = os.path.join(join_master_dir, "defaults") +join_master_files_dir = os.path.join(join_master_dir, "files") +join_master_handlers_dir = os.path.join(join_master_dir, "handlers") +join_master_tasks_dir = os.path.join(join_master_dir, "tasks") +join_master_templates_dir = os.path.join(join_master_dir, "templates") +join_master_vars_dir = os.path.join(join_master_dir, "vars") + +os.makedirs(join_master_defaults_dir, exist_ok=True) +os.makedirs(join_master_files_dir, exist_ok=True) +os.makedirs(join_master_handlers_dir, exist_ok=True) +os.makedirs(join_master_tasks_dir, exist_ok=True) +os.makedirs(join_master_templates_dir, exist_ok=True) +os.makedirs(join_master_vars_dir, exist_ok=True) + +with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: + join_master_defaults_file.write("") + +with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: + join_master_files_file.write("") + +with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: + join_master_handlers_file.write("") + +with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: + join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local 
file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + 
+- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] +""") + +with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: + join_master_tasks_main_file.write("""--- +# tasks file for join_master + +- name: Join master(s) node to cluster + include_tasks: join_master.yml +""") + +# Create join_worker files +join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") +join_worker_files_dir = os.path.join(join_worker_dir, "files") +join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") +join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") +join_worker_templates_dir = os.path.join(join_worker_dir, "templates") +join_worker_vars_dir = os.path.join(join_worker_dir, "vars") + +os.makedirs(join_worker_defaults_dir, exist_ok=True) +os.makedirs(join_worker_files_dir, exist_ok=True) +os.makedirs(join_worker_handlers_dir, exist_ok=True) +os.makedirs(join_worker_tasks_dir, exist_ok=True) +os.makedirs(join_worker_templates_dir, exist_ok=True) +os.makedirs(join_worker_vars_dir, exist_ok=True) + +with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: + join_worker_defaults_file.write("") + +with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: + join_worker_files_file.write("") + +with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: + join_worker_handlers_file.write("") + +with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: + join_worker_tasks_join_worker_file.write("""- name: Init cluster | 
Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists +""") + +with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: + join_worker_tasks_main_file.write("""--- +# tasks file for join_worker + +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml +""") \ No newline at end of file diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all index 977e1eba..03bf2832 100644 --- a/app/media/MyAnsible/group_vars/all +++ b/app/media/MyAnsible/group_vars/all @@ -1,6 +1,7 @@ # General install_ansible_modules: "true" disable_transparent_huge_pages: "true" + setup_interface: "false" # Network Calico see here for more details https://github.com/projectcalico/calico/releases @@ -28,15 +29,9 @@ k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ # CRI cri_socket: unix:///var/run/containerd/containerd.sock -# VRRP and HAProxy -interface_name: "enp0s8" -virtual_ip: "192.168.178.100" 
-haproxy_frontend_password: "password" - # Ansible Connection - ansible_user: root ansible_port: 22 ansible_python_interpreter: "/usr/bin/python3" -domain="devopsgpt.com" -apiserver_url="devopsgpt.com" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 85b1dc88..79eace5b 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -1,7 +1,6 @@ [all] string private_ip=x.x.x.x string private_ip=x.x.x.x -string private_ip=x.x.x.x [k8s] string @@ -12,6 +11,3 @@ string [k8s_workers] string - -[lb] -string diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml index 9045634e..ea5f7985 100644 --- a/app/media/MyAnsible/kubernetes_playbook.yml +++ b/app/media/MyAnsible/kubernetes_playbook.yml @@ -4,3 +4,35 @@ gather_facts: yes any_errors_fatal: true tags: [preinstall] + +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] diff --git a/app/media/MyAnsible/roles/init_k8s/defaults/main.yml b/app/media/MyAnsible/roles/init_k8s/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/files/sample.sh b/app/media/MyAnsible/roles/init_k8s/files/sample.sh new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/handlers/main.yml b/app/media/MyAnsible/roles/init_k8s/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml 
b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index ff134a27..a1836485 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -14,6 +14,7 @@ - name: Init cluster | Initiate cluster on node groups['kube_master'][0] shell: kubeadm init --config=/root/kubeadmcnf.yaml register: kubeadm_init + # Retry is because upload config sometimes fails until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr notify: Restart kubelet @@ -49,12 +50,14 @@ command: reboot async: 1 poll: 0 + # ignore_errors: yes delegate_to: "{{ groups['k8s_masters'][0] }}" - name: Sleep for 300 seconds to Master1 up and running wait_for: timeout: 300 delegate_to: localhost + # when: use_iran == "true" - name: Example Task After Reboot debug: diff --git a/app/media/MyAnsible/roles/join_master/defaults/main.yml b/app/media/MyAnsible/roles/join_master/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/files/join-command b/app/media/MyAnsible/roles/join_master/files/join-command new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/handlers/main.yml b/app/media/MyAnsible/roles/join_master/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml new file mode 100644 index 00000000..f82dbee0 --- /dev/null +++ b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml @@ -0,0 +1,100 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local 
file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + 
+- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml new file mode 100644 index 00000000..316b5b1d --- /dev/null +++ b/app/media/MyAnsible/roles/join_master/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# tasks file for join_master + +- name: Join master(s) node to cluster + include_tasks: join_master.yml diff --git a/app/media/MyAnsible/roles/join_worker/defaults/main.yml b/app/media/MyAnsible/roles/join_worker/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/files/join-command b/app/media/MyAnsible/roles/join_worker/files/join-command new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/handlers/main.yml b/app/media/MyAnsible/roles/join_worker/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml new file mode 100644 index 00000000..b9b94947 --- /dev/null +++ b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml @@ -0,0 +1,38 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy 
content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml new file mode 100644 index 00000000..a43175cc --- /dev/null +++ b/app/media/MyAnsible/roles/join_worker/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# tasks file for join_worker + +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml diff --git a/app/media/MyAnsible/roles/k8s/defaults/main.yml b/app/media/MyAnsible/roles/k8s/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/k8s/files/sample.sh b/app/media/MyAnsible/roles/k8s/files/sample.sh new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml index a346e99b..4620eef3 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -5,7 +5,7 @@ - name: Disable SWAP in fstab since kubernetes can't work with swap enabled replace: path: /etc/fstab - regexp: '^([^#].*?\sswap\ssw\s+.*)$' + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' replace: '# \1' - name: Check if ufw is installed @@ -128,3 +128,68 @@ systemd: name: containerd enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + 
state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull diff --git a/app/media/MyAnsible/roles/lb/templates/check_apiserveer.sh.j2 b/app/media/MyAnsible/roles/lb/templates/check_apiserveer.sh.j2 deleted file mode 100644 index f2867a78..00000000 --- a/app/media/MyAnsible/roles/lb/templates/check_apiserveer.sh.j2 +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -errorExit() { - echo "*** $@" 1>&2 - exit 1 -} - -curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/" -if ip addr | grep -q {{ virtual_ip }}; then - curl 
--silent --max-time 2 --insecure https://{{ virtual_ip }}:6443/ -o /dev/null || errorExit "Error GET https://{{ virtual_ip }}:6443/" -fi diff --git a/app/media/MyAnsible/roles/lb/templates/haproxy.cfg.j2 b/app/media/MyAnsible/roles/lb/templates/haproxy.cfg.j2 deleted file mode 100644 index d9c0b6c5..00000000 --- a/app/media/MyAnsible/roles/lb/templates/haproxy.cfg.j2 +++ /dev/null @@ -1,28 +0,0 @@ -# HAProxy Statistics Report Page -frontend stats-frontend - bind *:8080 - mode http - stats enable - stats hide-version - stats uri /stats - stats realm Haproxy\ Statistics - stats auth admin:{{ haproxy_frontend_password }} # Change 'admin:password' to your desired strong username and password - -# No backend is required for exporting stats in HAProxy. - - -frontend kubernetes-frontend - bind *:6443 - mode tcp - option tcplog - default_backend kubernetes-backend - -backend kubernetes-backend - option httpchk GET /healthz - http-check expect status 200 - mode tcp - option ssl-hello-chk - balance roundrobin -{% for host in groups['k8s_masters'] %} - server {{ host }} {{ hostvars[host]['private_ip'] }}:6443 check fall 3 rise 2 -{% endfor %} \ No newline at end of file diff --git a/app/media/MyAnsible/roles/lb/templates/keepalived.conf.j2 b/app/media/MyAnsible/roles/lb/templates/keepalived.conf.j2 deleted file mode 100644 index 17648153..00000000 --- a/app/media/MyAnsible/roles/lb/templates/keepalived.conf.j2 +++ /dev/null @@ -1,26 +0,0 @@ -vrrp_script check_apiserver { - script "/etc/keepalived/check_apiserver.sh" - interval 3 # check api server every 3 seconds - timeout 10 # timeout second if api server doesn't answered - fall 5 # failed time - rise 2 # success 2 times - weight -2 # if failed is done it reduce 2 of the weight -} - -vrrp_instance VI_1 { - state BACKUP - interface {{ interface_name }} # set your interface - virtual_router_id 1 - priority 100 - advert_int 5 - authentication { - auth_type PASS - auth_pass mysecret - } - virtual_ipaddress { - {{ virtual_ip }} - 
} - track_script { - check_apiserver - } -} \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/defaults/main.yml b/app/media/MyAnsible/roles/preinstall/defaults/main.yml index 72f1176b..e69de29b 100644 --- a/app/media/MyAnsible/roles/preinstall/defaults/main.yml +++ b/app/media/MyAnsible/roles/preinstall/defaults/main.yml @@ -1 +0,0 @@ -# Default variables for preinstall role diff --git a/app/media/MyAnsible/roles/preinstall/files/sample.sh b/app/media/MyAnsible/roles/preinstall/files/sample.sh index 6bd87075..e69de29b 100644 --- a/app/media/MyAnsible/roles/preinstall/files/sample.sh +++ b/app/media/MyAnsible/roles/preinstall/files/sample.sh @@ -1,2 +0,0 @@ -#!/bin/bash -# Sample script diff --git a/app/media/MyAnsible/roles/preinstall/handlers/main.yml b/app/media/MyAnsible/roles/preinstall/handlers/main.yml index 9d2abf5a..e69de29b 100644 --- a/app/media/MyAnsible/roles/preinstall/handlers/main.yml +++ b/app/media/MyAnsible/roles/preinstall/handlers/main.yml @@ -1 +0,0 @@ -# Handlers for preinstall role diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml index 64878e48..43fae8cd 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml @@ -30,6 +30,7 @@ - name: Install necessary tools apt: + state: latest update_cache: true name: - vim @@ -75,7 +76,6 @@ - nfs-common - tzdata - tree - state: latest - name: Fix broken packages apt: diff --git a/app/media/MyAnsible/roles/preinstall/vars/main.yml b/app/media/MyAnsible/roles/preinstall/vars/main.yml deleted file mode 100644 index 64ff7ae5..00000000 --- a/app/media/MyAnsible/roles/preinstall/vars/main.yml +++ /dev/null @@ -1 +0,0 @@ -# Variable definitions for preinstall role diff --git a/app/models/compose_models.py b/app/models/compose_models.py index 4294f751..daa81ba8 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -31,14 
+31,15 @@ class Build(BaseModel): dockerfile:str class Service(BaseModel): image:str = 'nginx' + name:str = 'web_server' container_name:str = 'web_server' build: Build | None = None version:str = 'latest' - volumes:List[Volume] - depends_on:List[str] + volumes:List[Volume] | None = None + depends_on:List[str] | None = None ports:List[Port] - networks:List[Network] - environments:List[EnvironmentVariable] + networks:List[Network] | None = None + environments:List[EnvironmentVariable] | None = None @computed_field @property diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index e69de29b..5c01e4d0 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -0,0 +1,839 @@ + +def ansible_kuber_install(input): + + kubernetes_ansible_port = input.ansible_port + kubernetes_ansible_user = input.ansible_user + k8s_master_nodes = input.k8s_master_nodes + k8s_worker_nodes = input.k8s_worker_nodes + k8s_version = input.version + sections = { + "[all]": [f"{name} private_ip=x.x.x.x" for name in k8s_master_nodes + k8s_worker_nodes], + "[k8s]": k8s_master_nodes + k8s_worker_nodes, + "[k8s_masters]": k8s_master_nodes, + "[k8s_workers]": k8s_worker_nodes, + } + kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) + + inventory_hostname = "{{ inventory_hostname }}" + item_in_task = "{{ item }}" + ufw_in_task = "'ufw'" + docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" + docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" + architecture_stdout_in_task = "{{ architecture.stdout }}" + docker_apt_repo_in_task = "{{ docker_apt_repo }}" + distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" + kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" + kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" + kubernetes_apt_repo_in_task = "{{ 
kubernetes_apt_repo }}" + private_ip_in_task = "{{ private_ip }}" + hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" + domain_in_task = "{{ domain }}" + groups_all_in_task = "{{ groups['all'] }}" + hostvars_groups_k8s_masters_private_ip_in_task = "{{ hostvars[groups['k8s_masters'][0]].private_ip }}" + apiserver_url_in_task = "{{ apiserver_url }}" + groups_k8s_masters_in_task = "{{ groups['k8s_masters'][0] }}" + calico_operator_url_in_task = "{{ calico_operator_url }}" + calico_crd_url_in_task = "{{ calico_crd_url }}" + join_command_stdout_lines_in_task = "{{ join_command.stdout_lines[0] }}" + kubeadm_cert_key_stdout_lines_in_task = "{{ kubeadm_cert_key.stdout_lines[2] }}" + hostvars_k8s_masters_control_plane_certkey_in_task = "{{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }}" + cri_socket_in_task = "{{ cri_socket }}" + + + + prompt = f""" + Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) + that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide + Python code, no explanations or markdown formatting, without ```python entry. 
+ The project should be organized as follows: + + The structure of this project must be as follows: + ``` + ├── ansible.cfg + ├── group_vars + │   |── all + │   + ├── hosts + ├── host_vars + ├── kubernetes_playbook.yml + └── roles + └── preinstall + ├── defaults + │   └── main.yml + ├── files + │   └── sample.sh + ├── handlers + │   └── main.yml + ├── tasks + │   └── basic.yml + │   └── main.yml + ├── templates + │   └── resolv.conf.j2 + └── vars + | └── main.yml + k8s + ├── defaults + │   └── main.yml + ├── files + │   └── sample.sh + ├── handlers + │   └── main.yml + ├── tasks + │   └── k8s.yml + │   └── main.yml + ├── templates + │   └── sample.j2 + └── vars + | └── main.yml + init_k8s + ├── defaults + │   └── main.yml + ├── files + │   └── sample.sh + ├── handlers + │   └── main.yml + ├── tasks + │   └── cni.yml + │   └── initk8s.yml + │   └── main.yml + ├── templates + │   └── kubeadmcnf.yml.j2 + └── vars + | └── main.yml + join_master + ├── defaults + │   └── main.yml + ├── files + │   └── sample.sh + ├── handlers + │   └── main.yml + ├── tasks + │   └── join_master.yml + │   └── main.yml + ├── templates + │   └── kubeadmcnf-join.yml.j2 + └── vars + | └── main.yml + join_worker + ├── defaults + │   └── main.yml + ├── files + │   └── sample.sh + ├── handlers + │   └── main.yml + ├── tasks + │   └── join_worker.yml + │   └── main.yml + ├── templates + │   └── sample.j2 + └── vars + └── main.yml + ``` + - The content of ansible.cfg must be as follows: + ``` + [defaults] + host_key_checking=false + ``` + - group_vars directory includes a single file called "all" and the content of this file must be as follows: + ``` + # General + install_ansible_modules: "true" + disable_transparent_huge_pages: "true" + + setup_interface: "false" + + # Network Calico see here for more details https://github.com/projectcalico/calico/releases + calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" + calico_crd_url: 
"https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" + pod_network_cidr: "192.168.0.0/16" + + # DNS + resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + + # Sanction shekan + use_iran: "true" # change it to "false" if you are outside of iran + + # Docker + docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" + docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" + docker_apt_repo: "https://download.docker.com/linux/ubuntu" + + # Kubernetes + kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" + kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/Release.key" + kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/" + k8s_version: "{k8s_version}.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + + # CRI + cri_socket: unix:///var/run/containerd/containerd.sock + + # Ansible Connection + + ansible_user: {kubernetes_ansible_user} + ansible_port: {kubernetes_ansible_port} + ansible_python_interpreter: "/usr/bin/python3" + domain: "devopsgpt.com" + apiserver_url: "devopsgpt.com" + ``` + - there is file called "hosts" which its content must be as follows: + ``` + {kubernetes_inventory} + ``` + - There is an empty directory called "host_vars" with no files included + - There is a file called "kubernetes_playbook.yml" which its content must be as follows: + ``` + - hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + + - hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + + - hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + + - hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + + - hosts: k8s_workers + roles: + - role: 
preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + ``` + - There is a directory called "roles" which a sub-directory called "preinstall" (roles/preinstall): + "preinstall" has multiple sub-directories, so let's dive deeper into each its sub-directories: + - (preinstall/tasks): This path has two files called "basic.yml" and "main.yml". + + 1. Create "preinstall/tasks/basic.yml" and it must be as follows:" + ``` + - name: Set timezone to UTC + timezone: + name: Etc/UTC + + - name: Set hostname + command: hostnamectl set-hostname {inventory_hostname} + + - name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran == "true" + + - name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + + - name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\.0\.0\.1' + line: "127.0.0.1 {inventory_hostname} localhost" + owner: root + group: root + mode: 0644 + + - name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + + - name: Fix broken packages + apt: + state: fixed + ``` + + 2. 
Create preinstall/tasks/main.yml and it must be as follows:" + ``` + --- + - name: basic setup + include_tasks: basic.yml + ``` + - There is a directory called "roles" which a sub-directory called "k8s" (roles/k8s): + "k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: + - (k8s/tasks): This path has two files called "k8s.yml" and "main.yml". + + 1. Create k8s/tasks/k8s.yml and it must be as follows:" + ``` + - name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + + - name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \\1' + + - name: Check if ufw is installed + package_facts: + manager: "auto" + + - name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "{ufw_in_task} in ansible_facts.packages" + + - name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{item_in_task}" + create: yes + state: present + loop: + - overlay + - br_netfilter + + - name: Load kernel modules + command: + cmd: "modprobe {item_in_task}" + loop: + - overlay + - br_netfilter + + - name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {{mark}} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + + - name: Reload sysctl settings + command: + cmd: sysctl --system + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + + state: present + update_cache: yes + + - name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as 
necessary + owner: root # Set the owner, if required + group: root + + - name: Remove existing Docker GPG key if it exists + file: + path: '{docker_gpg_key_path_in_task}' + state: absent + + - name: Download Docker GPG key + shell: | + curl -fsSL {docker_gpg_key_url_in_task} | gpg --dearmor -o {docker_gpg_key_path_in_task} + + - name: Determine the architecture + command: dpkg --print-architecture + register: architecture + + - name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + + - name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={architecture_stdout_in_task} signed-by={docker_gpg_key_path_in_task}] {docker_apt_repo_in_task} {distribution_codename_stdout_in_task} stable" + state: present + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + + - name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + + - name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + + - name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + + - name: Enable containerd service + systemd: + name: containerd + enabled: yes + - name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{kubernetes_gpg_keyring_path_in_task}' + state: absent + + - name: Download Kubernetes GPG key + shell: | + curl -fsSL '{kubernetes_gpg_key_url_in_task}' | gpg --dearmor -o '{kubernetes_gpg_keyring_path_in_task}' + + - name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={kubernetes_gpg_keyring_path_in_task}] {kubernetes_apt_repo_in_task} /" + state: present + filename: kubernetes.list + + - name: Update 
apt cache + apt: + update_cache: yes + + - name: Install Kubernetes packages + apt: + name: "{item_in_task}" + state: present + loop: + - kubeadm={k8s_version}.2-1.1 + - kubelet={k8s_version}.2-1.1 + - kubectl={k8s_version}.2-1.1 + + - name: Hold Kubernetes packages + dpkg_selections: + name: "{item_in_task}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + + - name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={private_ip_in_task} + create: yes + state: present + notify: Restart kubelet + + - name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{hostvars_private_ip_in_task} {item_in_task} {item_in_task}.{domain_in_task}" + state: present + create: no + loop: "{groups_all_in_task}" + when: hostvars[item].private_ip is defined + + - name: Add apiserver_url to point to the masters temporary" + lineinfile: + dest: /etc/hosts + line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" + state: present + + - name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull + ``` + 2. Create k8s/tasks/main.yml and it must be as follows:" + ``` + --- + - name: Install kubernetes packages + include_tasks: k8s.yml + ``` + - (k8s/handlers): This path has a file called "main.yml". + + 3. Create k8s/handlers/main.yml and it must be as follows:" + ``` + --- + # handlers file for k8s + + - name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + + - name: Restart kubelet + service: + name: kubelet + state: restarted + ``` + - There is a directory called "roles" which a sub-directory called "init_k8s" (roles/init_k8s): + "init_k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: + - (init_k8s/tasks): This path has three files called "cni.yml", "initk8s.yml" and "main.yml". + + 1. 
Create init_k8s/tasks/cni.yml and it must be as follows:" + ``` + - block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{groups_k8s_masters_in_task}" + + - block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {calico_operator_url_in_task} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {calico_crd_url_in_task} + retries: 3 + delay: 3 + delegate_to: "{groups_k8s_masters_in_task}" + when: calico_crd_check.rc != 0 + run_once: true + ``` + 2. Create init_k8s/tasks/initk8s.yml and it must be as follows:" + ``` + - name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{groups_k8s_masters_in_task}" + + - block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{groups_k8s_masters_in_task}" + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{groups_k8s_masters_in_task}" + + - name: Sleep for 300 seconds and reboot the Master1 
server + wait_for: + timeout: 300 + delegate_to: localhost + + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{groups_k8s_masters_in_task}" + + - name: Sleep for 300 seconds for Master1 to be up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." + ``` + 3. Create init_k8s/tasks/main.yml and it must be as follows: + ``` + --- + # tasks file for init_k8s + + - name: Initialize kubernetes cluster + include_tasks: initk8s.yml + + - name: Initialize Calico CNI + include_tasks: cni.yml + ``` + - There is a directory called "roles" which has a sub-directory called "join_master" (roles/join_master): + "join_master" has multiple sub-directories, so let's dive deeper into each of its sub-directories: + - (join_master/tasks): This path has two files called "join_master.yml" and "main.yml". + + 1. Create "join_master/tasks/join_master.yml" and it must be as follows: + ``` + - name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{join_command_stdout_lines_in_task}" + + - name: Copy join command to local file + become: false + local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{groups_k8s_masters_in_task}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != 
groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{kubeadm_cert_key_stdout_lines_in_task}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{kubeadm_cert_key_stdout_lines_in_task}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{groups_k8s_masters_in_task}" + run_once: false + delegate_facts: true + + - name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={hostvars_k8s_masters_control_plane_certkey_in_task} --cri-socket={cri_socket_in_task}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - name: remove apiserver_url to point to the masters temporarily + lineinfile: + dest: /etc/hosts + line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" + state: absent + + + - name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{private_ip_in_task} {apiserver_url_in_task}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] + ``` + 2. 
Create join_master/tasks/main.yml and it must be as follows: + ``` + --- + # tasks file for join_master + + - name: Join master(s) node to cluster + include_tasks: join_master.yml + + ``` + - There is a directory called "roles" which has a sub-directory called "join_worker" (roles/join_worker): + "join_worker" has multiple sub-directories, so let's dive deeper into each of its sub-directories: + - (join_worker/tasks): This path has two files called "join_worker.yml" and "main.yml". + + 1. Create "join_worker/tasks/join_worker.yml" and it must be as follows: + ``` + - name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{join_command_stdout_lines_in_task}" + + - name: Copy join command to local file + become: false + local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{groups_k8s_masters_in_task}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + ``` + 2. Create join_worker/tasks/main.yml and it must be as follows: + ``` + --- + # tasks file for join_worker + + - name: Join worker(s) node to cluster + include_tasks: join_worker.yml + ``` + Finally, just give me Python code, without any notes, that can generate a project folder with the + given schema, without a ```python entry. 
and we don't need any base directory in the Python code. + the final ansible template must work very well without any error! + + the python code you give me, must have a structure like this: + + import os + project_name = "app/media/MyAnsible" + foo_dir = os.path.join(project_name, "bar") + x_dir = os.path.join(modules_dir, "y") + + # Create project directories + os.makedirs(ansible_dir, exist_ok=True) + + # Create main.tf + with open(os.path.join(project_name, "main.tf"), "w") as main_file: + # any thing you need + """ + return prompt + diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index f2589073..23b7cf92 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,13 +1,13 @@ def docker_compose_generator(input): compose_network = input.network.name compose_services = input.services - services = [i.container_name for i in compose_services] - images = [{i.container_name:i.image_full} for i in compose_services] - volumes = [{i.container_name:i.volumes_full} for i in compose_services] - depends_on = [{i.container_name:i.depends_on} for i in compose_services] - ports = [{i.container_name:i.ports} for i in compose_services] - env = [{i.container_name:i.environments} for i in compose_services] - networks = [{i.container_name:i.networks} for i in compose_services] + services = [i.name for i in compose_services] + images = [{i.name:i.image_full} for i in compose_services] + volumes = [{i.name:i.volumes_full} for i in compose_services] + depends_on = [{i.name:i.depends_on} for i in compose_services] + ports = [{i.name:i.ports} for i in compose_services] + env = [{i.name:i.environments} for i in compose_services] + networks = [{i.name:i.networks} for i in compose_services] prompt = f"""