Skip to content

Commit

Permalink
feat: Updates to support k8s version 1.3X.x and Proxmox 8.3.x (#17)
Browse files Browse the repository at this point in the history
feat: Updating the Proxmox Terraform provider to work with Proxmox 8.3.x
Updates to move to the new k8s package repos
Updates to support 1.3x.x k8s versions
  • Loading branch information
ash0ne authored Jan 17, 2025
1 parent 21c519b commit 33ae9c7
Show file tree
Hide file tree
Showing 5 changed files with 67 additions and 29 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
### Run the ansible playbooks in order
- Apply the common playbook first by running `ansible-playbook -i ./ansible/inventory/hosts --key-file <private_ssh_key> ./ansible/roles/common/tasks/main.yaml`
- Apply the main-node playbook to initialise k8s master node by running `ansible-playbook -i ./ansible/inventory/hosts --key-file <private_ssh_key> ./ansible/roles/main-node/tasks/main.yaml`
- At this point, ssh into the master node by running `ssh ubuntu@<main-node-ip> -i <private_ssh_key>` and install the cluster network by running `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml`
- At this point, ssh into the master node by running `ssh ubuntu@<main-node-ip> -i <private_ssh_key>` and install the cluster network by running `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/calico.yaml`
- You should now have your core `kube-system` pods running and should see the below output if you run `kubectl get pod -A`

![kube-system](https://github.com/ash0ne/proxmox-kubernetes/assets/136186619/dfcb5737-827b-4379-988a-c828a425d6e6)
Expand Down
30 changes: 19 additions & 11 deletions ansible/roles/common/tasks/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -79,24 +79,32 @@
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd
- name: add google apt gpg key
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present

- name: add kubernetes apt repository
- name: Ensure /etc/apt/keyrings directory exists
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: '0755'

- name: add Kubernetes apt-key
get_url:
url: https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key
dest: /etc/apt/keyrings/kubernetes-apt-keyring.asc
mode: '0644'
force: true

- name: add Kubernetes' APT repository
apt_repository:
repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.asc] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /"
state: present
filename: 'kubernetes'
update_cache: yes

- name: install kubernetes
apt:
update_cache: yes
pkg:
- kubelet=1.27.2-00
- kubeadm=1.27.2-00
- kubectl=1.27.2-00
- kubelet
- kubeadm
- kubectl

- name: exclude kube components from apt upgrades
shell: |
Expand Down
2 changes: 1 addition & 1 deletion ansible/roles/main-node/tasks/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
when: reset_cluster is succeeded
shell: |
kubeadm init --service-cidr 10.96.0.0/12 \
--kubernetes-version 1.27.2 \
--kubernetes-version 1.30.2 \
--pod-network-cidr 10.244.0.0/16 \
--cri-socket /var/run/containerd/containerd.sock
register: init_cluster
Expand Down
59 changes: 43 additions & 16 deletions terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ terraform {
required_providers {
proxmox = {
source = "telmate/proxmox"
version = "2.9.14"
version = "3.0.1-rc5"
}
}
}
Expand All @@ -20,27 +20,41 @@ resource "proxmox_vm_qemu" "kube-server" {
count = 1
name = "kube-server-0${count.index + 1}"
target_node = var.target_node_main
vmid = "70${count.index + 1}"
vmid = "50${count.index + 1}"
qemu_os = "other"
clone = var.vm_template_name
agent = 1
os_type = "cloud-init"
full_clone = true
cores = 2
sockets = 1
cpu = "host"
memory = 4096
scsihw = "virtio-scsi-pci"
scsihw = "virtio-scsi-single"
bootdisk = "scsi0"

serial {
id = 0
type = "socket"
}

disk {
slot = "scsi0"
size = "64G"
type = "disk"
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
replicate = true
}

disk {
slot = 0
size = "32G"
type = "scsi"
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
iothread = 0
slot = "ide2"
size = "4M"
type = "cloudinit"
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
}

network {
id = 0
model = "virtio"
bridge = "vmbr0"
}
Expand Down Expand Up @@ -68,27 +82,40 @@ resource "proxmox_vm_qemu" "kube-agent" {
count = 2
name = "kube-agent-0${count.index + 1}"
target_node = var.target_node_agent
vmid = "80${count.index + 1}"
vmid = "60${count.index + 1}"
qemu_os = "other"
clone = var.vm_template_name
agent = 1
os_type = "cloud-init"
cores = 1
sockets = 1
cpu = "host"
cpu_type = "host"
memory = 3072
scsihw = "virtio-scsi-pci"
scsihw = "virtio-scsi-single"
bootdisk = "scsi0"

disk {
slot = 0
size = "16G"
type = "scsi"
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
iothread = 0
slot = "scsi0"
size = "64G"
type = "disk"
replicate = true
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
}

disk {
slot = "ide2"
type = "cloudinit"
size = "4M"
storage = var.file_system == "zfs" ? "local-zfs" : "local-lvm"
}

serial {
id = 0
type = "socket"
}

network {
id = 0
model = "virtio"
bridge = "vmbr0"
}
Expand Down
3 changes: 3 additions & 0 deletions terraform/terraform.tfvars
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,6 @@ target_node_agent = "<agent-node-name>"
# IPs are created sequentially for all agent VMs.
# For e.g. if agent VM resource count is 3 and you've defined the IP as 192.168.0.200, the IPs will be 192.168.0.200, 192.168.0.201 and 192.168.0.202
ip_net_agent = "192.168.0.200/24"

# Defaults to lvm; change to zfs if you are using zfs
file_system = "lvm"

0 comments on commit 33ae9c7

Please sign in to comment.