Compare commits


2 Commits

Author | SHA1       | Message           | Date
Alex   | 9286c98073 | some new setup    | 2025-09-02 07:25:44 +02:00
Alex   | fdb5df5204 | add nix container | 2025-08-27 09:12:59 +02:00
15 changed files with 469 additions and 1 deletion


@@ -46,3 +46,13 @@ sudo podman run --rm -it \
-e PODMAN_SOCK=unix:///run/podman/podman.sock \
hashicorp/terraform:1.13 apply
```
## Development
### NixOS
Enter the Nix development shell with
```bash
nix develop
```
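
With the flake added below, the dev shell puts Terraform on `PATH`; a minimal sketch of the intended loop (commands only, nothing repo-specific assumed):

```bash
# Enter the dev shell defined by flake.nix, then use Terraform as usual
nix develop
terraform init
terraform plan
```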


@@ -0,0 +1,43 @@
#!/bin/bash
# Script to create an Ubuntu 22.04 cloud-init template in Proxmox

set -e

# --- CONFIG ---
VMID=9000
VM_NAME="ubuntu-22.04-cloudinit"
MEMORY=2048
CORES=2
STORAGE="local-lvm"   # Change if using different storage
BRIDGE="vmbr0"
IMG_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
IMG_FILE="/tmp/jammy-server-cloudimg-amd64.img"

echo "==> Downloading Ubuntu 22.04 cloud image..."
wget -O "$IMG_FILE" "$IMG_URL"

echo "==> Creating VM $VMID ($VM_NAME)..."
qm create $VMID --name $VM_NAME --memory $MEMORY --cores $CORES --net0 virtio,bridge=$BRIDGE

echo "==> Importing disk to $STORAGE..."
qm importdisk $VMID "$IMG_FILE" $STORAGE

echo "==> Attaching disk..."
qm set $VMID --scsihw virtio-scsi-pci --scsi0 ${STORAGE}:vm-${VMID}-disk-0

echo "==> Adding cloud-init drive..."
qm set $VMID --ide2 ${STORAGE}:cloudinit

echo "==> Setting boot options..."
qm set $VMID --boot c --bootdisk scsi0

echo "==> Enabling serial console..."
qm set $VMID --serial0 socket --vga serial0

echo "==> Converting VM $VMID to template..."
qm template $VMID

echo "==> Cleaning up..."
rm -f "$IMG_FILE"

echo "✅ Template $VM_NAME (VMID $VMID) created successfully!"

flake.lock (new file, generated)

@@ -0,0 +1,26 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1756125398,
        "narHash": "sha256-XexyKZpf46cMiO5Vbj+dWSAXOnr285GHsMch8FBoHbc=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "3b9f00d7a7bf68acd4c4abb9d43695afb04e03a5",
        "type": "github"
      },
      "original": {
        "id": "nixpkgs",
        "ref": "nixos-unstable",
        "type": "indirect"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

flake.nix (new file)

@@ -0,0 +1,21 @@
{
  description = "A flake for Terraform development";

  inputs.nixpkgs.url = "nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs }: {
    devShells.x86_64-linux.default =
      let
        system = "x86_64-linux";
        pkgs = import nixpkgs {
          inherit system;
          config.allowUnfree = true;
        };
      in
      pkgs.mkShell {
        buildInputs = with pkgs; [
          terraform
        ];
      };
  };
}
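
To move the pin recorded in flake.lock above and sanity-check the shell, something like:

```bash
# Refresh flake.lock against nixos-unstable, then verify Terraform resolves
nix flake update
nix develop -c terraform version
```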


@@ -0,0 +1,263 @@
terraform {
  required_providers {
    proxmox = {
      source  = "Telmate/proxmox"
      version = "3.0.2-rc04"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.7.2"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.38.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "3.0.2"
    }
  }
}
provider "proxmox" {
pm_api_url = var.proxmox_url
pm_user = var.proxmox_user
pm_password = var.proxmox_password
pm_tls_insecure = var.proxmox_tls_insecure
}
# ----------------------
# Generate k3s token
# ----------------------
resource "random_password" "k3s_token" {
length = 32
special = false
}
# ----------------------
# Controller VM
# ----------------------
resource "proxmox_vm_qemu" "controller" {
name = "k3s-controller"
target_node = var.target_nodes["controller"]
clone = var.template_id
full_clone = true
cores = 2
sockets = 1
memory = 4096
scsihw = "virtio-scsi-pci"
disk {
size = "20G"
storage = var.storage
type = "scsi"
}
network { bridge = var.bridge }
ipconfig0 = "ip=${var.controller_ip}/${var.netmask},gw=${var.gateway}"
ciuser = "ubuntu"
citype = "cloud-init"
sshkeys = var.ssh_public_key
cicustom = <<EOT
#cloud-config
package_update: true
packages:
- curl
runcmd:
- |
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init --token=${random_password.k3s_token.result} --write-kubeconfig-mode=644" sh -
EOT
}
# ----------------------
# Worker 1 VM
# ----------------------
resource "proxmox_vm_qemu" "worker1" {
name = "k3s-worker1"
target_node = var.target_nodes["worker1"]
clone = var.template_id
full_clone = true
cores = 2
sockets = 1
memory = 4096
scsihw = "virtio-scsi-pci"
disk { size = "20G", storage = var.storage, type = "scsi" }
network { bridge = var.bridge }
ipconfig0 = "ip=${var.worker1_ip}/${var.netmask},gw=${var.gateway}"
ciuser = "ubuntu"
citype = "cloud-init"
sshkeys = var.ssh_public_key
cicustom = <<EOT
#cloud-config
package_update: true
packages:
- curl
runcmd:
- |
curl -sfL https://${var.controller_ip}:6443 | K3S_URL=https://${var.controller_ip}:6443 K3S_TOKEN=${random_password.k3s_token.result} sh -
EOT
}
# ----------------------
# Worker 2 VM
# ----------------------
resource "proxmox_vm_qemu" "worker2" {
name = "k3s-worker2"
target_node = var.target_nodes["worker2"]
clone = var.template_id
full_clone = true
cores = 2
sockets = 1
memory = 4096
scsihw = "virtio-scsi-pci"
disk { size = "20G", storage = var.storage, type = "scsi" }
network { bridge = var.bridge }
ipconfig0 = "ip=${var.worker2_ip}/${var.netmask},gw=${var.gateway}"
ciuser = "ubuntu"
citype = "cloud-init"
sshkeys = var.ssh_public_key
cicustom = <<EOT
#cloud-config
package_update: true
packages:
- curl
runcmd:
- |
curl -sfL https://${var.controller_ip}:6443 | K3S_URL=https://${var.controller_ip}:6443 K3S_TOKEN=${random_password.k3s_token.result} sh -
EOT
}
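
After `terraform apply`, a rough smoke test of the bring-up, assuming SSH with the injected key and the IPs from terraform.tfvars (192.168.10.10 is the controller):

```bash
# All three nodes should eventually report Ready on the controller
ssh ubuntu@192.168.10.10 'kubectl get nodes -o wide'
```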
# ----------------------
# Kubernetes & Helm Providers (after cluster is ready)
# ----------------------
provider "kubernetes" {
config_path = "~/.kube/config"
}
provider "helm" {
kubernetes {
config_path = "~/.kube/config"
}
}
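
Both providers read `~/.kube/config`, which won't exist until the kubeconfig is pulled off the controller; a sketch of that manual step (IP from terraform.tfvars):

```bash
# k3s writes its kubeconfig to /etc/rancher/k3s/k3s.yaml (mode 644 per the install flags)
scp ubuntu@192.168.10.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config
sed -i 's/127.0.0.1/192.168.10.10/' ~/.kube/config
kubectl get nodes
```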
# ----------------------
# Namespaces
# ----------------------
resource "kubernetes_namespace" "infra" { metadata { name = "infra" } }
resource "kubernetes_namespace" "devops" { metadata { name = "devops" } }
resource "kubernetes_namespace" "monitoring" { metadata { name = "monitoring" } }
# ----------------------
# Ingress
# ----------------------
resource "helm_release" "nginx_ingress" {
name = "nginx-ingress"
namespace = kubernetes_namespace.infra.metadata[0].name
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
version = "4.10.0"
}
# ----------------------
# cert-manager
# ----------------------
resource "helm_release" "cert_manager" {
name = "cert-manager"
namespace = kubernetes_namespace.infra.metadata[0].name
repository = "https://charts.jetstack.io"
chart = "cert-manager"
version = "v1.15.1"
values = [<<EOT
installCRDs: true
EOT
]
depends_on = [helm_release.nginx_ingress]
}
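
`installCRDs` only installs the CRDs; issuing certificates still needs an Issuer or ClusterIssuer. The GitLab chart's `configureCertmanager` covers its own hosts; for anything else, a minimal self-signed ClusterIssuer sketch (the name is hypothetical):

```bash
kubectl apply -f - <<EOF
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned
spec:
  selfSigned: {}
EOF
```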
# ----------------------
# CoreDNS authoritative for lab.local
# ----------------------
resource "helm_release" "coredns" {
name = "coredns-ext"
namespace = kubernetes_namespace.infra.metadata[0].name
repository = "https://coredns.github.io/helm"
chart = "coredns"
version = "1.30.0"
values = [file("${path.module}/values/coredns-values.yaml")]
}
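
Once the release is up (values in values/coredns-values.yaml further down; as written, the lab.local block mostly forwards upstream), a quick end-to-end check of the exposed resolver. The service name is an assumption based on the chart's `<release>-coredns` naming:

```bash
# Grab the CoreDNS LoadBalancer IP, then confirm the resolver answers
DNS_IP=$(kubectl -n infra get svc coredns-ext-coredns -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
dig @"$DNS_IP" example.com
```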
# ----------------------
# NTP Deployment
# ----------------------
resource "kubernetes_deployment" "ntp" {
metadata { name = "ntp-server", namespace = kubernetes_namespace.infra.metadata[0].name }
spec {
replicas = 1
selector { match_labels = { app = "ntp-server" } }
template {
metadata { labels = { app = "ntp-server" } }
spec {
container {
name = "ntp"
image = "cturra/ntp:latest"
port { container_port = 123, protocol = "UDP" }
}
}
}
}
}
resource "kubernetes_service" "ntp" {
metadata { name = "ntp-service", namespace = kubernetes_namespace.infra.metadata[0].name }
spec {
type = "LoadBalancer"
port { port = 123, target_port = 123, protocol = "UDP" }
selector = { app = "ntp-server" }
}
}
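
A quick way to confirm the pod answers once the service has an external IP (`ntpdate` is an assumption; any SNTP client works):

```bash
NTP_IP=$(kubectl -n infra get svc ntp-service -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
ntpdate -q "$NTP_IP"
```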
# ----------------------
# GitLab Helm Release
# ----------------------
resource "helm_release" "gitlab" {
name = "gitlab"
namespace = kubernetes_namespace.devops.metadata[0].name
repository = "https://charts.gitlab.io/"
chart = "gitlab"
version = "7.7.0"
values = [file("${path.module}/values/gitlab-values.yaml")]
depends_on = [helm_release.nginx_ingress, helm_release.cert_manager]
}
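
GitLab takes a while to converge on 4 GiB nodes; once the pods settle, the chart stores the initial root password in a release-scoped secret (the name below follows the chart's `<release>-gitlab-initial-root-password` convention):

```bash
kubectl -n devops get pods
kubectl -n devops get secret gitlab-gitlab-initial-root-password \
  -o jsonpath='{.data.password}' | base64 -d; echo
```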
# ----------------------
# Logging Stack
# ----------------------
resource "helm_release" "loki" {
name = "loki"
namespace = kubernetes_namespace.monitoring.metadata[0].name
repository = "https://grafana.github.io/helm-charts"
chart = "loki"
version = "5.41.4"
values = [file("${path.module}/values/loki-values.yaml")]
}
resource "helm_release" "promtail" {
name = "promtail"
namespace = kubernetes_namespace.monitoring.metadata[0].name
repository = "https://grafana.github.io/helm-charts"
chart = "promtail"
version = "6.15.5"
values = [file("${path.module}/values/promtail-values.yaml")]
depends_on = [helm_release.loki]
}
resource "helm_release" "grafana" {
name = "grafana"
namespace = kubernetes_namespace.monitoring.metadata[0].name
repository = "https://grafana.github.io/helm-charts"
chart = "grafana"
version = "7.3.9"
values = [file("${path.module}/values/grafana-values.yaml")]
depends_on = [helm_release.loki]
}
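
Until lab DNS is in place, a port-forward is the quickest way into Grafana; credentials come from values/grafana-values.yaml below (the chart's service listens on port 80 by default):

```bash
kubectl -n monitoring port-forward svc/grafana 3000:80
# then browse http://localhost:3000 and log in as admin / changeme
```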


@@ -0,0 +1,34 @@
# --------------------------
# Proxmox Connection
# --------------------------
proxmox_url = "https://pve.example.com:8006/api2/json"
proxmox_user = "root@pam"
proxmox_password = "YourProxmoxPassword"
proxmox_tls_insecure = true
# --------------------------
# Template & Target Nodes
# --------------------------
template_id = "100" # ID of your cloud-init Ubuntu template in Proxmox
target_nodes = {
  controller = "pve1"
  worker1    = "pve2"
  worker2    = "pve3"
}
# --------------------------
# SSH Keys for Cloud-Init
# --------------------------
ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD..." # Replace with your public key
# --------------------------
# Networking
# --------------------------
bridge = "vmbr0"
storage = "local-lvm"
controller_ip = "192.168.10.10"
worker1_ip = "192.168.10.11"
worker2_ip = "192.168.10.12"
netmask = "24"
gateway = "192.168.10.1"
vm_domain = "lab.local"
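
Since terraform.tfvars is auto-loaded and carries a plaintext password, one option is to drop `proxmox_password` from the file and feed it through the environment instead:

```bash
# Terraform picks up TF_VAR_<name> environment variables as input variables
export TF_VAR_proxmox_password='<your-password>'
terraform plan
```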


@@ -0,0 +1,13 @@
servers:
  - zones:
      - zone: lab.local.
    plugins:
      - name: errors
      - name: health
      - name: ready
      - name: cache
        parameters: 30
      - name: forward
        parameters: . 8.8.8.8 1.1.1.1
service:
  type: LoadBalancer


@@ -0,0 +1,19 @@
global:
  hosts:
    domain: lab.local
    externalIP: 192.168.10.10   # controller/ingress IP
  ingress:
    configureCertmanager: true
    class: nginx
certmanager:
  install: false
nginx-ingress:
  enabled: false
prometheus:
  install: false
redis:
  install: true
postgresql:
  install: true
minio:
  install: false
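
With `domain: lab.local` the chart publishes GitLab at `gitlab.lab.local`; until the CoreDNS zone serves that record, a local hosts entry against the `externalIP` above is a quick workaround:

```bash
echo '192.168.10.10 gitlab.lab.local' | sudo tee -a /etc/hosts
curl -kI https://gitlab.lab.local
```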


@@ -0,0 +1,12 @@
adminUser: admin
adminPassword: changeme
service:
  type: LoadBalancer
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Loki
        type: loki
        url: http://loki.monitoring.svc.cluster.local:3100
        isDefault: true


@@ -0,0 +1,2 @@
loki:
  auth_enabled: false


@@ -0,0 +1,25 @@
variable "proxmox_url" { type = string }
variable "proxmox_user" { type = string }
variable "proxmox_password" { type = string; sensitive = true }
variable "proxmox_tls_insecure" { type = bool; default = true }
variable "template_id" { type = string }
variable "target_nodes" {
type = map(string)
default = {
controller = "pve1"
worker1 = "pve2"
worker2 = "pve3"
}
}
variable "ssh_public_key" { type = string }
variable "bridge" { type = string; default = "vmbr0" }
variable "storage" { type = string; default = "local-lvm" }
variable "controller_ip" { type = string }
variable "worker1_ip" { type = string }
variable "worker2_ip" { type = string }
variable "netmask" { type = string; default = "24" }
variable "gateway" { type = string }
variable "vm_domain" { type = string; default = "lab.local" }