Skip to content

文章发布较早,内容可能过时,阅读注意甄别。

实战-terraform方式部署k8s集群-20240531(测试成功)

实战:terraform方式部署k8s集群-2024.5.31(测试成功)

参考文档

《00-devops8基础环境配置(重要)》 https://onedayxyy.cn/docs/devops8-basic-test-environment

image-20240526144235306

image-20240531025303781

此文档步骤详细,安装包完整,可测试成功。

kind.tf内容

bash
[root@devops8 k8s]#cat kind.tf 
# Providers required by this Terraform configuration:
# - tehcyx/kind: manages local kind (Kubernetes-in-Docker) clusters
# - hashicorp/null: used below for the local-exec ingress bootstrap step
terraform {
  required_providers {
    kind = {
      source  = "tehcyx/kind"
      version = "0.0.12"
    }
    null = {
      source  = "hashicorp/null"
      version = "3.1.1"
    }
  }
}

# The kind provider needs no additional configuration here.
provider "kind" {}

# Filesystem path where the generated kubeconfig will be written.
# NOTE: the default overwrites the user's existing ~/.kube/config.
variable "kind_cluster_config_path" {
  type    = string
  default = "~/.kube/config"
}

# Expose the cluster kubeconfig as an output.
# Marked sensitive because the kubeconfig embeds client certificates and
# keys; Terraform would otherwise print them in plaintext on every apply.
# Retrieve it explicitly with: terraform output -raw kubeconfig
output "kubeconfig" {
  value     = kind_cluster.default.kubeconfig
  sensitive = true
}

# k8s cluster definition
resource "kind_cluster" "default" {
  name            = "devopscluster"                          # cluster name
  node_image      = "kindest/node:v1.24.0"                   # kind node image
  kubeconfig_path = pathexpand(var.kind_cluster_config_path) # kubeconfig path
  wait_for_ready  = true                                     # wait until cluster nodes are Ready

  # kind cluster configuration (kind.x-k8s.io/v1alpha4 schema)
  kind_config {
    kind        = "Cluster"
    api_version = "kind.x-k8s.io/v1alpha4"

    # apiServerAddress is a kind Cluster-level networking setting, not a
    # kubeadm field — the original placed it inside the kubeadm
    # InitConfiguration patch, where it was silently ignored.
    networking {
      api_server_address = "0.0.0.0"
    }

    # control-plane node
    node {
      role = "control-plane"
      # FIX: imageRepository and networking.serviceSubnet belong to
      # kubeadm ClusterConfiguration, while nodeRegistration belongs to
      # InitConfiguration. The original put everything under
      # InitConfiguration, so the ClusterConfiguration fields were ignored.
      kubeadm_config_patches = [
        <<-EOT
          kind: ClusterConfiguration
          imageRepository: registry.aliyuncs.com/google_containers
          networking:
            serviceSubnet: 10.0.0.0/16
          ---
          kind: InitConfiguration
          nodeRegistration:
            kubeletExtraArgs:
              node-labels: "ingress-ready=true"
          ---
          kind: KubeletConfiguration
          cgroupDriver: systemd
          cgroupRoot: /kubelet
          failSwapOn: false
        EOT
      ]

      # Host port mappings: 80/443 for the ingress controller,
      # 6443 for external access to the API server.
      extra_port_mappings {
        container_port = 80
        host_port      = 80
      }
      extra_port_mappings {
        container_port = 443
        host_port      = 443
      }
      extra_port_mappings {
        container_port = 6443
        host_port      = 6443
      }
    }

    # worker node 1
    node {
      role = "worker"
    }

    # worker node 2
    node {
      role = "worker"
    }
  }
}

# null_resource runs shell commands via local-exec.
# This step loads the ingress images into the kind cluster and deploys
# the nginx ingress controller.
# (Resource name typo "instatll" is preserved on purpose: renaming it
# would change the Terraform state address.)
resource "null_resource" "wait_for_instatll_ingress" {
  triggers = {
    # uuid() changes on every plan, so this provisioner re-runs on EVERY
    # apply — the commands below must therefore be idempotent.
    key = uuid()
  }

  provisioner "local-exec" {
    command = <<EOF
      set -e  # fail the provisioner on the first failing command, not just the last
      sleep 5
      kind load docker-image k8s.gcr.io/ingress-nginx/controller:v1.2.0 --name devopscluster
      kind load docker-image k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1 --name devopscluster
      # FIX: plain 'kubectl create ns' exits non-zero once the namespace
      # exists, which broke every apply after the first (uuid trigger).
      # The dry-run | apply form is idempotent.
      kubectl create ns ingress-nginx --dry-run=client -o yaml | kubectl apply -f -
      kubectl apply -f ingress.yaml -n ingress-nginx
      printf "\nWaiting for the nginx ingress controller...\n"
      kubectl wait --namespace ingress-nginx \
        --for=condition=ready pod \
        --selector=app.kubernetes.io/component=controller \
        --timeout=90s
    EOF
  }

  depends_on = [kind_cluster.default]
}

结果

bash
[root@devops8 ~]#kind get clusters
devopscluster
[root@devops8 ~]#kubectl get node
NAME                          STATUS   ROLES           AGE     VERSION
devopscluster-control-plane   Ready    control-plane   7h14m   v1.24.0
devopscluster-worker          Ready    <none>          7h14m   v1.24.0
devopscluster-worker2         Ready    <none>          7h14m   v1.24.0
[root@devops8 ~]#kubectl get po -A
NAMESPACE            NAME                                                  READY   STATUS      RESTARTS   AGE
ingress-nginx        ingress-nginx-admission-create-kv69f                  0/1     Completed   0          7h14m
ingress-nginx        ingress-nginx-admission-patch-2swjx                   0/1     Completed   0          7h14m
ingress-nginx        ingress-nginx-controller-59c96b9cb7-95rd6             1/1     Running     0          7h14m
kube-system          coredns-6d4b75cb6d-czwnm                              1/1     Running     0          7h14m
kube-system          coredns-6d4b75cb6d-scvnz                              1/1     Running     0          7h14m
kube-system          etcd-devopscluster-control-plane                      1/1     Running     0          7h15m
kube-system          kindnet-7dmf6                                         1/1     Running     0          7h14m
kube-system          kindnet-d2x64                                         1/1     Running     0          7h14m
kube-system          kindnet-mc4qm                                         1/1     Running     0          7h14m
kube-system          kube-apiserver-devopscluster-control-plane            1/1     Running     0          7h15m
kube-system          kube-controller-manager-devopscluster-control-plane   1/1     Running     0          7h15m
kube-system          kube-proxy-bvf7d                                      1/1     Running     0          7h14m
kube-system          kube-proxy-lbt7f                                      1/1     Running     0          7h14m
kube-system          kube-proxy-s2lsg                                      1/1     Running     0          7h14m
kube-system          kube-scheduler-devopscluster-control-plane            1/1     Running     0          7h15m
kube-system          nfs-subdir-external-provisioner-5846f745b4-8sx2n      1/1     Running     0          7h13m
local-path-storage   local-path-provisioner-9cd9bd544-s2grg                1/1     Running     0          7h14m
[root@devops8 ~]#
最近更新