# Creating an AWS EKS Cluster in Terraform

# All availability zones in the current region; local.azs keeps the first three.
data "aws_availability_zones" "available" {}

# Short-lived authentication token for the cluster, exposed through the
# eks_token output so root configurations can wire up the kubernetes/helm providers.
data "aws_eks_cluster_auth" "eks" {
  name = local.eks_cluster_name
}

# Account ID of the active credentials; used to assemble IAM role ARNs for
# service-account annotations. Labels quoted for consistency with the rest of
# the file and `terraform fmt` convention.
data "aws_caller_identity" "current" {}

# Current region, passed to the AWS Load Balancer controller chart as "region".
# Labels quoted for consistency with the rest of the file.
data "aws_region" "current" {}

// Private subnets tagged for internal load balancers. At least two such
// subnets must be present for the AWS Load Balancer Kubernetes controller to work.
data "aws_subnets" "eks_lb_private" {
  tags = {
    "kubernetes.io/role/internal-elb" = "1"
  }
  filter {
    name   = "vpc-id"
    values = [var.vpc_id]
  }
}

// Public subnets tagged for internet-facing load balancers. At least two such
// subnets must be present for the AWS Load Balancer Kubernetes controller to work.
data "aws_subnets" "eks_lb_public" {
  tags = {
    "kubernetes.io/role/elb" = "1"
  }
  filter {
    name   = "vpc-id"
    values = [var.vpc_id]
  }
}
# EKS cluster running entirely on Fargate, built with the community
# terraform-aws-modules/eks module (v19.x).
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.0"

  cluster_name                   = local.eks_cluster_name
  cluster_version                = "1.26"
  # API endpoint reachable from the public internet.
  cluster_endpoint_public_access = true


  cluster_addons = {
    coredns = {
      addon_version = "v1.9.3-eksbuild.3"
      configuration_values = jsonencode({
        # CoreDNS pods are scheduled on Fargate (no managed node groups here).
        computeType = "Fargate"
        # Ensure that we fully utilize the minimum amount of resources that are supplied by
        # Fargate https://docs.aws.amazon.com/eks/latest/userguide/fargate-pod-configuration.html
        # Fargate adds 256 MB to each pod's memory reservation for the required Kubernetes
        # components (kubelet, kube-proxy, and containerd). Fargate rounds up to the following
        # compute configuration that most closely matches the sum of vCPU and memory requests in
        # order to ensure pods always have the resources that they need to run.
        resources = {
          limits = {
            cpu = "0.25"
            # We are targeting the smallest Task size of 512Mb, so we subtract 256Mb from the
            # request/limit to ensure we can fit within that task
            memory = "256M"
          }
          requests = {
            cpu = "0.25"
            # We are targeting the smallest Task size of 512Mb, so we subtract 256Mb from the
            # request/limit to ensure we can fit within that task
            memory = "256M"
          }
        }
      })
    }
    kube-proxy = {
      addon_version = "v1.26.4-eksbuild.1"
    }
    vpc-cni = {
      addon_version= "v1.12.6-eksbuild.1"
    }
  }

  # One kube-system Fargate profile per app subnet/AZ, plus a catch-all
  # "default" profile matching every other namespace.
  # NOTE(review): local.azs holds at most three AZs, so this indexing assumes
  # var.app_subnet_ids has no more than three entries — verify in callers.
  fargate_profiles = merge(
    {
      for i in range(length(var.app_subnet_ids)) :
      # e.g. "us-east-1a" yields the profile name "kube-system-1a"
      "kube-system-${element(split("-", local.azs[i]), 2)}" => {
        selectors = [
          { namespace = "kube-system" }
        ]
        # We want to create a profile per AZ for high availability
        subnet_ids = [element(var.app_subnet_ids, i)]
      }
    },
    {
      default = {
        name = "default"
        selectors = [
          {
            namespace = "*"
          }
        ]
      }
    }
  )

  # Encrypt Kubernetes secrets with a caller-supplied KMS key instead of
  # letting the module create its own key.
  create_kms_key = false
  cluster_encryption_config = {
    provider_key_arn = var.kms_key_arn
    resources        = ["secrets"]
  }

  # Extra policy attached to the cluster IAM role, defined in
  # aws_iam_policy.eks_lb below.
  iam_role_additional_policies = {
    "load-balancer" = aws_iam_policy.eks_lb.arn
  }


  # Control-plane logging disabled (empty list).
  cluster_enabled_log_types = []

  vpc_id     = var.vpc_id
  subnet_ids = var.app_subnet_ids
  # aws-auth configmap — requires a configured kubernetes provider.
  manage_aws_auth_configmap = true


  tags = merge(local.default_tags, {
    "EKS-Cluster" = local.eks_cluster_name
  })

  cluster_tags = merge(local.default_tags, {
    "EKS-Cluster" = local.eks_cluster_name
  })

  cloudwatch_log_group_kms_key_id        = var.kms_key_arn
  cloudwatch_log_group_retention_in_days = coalesce(var.cloudwatch_log_group_retention_in_days, var.is_prod ? 30 : 14)


  # Grant the supplied IAM roles cluster-admin ("system:masters") via aws-auth.
  aws_auth_roles = [for k, v in var.eks_system_master_role_names :
    {
      rolearn  = data.aws_iam_role.system_master_role[k].arn
      username = data.aws_iam_role.system_master_role[k].name
      groups   = ["system:masters"]
    }
  ]
}


# Installs the AWS Load Balancer controller chart into kube-system, reusing the
# pre-created, IRSA-annotated service account rather than letting the chart
# create one.
resource "helm_release" "load_balancer_controller" {
  name       = "aws-load-balancer-controller"
  namespace  = "kube-system"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-load-balancer-controller"

  set {
    name  = "clusterName"
    value = local.eks_cluster_name
  }

  # The service account already exists (kubernetes_service_account_v1.aws_load_balancer).
  set {
    name  = "serviceAccount.create"
    value = "false"
  }

  set {
    name  = "serviceAccount.name"
    value = kubernetes_service_account_v1.aws_load_balancer.metadata[0].name
  }

  set {
    name  = "region"
    value = data.aws_region.current.name
  }

  set {
    name  = "vpcId"
    value = var.vpc_id
  }
}
# Look up each IAM role that should receive system:masters access; consumed by
# the aws_auth_roles argument of module.eks.
data "aws_iam_role" "system_master_role" {
  for_each = var.eks_system_master_role_names
  name     = each.value
}


# IRSA role for the VPC CNI addon.
module "vpc_cni_irsa_role" {
  source    = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  # NOTE(review): "vpn-cni" looks like a typo for "vpc-cni", but renaming the
  # role would destroy and recreate it — left unchanged.
  role_name = "vpn-cni"

  attach_vpc_cni_policy = true

  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      # Must match the service account actually created by
      # kubernetes_service_account_v1.vpc_cni, whose name is this module's
      # role name ("vpn-cni"). The previous entry "kube-system:vpc_cni"
      # matched no service account, so the role could never be assumed.
      namespace_service_accounts = ["kube-system:vpn-cni"]
    }
  }
}

# IRSA role for the AWS Load Balancer controller; trust policy is scoped to
# the service account created in kubernetes_service_account_v1.aws_load_balancer.
module "load_balancer_irsa_role" {
  source    = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  role_name = local.aws_load_balancer_controller_iam_role_name

  attach_load_balancer_controller_policy = true
  attach_load_balancer_controller_targetgroup_binding_only_policy = true
  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      # "<namespace>:<name>" of the controller's service account.
      namespace_service_accounts = ["${kubernetes_service_account_v1.aws_load_balancer.metadata[0].namespace}:${kubernetes_service_account_v1.aws_load_balancer.metadata[0].name}"]
    }
  }
}

# IRSA role for the EBS CSI driver. "kube-system:ebs-csi" matches the service
# account created in kubernetes_service_account_v1.ebs_csi (named after this
# role).
module "ebs_irsa_role" {
  source    = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  role_name = "ebs-csi"

  attach_ebs_csi_policy = true
  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:ebs-csi"]
    }
  }
}


# Customer-managed policy attached to the cluster role via
# iam_role_additional_policies; the JSON document ships with this module.
resource "aws_iam_policy" "eks_lb" {
  name   = "${local.eks_cluster_name}-lb"
  policy = file("${path.module}/files/eks-load-balancer-policy.json")
  tags   = local.default_tags
}

# Service account for the AWS Load Balancer controller chart.
resource "kubernetes_service_account_v1" "aws_load_balancer" {
  metadata {
    namespace = "kube-system"
    name = local.aws_load_balancer_controller_iam_role_name
    # The role ARN is assembled by hand rather than read from
    # module.load_balancer_irsa_role, because that module references this
    # service account — using its output here would create a dependency cycle.
    annotations  = {
      "eks.amazonaws.com/role-arn" = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${local.aws_load_balancer_controller_iam_role_name}"
    }
  }

}

# Service account for the VPC CNI addon, annotated with its IRSA role.
resource "kubernetes_service_account_v1" "vpc_cni" {
  metadata {
    namespace = "kube-system"
    name      = module.vpc_cni_irsa_role.iam_role_name
    annotations = {
      # Use the module's ARN output directly instead of reassembling the ARN
      # by hand — stays correct even if the role path or partition differs.
      "eks.amazonaws.com/role-arn" = module.vpc_cni_irsa_role.iam_role_arn
    }
  }
}

# Service account for the EBS CSI driver, annotated with its IRSA role.
resource "kubernetes_service_account_v1" "ebs_csi" {
  metadata {
    namespace = "kube-system"
    name      = module.ebs_irsa_role.iam_role_name
    annotations = {
      # Use the module's ARN output directly instead of reassembling the ARN
      # by hand — stays correct even if the role path or partition differs.
      "eks.amazonaws.com/role-arn" = module.ebs_irsa_role.iam_role_arn
    }
  }
}
locals {
  # Tags applied to every resource; caller-supplied tags win on conflicts.
  default_tags = merge({
    Terraform       = true
    Environment     = var.environment_name
    TerraformModule = "eks"
  }, var.additional_tags)

  eks_cluster_name = "${var.name}-${var.environment_name}"

  # Use up to the first three availability zones. min() guards against regions
  # that expose fewer than three AZs, where a fixed slice(…, 0, 3) would error.
  azs = slice(data.aws_availability_zones.available.names, 0, min(3, length(data.aws_availability_zones.available.names)))

  # Shared between the IRSA role, the service account, and its annotation.
  aws_load_balancer_controller_iam_role_name = "aws-load-balancer-controller"
}
terraform {
  required_version = ">= 1.0.0"
  required_providers {
    # Declare explicit sources (previously empty blocks relied on the implicit
    # hashicorp/ default — same providers, now stated explicitly).
    aws = {
      source = "hashicorp/aws"
    }
    kubernetes = {
      source = "hashicorp/kubernetes"
    }
    helm = {
      source = "hashicorp/helm"
    }
  }
}

# Cluster API endpoint, consumed by the root kubernetes/helm providers.
output "eks_cluster_endpoint" {
  value = module.eks.cluster_endpoint
}

# Decoded cluster CA certificate for TLS verification by API clients.
output "eks_cluster_ca_certificate" {
  value = base64decode(module.eks.cluster_certificate_authority_data)
}

# Short-lived bearer token for cluster authentication.
output "eks_token" {
  value = data.aws_eks_cluster_auth.eks.token
  # Credential material — keep it out of CLI output and plan diffs.
  sensitive = true
}
variable "name" {
  description = "Base name of the cluster; combined with environment_name as \"<name>-<environment_name>\"."
  type        = string
}

variable "environment_name" {
  description = "Environment name, used in the cluster name and in default tags."
  type        = string
}

variable "vpc_id" {
  description = "ID of the VPC hosting the cluster, its subnets, and load balancers."
  type        = string
}

variable "app_subnet_ids" {
  description = "Subnet IDs for the cluster and Fargate profiles; one kube-system profile is created per subnet (expected one subnet per AZ, max three)."
  type        = list(string)
}

variable "additional_tags" {
  description = "Extra tags merged into the module's default tags (take precedence on conflicts)."
  type        = map(string)
  default     = {}
}

variable "is_prod" {
  description = "Whether this is a production environment; selects the default CloudWatch log retention (30 days vs 14)."
  type        = bool
}

variable "kms_key_arn" {
  description = "KMS key ARN used to encrypt Kubernetes secrets and the cluster's CloudWatch log group."
  type        = string
}


variable "eks_system_master_role_names" {
  description = "Map of IAM role names granted system:masters access via the aws-auth configmap (keys are arbitrary labels)."
  type        = map(string)
  default     = {}
}

variable "cloudwatch_log_group_retention_in_days" {
  description = "Override for log retention; when null, defaults to 30 days in prod and 14 otherwise."
  type        = number
  default     = null
}
# NOTE(review): "..." is a redacted placeholder — set a real AWS CLI profile
# (or drop the attribute to use the default credential chain) before applying.
provider "aws" {
  profile = "..."
}

# Root-level kubernetes provider wired to the wrapper module's outputs
# (eks_cluster_endpoint / eks_cluster_ca_certificate / eks_token).
provider "kubernetes" {
  host                   = module.eks.eks_cluster_endpoint
  cluster_ca_certificate = module.eks.eks_cluster_ca_certificate
  token                  = module.eks.eks_token
}

# Helm provider pointed at the same cluster as the kubernetes provider above.
provider "helm" {
  kubernetes {
    host                   = module.eks.eks_cluster_endpoint
    cluster_ca_certificate = module.eks.eks_cluster_ca_certificate
    token                  = module.eks.eks_token
  }
}

# Example root-module usage. NOTE(review): this block reuses the label of
# module "eks" above; in a real layout it belongs in a separate root
# configuration that sources the module directory. "..." values are redacted
# placeholders — substitute real IDs/ARNs before applying.
module "eks" {
  source                       = "../../modules/eks"
  # Attribute name restored from the redaction: the module's only required
  # list(string) input is app_subnet_ids.
  app_subnet_ids               = ["subnet-...", "subnet-...", "subnet-03d3c93c777f20c71"]
  environment_name             = "non-prod"
  vpc_id                       = "vpc-..."
  is_prod                      = false
  kms_key_arn                  = "arn:aws:kms:..."
  name                         = "eks"
  eks_system_master_role_names = { "Administrator" = "Administrator" }
}

# (End of configuration — blog page footer removed; it was not part of the Terraform code.)