diff --git a/examples/eks-lab-ip6/eks/.terraform.lock.hcl b/examples/eks-lab-ip6/eks/.terraform.lock.hcl new file mode 100644 index 0000000..5e1c159 --- /dev/null +++ b/examples/eks-lab-ip6/eks/.terraform.lock.hcl @@ -0,0 +1,10 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.55.0" + constraints = ">= 3.25.0" + hashes = [ + "h1:VHfmrKCb4oTW/+rWGKKqipoMOPd4tPxlGwMp0/Flx/s=", + ] +} diff --git a/examples/eks-lab-ip6/eks/.terraform/providers/registry.terraform.io/hashicorp/aws/4.55.0/linux_amd64 b/examples/eks-lab-ip6/eks/.terraform/providers/registry.terraform.io/hashicorp/aws/4.55.0/linux_amd64 new file mode 120000 index 0000000..612bb8d --- /dev/null +++ b/examples/eks-lab-ip6/eks/.terraform/providers/registry.terraform.io/hashicorp/aws/4.55.0/linux_amd64 @@ -0,0 +1 @@ +/home/kn/.terraform.d/plugin-cache/registry.terraform.io/hashicorp/aws/4.55.0/linux_amd64 \ No newline at end of file diff --git a/examples/eks-lab-ip6/eks/README.md b/examples/eks-lab-ip6/eks/README.md new file mode 100644 index 0000000..e22ef16 --- /dev/null +++ b/examples/eks-lab-ip6/eks/README.md @@ -0,0 +1,119 @@ +# eks-lab/eks +This layer creates the following resources: +- EKS cluster using ipv6 for the service network +- EKS nodegroup +- EKS bastion +- eksctl, kubectl, awscliv2 and helm installed on the EKS bastion via a user_data script + +Be patient: the EKS cluster takes about 12 min to provision, the node group another 5 min, and the cluster addons another ?? min. + +## Worker node instance size +Choose t3.large at the minimum. This is due to AWS's per-instance limit on the number of IPs: smaller instances are limited to 6 IPs, +which is not enough. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI + +## How to use eksctl and kubectl +By default, an EKS cluster is created with an aws-auth configmap that allows only the cluster creator +to work with the cluster. Therefore, one must first assume the creator IAM role before running eksctl or kubectl. +For example, to create a kubeconfig, run these commands: + +```bash +export AWS_ACCESS_KEY_ID=xxxx AWS_SECRET_ACCESS_KEY="yyyy" AWS_DEFAULT_REGION=ap-northeast-1 +aws eks update-kubeconfig --name lab-apne1-xpk-iac-cluster01 +``` + +## Edit configmap/aws-auth +``` +kubectl edit -n kube-system configmap/aws-auth +``` +Add a group with the system:masters role +```yaml +apiVersion: v1 +data: + mapRoles: | + - groups: + - system:bootstrappers + - system:nodes + rolearn: arn:aws:iam::040216112220:role/clusterCreator + username: system:node:{{EC2PrivateDNSName}} + - groups: + - system:masters + rolearn: arn:aws:iam::040216112220:role/lab-apne1-xpk-iac-bast-role + username: lab-apne1-xpk-iac-bast-role +kind: ConfigMap +metadata: + creationTimestamp: "2022-12-29T11:02:15Z" + name: aws-auth + namespace: kube-system + resourceVersion: "59670" + uid: 7cf9d889-8ed2-4c8d-ac0f-092184cede8a +``` + +## Addon updates +When updating addons, please select advanced options and choose preserve settings.
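The same preserve behaviour can be applied from the CLI instead of the console. A minimal sketch, reusing the cluster name and the coredns addon version from this example (adjust both to whatever you are actually running):

```bash
# Update the coredns addon to a pinned version and keep any customised
# settings rather than overwriting them (the CLI equivalent of choosing
# "preserve settings" under advanced options in the console).
aws eks update-addon \
  --cluster-name lab-apne1-xpk-iac-cluster01 \
  --addon-name coredns \
  --addon-version v1.9.3-eksbuild.2 \
  --resolve-conflicts PRESERVE

# Poll until the addon reports ACTIVE again.
aws eks describe-addon \
  --cluster-name lab-apne1-xpk-iac-cluster01 \
  --addon-name coredns \
  --query 'addon.status'
```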
+ +## Install ALB ingress controller +AWS provides documentation on how to deploy a sample application with an ALB ingress: +https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html + +That depends on the AWS Load Balancer Controller. First, download and create the IAM policy the controller needs: + +```bash +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.4/docs/install/iam_policy.json + +aws iam create-policy \ +--policy-name AWSLoadBalancerControllerIAMPolicy \ +--policy-document file://iam_policy.json + +``` +Create an OpenID Connect provider in IAM: +https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html + +Then create the service account and install the controller with helm: + +``` +eksctl create iamserviceaccount \ + --cluster=lab-apne1-xpk-iac-cluster01 \ + --namespace=kube-system \ + --name=aws-load-balancer-controller \ + --role-name AmazonEKSLoadBalancerControllerRole \ + --attach-policy-arn=arn:aws:iam::040216112220:policy/AWSLoadBalancerControllerIAMPolicy \ + --approve + +helm repo add eks https://aws.github.io/eks-charts +helm repo update +helm install aws-load-balancer-controller eks/aws-load-balancer-controller \ +-n kube-system \ +--set clusterName=lab-apne1-xpk-iac-cluster01 \ +--set serviceAccount.create=false \ +--set serviceAccount.name=aws-load-balancer-controller +``` + +## Tag subnets +Reference: https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html + +The following tags are set in the network layer: + +On private subnets: +Key – kubernetes.io/role/internal-elb +Value – 1 + +On public subnets: +Key – kubernetes.io/role/elb +Value – 1 + + +## Install the sample app: the 2048 game +See https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html +```bash +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.4/docs/examples/2048/2048_full.yaml +# edit the file as needed +kubectl apply -f 2048_full.yaml +kubectl get ingress/ingress-2048 -n game-2048 +``` + +After a moment, the load balancer address should be displayed: +```bash +root@ip-192-168-123-187:~# kubectl get ingress/ingress-2048 -n game-2048 +NAME CLASS HOSTS ADDRESS PORTS AGE +ingress-2048 alb * internal-k8s-game2048-ingress2-5f196824a1-20502803.ap-northeast-1.elb.amazonaws.com 80 7s +``` + + diff --git a/examples/eks-lab-ip6/eks/eks-node-sshkey b/examples/eks-lab-ip6/eks/eks-node-sshkey new file mode 100644 index 0000000..2cc109d --- /dev/null +++ b/examples/eks-lab-ip6/eks/eks-node-sshkey @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACDQnEGn3cwEav+pMKXYvP3KjDYpB+Po/wpcrmQZnh31wgAAAJDu9hUF7vYV +BQAAAAtzc2gtZWQyNTUxOQAAACDQnEGn3cwEav+pMKXYvP3KjDYpB+Po/wpcrmQZnh31wg +AAAEBcvMSW9eqRM2Kd3obuJfHma+nzrsMiRSHO09wjSg4KF9CcQafdzARq/6kwpdi8/cqM +NikH4+j/ClyuZBmeHfXCAAAADWtuQGlzbS56b28ubG8= +-----END OPENSSH PRIVATE KEY----- diff --git a/examples/eks-lab-ip6/eks/eks-node-sshkey.pub b/examples/eks-lab-ip6/eks/eks-node-sshkey.pub new file mode 100644 index 0000000..ca03027 --- /dev/null +++ b/examples/eks-lab-ip6/eks/eks-node-sshkey.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINCcQafdzARq/6kwpdi8/cqMNikH4+j/ClyuZBmeHfXC kn@ism.zoo.lo diff --git a/examples/eks-lab-ip6/eks/locals.tf b/examples/eks-lab-ip6/eks/locals.tf new file mode 100644 index 0000000..8719f76 --- /dev/null +++ b/examples/eks-lab-ip6/eks/locals.tf @@ -0,0 +1,18 @@ +data "aws_caller_identity" "this" {} + +locals { + default-tags = merge({ + ServiceProvider = "None" + Environment = var.environment + Project = var.project + Application = var.application + TerraformMode = 
"managed" + TerraformDir = "${local.path-cwd-list[length(local.path-cwd-list) - 2]}/${local.path-cwd-list[length(local.path-cwd-list) - 1]}" + + CreatedBy = data.aws_caller_identity.this.arn + BuildDate = formatdate("YYYYMMDD", timestamp()) + }) + resource-prefix = "${var.environment}-${var.aws-region-short}-${var.customer-name}-${var.project}" + path-cwd-list = split("/", path.cwd) + +} \ No newline at end of file diff --git a/examples/eks-lab-ip6/eks/main.tf b/examples/eks-lab-ip6/eks/main.tf new file mode 100644 index 0000000..5753163 --- /dev/null +++ b/examples/eks-lab-ip6/eks/main.tf @@ -0,0 +1,306 @@ +data "terraform_remote_state" "vpc" { + backend = "local" + config = { + path = "../network/terraform.tfstate" + } +} + +resource "aws_iam_role" "eks-cluster-role" { + name = "${local.resource-prefix}-cluster-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "eks.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + } + ) + managed_policy_arns = ["arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"] + tags = local.default-tags +} + +resource "aws_eks_cluster" "eks-cluster" { + name = "${local.resource-prefix}-cluster01" + role_arn = aws_iam_role.eks-cluster-role.arn + vpc_config { + subnet_ids = data.terraform_remote_state.vpc.outputs.private-subnet-ids + endpoint_private_access = true + endpoint_public_access = false + } + enabled_cluster_log_types = ["api", "audit"] + kubernetes_network_config { + ip_family = "ipv6" + } + tags = local.default-tags +} + + +resource "aws_eks_addon" "eks-addons" { + # for_each = toset(["vpc-cni", "coredns", "kube-proxy", "aws-ebs-csi-driver"]) + # latest version as on 2023-02-17 failed to deploy + for_each = { + "aws-ebs-csi-driver" : { + "version" : "v1.15.0-eksbuild.1" + }, + "vpc-cni" : { + "version" : "v1.12.2-eksbuild.1" + }, + "coredns" : { + "version" : "v1.9.3-eksbuild.2" + }, + "kube-proxy" : { + "version" : "v1.24.9-eksbuild.1" + } + } + cluster_name = aws_eks_cluster.eks-cluster.name + addon_name = each.key + addon_version = each.value["version"] +} + +resource "aws_iam_role" "eks-nodegroup-role" { + name = "${local.resource-prefix}-nodegroup-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "ec2.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + } + ) + managed_policy_arns = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" + ] + inline_policy { + name = "IP6CniAccess" + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Action" : [ + "ec2:AssignIpv6Addresses", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeInstanceTypes" + ], + "Resource" : "*" + }, + { + "Effect" : "Allow", + "Action" : [ + "ec2:CreateTags" + ], + "Resource" : [ + "arn:aws:ec2:*:*:network-interface/*" + ] + } + ] + }) + } + + inline_policy { + name = "AlbIngressAccess" + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Action" : [ + "elasticloadbalancing:*" + ], + "Resource" : "*" + } + ] + }) + } + + tags = local.default-tags +} + +data "aws_ssm_parameter" "eks_ami_release_version" { + name = 
"/aws/service/eks/optimized-ami/${aws_eks_cluster.eks-cluster.version}/amazon-linux-2/recommended/release_version" +} + +# manually generate the key: ssh-keygen -ted25519 -f eks-node-sshkey +# file() can only read pre-existing file +resource "aws_key_pair" "eks-node-sshkey" { + key_name = "${local.resource-prefix}-eks-node-sshkey" + public_key = file("${path.module}/eks-node-sshkey.pub") +} + +resource "aws_security_group" "eks-node-sg" { + name = "${local.resource-prefix}-eks-node-sg" + description = "Allow ssh to EKS nodes" + vpc_id = data.terraform_remote_state.vpc.outputs.vpc-id + + ingress { + description = "SSH from VPC" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc-cidr] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = local.default-tags +} + +resource "aws_eks_node_group" "eks-nodegroup" { + cluster_name = aws_eks_cluster.eks-cluster.name + node_group_name_prefix = "${local.resource-prefix}-eks-ng" + node_role_arn = aws_iam_role.eks-nodegroup-role.arn + subnet_ids = data.terraform_remote_state.vpc.outputs.private-subnet-ids + version = aws_eks_cluster.eks-cluster.version + release_version = nonsensitive(data.aws_ssm_parameter.eks_ami_release_version.value) + instance_types = ["t3.large"] # see README.md + scaling_config { + desired_size = 1 + max_size = 2 + min_size = 1 + } + + update_config { + max_unavailable = 1 + } + remote_access { + ec2_ssh_key = aws_key_pair.eks-node-sshkey.key_name + source_security_group_ids = [aws_security_group.eks-node-sg.id] + } + tags = local.default-tags +} + +# ec2 instance for EKS management +data "aws_ami" "ubuntu" { + most_recent = true + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-amd64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["099720109477"] # Canonical +} + +resource "aws_security_group" "eks-bast-sg" { + name = "${local.resource-prefix}-eks-bast-sg" + description = "Allow ssh to EKS bast" + vpc_id = data.terraform_remote_state.vpc.outputs.vpc-id + + ingress { + description = "SSH from VPC" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["223.18.148.85/32"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = local.default-tags +} + + +resource "aws_iam_role" "eks-bast-role" { + name = "${local.resource-prefix}-bast-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "ec2.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + }) + inline_policy { + name = "eks-bast-policy" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["eks:*", "ecr:*"] + Effect = "Allow" + Resource = "*" + }, + ] + }) + } + + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + tags = local.default-tags +} + + +resource "aws_iam_instance_profile" "eks-bast-iam-profile" { + name = "eksBastIamProfile" + role = aws_iam_role.eks-bast-role.name +} + +resource "aws_instance" "eks-bast" { + ami = data.aws_ami.ubuntu.id + instance_type = "t3.micro" + associate_public_ip_address = true + ebs_optimized = true + key_name = aws_key_pair.eks-node-sshkey.key_name + vpc_security_group_ids = [aws_security_group.eks-bast-sg.id, aws_eks_cluster.eks-cluster.vpc_config[0].cluster_security_group_id] + subnet_id = 
data.terraform_remote_state.vpc.outputs.public-subnet-ids[0] + iam_instance_profile = aws_iam_instance_profile.eks-bast-iam-profile.name + root_block_device { + volume_size = 8 + volume_type = "gp3" + tags = local.default-tags + } + tags = merge(local.default-tags, { "Name" : "${local.resource-prefix}-eks-bast" }) + user_data = < +kube-system coredns-5fc8d4cdcf-c75z6 1/1 Running 0 13m 100.64.9.249 ip-192-168-123-245.ap-northeast-1.compute.internal +kube-system coredns-5fc8d4cdcf-h5lnl 1/1 Running 0 13m 100.64.13.41 ip-192-168-123-245.ap-northeast-1.compute.internal +kube-system ebs-csi-controller-d6bff959-8459z 6/6 Running 0 13m 100.64.8.74 ip-192-168-123-245.ap-northeast-1.compute.internal +kube-system ebs-csi-controller-d6bff959-vnwlf 6/6 Running 0 5m28s 100.64.11.124 ip-192-168-123-245.ap-northeast-1.compute.internal +kube-system ebs-csi-node-h7w8r 3/3 Running 0 4m9s 100.64.11.188 ip-192-168-123-245.ap-northeast-1.compute.internal +kube-system kube-proxy-vgmdf 1/1 Running 0 4m9s 192.168.123.245 ip-192-168-123-245.ap-northeast-1.compute.internal + +``` + +## Edit configmap/aws-auth +``` +kubectl edit -n kube-system configmap/aws-auth +``` +Add a group with the system:masters role +```yaml +apiVersion: v1 +data: + mapRoles: | + - groups: + - system:bootstrappers + - system:nodes + rolearn: arn:aws:iam::040216112220:role/clusterCreator + username: system:node:{{EC2PrivateDNSName}} + - groups: + - system:masters + rolearn: arn:aws:iam::040216112220:role/lab-apne1-xpk-iac-bast-role + username: lab-apne1-xpk-iac-bast-role +kind: ConfigMap +metadata: + creationTimestamp: "2022-12-29T11:02:15Z" + name: aws-auth + namespace: kube-system + resourceVersion: "59670" + uid: 7cf9d889-8ed2-4c8d-ac0f-092184cede8a +``` + +## Addon updates +When updating addons, please select advanced options and choose preserve settings.
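For the aws-auth change described above, eksctl can add the same mapping without opening an editor (the bastion user_data installs eksctl in this lab). A minimal sketch, assuming the bastion role and cluster name used elsewhere in this example; run it as the cluster creator, since only that identity can modify aws-auth initially:

```bash
# Map the bastion role into system:masters by letting eksctl patch aws-auth.
eksctl create iamidentitymapping \
  --cluster lab-apne1-xpk-iac-cluster01 \
  --region ap-northeast-1 \
  --arn arn:aws:iam::040216112220:role/lab-apne1-xpk-iac-bast-role \
  --group system:masters \
  --username lab-apne1-xpk-iac-bast-role

# Confirm the mapping is present.
eksctl get iamidentitymapping --cluster lab-apne1-xpk-iac-cluster01 --region ap-northeast-1
```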
\ No newline at end of file diff --git a/examples/eks-lab/eks/eks-node-sshkey b/examples/eks-lab/eks/eks-node-sshkey new file mode 100644 index 0000000..2cc109d --- /dev/null +++ b/examples/eks-lab/eks/eks-node-sshkey @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACDQnEGn3cwEav+pMKXYvP3KjDYpB+Po/wpcrmQZnh31wgAAAJDu9hUF7vYV +BQAAAAtzc2gtZWQyNTUxOQAAACDQnEGn3cwEav+pMKXYvP3KjDYpB+Po/wpcrmQZnh31wg +AAAEBcvMSW9eqRM2Kd3obuJfHma+nzrsMiRSHO09wjSg4KF9CcQafdzARq/6kwpdi8/cqM +NikH4+j/ClyuZBmeHfXCAAAADWtuQGlzbS56b28ubG8= +-----END OPENSSH PRIVATE KEY----- diff --git a/examples/eks-lab/eks/eks-node-sshkey.pub b/examples/eks-lab/eks/eks-node-sshkey.pub new file mode 100644 index 0000000..ca03027 --- /dev/null +++ b/examples/eks-lab/eks/eks-node-sshkey.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINCcQafdzARq/6kwpdi8/cqMNikH4+j/ClyuZBmeHfXC kn@ism.zoo.lo diff --git a/examples/eks-lab/eks/locals.tf b/examples/eks-lab/eks/locals.tf new file mode 100644 index 0000000..8719f76 --- /dev/null +++ b/examples/eks-lab/eks/locals.tf @@ -0,0 +1,18 @@ +data "aws_caller_identity" "this" {} + +locals { + default-tags = merge({ + ServiceProvider = "None" + Environment = var.environment + Project = var.project + Application = var.application + TerraformMode = "managed" + TerraformDir = "${local.path-cwd-list[length(local.path-cwd-list) - 2]}/${local.path-cwd-list[length(local.path-cwd-list) - 1]}" + + CreatedBy = data.aws_caller_identity.this.arn + BuildDate = formatdate("YYYYMMDD", timestamp()) + }) + resource-prefix = "${var.environment}-${var.aws-region-short}-${var.customer-name}-${var.project}" + path-cwd-list = split("/", path.cwd) + +} \ No newline at end of file diff --git a/examples/eks-lab/eks/main.tf b/examples/eks-lab/eks/main.tf new file mode 100644 index 0000000..9036f17 --- /dev/null +++ b/examples/eks-lab/eks/main.tf @@ -0,0 +1,261 @@ +data "terraform_remote_state" "vpc" { + backend = "local" + config = { + path = "../network/terraform.tfstate" + } +} + +resource "aws_iam_role" "eks-cluster-role" { + name = "${local.resource-prefix}-cluster-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "eks.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + } + ) + managed_policy_arns = ["arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"] + tags = local.default-tags +} + +resource "aws_eks_cluster" "eks-cluster" { + name = "${local.resource-prefix}-cluster01" + role_arn = aws_iam_role.eks-cluster-role.arn + vpc_config { + subnet_ids = data.terraform_remote_state.vpc.outputs.private-subnet-ids + endpoint_private_access = true + endpoint_public_access = false + } + enabled_cluster_log_types = ["api", "audit"] + kubernetes_network_config { + service_ipv4_cidr = "172.16.0.0/16" + ip_family = "ipv4" + } + tags = local.default-tags +} + + +resource "aws_eks_addon" "eks-addons" { + # for_each = toset(["vpc-cni", "coredns", "kube-proxy", "aws-ebs-csi-driver"]) + # latest version as on 2023-02-17 failed to deploy + for_each = { + "aws-ebs-csi-driver" : { + "version" : "v1.15.0-eksbuild.1" + }, + "vpc-cni" : { + "version" : "v1.12.2-eksbuild.1" + }, + "coredns" : { + "version" : "v1.9.3-eksbuild.2" + }, + "kube-proxy" : { + "version" : "v1.24.9-eksbuild.2" + } + } + cluster_name = aws_eks_cluster.eks-cluster.name + addon_name = each.key + # addon_version = 
each.value["version"] + tags = local.default-tags +} + +resource "aws_iam_role" "eks-nodegroup-role" { + name = "${local.resource-prefix}-nodegroup-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "ec2.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + } + ) + managed_policy_arns = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" + ] + tags = local.default-tags +} + +data "aws_ssm_parameter" "eks_ami_release_version" { + name = "/aws/service/eks/optimized-ami/${aws_eks_cluster.eks-cluster.version}/amazon-linux-2/recommended/release_version" +} + +# manually generate the key: ssh-keygen -ted25519 -f eks-node-sshkey +# file() can only read pre-existing file +resource "aws_key_pair" "eks-node-sshkey" { + key_name = "${local.resource-prefix}-eks-node-sshkey" + public_key = file("${path.module}/eks-node-sshkey.pub") +} + +resource "aws_security_group" "eks-node-sg" { + name = "${local.resource-prefix}-eks-node-sg" + description = "Allow ssh to EKS nodes" + vpc_id = data.terraform_remote_state.vpc.outputs.vpc-id + + ingress { + description = "SSH from VPC" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc-cidr] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = local.default-tags +} + +resource "aws_eks_node_group" "eks-nodegroup" { + cluster_name = aws_eks_cluster.eks-cluster.name + node_group_name_prefix = "${local.resource-prefix}-eks-ng" + node_role_arn = aws_iam_role.eks-nodegroup-role.arn + subnet_ids = data.terraform_remote_state.vpc.outputs.private-subnet-ids + version = aws_eks_cluster.eks-cluster.version + release_version = nonsensitive(data.aws_ssm_parameter.eks_ami_release_version.value) + instance_types = ["t3.small"] + scaling_config { + desired_size = 1 + max_size = 2 + min_size = 1 + } + + update_config { + max_unavailable = 1 + } + remote_access { + ec2_ssh_key = aws_key_pair.eks-node-sshkey.key_name + source_security_group_ids = [aws_security_group.eks-node-sg.id] + } + tags = local.default-tags +} + +# ec2 instance for EKS management +data "aws_ami" "ubuntu" { + most_recent = true + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-amd64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + owners = ["099720109477"] # Canonical +} + +resource "aws_security_group" "eks-bast-sg" { + name = "${local.resource-prefix}-eks-bast-sg" + description = "Allow ssh to EKS bast" + vpc_id = data.terraform_remote_state.vpc.outputs.vpc-id + + ingress { + description = "SSH from VPC" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["223.18.148.85/32"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = local.default-tags +} + + +resource "aws_iam_role" "eks-bast-role" { + name = "${local.resource-prefix}-bast-role" + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Principal" : { + "Service" : "ec2.amazonaws.com" + }, + "Action" : "sts:AssumeRole" + } + ] + }) + inline_policy { + name = "eks-bast-policy" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["eks:*", "ecr:*"] + 
Effect = "Allow" + Resource = "*" + }, + ] + }) + } + + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + tags = local.default-tags +} + + +resource "aws_iam_instance_profile" "eks-bast-iam-profile" { + name = "eksBastIamProfile" + role = aws_iam_role.eks-bast-role.name +} + +resource "aws_instance" "eks-bast" { + ami = data.aws_ami.ubuntu.id + instance_type = "t3.micro" + associate_public_ip_address = true + ebs_optimized = true + key_name = aws_key_pair.eks-node-sshkey.key_name + vpc_security_group_ids = [aws_security_group.eks-bast-sg.id, aws_eks_cluster.eks-cluster.vpc_config[0].cluster_security_group_id] + subnet_id = data.terraform_remote_state.vpc.outputs.public-subnet-ids[0] + iam_instance_profile = aws_iam_instance_profile.eks-bast-iam-profile.name + root_block_device { + volume_size = 8 + volume_type = "gp3" + tags = local.default-tags + } + tags = merge(local.default-tags, { "Name" : "${local.resource-prefix}-eks-bast" }) + user_data = <=8 ? cidrsubnets(local.vpc-cidr, 4,4,4,4,4,4,4,4) : local.total-no-subnets >=6 ? cidrsubnets(local.vpc-cidr, 3,3,3,3,3,3) : local.total-no-subnets >=4 ? cidrsubnets(local.vpc-cidr, 2,2,2,2) : local.total-no-subnets >=2 ? cidrsubnets(local.vpc-cidr, 1,1) : null - simple-divide = local.total-no-subnets >= 12 ? cidrsubnets(local.vpc-cidr, 4,4,4,4, 4,4,4,4, 4,4,4,4) : local.total-no-subnets >= 8 ? cidrsubnets(local.vpc-cidr, 3,3,3,3, 3,3,3,3) : local.total-no-subnets >= 6 ? cidrsubnets(local.vpc-cidr, 3,3,3, 3,3,3) : local.total-no-subnets >= 4 ? cidrsubnets(local.vpc-cidr, 2,2,2,2) : local.total-no-subnets >= 2 ? cidrsubnets(local.vpc-cidr, 1, 1) : null - public-subnets = slice(local.simple-divide, 0, var.number-of-public-subnets-per-az * local.no-az) - private-subnets = slice(local.simple-divide, var.number-of-public-subnets-per-az * local.no-az , local.total-no-subnets) + simple-divide = local.total-no-subnets >= 12 ? cidrsubnets(local.vpc-cidr, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4) : local.total-no-subnets >= 8 ? cidrsubnets(local.vpc-cidr, 3, 3, 3, 3, 3, 3, 3, 3) : local.total-no-subnets >= 6 ? cidrsubnets(local.vpc-cidr, 3, 3, 3, 3, 3, 3) : local.total-no-subnets >= 4 ? cidrsubnets(local.vpc-cidr, 2, 2, 2, 2) : local.total-no-subnets >= 2 ? 
cidrsubnets(local.vpc-cidr, 1, 1) : null + public-subnets = slice(local.simple-divide, 0, var.number-of-public-subnets-per-az * local.no-az) + private-subnets = slice(local.simple-divide, var.number-of-public-subnets-per-az * local.no-az, local.total-no-subnets) } -resource aws_subnet private-subnets { +resource "aws_subnet" "private-subnets" { count = length(local.private-subnets) # count = length(var.private-subnet-cidrs) # count = var.number-of-private-subnets-per-az * length(data.aws_availability_zones.available-az.names) - vpc_id = aws_vpc.vpc.id + vpc_id = aws_vpc.vpc.id availability_zone = element(data.aws_availability_zones.available-az.names, count.index) # cidr_block = cidrsubnet(local.subnet_start[0], 2, count.index) # cidr_block = var.private-subnet-cidrs[count.index] cidr_block = local.private-subnets[count.index] tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-private-${split("-",element(data.aws_availability_zones.available-az.names, count.index))[2]}-${count.index+1}" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-private-${split("-", element(data.aws_availability_zones.available-az.names, count.index))[2]}-${count.index + 1}" + }, ) } -resource aws_subnet public-subnets { +resource "aws_subnet" "public-subnets" { count = length(local.public-subnets) # count = length(var.public-subnet-cidrs) # count = var.number-of-public-subnets-per-az * length(data.aws_availability_zones.available-az.names) - vpc_id = aws_vpc.vpc.id + vpc_id = aws_vpc.vpc.id availability_zone = element(data.aws_availability_zones.available-az.names, count.index) # cidr_block = cidrsubnet(local.subnet_start[1], 2, count.index) # cidr_block = var.public-subnet-cidrs[count.index] cidr_block = local.public-subnets[count.index] tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-public-${split("-",element(data.aws_availability_zones.available-az.names, count.index))[2]}-${count.index+1}" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-public-${split("-", element(data.aws_availability_zones.available-az.names, count.index))[2]}-${count.index + 1}" + }, ) } @@ -55,10 +55,10 @@ resource "aws_vpc" "vpc" { enable_dns_support = true tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-vpc" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-vpc" + }, ) lifecycle { @@ -67,58 +67,58 @@ resource "aws_vpc" "vpc" { } resource "aws_internet_gateway" "igw" { - count = var.number-of-public-subnets-per-az > 0 ? 1 : 0 + count = var.number-of-public-subnets-per-az > 0 ? 1 : 0 vpc_id = aws_vpc.vpc.id tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-igw" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-igw" + }, ) } resource "aws_eip" "ngw-eip" { - count = var.create-nat-gateway ? 1 : 0 - vpc = true - tags = var.default-tags + count = var.create-nat-gateway ? 1 : 0 + vpc = true + tags = var.default-tags depends_on = [aws_internet_gateway.igw] } resource "aws_nat_gateway" "ngw" { - count = var.create-nat-gateway ? 1 : 0 + count = var.create-nat-gateway ? 1 : 0 allocation_id = aws_eip.ngw-eip[0].id subnet_id = aws_subnet.public-subnets[0].id tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-ngw" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-ngw" + }, ) depends_on = [aws_internet_gateway.igw] } -resource aws_route_table public-route-table { - count = var.number-of-public-subnets-per-az > 0 ? 
1 : 0 +resource "aws_route_table" "public-route-table" { + count = var.number-of-public-subnets-per-az > 0 ? 1 : 0 vpc_id = aws_vpc.vpc.id tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-publicroutetable" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-publicroutetable" + }, ) } -resource aws_route_table private-route-table { - count = var.number-of-private-subnets-per-az > 0 ? 1 : 0 +resource "aws_route_table" "private-route-table" { + count = var.number-of-private-subnets-per-az > 0 ? 1 : 0 vpc_id = aws_vpc.vpc.id tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-privateroutetable" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-privateroutetable" + }, ) } @@ -139,13 +139,13 @@ resource "aws_route" "private-routes" { } resource "aws_route_table_association" "public_route_association" { - count = length(aws_subnet.public-subnets) + count = length(aws_subnet.public-subnets) route_table_id = aws_route_table.public-route-table[0].id - subnet_id = aws_subnet.public-subnets[count.index].id + subnet_id = aws_subnet.public-subnets[count.index].id } resource "aws_route_table_association" "private_route_association" { - count = length(aws_subnet.private-subnets) + count = length(aws_subnet.private-subnets) route_table_id = aws_route_table.private-route-table[0].id subnet_id = aws_subnet.private-subnets[count.index].id } @@ -155,38 +155,39 @@ harden default security group. the default sg created by aws allows all egress. this resource limits ingress and egress from and to itself */ -resource "aws_default_security_group" default-sg { +resource "aws_default_security_group" "default-sg" { vpc_id = aws_vpc.vpc.id ingress { - protocol = -1 - self = true - from_port = 0 - to_port = 0 + protocol = -1 + self = true + from_port = 0 + to_port = 0 description = "Allow traffic coming from this SG" } egress { - from_port = 0 - protocol = -1 - to_port = 0 - self = true + from_port = 0 + protocol = -1 + to_port = 0 + self = true description = "Allow traffic going to this SG" } tags = merge( - var.default-tags, - { - Name = "${local.resource-prefix}-defaultsg" - }, + var.default-tags, + { + Name = "${local.resource-prefix}-defaultsg" + }, ) } # Enable gateway endpoints which are free -module vpc-ep { - count = var.create-free-vpc-endpoints ? 1 : 0 +module "vpc-ep" { + count = var.create-free-vpc-endpoints ? 1 : 0 source = "../vpc-endpoints" default-tags = var.default-tags - gateway-ep-services = ["s3","dynamodb"] + gateway-ep-services = ["s3", "dynamodb"] interface-ep-services = [] resource-prefix = local.resource-prefix vpc-id = aws_vpc.vpc.id + aws-region = var.aws-region } \ No newline at end of file
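A note on the subnet math in vpc.tf above: simple-divide picks a cidrsubnets() split based on the total number of subnets requested, and slice() then assigns the first number-of-public-subnets-per-az * no-az blocks to the public subnets and the remainder to the private subnets. A quick way to sanity-check the split is terraform console; a minimal sketch, assuming a hypothetical 10.0.0.0/16 VPC CIDR and six subnets in total (the >= 6 branch, newbits of 3, so each subnet is a /19):

```bash
# Run from the network layer directory after terraform init.
terraform console <<'EOF'
cidrsubnets("10.0.0.0/16", 3, 3, 3, 3, 3, 3)
EOF
# Expected output (exact formatting varies by Terraform version):
# tolist([
#   "10.0.0.0/19",
#   "10.0.32.0/19",
#   "10.0.64.0/19",
#   "10.0.96.0/19",
#   "10.0.128.0/19",
#   "10.0.160.0/19",
# ])
```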