diff --git a/provider/cmd/pulumi-gen-eks/docs/managedNodeGroup.md b/provider/cmd/pulumi-gen-eks/docs/managedNodeGroup.md new file mode 100644 index 000000000..ad432d707 --- /dev/null +++ b/provider/cmd/pulumi-gen-eks/docs/managedNodeGroup.md @@ -0,0 +1,1047 @@ +Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). + + +{{% examples %}} +## Example Usage +{{% example %}} +### Basic Managed Node Group +This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. + + +```yaml +resources: + eks-vpc: + type: awsx:ec2:Vpc + properties: + enableDnsHostnames: true + cidrBlock: 10.0.0.0/16 + eks-cluster: + type: eks:Cluster + properties: + vpcId: ${eks-vpc.vpcId} + authenticationMode: API + publicSubnetIds: ${eks-vpc.publicSubnetIds} + privateSubnetIds: ${eks-vpc.privateSubnetIds} + skipDefaultNodeGroup: true + node-role: + type: aws:iam:Role + properties: + assumeRolePolicy: + fn::toJSON: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Sid: "" + Principal: + Service: ec2.amazonaws.com + worker-node-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + cni-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + registry-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + node-group: + type: eks:ManagedNodeGroup + properties: + cluster: ${eks-cluster} + nodeRole: ${node-role} + +``` + +```typescript +import * as pulumi from "@pulumi/pulumi"; +import * as aws from "@pulumi/aws"; +import * as awsx from "@pulumi/awsx"; +import * as eks from "@pulumi/eks"; + +const eksVpc = new awsx.ec2.Vpc("eks-vpc", { + enableDnsHostnames: true, + cidrBlock: "10.0.0.0/16", +}); +const eksCluster = new eks.Cluster("eks-cluster", { + vpcId: eksVpc.vpcId, + authenticationMode: eks.AuthenticationMode.Api, + publicSubnetIds: eksVpc.publicSubnetIds, + privateSubnetIds: eksVpc.privateSubnetIds, + skipDefaultNodeGroup: true, +}); +const nodeRole = new aws.iam.Role("node-role", {assumeRolePolicy: JSON.stringify({ + Version: "2012-10-17", + Statement: [{ + Action: "sts:AssumeRole", + Effect: "Allow", + Sid: "", + Principal: { + Service: "ec2.amazonaws.com", + }, + }], +})}); +const workerNodePolicy = new aws.iam.RolePolicyAttachment("worker-node-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", +}); +const cniPolicy = new aws.iam.RolePolicyAttachment("cni-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", +}); +const registryPolicy = new aws.iam.RolePolicyAttachment("registry-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", +}); +const nodeGroup = new eks.ManagedNodeGroup("node-group", { + cluster: eksCluster, + nodeRole: nodeRole, +}); + +``` + +```python +import pulumi +import json +import pulumi_aws as aws +import 
pulumi_awsx as awsx +import pulumi_eks as eks + +eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") +eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) +node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], +})) +worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") +cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") +registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") +node_group = eks.ManagedNodeGroup("node-group", + cluster=eks_cluster, + node_role=node_role) + +``` + +```go +package main + +import ( + "encoding/json" + + "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam" + "github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2" + "github.com/pulumi/pulumi-eks/sdk/v3/go/eks" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +func main() { + pulumi.Run(func(ctx *pulumi.Context) error { + eksVpc, err := ec2.NewVpc(ctx, "eks-vpc", &ec2.VpcArgs{ + EnableDnsHostnames: pulumi.Bool(true), + CidrBlock: "10.0.0.0/16", + }) + if err != nil { + return err + } + eksCluster, err := eks.NewCluster(ctx, "eks-cluster", &eks.ClusterArgs{ + VpcId: eksVpc.VpcId, + AuthenticationMode: eks.AuthenticationModeApi, + PublicSubnetIds: eksVpc.PublicSubnetIds, + PrivateSubnetIds: eksVpc.PrivateSubnetIds, + SkipDefaultNodeGroup: true, + }) + if err != nil { + return err + } + tmpJSON0, err := json.Marshal(map[string]interface{}{ + "Version": "2012-10-17", + "Statement": []map[string]interface{}{ + map[string]interface{}{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": map[string]interface{}{ + "Service": "ec2.amazonaws.com", + }, + }, + }, + }) + if err != nil { + return err + } + json0 := string(tmpJSON0) + nodeRole, err := iam.NewRole(ctx, "node-role", &iam.RoleArgs{ + AssumeRolePolicy: pulumi.String(json0), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "worker-node-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "cni-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "registry-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"), + }) + if err != nil { + return err + } + _, err = eks.NewManagedNodeGroup(ctx, "node-group", &eks.ManagedNodeGroupArgs{ + Cluster: eksCluster, + NodeRole: nodeRole, + }) + if err != nil { + return err + } + return nil + }) +} + +``` + +```csharp +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using Pulumi; +using Aws = Pulumi.Aws; 
+using Awsx = Pulumi.Awsx;
+using Eks = Pulumi.Eks;
+
+return await Deployment.RunAsync(() =>
+{
+    var eksVpc = new Awsx.Ec2.Vpc("eks-vpc", new()
+    {
+        EnableDnsHostnames = true,
+        CidrBlock = "10.0.0.0/16",
+    });
+
+    var eksCluster = new Eks.Cluster("eks-cluster", new()
+    {
+        VpcId = eksVpc.VpcId,
+        AuthenticationMode = Eks.AuthenticationMode.Api,
+        PublicSubnetIds = eksVpc.PublicSubnetIds,
+        PrivateSubnetIds = eksVpc.PrivateSubnetIds,
+        SkipDefaultNodeGroup = true,
+    });
+
+    var nodeRole = new Aws.Iam.Role("node-role", new()
+    {
+        AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
+        {
+            ["Version"] = "2012-10-17",
+            ["Statement"] = new[]
+            {
+                new Dictionary<string, object?>
+                {
+                    ["Action"] = "sts:AssumeRole",
+                    ["Effect"] = "Allow",
+                    ["Sid"] = "",
+                    ["Principal"] = new Dictionary<string, object?>
+                    {
+                        ["Service"] = "ec2.amazonaws.com",
+                    },
+                },
+            },
+        }),
+    });
+
+    var workerNodePolicy = new Aws.Iam.RolePolicyAttachment("worker-node-policy", new()
+    {
+        Role = nodeRole.Name,
+        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
+    });
+
+    var cniPolicy = new Aws.Iam.RolePolicyAttachment("cni-policy", new()
+    {
+        Role = nodeRole.Name,
+        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
+    });
+
+    var registryPolicy = new Aws.Iam.RolePolicyAttachment("registry-policy", new()
+    {
+        Role = nodeRole.Name,
+        PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
+    });
+
+    var nodeGroup = new Eks.ManagedNodeGroup("node-group", new()
+    {
+        Cluster = eksCluster,
+        NodeRole = nodeRole,
+    });
+
+    return new Dictionary<string, object?>{};
+});
+
+```
+
+```java
+package generated_program;
+
+import com.pulumi.Context;
+import com.pulumi.Pulumi;
+import com.pulumi.core.Output;
+import com.pulumi.awsx.ec2.Vpc;
+import com.pulumi.awsx.ec2.VpcArgs;
+import com.pulumi.eks.Cluster;
+import com.pulumi.eks.ClusterArgs;
+import com.pulumi.aws.iam.Role;
+import com.pulumi.aws.iam.RoleArgs;
+import com.pulumi.aws.iam.RolePolicyAttachment;
+import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
+import com.pulumi.eks.ManagedNodeGroup;
+import com.pulumi.eks.ManagedNodeGroupArgs;
+import static com.pulumi.codegen.internal.Serialization.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+public class App {
+    public static void main(String[] args) {
+        Pulumi.run(App::stack);
+    }
+
+    public static void stack(Context ctx) {
+        var eksVpc = new Vpc("eksVpc", VpcArgs.builder()
+            .enableDnsHostnames(true)
+            .cidrBlock("10.0.0.0/16")
+            .build());
+
+        var eksCluster = new Cluster("eksCluster", ClusterArgs.builder()
+            .vpcId(eksVpc.vpcId())
+            .authenticationMode("API")
+            .publicSubnetIds(eksVpc.publicSubnetIds())
+            .privateSubnetIds(eksVpc.privateSubnetIds())
+            .skipDefaultNodeGroup(true)
+            .build());
+
+        var nodeRole = new Role("nodeRole", RoleArgs.builder()
+            .assumeRolePolicy(serializeJson(
+                jsonObject(
+                    jsonProperty("Version", "2012-10-17"),
+                    jsonProperty("Statement", jsonArray(jsonObject(
+                        jsonProperty("Action", "sts:AssumeRole"),
+                        jsonProperty("Effect", "Allow"),
+                        jsonProperty("Sid", ""),
+                        jsonProperty("Principal", jsonObject(
+                            jsonProperty("Service", "ec2.amazonaws.com")
+                        ))
+                    )))
+                )))
+            .build());
+
+        var workerNodePolicy = new RolePolicyAttachment("workerNodePolicy", RolePolicyAttachmentArgs.builder()
+            .role(nodeRole.name())
+            .policyArn("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
+            .build());
+
+        var cniPolicy = new RolePolicyAttachment("cniPolicy", RolePolicyAttachmentArgs.builder()
+            
.role(nodeRole.name()) + .policyArn("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") + .build()); + + var registryPolicy = new RolePolicyAttachment("registryPolicy", RolePolicyAttachmentArgs.builder() + .role(nodeRole.name()) + .policyArn("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + .build()); + + var nodeGroup = new ManagedNodeGroup("nodeGroup", ManagedNodeGroupArgs.builder() + .cluster(eksCluster) + .nodeRole(nodeRole) + .build()); + } +} +``` +{{% /example %}} + +{{% example %}} +### Enabling EFA Support + +Enabling EFA support for a node group will do the following: +- All EFA interfaces supported by the instance will be exposed on the launch template used by the node group +- A `clustered` placement group will be created and passed to the launch template +- Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type + +The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. + +You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. +Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). + +```yaml +name: eks-mng-docs +description: A Pulumi YAML program to deploy a Kubernetes cluster on AWS +runtime: yaml +resources: + eks-vpc: + type: awsx:ec2:Vpc + properties: + enableDnsHostnames: true + cidrBlock: 10.0.0.0/16 + eks-cluster: + type: eks:Cluster + properties: + vpcId: ${eks-vpc.vpcId} + authenticationMode: API + publicSubnetIds: ${eks-vpc.publicSubnetIds} + privateSubnetIds: ${eks-vpc.privateSubnetIds} + skipDefaultNodeGroup: true + k8sProvider: + type: pulumi:providers:kubernetes + properties: + kubeconfig: ${eks-cluster.kubeconfig} + node-role: + type: aws:iam:Role + properties: + assumeRolePolicy: + fn::toJSON: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Sid: "" + Principal: + Service: ec2.amazonaws.com + worker-node-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + cni-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + registry-policy: + type: aws:iam:RolePolicyAttachment + properties: + role: ${node-role.name} + policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + + # The node group for running system pods (e.g. coredns, etc.) 
+ system-node-group: + type: eks:ManagedNodeGroup + properties: + cluster: ${eks-cluster} + nodeRole: ${node-role} + + # EFA device plugin for exposing EFA interfaces as extended resources + device-plugin: + type: kubernetes:helm.sh/v3:Release + properties: + version: "0.5.7" + repositoryOpts: + repo: "https://aws.github.io/eks-charts" + chart: "aws-efa-k8s-device-plugin" + namespace: "kube-system" + atomic: true + values: + tolerations: + - key: "efa-enabled" + operator: "Exists" + effect: "NoExecute" + options: + provider: ${k8sProvider} + + # The node group for running EFA enabled workloads + efa-node-group: + type: eks:ManagedNodeGroup + properties: + cluster: ${eks-cluster} + nodeRole: ${node-role} + instanceTypes: ["g6.8xlarge"] + gpu: true + scalingConfig: + minSize: 2 + desiredSize: 2 + maxSize: 4 + enableEfaSupport: true + placementGroupAvailabilityZone: "us-west-2b" + # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + taints: + - key: "efa-enabled" + value: "true" + effect: "NO_EXECUTE" + # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + # These are faster than the regular EBS volumes + nodeadmExtraOptions: + - contentType: "application/node.eks.aws" + content: | + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + instance: + localStorage: + strategy: RAID0 + +``` + +```typescript +import * as pulumi from "@pulumi/pulumi"; +import * as aws from "@pulumi/aws"; +import * as awsx from "@pulumi/awsx"; +import * as eks from "@pulumi/eks"; +import * as kubernetes from "@pulumi/kubernetes"; + +const eksVpc = new awsx.ec2.Vpc("eks-vpc", { + enableDnsHostnames: true, + cidrBlock: "10.0.0.0/16", +}); +const eksCluster = new eks.Cluster("eks-cluster", { + vpcId: eksVpc.vpcId, + authenticationMode: eks.AuthenticationMode.Api, + publicSubnetIds: eksVpc.publicSubnetIds, + privateSubnetIds: eksVpc.privateSubnetIds, + skipDefaultNodeGroup: true, +}); +const k8SProvider = new kubernetes.Provider("k8sProvider", {kubeconfig: eksCluster.kubeconfig}); +const nodeRole = new aws.iam.Role("node-role", {assumeRolePolicy: JSON.stringify({ + Version: "2012-10-17", + Statement: [{ + Action: "sts:AssumeRole", + Effect: "Allow", + Sid: "", + Principal: { + Service: "ec2.amazonaws.com", + }, + }], +})}); +const workerNodePolicy = new aws.iam.RolePolicyAttachment("worker-node-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", +}); +const cniPolicy = new aws.iam.RolePolicyAttachment("cni-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", +}); +const registryPolicy = new aws.iam.RolePolicyAttachment("registry-policy", { + role: nodeRole.name, + policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", +}); + +// The node group for running system pods (e.g. coredns, etc.) 
+const systemNodeGroup = new eks.ManagedNodeGroup("system-node-group", { + cluster: eksCluster, + nodeRole: nodeRole, +}); + +// The EFA device plugin for exposing EFA interfaces as extended resources +const devicePlugin = new kubernetes.helm.v3.Release("device-plugin", { + version: "0.5.7", + repositoryOpts: { + repo: "https://aws.github.io/eks-charts", + }, + chart: "aws-efa-k8s-device-plugin", + namespace: "kube-system", + atomic: true, + values: { + tolerations: [{ + key: "efa-enabled", + operator: "Exists", + effect: "NoExecute", + }], + }, +}, { + provider: k8SProvider, +}); + +// The node group for running EFA enabled workloads +const efaNodeGroup = new eks.ManagedNodeGroup("efa-node-group", { + cluster: eksCluster, + nodeRole: nodeRole, + instanceTypes: ["g6.8xlarge"], + gpu: true, + scalingConfig: { + minSize: 2, + desiredSize: 2, + maxSize: 4, + }, + enableEfaSupport: true, + placementGroupAvailabilityZone: "us-west-2b", + + // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + taints: [{ + key: "efa-enabled", + value: "true", + effect: "NO_EXECUTE", + }], + + // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + // These are faster than the regular EBS volumes + nodeadmExtraOptions: [{ + contentType: "application/node.eks.aws", + content: `apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + instance: + localStorage: + strategy: RAID0 +`, + }], +}); + +``` + +```python +import pulumi +import json +import pulumi_aws as aws +import pulumi_awsx as awsx +import pulumi_eks as eks +import pulumi_kubernetes as kubernetes + +eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") +eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) +k8_s_provider = kubernetes.Provider("k8sProvider", kubeconfig=eks_cluster.kubeconfig) +node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], +})) +worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") +cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") +registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + +# The node group for running system pods (e.g. coredns, etc.) 
+system_node_group = eks.ManagedNodeGroup("system-node-group", + cluster=eks_cluster, + node_role=node_role) + +# The EFA device plugin for exposing EFA interfaces as extended resources +device_plugin = kubernetes.helm.v3.Release("device-plugin", + version="0.5.7", + repository_opts={ + "repo": "https://aws.github.io/eks-charts", + }, + chart="aws-efa-k8s-device-plugin", + namespace="kube-system", + atomic=True, + values={ + "tolerations": [{ + "key": "efa-enabled", + "operator": "Exists", + "effect": "NoExecute", + }], + }, + opts = pulumi.ResourceOptions(provider=k8_s_provider)) + +# The node group for running EFA enabled workloads +efa_node_group = eks.ManagedNodeGroup("efa-node-group", + cluster=eks_cluster, + node_role=node_role, + instance_types=["g6.8xlarge"], + gpu=True, + scaling_config={ + "min_size": 2, + "desired_size": 2, + "max_size": 4, + }, + enable_efa_support=True, + placement_group_availability_zone="us-west-2b", + + # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + taints=[{ + "key": "efa-enabled", + "value": "true", + "effect": "NO_EXECUTE", + }], + + # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + # These are faster than the regular EBS volumes + nodeadm_extra_options=[{ + "content_type": "application/node.eks.aws", + "content": """apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + instance: + localStorage: + strategy: RAID0 +""", + }]) + +``` + +```go +package main + +import ( + "encoding/json" + + awseks "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/eks" + "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam" + "github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2" + "github.com/pulumi/pulumi-eks/sdk/v3/go/eks" + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" + helmv3 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +func main() { + pulumi.Run(func(ctx *pulumi.Context) error { + eksVpc, err := ec2.NewVpc(ctx, "eks-vpc", &ec2.VpcArgs{ + EnableDnsHostnames: pulumi.Bool(true), + CidrBlock: "10.0.0.0/16", + }) + if err != nil { + return err + } + eksCluster, err := eks.NewCluster(ctx, "eks-cluster", &eks.ClusterArgs{ + VpcId: eksVpc.VpcId, + AuthenticationMode: eks.AuthenticationModeApi, + PublicSubnetIds: eksVpc.PublicSubnetIds, + PrivateSubnetIds: eksVpc.PrivateSubnetIds, + SkipDefaultNodeGroup: true, + }) + if err != nil { + return err + } + k8SProvider, err := kubernetes.NewProvider(ctx, "k8sProvider", &kubernetes.ProviderArgs{ + Kubeconfig: eksCluster.Kubeconfig, + }) + if err != nil { + return err + } + tmpJSON0, err := json.Marshal(map[string]interface{}{ + "Version": "2012-10-17", + "Statement": []map[string]interface{}{ + map[string]interface{}{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": map[string]interface{}{ + "Service": "ec2.amazonaws.com", + }, + }, + }, + }) + if err != nil { + return err + } + json0 := string(tmpJSON0) + nodeRole, err := iam.NewRole(ctx, "node-role", &iam.RoleArgs{ + AssumeRolePolicy: pulumi.String(json0), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "worker-node-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "cni-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: 
pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"), + }) + if err != nil { + return err + } + _, err = iam.NewRolePolicyAttachment(ctx, "registry-policy", &iam.RolePolicyAttachmentArgs{ + Role: nodeRole.Name, + PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"), + }) + if err != nil { + return err + } + + // The node group for running system pods (e.g. coredns, etc.) + _, err = eks.NewManagedNodeGroup(ctx, "system-node-group", &eks.ManagedNodeGroupArgs{ + Cluster: eksCluster, + NodeRole: nodeRole, + }) + if err != nil { + return err + } + + // The EFA device plugin for exposing EFA interfaces as extended resources + _, err = helmv3.NewRelease(ctx, "device-plugin", &helmv3.ReleaseArgs{ + Version: pulumi.String("0.5.7"), + RepositoryOpts: &helmv3.RepositoryOptsArgs{ + Repo: pulumi.String("https://aws.github.io/eks-charts"), + }, + Chart: pulumi.String("aws-efa-k8s-device-plugin"), + Namespace: pulumi.String("kube-system"), + Atomic: pulumi.Bool(true), + Values: pulumi.Map{ + "tolerations": pulumi.Any{ + []map[string]interface{}{ + { + "key": "efa-enabled", + "operator": "Exists", + "effect": "NoExecute", + } + }, + }, + }, + }, pulumi.Provider(k8SProvider)) + if err != nil { + return err + } + + // The node group for running EFA enabled workloads + _, err = eks.NewManagedNodeGroup(ctx, "efa-node-group", &eks.ManagedNodeGroupArgs{ + Cluster: eksCluster, + NodeRole: nodeRole, + InstanceTypes: pulumi.StringArray{ + pulumi.String("g6.8xlarge"), + }, + Gpu: pulumi.Bool(true), + ScalingConfig: &eks.NodeGroupScalingConfigArgs{ + MinSize: pulumi.Int(2), + DesiredSize: pulumi.Int(2), + MaxSize: pulumi.Int(4), + }, + EnableEfaSupport: true, + PlacementGroupAvailabilityZone: pulumi.String("us-west-2b"), + + // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + Taints: eks.NodeGroupTaintArray{ + &eks.NodeGroupTaintArgs{ + Key: pulumi.String("efa-enabled"), + Value: pulumi.String("true"), + Effect: pulumi.String("NO_EXECUTE"), + }, + }, + + // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + // These are faster than the regular EBS volumes + NodeadmExtraOptions: eks.NodeadmOptionsArray{ + &eks.NodeadmOptionsArgs{ + ContentType: pulumi.String("application/node.eks.aws"), + Content: pulumi.String(`apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + instance: + localStorage: + strategy: RAID0 +`), + }, + }, + }) + if err != nil { + return err + } + return nil + }) +} + +``` + +```csharp +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using Pulumi; +using Aws = Pulumi.Aws; +using Awsx = Pulumi.Awsx; +using Eks = Pulumi.Eks; +using Kubernetes = Pulumi.Kubernetes; + +return await Deployment.RunAsync(() => +{ + var eksVpc = new Awsx.Ec2.Vpc("eks-vpc", new() + { + EnableDnsHostnames = true, + CidrBlock = "10.0.0.0/16", + }); + + var eksCluster = new Eks.Cluster("eks-cluster", new() + { + VpcId = eksVpc.VpcId, + AuthenticationMode = Eks.AuthenticationMode.Api, + PublicSubnetIds = eksVpc.PublicSubnetIds, + PrivateSubnetIds = eksVpc.PrivateSubnetIds, + SkipDefaultNodeGroup = true, + }); + + var k8SProvider = new Kubernetes.Provider.Provider("k8sProvider", new() + { + KubeConfig = eksCluster.Kubeconfig, + }); + + var nodeRole = new Aws.Iam.Role("node-role", new() + { + AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary + { + ["Version"] = "2012-10-17", + ["Statement"] = new[] + { + new Dictionary + { + ["Action"] 
= "sts:AssumeRole", + ["Effect"] = "Allow", + ["Sid"] = "", + ["Principal"] = new Dictionary + { + ["Service"] = "ec2.amazonaws.com", + }, + }, + }, + }), + }); + + var workerNodePolicy = new Aws.Iam.RolePolicyAttachment("worker-node-policy", new() + { + Role = nodeRole.Name, + PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + }); + + var cniPolicy = new Aws.Iam.RolePolicyAttachment("cni-policy", new() + { + Role = nodeRole.Name, + PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + }); + + var registryPolicy = new Aws.Iam.RolePolicyAttachment("registry-policy", new() + { + Role = nodeRole.Name, + PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + }); + + // The node group for running system pods (e.g. coredns, etc.) + var systemNodeGroup = new Eks.ManagedNodeGroup("system-node-group", new() + { + Cluster = eksCluster, + NodeRole = nodeRole, + }); + + // The EFA device plugin for exposing EFA interfaces as extended resources + var devicePlugin = new Kubernetes.Helm.V3.Release("device-plugin", new() + { + Version = "0.5.7", + RepositoryOpts = new Kubernetes.Types.Inputs.Helm.V3.RepositoryOptsArgs + { + Repo = "https://aws.github.io/eks-charts", + }, + Chart = "aws-efa-k8s-device-plugin", + Namespace = "kube-system", + Atomic = true, + Values = + { + { "tolerations", new[] + { + + { + { "key", "efa-enabled" }, + { "operator", "Exists" }, + { "effect", "NoExecute" }, + }, + } }, + }, + }, new CustomResourceOptions + { + Provider = k8SProvider, + }); + + // The node group for running EFA enabled workloads + var efaNodeGroup = new Eks.ManagedNodeGroup("efa-node-group", new() + { + Cluster = eksCluster, + NodeRole = nodeRole, + InstanceTypes = new[] + { + "g6.8xlarge", + }, + Gpu = true, + ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs + { + MinSize = 2, + DesiredSize = 2, + MaxSize = 4, + }, + EnableEfaSupport = true, + PlacementGroupAvailabilityZone = "us-west-2b", + + // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + Taints = new[] + { + new Aws.Eks.Inputs.NodeGroupTaintArgs + { + Key = "efa-enabled", + Value = "true", + Effect = "NO_EXECUTE", + }, + }, + + // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + NodeadmExtraOptions = new[] + { + new Eks.Inputs.NodeadmOptionsArgs + { + ContentType = "application/node.eks.aws", + Content = @"apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + instance: + localStorage: + strategy: RAID0 +", + }, + }, + }); + +}); + +``` + +{{% /example %}} +{{% /examples %}} diff --git a/provider/cmd/pulumi-gen-eks/main.go b/provider/cmd/pulumi-gen-eks/main.go index e28bfc2c8..bf790313d 100644 --- a/provider/cmd/pulumi-gen-eks/main.go +++ b/provider/cmd/pulumi-gen-eks/main.go @@ -15,6 +15,8 @@ package main import ( + // used for embedding docs + _ "embed" "encoding/json" "fmt" "log" @@ -122,6 +124,9 @@ func k8sRef(ref string, k8sVersion string) string { return fmt.Sprintf("/kubernetes/v%s/schema.json%s", k8sVersion, ref) } +//go:embed docs/managedNodeGroup.md +var managedNodeGroupDocs string + //nolint:lll,goconst func generateSchema(version semver.Version, outdir string) schema.PackageSpec { dependencies := readPackageDependencies(path.Join(outdir, "..", "..", "..", "nodejs", "eks")) @@ -821,9 +826,7 @@ func generateSchema(version semver.Version, outdir string) schema.PackageSpec { "eks:index:ManagedNodeGroup": { IsComponent: true, ObjectTypeSpec: schema.ObjectTypeSpec{ - 
Description: "ManagedNodeGroup is a component that wraps creating an AWS managed node group.\n\n" + - "See for more details:\n" + - "https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html", + Description: managedNodeGroupDocs, Properties: map[string]schema.PropertySpec{ "nodeGroup": { TypeSpec: schema.TypeSpec{Ref: awsRef("#/resources/aws:eks%2FnodeGroup:NodeGroup", dependencies.Aws)}, diff --git a/provider/cmd/pulumi-resource-eks/schema.json b/provider/cmd/pulumi-resource-eks/schema.json index be3be87d2..65434ea5f 100644 --- a/provider/cmd/pulumi-resource-eks/schema.json +++ b/provider/cmd/pulumi-resource-eks/schema.json @@ -1581,7 +1581,7 @@ "isComponent": true }, "eks:index:ManagedNodeGroup": { - "description": "ManagedNodeGroup is a component that wraps creating an AWS managed node group.\n\nSee for more details:\nhttps://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html", + "description": "Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).\n\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Basic Managed Node Group\nThis example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured.\n\n\n```yaml\nresources:\n eks-vpc:\n type: awsx:ec2:Vpc\n properties:\n enableDnsHostnames: true\n cidrBlock: 10.0.0.0/16\n eks-cluster:\n type: eks:Cluster\n properties:\n vpcId: ${eks-vpc.vpcId}\n authenticationMode: API\n publicSubnetIds: ${eks-vpc.publicSubnetIds}\n privateSubnetIds: ${eks-vpc.privateSubnetIds}\n skipDefaultNodeGroup: true\n node-role:\n type: aws:iam:Role\n properties:\n assumeRolePolicy:\n fn::toJSON:\n Version: 2012-10-17\n Statement:\n - Action: sts:AssumeRole\n Effect: Allow\n Sid: \"\"\n Principal:\n Service: ec2.amazonaws.com\n worker-node-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\"\n cni-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\"\n registry-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"\n node-group:\n type: eks:ManagedNodeGroup\n properties:\n cluster: ${eks-cluster}\n nodeRole: ${node-role}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as awsx from \"@pulumi/awsx\";\nimport * as eks from \"@pulumi/eks\";\n\nconst eksVpc = new awsx.ec2.Vpc(\"eks-vpc\", {\n enableDnsHostnames: true,\n cidrBlock: \"10.0.0.0/16\",\n});\nconst eksCluster = new eks.Cluster(\"eks-cluster\", {\n vpcId: eksVpc.vpcId,\n authenticationMode: eks.AuthenticationMode.Api,\n publicSubnetIds: eksVpc.publicSubnetIds,\n privateSubnetIds: eksVpc.privateSubnetIds,\n skipDefaultNodeGroup: true,\n});\nconst nodeRole = new aws.iam.Role(\"node-role\", {assumeRolePolicy: JSON.stringify({\n Version: \"2012-10-17\",\n Statement: [{\n Action: \"sts:AssumeRole\",\n Effect: \"Allow\",\n Sid: \"\",\n Principal: {\n Service: \"ec2.amazonaws.com\",\n },\n 
}],\n})});\nconst workerNodePolicy = new aws.iam.RolePolicyAttachment(\"worker-node-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\",\n});\nconst cniPolicy = new aws.iam.RolePolicyAttachment(\"cni-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\",\n});\nconst registryPolicy = new aws.iam.RolePolicyAttachment(\"registry-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\",\n});\nconst nodeGroup = new eks.ManagedNodeGroup(\"node-group\", {\n cluster: eksCluster,\n nodeRole: nodeRole,\n});\n\n```\n\n```python\nimport pulumi\nimport json\nimport pulumi_aws as aws\nimport pulumi_awsx as awsx\nimport pulumi_eks as eks\n\neks_vpc = awsx.ec2.Vpc(\"eks-vpc\",\n enable_dns_hostnames=True,\n cidr_block=\"10.0.0.0/16\")\neks_cluster = eks.Cluster(\"eks-cluster\",\n vpc_id=eks_vpc.vpc_id,\n authentication_mode=eks.AuthenticationMode.API,\n public_subnet_ids=eks_vpc.public_subnet_ids,\n private_subnet_ids=eks_vpc.private_subnet_ids,\n skip_default_node_group=True)\nnode_role = aws.iam.Role(\"node-role\", assume_role_policy=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": \"sts:AssumeRole\",\n \"Effect\": \"Allow\",\n \"Sid\": \"\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\",\n },\n }],\n}))\nworker_node_policy = aws.iam.RolePolicyAttachment(\"worker-node-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\")\ncni_policy = aws.iam.RolePolicyAttachment(\"cni-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\")\nregistry_policy = aws.iam.RolePolicyAttachment(\"registry-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\")\nnode_group = eks.ManagedNodeGroup(\"node-group\",\n cluster=eks_cluster,\n node_role=node_role)\n\n```\n\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2\"\n\t\"github.com/pulumi/pulumi-eks/sdk/v3/go/eks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\teksVpc, err := ec2.NewVpc(ctx, \"eks-vpc\", \u0026ec2.VpcArgs{\n\t\t\tEnableDnsHostnames: pulumi.Bool(true),\n\t\t\tCidrBlock: \"10.0.0.0/16\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teksCluster, err := eks.NewCluster(ctx, \"eks-cluster\", \u0026eks.ClusterArgs{\n\t\t\tVpcId: eksVpc.VpcId,\n\t\t\tAuthenticationMode: eks.AuthenticationModeApi,\n\t\t\tPublicSubnetIds: eksVpc.PublicSubnetIds,\n\t\t\tPrivateSubnetIds: eksVpc.PrivateSubnetIds,\n\t\t\tSkipDefaultNodeGroup: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttmpJSON0, err := json.Marshal(map[string]interface{}{\n\t\t\t\"Version\": \"2012-10-17\",\n\t\t\t\"Statement\": []map[string]interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"Action\": \"sts:AssumeRole\",\n\t\t\t\t\t\"Effect\": \"Allow\",\n\t\t\t\t\t\"Sid\": \"\",\n\t\t\t\t\t\"Principal\": map[string]interface{}{\n\t\t\t\t\t\t\"Service\": \"ec2.amazonaws.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\tnodeRole, err := iam.NewRole(ctx, \"node-role\", \u0026iam.RoleArgs{\n\t\t\tAssumeRolePolicy: pulumi.String(json0),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, 
err = iam.NewRolePolicyAttachment(ctx, \"worker-node-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"cni-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"registry-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = eks.NewManagedNodeGroup(ctx, \"node-group\", \u0026eks.ManagedNodeGroupArgs{\n\t\t\tCluster: eksCluster,\n\t\t\tNodeRole: nodeRole,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Awsx = Pulumi.Awsx;\nusing Eks = Pulumi.Eks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var eksVpc = new Awsx.Ec2.Vpc(\"eks-vpc\", new()\n {\n EnableDnsHostnames = true,\n CidrBlock = \"10.0.0.0/16\",\n });\n\n var eksCluster = new Eks.Cluster(\"eks-cluster\", new()\n {\n VpcId = eksVpc.VpcId,\n AuthenticationMode = Eks.AuthenticationMode.Api,\n PublicSubnetIds = eksVpc.PublicSubnetIds,\n PrivateSubnetIds = eksVpc.PrivateSubnetIds,\n SkipDefaultNodeGroup = true,\n });\n\n var nodeRole = new Aws.Iam.Role(\"node-role\", new()\n {\n AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"Version\"] = \"2012-10-17\",\n [\"Statement\"] = new[]\n {\n new Dictionary\u003cstring, object?\u003e\n {\n [\"Action\"] = \"sts:AssumeRole\",\n [\"Effect\"] = \"Allow\",\n [\"Sid\"] = \"\",\n [\"Principal\"] = new Dictionary\u003cstring, object?\u003e\n {\n [\"Service\"] = \"ec2.amazonaws.com\",\n },\n },\n },\n }),\n });\n\n var workerNodePolicy = new Aws.Iam.RolePolicyAttachment(\"worker-node-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\",\n });\n\n var cniPolicy = new Aws.Iam.RolePolicyAttachment(\"cni-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\",\n });\n\n var registryPolicy = new Aws.Iam.RolePolicyAttachment(\"registry-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\",\n });\n\n var nodeGroup = new Eks.ManagedNodeGroup(\"node-group\", new()\n {\n Cluster = eksCluster,\n NodeRole = nodeRole,\n });\n\n return new Dictionary\u003cstring, object?\u003e{};\n});\n\n```\n\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.awsx.ec2.Vpc;\nimport com.pulumi.awsx.ec2.VpcArgs;\nimport com.pulumi.eks.Cluster;\nimport com.pulumi.eks.ClusterArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport com.pulumi.aws.iam.RolePolicyAttachment;\nimport com.pulumi.aws.iam.RolePolicyAttachmentArgs;\nimport com.pulumi.eks.ManagedNodeGroup;\nimport com.pulumi.eks.ManagedNodeGroupArgs;\nimport static com.pulumi.codegen.internal.Serialization.*;\nimport java.util.List;\nimport java.util.ArrayList;\nimport 
java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var eksVpc = new Vpc(\"eksVpc\", VpcArgs.builder()\n .enableDnsHostnames(true)\n .cidrBlock(\"10.0.0.0/16\")\n .build());\n\n var eksCluster = new Cluster(\"eksCluster\", ClusterArgs.builder()\n .vpcId(eksVpc.vpcId())\n .authenticationMode(\"API\")\n .publicSubnetIds(eksVpc.publicSubnetIds())\n .privateSubnetIds(eksVpc.privateSubnetIds())\n .skipDefaultNodeGroup(true)\n .build());\n\n var nodeRole = new Role(\"nodeRole\", RoleArgs.builder()\n .assumeRolePolicy(serializeJson(\n jsonObject(\n jsonProperty(\"Version\", \"2012-10-17\"),\n jsonProperty(\"Statement\", jsonArray(jsonObject(\n jsonProperty(\"Action\", \"sts:AssumeRole\"),\n jsonProperty(\"Effect\", \"Allow\"),\n jsonProperty(\"Sid\", \"\"),\n jsonProperty(\"Principal\", jsonObject(\n jsonProperty(\"Service\", \"ec2.amazonaws.com\")\n ))\n )))\n )))\n .build());\n\n var workerNodePolicy = new RolePolicyAttachment(\"workerNodePolicy\", RolePolicyAttachmentArgs.builder()\n .role(nodeRole.name())\n .policyArn(\"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\")\n .build());\n\n var cniPolicy = new RolePolicyAttachment(\"cniPolicy\", RolePolicyAttachmentArgs.builder()\n .role(nodeRole.name())\n .policyArn(\"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\")\n .build());\n\n var registryPolicy = new RolePolicyAttachment(\"registryPolicy\", RolePolicyAttachmentArgs.builder()\n .role(nodeRole.name())\n .policyArn(\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\")\n .build());\n\n var nodeGroup = new ManagedNodeGroup(\"nodeGroup\", ManagedNodeGroupArgs.builder()\n .cluster(eksCluster)\n .nodeRole(nodeRole)\n .build());\n }\n}\n```\n{{% /example %}}\n\n{{% example %}}\n### Enabling EFA Support\n\nEnabling EFA support for a node group will do the following:\n- All EFA interfaces supported by the instance will be exposed on the launch template used by the node group\n- A `clustered` placement group will be created and passed to the launch template\n- Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type\n\nThe GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI.\n\nYou can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers.\nYour application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. 
libfabric).\n\n```yaml\nname: eks-mng-docs\ndescription: A Pulumi YAML program to deploy a Kubernetes cluster on AWS\nruntime: yaml\nresources:\n eks-vpc:\n type: awsx:ec2:Vpc\n properties:\n enableDnsHostnames: true\n cidrBlock: 10.0.0.0/16\n eks-cluster:\n type: eks:Cluster\n properties:\n vpcId: ${eks-vpc.vpcId}\n authenticationMode: API\n publicSubnetIds: ${eks-vpc.publicSubnetIds}\n privateSubnetIds: ${eks-vpc.privateSubnetIds}\n skipDefaultNodeGroup: true\n k8sProvider:\n type: pulumi:providers:kubernetes\n properties:\n kubeconfig: ${eks-cluster.kubeconfig}\n node-role:\n type: aws:iam:Role\n properties:\n assumeRolePolicy:\n fn::toJSON:\n Version: 2012-10-17\n Statement:\n - Action: sts:AssumeRole\n Effect: Allow\n Sid: \"\"\n Principal:\n Service: ec2.amazonaws.com\n worker-node-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\"\n cni-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\"\n registry-policy:\n type: aws:iam:RolePolicyAttachment\n properties:\n role: ${node-role.name}\n policyArn: \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"\n \n # The node group for running system pods (e.g. coredns, etc.)\n system-node-group:\n type: eks:ManagedNodeGroup\n properties:\n cluster: ${eks-cluster}\n nodeRole: ${node-role}\n\n # EFA device plugin for exposing EFA interfaces as extended resources\n device-plugin:\n type: kubernetes:helm.sh/v3:Release\n properties:\n version: \"0.5.7\"\n repositoryOpts:\n repo: \"https://aws.github.io/eks-charts\"\n chart: \"aws-efa-k8s-device-plugin\"\n namespace: \"kube-system\"\n atomic: true\n values:\n tolerations:\n - key: \"efa-enabled\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n options:\n provider: ${k8sProvider}\n\n # The node group for running EFA enabled workloads\n efa-node-group:\n type: eks:ManagedNodeGroup\n properties:\n cluster: ${eks-cluster}\n nodeRole: ${node-role}\n instanceTypes: [\"g6.8xlarge\"]\n gpu: true\n scalingConfig:\n minSize: 2\n desiredSize: 2\n maxSize: 4\n enableEfaSupport: true\n placementGroupAvailabilityZone: \"us-west-2b\"\n # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them\n taints:\n - key: \"efa-enabled\"\n value: \"true\"\n effect: \"NO_EXECUTE\"\n # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd\n # These are faster than the regular EBS volumes\n nodeadmExtraOptions:\n - contentType: \"application/node.eks.aws\"\n content: |\n apiVersion: node.eks.aws/v1alpha1\n kind: NodeConfig\n spec:\n instance:\n localStorage:\n strategy: RAID0\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as awsx from \"@pulumi/awsx\";\nimport * as eks from \"@pulumi/eks\";\nimport * as kubernetes from \"@pulumi/kubernetes\";\n\nconst eksVpc = new awsx.ec2.Vpc(\"eks-vpc\", {\n enableDnsHostnames: true,\n cidrBlock: \"10.0.0.0/16\",\n});\nconst eksCluster = new eks.Cluster(\"eks-cluster\", {\n vpcId: eksVpc.vpcId,\n authenticationMode: eks.AuthenticationMode.Api,\n publicSubnetIds: eksVpc.publicSubnetIds,\n privateSubnetIds: eksVpc.privateSubnetIds,\n skipDefaultNodeGroup: true,\n});\nconst k8SProvider = new kubernetes.Provider(\"k8sProvider\", {kubeconfig: eksCluster.kubeconfig});\nconst nodeRole = new aws.iam.Role(\"node-role\", 
{assumeRolePolicy: JSON.stringify({\n Version: \"2012-10-17\",\n Statement: [{\n Action: \"sts:AssumeRole\",\n Effect: \"Allow\",\n Sid: \"\",\n Principal: {\n Service: \"ec2.amazonaws.com\",\n },\n }],\n})});\nconst workerNodePolicy = new aws.iam.RolePolicyAttachment(\"worker-node-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\",\n});\nconst cniPolicy = new aws.iam.RolePolicyAttachment(\"cni-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\",\n});\nconst registryPolicy = new aws.iam.RolePolicyAttachment(\"registry-policy\", {\n role: nodeRole.name,\n policyArn: \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\",\n});\n\n// The node group for running system pods (e.g. coredns, etc.)\nconst systemNodeGroup = new eks.ManagedNodeGroup(\"system-node-group\", {\n cluster: eksCluster,\n nodeRole: nodeRole,\n});\n\n// The EFA device plugin for exposing EFA interfaces as extended resources\nconst devicePlugin = new kubernetes.helm.v3.Release(\"device-plugin\", {\n version: \"0.5.7\",\n repositoryOpts: {\n repo: \"https://aws.github.io/eks-charts\",\n },\n chart: \"aws-efa-k8s-device-plugin\",\n namespace: \"kube-system\",\n atomic: true,\n values: {\n tolerations: [{\n key: \"efa-enabled\",\n operator: \"Exists\",\n effect: \"NoExecute\",\n }],\n },\n}, {\n provider: k8SProvider,\n});\n\n// The node group for running EFA enabled workloads\nconst efaNodeGroup = new eks.ManagedNodeGroup(\"efa-node-group\", {\n cluster: eksCluster,\n nodeRole: nodeRole,\n instanceTypes: [\"g6.8xlarge\"],\n gpu: true,\n scalingConfig: {\n minSize: 2,\n desiredSize: 2,\n maxSize: 4,\n },\n enableEfaSupport: true,\n placementGroupAvailabilityZone: \"us-west-2b\",\n\n // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them\n taints: [{\n key: \"efa-enabled\",\n value: \"true\",\n effect: \"NO_EXECUTE\",\n }],\n\n // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd\n // These are faster than the regular EBS volumes\n nodeadmExtraOptions: [{\n contentType: \"application/node.eks.aws\",\n content: `apiVersion: node.eks.aws/v1alpha1\nkind: NodeConfig\nspec:\n instance:\n localStorage:\n strategy: RAID0\n`,\n }],\n});\n\n```\n\n```python\nimport pulumi\nimport json\nimport pulumi_aws as aws\nimport pulumi_awsx as awsx\nimport pulumi_eks as eks\nimport pulumi_kubernetes as kubernetes\n\neks_vpc = awsx.ec2.Vpc(\"eks-vpc\",\n enable_dns_hostnames=True,\n cidr_block=\"10.0.0.0/16\")\neks_cluster = eks.Cluster(\"eks-cluster\",\n vpc_id=eks_vpc.vpc_id,\n authentication_mode=eks.AuthenticationMode.API,\n public_subnet_ids=eks_vpc.public_subnet_ids,\n private_subnet_ids=eks_vpc.private_subnet_ids,\n skip_default_node_group=True)\nk8_s_provider = kubernetes.Provider(\"k8sProvider\", kubeconfig=eks_cluster.kubeconfig)\nnode_role = aws.iam.Role(\"node-role\", assume_role_policy=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": \"sts:AssumeRole\",\n \"Effect\": \"Allow\",\n \"Sid\": \"\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\",\n },\n }],\n}))\nworker_node_policy = aws.iam.RolePolicyAttachment(\"worker-node-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\")\ncni_policy = aws.iam.RolePolicyAttachment(\"cni-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\")\nregistry_policy = 
aws.iam.RolePolicyAttachment(\"registry-policy\",\n role=node_role.name,\n policy_arn=\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\")\n\n# The node group for running system pods (e.g. coredns, etc.)\nsystem_node_group = eks.ManagedNodeGroup(\"system-node-group\",\n cluster=eks_cluster,\n node_role=node_role)\n\n# The EFA device plugin for exposing EFA interfaces as extended resources\ndevice_plugin = kubernetes.helm.v3.Release(\"device-plugin\",\n version=\"0.5.7\",\n repository_opts={\n \"repo\": \"https://aws.github.io/eks-charts\",\n },\n chart=\"aws-efa-k8s-device-plugin\",\n namespace=\"kube-system\",\n atomic=True,\n values={\n \"tolerations\": [{\n \"key\": \"efa-enabled\",\n \"operator\": \"Exists\",\n \"effect\": \"NoExecute\",\n }],\n },\n opts = pulumi.ResourceOptions(provider=k8_s_provider))\n\n# The node group for running EFA enabled workloads\nefa_node_group = eks.ManagedNodeGroup(\"efa-node-group\",\n cluster=eks_cluster,\n node_role=node_role,\n instance_types=[\"g6.8xlarge\"],\n gpu=True,\n scaling_config={\n \"min_size\": 2,\n \"desired_size\": 2,\n \"max_size\": 4,\n },\n enable_efa_support=True,\n placement_group_availability_zone=\"us-west-2b\",\n\n # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them\n taints=[{\n \"key\": \"efa-enabled\",\n \"value\": \"true\",\n \"effect\": \"NO_EXECUTE\",\n }],\n\n # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd\n # These are faster than the regular EBS volumes\n nodeadm_extra_options=[{\n \"content_type\": \"application/node.eks.aws\",\n \"content\": \"\"\"apiVersion: node.eks.aws/v1alpha1\nkind: NodeConfig\nspec:\n instance:\n localStorage:\n strategy: RAID0\n\"\"\",\n }])\n\n```\n\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\tawseks \"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/eks\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2\"\n\t\"github.com/pulumi/pulumi-eks/sdk/v3/go/eks\"\n\t\"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes\"\n\thelmv3 \"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\teksVpc, err := ec2.NewVpc(ctx, \"eks-vpc\", \u0026ec2.VpcArgs{\n\t\t\tEnableDnsHostnames: pulumi.Bool(true),\n\t\t\tCidrBlock: \"10.0.0.0/16\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teksCluster, err := eks.NewCluster(ctx, \"eks-cluster\", \u0026eks.ClusterArgs{\n\t\t\tVpcId: eksVpc.VpcId,\n\t\t\tAuthenticationMode: eks.AuthenticationModeApi,\n\t\t\tPublicSubnetIds: eksVpc.PublicSubnetIds,\n\t\t\tPrivateSubnetIds: eksVpc.PrivateSubnetIds,\n\t\t\tSkipDefaultNodeGroup: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk8SProvider, err := kubernetes.NewProvider(ctx, \"k8sProvider\", \u0026kubernetes.ProviderArgs{\n\t\t\tKubeconfig: eksCluster.Kubeconfig,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttmpJSON0, err := json.Marshal(map[string]interface{}{\n\t\t\t\"Version\": \"2012-10-17\",\n\t\t\t\"Statement\": []map[string]interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"Action\": \"sts:AssumeRole\",\n\t\t\t\t\t\"Effect\": \"Allow\",\n\t\t\t\t\t\"Sid\": \"\",\n\t\t\t\t\t\"Principal\": map[string]interface{}{\n\t\t\t\t\t\t\"Service\": \"ec2.amazonaws.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\tnodeRole, err := iam.NewRole(ctx, \"node-role\", \u0026iam.RoleArgs{\n\t\t\tAssumeRolePolicy: pulumi.String(json0),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"worker-node-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"cni-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"registry-policy\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tRole: nodeRole.Name,\n\t\t\tPolicyArn: pulumi.String(\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n // The node group for running system pods (e.g. coredns, etc.)\n\t\t_, err = eks.NewManagedNodeGroup(ctx, \"system-node-group\", \u0026eks.ManagedNodeGroupArgs{\n\t\t\tCluster: eksCluster,\n\t\t\tNodeRole: nodeRole,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n // The EFA device plugin for exposing EFA interfaces as extended resources\n\t\t_, err = helmv3.NewRelease(ctx, \"device-plugin\", \u0026helmv3.ReleaseArgs{\n\t\t\tVersion: pulumi.String(\"0.5.7\"),\n\t\t\tRepositoryOpts: \u0026helmv3.RepositoryOptsArgs{\n\t\t\t\tRepo: pulumi.String(\"https://aws.github.io/eks-charts\"),\n\t\t\t},\n\t\t\tChart: pulumi.String(\"aws-efa-k8s-device-plugin\"),\n\t\t\tNamespace: pulumi.String(\"kube-system\"),\n\t\t\tAtomic: pulumi.Bool(true),\n\t\t\tValues: pulumi.Map{\n\t\t\t\t\"tolerations\": pulumi.Any{\n\t\t\t\t\t[]map[string]interface{}{\n {\n \"key\": \"efa-enabled\",\n \"operator\": \"Exists\",\n \"effect\": \"NoExecute\",\n }\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.Provider(k8SProvider))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n // The node group for running EFA enabled workloads\n\t\t_, err = eks.NewManagedNodeGroup(ctx, \"efa-node-group\", \u0026eks.ManagedNodeGroupArgs{\n\t\t\tCluster: eksCluster,\n\t\t\tNodeRole: nodeRole,\n\t\t\tInstanceTypes: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"g6.8xlarge\"),\n\t\t\t},\n\t\t\tGpu: pulumi.Bool(true),\n\t\t\tScalingConfig: \u0026eks.NodeGroupScalingConfigArgs{\n\t\t\t\tMinSize: pulumi.Int(2),\n\t\t\t\tDesiredSize: pulumi.Int(2),\n\t\t\t\tMaxSize: pulumi.Int(4),\n\t\t\t},\n\t\t\tEnableEfaSupport: true,\n\t\t\tPlacementGroupAvailabilityZone: pulumi.String(\"us-west-2b\"),\n\n // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them\n\t\t\tTaints: eks.NodeGroupTaintArray{\n\t\t\t\t\u0026eks.NodeGroupTaintArgs{\n\t\t\t\t\tKey: pulumi.String(\"efa-enabled\"),\n\t\t\t\t\tValue: pulumi.String(\"true\"),\n\t\t\t\t\tEffect: pulumi.String(\"NO_EXECUTE\"),\n\t\t\t\t},\n\t\t\t},\n\n // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd\n // These are faster than the regular EBS volumes\n\t\t\tNodeadmExtraOptions: eks.NodeadmOptionsArray{\n\t\t\t\t\u0026eks.NodeadmOptionsArgs{\n\t\t\t\t\tContentType: pulumi.String(\"application/node.eks.aws\"),\n\t\t\t\t\tContent: pulumi.String(`apiVersion: node.eks.aws/v1alpha1\nkind: NodeConfig\nspec:\n instance:\n localStorage:\n strategy: 
RAID0\n`),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Awsx = Pulumi.Awsx;\nusing Eks = Pulumi.Eks;\nusing Kubernetes = Pulumi.Kubernetes;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var eksVpc = new Awsx.Ec2.Vpc(\"eks-vpc\", new()\n {\n EnableDnsHostnames = true,\n CidrBlock = \"10.0.0.0/16\",\n });\n\n var eksCluster = new Eks.Cluster(\"eks-cluster\", new()\n {\n VpcId = eksVpc.VpcId,\n AuthenticationMode = Eks.AuthenticationMode.Api,\n PublicSubnetIds = eksVpc.PublicSubnetIds,\n PrivateSubnetIds = eksVpc.PrivateSubnetIds,\n SkipDefaultNodeGroup = true,\n });\n\n var k8SProvider = new Kubernetes.Provider.Provider(\"k8sProvider\", new()\n {\n KubeConfig = eksCluster.Kubeconfig,\n });\n\n var nodeRole = new Aws.Iam.Role(\"node-role\", new()\n {\n AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"Version\"] = \"2012-10-17\",\n [\"Statement\"] = new[]\n {\n new Dictionary\u003cstring, object?\u003e\n {\n [\"Action\"] = \"sts:AssumeRole\",\n [\"Effect\"] = \"Allow\",\n [\"Sid\"] = \"\",\n [\"Principal\"] = new Dictionary\u003cstring, object?\u003e\n {\n [\"Service\"] = \"ec2.amazonaws.com\",\n },\n },\n },\n }),\n });\n\n var workerNodePolicy = new Aws.Iam.RolePolicyAttachment(\"worker-node-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy\",\n });\n\n var cniPolicy = new Aws.Iam.RolePolicyAttachment(\"cni-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy\",\n });\n\n var registryPolicy = new Aws.Iam.RolePolicyAttachment(\"registry-policy\", new()\n {\n Role = nodeRole.Name,\n PolicyArn = \"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\",\n });\n\n // The node group for running system pods (e.g. 
coredns, etc.)\n var systemNodeGroup = new Eks.ManagedNodeGroup(\"system-node-group\", new()\n {\n Cluster = eksCluster,\n NodeRole = nodeRole,\n });\n\n // The EFA device plugin for exposing EFA interfaces as extended resources\n var devicePlugin = new Kubernetes.Helm.V3.Release(\"device-plugin\", new()\n {\n Version = \"0.5.7\",\n RepositoryOpts = new Kubernetes.Types.Inputs.Helm.V3.RepositoryOptsArgs\n {\n Repo = \"https://aws.github.io/eks-charts\",\n },\n Chart = \"aws-efa-k8s-device-plugin\",\n Namespace = \"kube-system\",\n Atomic = true,\n Values = \n {\n { \"tolerations\", new[]\n {\n \n {\n { \"key\", \"efa-enabled\" },\n { \"operator\", \"Exists\" },\n { \"effect\", \"NoExecute\" },\n },\n } },\n },\n }, new CustomResourceOptions\n {\n Provider = k8SProvider,\n });\n\n // The node group for running EFA enabled workloads\n var efaNodeGroup = new Eks.ManagedNodeGroup(\"efa-node-group\", new()\n {\n Cluster = eksCluster,\n NodeRole = nodeRole,\n InstanceTypes = new[]\n {\n \"g6.8xlarge\",\n },\n Gpu = true,\n ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs\n {\n MinSize = 2,\n DesiredSize = 2,\n MaxSize = 4,\n },\n EnableEfaSupport = true,\n PlacementGroupAvailabilityZone = \"us-west-2b\",\n\n // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them\n Taints = new[]\n {\n new Aws.Eks.Inputs.NodeGroupTaintArgs\n {\n Key = \"efa-enabled\",\n Value = \"true\",\n Effect = \"NO_EXECUTE\",\n },\n },\n\n // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd\n NodeadmExtraOptions = new[]\n {\n new Eks.Inputs.NodeadmOptionsArgs\n {\n ContentType = \"application/node.eks.aws\",\n Content = @\"apiVersion: node.eks.aws/v1alpha1\nkind: NodeConfig\nspec:\n instance:\n localStorage:\n strategy: RAID0\n\",\n },\n },\n });\n\n});\n\n```\n\n{{% /example %}}\n{{% /examples %}}\n", "properties": { "nodeGroup": { "$ref": "/aws/v6.66.1/schema.json#/resources/aws:eks%2FnodeGroup:NodeGroup", diff --git a/sdk/dotnet/ManagedNodeGroup.cs b/sdk/dotnet/ManagedNodeGroup.cs index 9332f325c..990a2d182 100644 --- a/sdk/dotnet/ManagedNodeGroup.cs +++ b/sdk/dotnet/ManagedNodeGroup.cs @@ -10,10 +10,255 @@ namespace Pulumi.Eks { /// - /// ManagedNodeGroup is a component that wraps creating an AWS managed node group. + /// Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). /// - /// See for more details: - /// https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + /// ## Example Usage + /// ### Basic Managed Node Group + /// This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. 
+ /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using System.Text.Json; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Awsx = Pulumi.Awsx; + /// using Eks = Pulumi.Eks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var eksVpc = new Awsx.Ec2.Vpc("eks-vpc", new() + /// { + /// EnableDnsHostnames = true, + /// CidrBlock = "10.0.0.0/16", + /// }); + /// + /// var eksCluster = new Eks.Cluster("eks-cluster", new() + /// { + /// VpcId = eksVpc.VpcId, + /// AuthenticationMode = Eks.AuthenticationMode.Api, + /// PublicSubnetIds = eksVpc.PublicSubnetIds, + /// PrivateSubnetIds = eksVpc.PrivateSubnetIds, + /// SkipDefaultNodeGroup = true, + /// }); + /// + /// var nodeRole = new Aws.Iam.Role("node-role", new() + /// { + /// AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?> + /// { + /// ["Version"] = "2012-10-17", + /// ["Statement"] = new[] + /// { + /// new Dictionary<string, object?> + /// { + /// ["Action"] = "sts:AssumeRole", + /// ["Effect"] = "Allow", + /// ["Sid"] = "", + /// ["Principal"] = new Dictionary<string, object?> + /// { + /// ["Service"] = "ec2.amazonaws.com", + /// }, + /// }, + /// }, + /// }), + /// }); + /// + /// var workerNodePolicy = new Aws.Iam.RolePolicyAttachment("worker-node-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + /// }); + /// + /// var cniPolicy = new Aws.Iam.RolePolicyAttachment("cni-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + /// }); + /// + /// var registryPolicy = new Aws.Iam.RolePolicyAttachment("registry-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + /// }); + /// + /// var nodeGroup = new Eks.ManagedNodeGroup("node-group", new() + /// { + /// Cluster = eksCluster, + /// NodeRole = nodeRole, + /// }); + /// + /// return new Dictionary<string, object?>{}; + /// }); + /// + /// ``` + /// ### Enabling EFA Support + /// + /// Enabling EFA support for a node group will do the following: + /// - All EFA interfaces supported by the instance will be exposed on the launch template used by the node group + /// - A `clustered` placement group will be created and passed to the launch template + /// - Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type + /// + /// The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. + /// + /// You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. + /// Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). 
+ /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using System.Text.Json; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Awsx = Pulumi.Awsx; + /// using Eks = Pulumi.Eks; + /// using Kubernetes = Pulumi.Kubernetes; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var eksVpc = new Awsx.Ec2.Vpc("eks-vpc", new() + /// { + /// EnableDnsHostnames = true, + /// CidrBlock = "10.0.0.0/16", + /// }); + /// + /// var eksCluster = new Eks.Cluster("eks-cluster", new() + /// { + /// VpcId = eksVpc.VpcId, + /// AuthenticationMode = Eks.AuthenticationMode.Api, + /// PublicSubnetIds = eksVpc.PublicSubnetIds, + /// PrivateSubnetIds = eksVpc.PrivateSubnetIds, + /// SkipDefaultNodeGroup = true, + /// }); + /// + /// var k8SProvider = new Kubernetes.Provider.Provider("k8sProvider", new() + /// { + /// KubeConfig = eksCluster.Kubeconfig, + /// }); + /// + /// var nodeRole = new Aws.Iam.Role("node-role", new() + /// { + /// AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?> + /// { + /// ["Version"] = "2012-10-17", + /// ["Statement"] = new[] + /// { + /// new Dictionary<string, object?> + /// { + /// ["Action"] = "sts:AssumeRole", + /// ["Effect"] = "Allow", + /// ["Sid"] = "", + /// ["Principal"] = new Dictionary<string, object?> + /// { + /// ["Service"] = "ec2.amazonaws.com", + /// }, + /// }, + /// }, + /// }), + /// }); + /// + /// var workerNodePolicy = new Aws.Iam.RolePolicyAttachment("worker-node-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + /// }); + /// + /// var cniPolicy = new Aws.Iam.RolePolicyAttachment("cni-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + /// }); + /// + /// var registryPolicy = new Aws.Iam.RolePolicyAttachment("registry-policy", new() + /// { + /// Role = nodeRole.Name, + /// PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + /// }); + /// + /// // The node group for running system pods (e.g. coredns, etc.) 
+ /// var systemNodeGroup = new Eks.ManagedNodeGroup("system-node-group", new() + /// { + /// Cluster = eksCluster, + /// NodeRole = nodeRole, + /// }); + /// + /// // The EFA device plugin for exposing EFA interfaces as extended resources + /// var devicePlugin = new Kubernetes.Helm.V3.Release("device-plugin", new() + /// { + /// Version = "0.5.7", + /// RepositoryOpts = new Kubernetes.Types.Inputs.Helm.V3.RepositoryOptsArgs + /// { + /// Repo = "https://aws.github.io/eks-charts", + /// }, + /// Chart = "aws-efa-k8s-device-plugin", + /// Namespace = "kube-system", + /// Atomic = true, + /// Values = + /// { + /// { "tolerations", new[] + /// { + /// + /// { + /// { "key", "efa-enabled" }, + /// { "operator", "Exists" }, + /// { "effect", "NoExecute" }, + /// }, + /// } }, + /// }, + /// }, new CustomResourceOptions + /// { + /// Provider = k8SProvider, + /// }); + /// + /// // The node group for running EFA enabled workloads + /// var efaNodeGroup = new Eks.ManagedNodeGroup("efa-node-group", new() + /// { + /// Cluster = eksCluster, + /// NodeRole = nodeRole, + /// InstanceTypes = new[] + /// { + /// "g6.8xlarge", + /// }, + /// Gpu = true, + /// ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs + /// { + /// MinSize = 2, + /// DesiredSize = 2, + /// MaxSize = 4, + /// }, + /// EnableEfaSupport = true, + /// PlacementGroupAvailabilityZone = "us-west-2b", + /// + /// // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + /// Taints = new[] + /// { + /// new Aws.Eks.Inputs.NodeGroupTaintArgs + /// { + /// Key = "efa-enabled", + /// Value = "true", + /// Effect = "NO_EXECUTE", + /// }, + /// }, + /// + /// // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + /// NodeadmExtraOptions = new[] + /// { + /// new Eks.Inputs.NodeadmOptionsArgs + /// { + /// ContentType = "application/node.eks.aws", + /// Content = @"apiVersion: node.eks.aws/v1alpha1 + /// kind: NodeConfig + /// spec: + /// instance: + /// localStorage: + /// strategy: RAID0 + /// ", + /// }, + /// }, + /// }); + /// + /// }); + /// + /// ``` /// [EksResourceType("eks:index:ManagedNodeGroup")] public partial class ManagedNodeGroup : global::Pulumi.ComponentResource diff --git a/sdk/go/eks/managedNodeGroup.go b/sdk/go/eks/managedNodeGroup.go index aa137d92c..31239df9b 100644 --- a/sdk/go/eks/managedNodeGroup.go +++ b/sdk/go/eks/managedNodeGroup.go @@ -14,10 +14,286 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) -// ManagedNodeGroup is a component that wraps creating an AWS managed node group. +// Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). // -// See for more details: -// https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html +// ## Example Usage +// ### Basic Managed Node Group +// This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. 
+// +// ```go +// package main +// +// import ( +// +// "encoding/json" +// +// "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam" +// "github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2" +// "github.com/pulumi/pulumi-eks/sdk/v3/go/eks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// eksVpc, err := ec2.NewVpc(ctx, "eks-vpc", &ec2.VpcArgs{ +// EnableDnsHostnames: pulumi.Bool(true), +// CidrBlock: "10.0.0.0/16", +// }) +// if err != nil { +// return err +// } +// eksCluster, err := eks.NewCluster(ctx, "eks-cluster", &eks.ClusterArgs{ +// VpcId: eksVpc.VpcId, +// AuthenticationMode: eks.AuthenticationModeApi, +// PublicSubnetIds: eksVpc.PublicSubnetIds, +// PrivateSubnetIds: eksVpc.PrivateSubnetIds, +// SkipDefaultNodeGroup: true, +// }) +// if err != nil { +// return err +// } +// tmpJSON0, err := json.Marshal(map[string]interface{}{ +// "Version": "2012-10-17", +// "Statement": []map[string]interface{}{ +// map[string]interface{}{ +// "Action": "sts:AssumeRole", +// "Effect": "Allow", +// "Sid": "", +// "Principal": map[string]interface{}{ +// "Service": "ec2.amazonaws.com", +// }, +// }, +// }, +// }) +// if err != nil { +// return err +// } +// json0 := string(tmpJSON0) +// nodeRole, err := iam.NewRole(ctx, "node-role", &iam.RoleArgs{ +// AssumeRolePolicy: pulumi.String(json0), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "worker-node-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "cni-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "registry-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"), +// }) +// if err != nil { +// return err +// } +// _, err = eks.NewManagedNodeGroup(ctx, "node-group", &eks.ManagedNodeGroupArgs{ +// Cluster: eksCluster, +// NodeRole: nodeRole, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Enabling EFA Support +// +// Enabling EFA support for a node group will do the following: +// - All EFA interfaces supported by the instance will be exposed on the launch template used by the node group +// - A `clustered` placement group will be created and passed to the launch template +// - Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type +// +// The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. +// +// You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. +// Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). 
+// +// ```go +// package main +// +// import ( +// +// "encoding/json" +// +// awseks "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/eks" +// "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam" +// "github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ec2" +// "github.com/pulumi/pulumi-eks/sdk/v3/go/eks" +// "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" +// helmv3 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// eksVpc, err := ec2.NewVpc(ctx, "eks-vpc", &ec2.VpcArgs{ +// EnableDnsHostnames: pulumi.Bool(true), +// CidrBlock: "10.0.0.0/16", +// }) +// if err != nil { +// return err +// } +// eksCluster, err := eks.NewCluster(ctx, "eks-cluster", &eks.ClusterArgs{ +// VpcId: eksVpc.VpcId, +// AuthenticationMode: eks.AuthenticationModeApi, +// PublicSubnetIds: eksVpc.PublicSubnetIds, +// PrivateSubnetIds: eksVpc.PrivateSubnetIds, +// SkipDefaultNodeGroup: true, +// }) +// if err != nil { +// return err +// } +// k8SProvider, err := kubernetes.NewProvider(ctx, "k8sProvider", &kubernetes.ProviderArgs{ +// Kubeconfig: eksCluster.Kubeconfig, +// }) +// if err != nil { +// return err +// } +// tmpJSON0, err := json.Marshal(map[string]interface{}{ +// "Version": "2012-10-17", +// "Statement": []map[string]interface{}{ +// map[string]interface{}{ +// "Action": "sts:AssumeRole", +// "Effect": "Allow", +// "Sid": "", +// "Principal": map[string]interface{}{ +// "Service": "ec2.amazonaws.com", +// }, +// }, +// }, +// }) +// if err != nil { +// return err +// } +// json0 := string(tmpJSON0) +// nodeRole, err := iam.NewRole(ctx, "node-role", &iam.RoleArgs{ +// AssumeRolePolicy: pulumi.String(json0), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "worker-node-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "cni-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"), +// }) +// if err != nil { +// return err +// } +// _, err = iam.NewRolePolicyAttachment(ctx, "registry-policy", &iam.RolePolicyAttachmentArgs{ +// Role: nodeRole.Name, +// PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"), +// }) +// if err != nil { +// return err +// } +// +// // The node group for running system pods (e.g. coredns, etc.) 
+// _, err = eks.NewManagedNodeGroup(ctx, "system-node-group", &eks.ManagedNodeGroupArgs{ +// Cluster: eksCluster, +// NodeRole: nodeRole, +// }) +// if err != nil { +// return err +// } +// +// // The EFA device plugin for exposing EFA interfaces as extended resources +// _, err = helmv3.NewRelease(ctx, "device-plugin", &helmv3.ReleaseArgs{ +// Version: pulumi.String("0.5.7"), +// RepositoryOpts: &helmv3.RepositoryOptsArgs{ +// Repo: pulumi.String("https://aws.github.io/eks-charts"), +// }, +// Chart: pulumi.String("aws-efa-k8s-device-plugin"), +// Namespace: pulumi.String("kube-system"), +// Atomic: pulumi.Bool(true), +// Values: pulumi.Map{ +// "tolerations": pulumi.Any{ +// []map[string]interface{}{ +// { +// "key": "efa-enabled", +// "operator": "Exists", +// "effect": "NoExecute", +// } +// }, +// }, +// }, +// }, pulumi.Provider(k8SProvider)) +// if err != nil { +// return err +// } +// +// // The node group for running EFA enabled workloads +// _, err = eks.NewManagedNodeGroup(ctx, "efa-node-group", &eks.ManagedNodeGroupArgs{ +// Cluster: eksCluster, +// NodeRole: nodeRole, +// InstanceTypes: pulumi.StringArray{ +// pulumi.String("g6.8xlarge"), +// }, +// Gpu: pulumi.Bool(true), +// ScalingConfig: &eks.NodeGroupScalingConfigArgs{ +// MinSize: pulumi.Int(2), +// DesiredSize: pulumi.Int(2), +// MaxSize: pulumi.Int(4), +// }, +// EnableEfaSupport: true, +// PlacementGroupAvailabilityZone: pulumi.String("us-west-2b"), +// +// // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them +// Taints: eks.NodeGroupTaintArray{ +// &eks.NodeGroupTaintArgs{ +// Key: pulumi.String("efa-enabled"), +// Value: pulumi.String("true"), +// Effect: pulumi.String("NO_EXECUTE"), +// }, +// }, +// +// // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd +// // These are faster than the regular EBS volumes +// NodeadmExtraOptions: eks.NodeadmOptionsArray{ +// &eks.NodeadmOptionsArgs{ +// ContentType: pulumi.String("application/node.eks.aws"), +// Content: pulumi.String(`apiVersion: node.eks.aws/v1alpha1 +// +// kind: NodeConfig +// spec: +// +// instance: +// localStorage: +// strategy: RAID0 +// +// `), +// +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` type ManagedNodeGroup struct { pulumi.ResourceState diff --git a/sdk/java/src/main/java/com/pulumi/eks/ManagedNodeGroup.java b/sdk/java/src/main/java/com/pulumi/eks/ManagedNodeGroup.java index 9a5885e95..cffb6518b 100644 --- a/sdk/java/src/main/java/com/pulumi/eks/ManagedNodeGroup.java +++ b/sdk/java/src/main/java/com/pulumi/eks/ManagedNodeGroup.java @@ -14,10 +14,94 @@ import javax.annotation.Nullable; /** - * ManagedNodeGroup is a component that wraps creating an AWS managed node group. + * Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). * - * See for more details: - * https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + * ## Example Usage + * ### Basic Managed Node Group + * This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. + * + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.awsx.ec2.Vpc;
+ * import com.pulumi.awsx.ec2.VpcArgs;
+ * import com.pulumi.eks.Cluster;
+ * import com.pulumi.eks.ClusterArgs;
+ * import com.pulumi.aws.iam.Role;
+ * import com.pulumi.aws.iam.RoleArgs;
+ * import com.pulumi.aws.iam.RolePolicyAttachment;
+ * import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
+ * import com.pulumi.eks.ManagedNodeGroup;
+ * import com.pulumi.eks.ManagedNodeGroupArgs;
+ * import static com.pulumi.codegen.internal.Serialization.*;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
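+ *         // A VPC with both public and private subnets for the cluster and its nodes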
+ *         var eksVpc = new Vpc("eksVpc", VpcArgs.builder()
+ *             .enableDnsHostnames(true)
+ *             .cidrBlock("10.0.0.0/16")
+ *             .build());
+ * 
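+ *         // An EKS cluster that skips the default node group so node groups can be managed explicitly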
+ *         var eksCluster = new Cluster("eksCluster", ClusterArgs.builder()
+ *             .vpcId(eksVpc.vpcId())
+ *             .authenticationMode(AuthenticationMode.Api)
+ *             .publicSubnetIds(eksVpc.publicSubnetIds())
+ *             .privateSubnetIds(eksVpc.privateSubnetIds())
+ *             .skipDefaultNodeGroup(true)
+ *             .build());
+ * 
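+ *         // The IAM role assumed by the worker node EC2 instances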
+ *         var nodeRole = new Role("nodeRole", RoleArgs.builder()
+ *             .assumeRolePolicy(serializeJson(
+ *                 jsonObject(
+ *                     jsonProperty("Version", "2012-10-17"),
+ *                     jsonProperty("Statement", jsonArray(jsonObject(
+ *                         jsonProperty("Action", "sts:AssumeRole"),
+ *                         jsonProperty("Effect", "Allow"),
+ *                         jsonProperty("Sid", ""),
+ *                         jsonProperty("Principal", jsonObject(
+ *                             jsonProperty("Service", "ec2.amazonaws.com")
+ *                         ))
+ *                     )))
+ *                 )))
+ *             .build());
+ * 
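+ *         // Attach the AWS managed policies that EKS worker nodes need to join the cluster,
+ *         // configure pod networking (CNI), and pull images from ECR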
+ *         var workerNodePolicy = new RolePolicyAttachment("workerNodePolicy", RolePolicyAttachmentArgs.builder()
+ *             .role(nodeRole.name())
+ *             .policyArn("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
+ *             .build());
+ * 
+ *         var cniPolicy = new RolePolicyAttachment("cniPolicy", RolePolicyAttachmentArgs.builder()
+ *             .role(nodeRole.name())
+ *             .policyArn("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
+ *             .build());
+ * 
+ *         var registryPolicy = new RolePolicyAttachment("registryPolicy", RolePolicyAttachmentArgs.builder()
+ *             .role(nodeRole.name())
+ *             .policyArn("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
+ *             .build());
+ * 
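+ *         // The managed node group, relying on the component defaults described above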
+ *         var nodeGroup = new ManagedNodeGroup("nodeGroup", ManagedNodeGroupArgs.builder()
+ *             .cluster(eksCluster)
+ *             .nodeRole(nodeRole)
+ *             .build());
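+ * 
+ *         // A sketch only (not part of the generated example): the defaults above can be
+ *         // overridden via the instanceTypes and scalingConfig inputs. The Java builder
+ *         // method names and the com.pulumi.aws.eks.inputs.NodeGroupScalingConfigArgs type
+ *         // are assumed to mirror the inputs shown for the other languages.
+ *         var customNodeGroup = new ManagedNodeGroup("custom-node-group", ManagedNodeGroupArgs.builder()
+ *             .cluster(eksCluster)
+ *             .nodeRole(nodeRole)
+ *             .instanceTypes("t3.large")
+ *             .scalingConfig(com.pulumi.aws.eks.inputs.NodeGroupScalingConfigArgs.builder()
+ *                 .minSize(2)
+ *                 .desiredSize(3)
+ *                 .maxSize(5)
+ *                 .build())
+ *             .build());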
+ *     }
+ * }
+ * }
+ * 
* */ @ResourceType(type="eks:index:ManagedNodeGroup") diff --git a/sdk/nodejs/managedNodeGroup.ts b/sdk/nodejs/managedNodeGroup.ts index 6e88d007c..272b6fcdc 100644 --- a/sdk/nodejs/managedNodeGroup.ts +++ b/sdk/nodejs/managedNodeGroup.ts @@ -13,10 +13,175 @@ import * as pulumiKubernetes from "@pulumi/kubernetes"; import {Cluster, VpcCniAddon} from "./index"; /** - * ManagedNodeGroup is a component that wraps creating an AWS managed node group. + * Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). * - * See for more details: - * https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + * ## Example Usage + * ### Basic Managed Node Group + * This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as aws from "@pulumi/aws"; + * import * as awsx from "@pulumi/awsx"; + * import * as eks from "@pulumi/eks"; + * + * const eksVpc = new awsx.ec2.Vpc("eks-vpc", { + * enableDnsHostnames: true, + * cidrBlock: "10.0.0.0/16", + * }); + * const eksCluster = new eks.Cluster("eks-cluster", { + * vpcId: eksVpc.vpcId, + * authenticationMode: eks.AuthenticationMode.Api, + * publicSubnetIds: eksVpc.publicSubnetIds, + * privateSubnetIds: eksVpc.privateSubnetIds, + * skipDefaultNodeGroup: true, + * }); + * const nodeRole = new aws.iam.Role("node-role", {assumeRolePolicy: JSON.stringify({ + * Version: "2012-10-17", + * Statement: [{ + * Action: "sts:AssumeRole", + * Effect: "Allow", + * Sid: "", + * Principal: { + * Service: "ec2.amazonaws.com", + * }, + * }], + * })}); + * const workerNodePolicy = new aws.iam.RolePolicyAttachment("worker-node-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + * }); + * const cniPolicy = new aws.iam.RolePolicyAttachment("cni-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + * }); + * const registryPolicy = new aws.iam.RolePolicyAttachment("registry-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + * }); + * const nodeGroup = new eks.ManagedNodeGroup("node-group", { + * cluster: eksCluster, + * nodeRole: nodeRole, + * }); + * + * ``` + * ### Enabling EFA Support + * + * Enabling EFA support for a node group will do the following: + * - All EFA interfaces supported by the instance will be exposed on the launch template used by the node group + * - A `clustered` placement group will be created and passed to the launch template + * - Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type + * + * The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. 
+ * + * You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. + * Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as aws from "@pulumi/aws"; + * import * as awsx from "@pulumi/awsx"; + * import * as eks from "@pulumi/eks"; + * import * as kubernetes from "@pulumi/kubernetes"; + * + * const eksVpc = new awsx.ec2.Vpc("eks-vpc", { + * enableDnsHostnames: true, + * cidrBlock: "10.0.0.0/16", + * }); + * const eksCluster = new eks.Cluster("eks-cluster", { + * vpcId: eksVpc.vpcId, + * authenticationMode: eks.AuthenticationMode.Api, + * publicSubnetIds: eksVpc.publicSubnetIds, + * privateSubnetIds: eksVpc.privateSubnetIds, + * skipDefaultNodeGroup: true, + * }); + * const k8SProvider = new kubernetes.Provider("k8sProvider", {kubeconfig: eksCluster.kubeconfig}); + * const nodeRole = new aws.iam.Role("node-role", {assumeRolePolicy: JSON.stringify({ + * Version: "2012-10-17", + * Statement: [{ + * Action: "sts:AssumeRole", + * Effect: "Allow", + * Sid: "", + * Principal: { + * Service: "ec2.amazonaws.com", + * }, + * }], + * })}); + * const workerNodePolicy = new aws.iam.RolePolicyAttachment("worker-node-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + * }); + * const cniPolicy = new aws.iam.RolePolicyAttachment("cni-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + * }); + * const registryPolicy = new aws.iam.RolePolicyAttachment("registry-policy", { + * role: nodeRole.name, + * policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + * }); + * + * // The node group for running system pods (e.g. coredns, etc.) 
+ * const systemNodeGroup = new eks.ManagedNodeGroup("system-node-group", { + * cluster: eksCluster, + * nodeRole: nodeRole, + * }); + * + * // The EFA device plugin for exposing EFA interfaces as extended resources + * const devicePlugin = new kubernetes.helm.v3.Release("device-plugin", { + * version: "0.5.7", + * repositoryOpts: { + * repo: "https://aws.github.io/eks-charts", + * }, + * chart: "aws-efa-k8s-device-plugin", + * namespace: "kube-system", + * atomic: true, + * values: { + * tolerations: [{ + * key: "efa-enabled", + * operator: "Exists", + * effect: "NoExecute", + * }], + * }, + * }, { + * provider: k8SProvider, + * }); + * + * // The node group for running EFA enabled workloads + * const efaNodeGroup = new eks.ManagedNodeGroup("efa-node-group", { + * cluster: eksCluster, + * nodeRole: nodeRole, + * instanceTypes: ["g6.8xlarge"], + * gpu: true, + * scalingConfig: { + * minSize: 2, + * desiredSize: 2, + * maxSize: 4, + * }, + * enableEfaSupport: true, + * placementGroupAvailabilityZone: "us-west-2b", + * + * // Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + * taints: [{ + * key: "efa-enabled", + * value: "true", + * effect: "NO_EXECUTE", + * }], + * + * // Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + * // These are faster than the regular EBS volumes + * nodeadmExtraOptions: [{ + * contentType: "application/node.eks.aws", + * content: `apiVersion: node.eks.aws/v1alpha1 + * kind: NodeConfig + * spec: + * instance: + * localStorage: + * strategy: RAID0 + * `, + * }], + * }); + * + * ``` */ export class ManagedNodeGroup extends pulumi.ComponentResource { /** @internal */ diff --git a/sdk/python/pulumi_eks/managed_node_group.py b/sdk/python/pulumi_eks/managed_node_group.py index 2a18e3c9b..d133c041f 100644 --- a/sdk/python/pulumi_eks/managed_node_group.py +++ b/sdk/python/pulumi_eks/managed_node_group.py @@ -702,10 +702,162 @@ def __init__(__self__, version: Optional[pulumi.Input[str]] = None, __props__=None): """ - ManagedNodeGroup is a component that wraps creating an AWS managed node group. - - See for more details: - https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). + + ## Example Usage + ### Basic Managed Node Group + This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. 
+ + ```python + import pulumi + import json + import pulumi_aws as aws + import pulumi_awsx as awsx + import pulumi_eks as eks + + eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") + eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) + node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], + })) + worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") + cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") + registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + node_group = eks.ManagedNodeGroup("node-group", + cluster=eks_cluster, + node_role=node_role) + + ``` + ### Enabling EFA Support + + Enabling EFA support for a node group will do the following: + - All EFA interfaces supported by the instance will be exposed on the launch template used by the node group + - A `clustered` placement group will be created and passed to the launch template + - Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type + + The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. + + You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. + Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). 
+ + ```python + import pulumi + import json + import pulumi_aws as aws + import pulumi_awsx as awsx + import pulumi_eks as eks + import pulumi_kubernetes as kubernetes + + eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") + eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) + k8_s_provider = kubernetes.Provider("k8sProvider", kubeconfig=eks_cluster.kubeconfig) + node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], + })) + worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") + cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") + registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + + # The node group for running system pods (e.g. coredns, etc.) + system_node_group = eks.ManagedNodeGroup("system-node-group", + cluster=eks_cluster, + node_role=node_role) + + # The EFA device plugin for exposing EFA interfaces as extended resources + device_plugin = kubernetes.helm.v3.Release("device-plugin", + version="0.5.7", + repository_opts={ + "repo": "https://aws.github.io/eks-charts", + }, + chart="aws-efa-k8s-device-plugin", + namespace="kube-system", + atomic=True, + values={ + "tolerations": [{ + "key": "efa-enabled", + "operator": "Exists", + "effect": "NoExecute", + }], + }, + opts = pulumi.ResourceOptions(provider=k8_s_provider)) + + # The node group for running EFA enabled workloads + efa_node_group = eks.ManagedNodeGroup("efa-node-group", + cluster=eks_cluster, + node_role=node_role, + instance_types=["g6.8xlarge"], + gpu=True, + scaling_config={ + "min_size": 2, + "desired_size": 2, + "max_size": 4, + }, + enable_efa_support=True, + placement_group_availability_zone="us-west-2b", + + # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + taints=[{ + "key": "efa-enabled", + "value": "true", + "effect": "NO_EXECUTE", + }], + + # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + # These are faster than the regular EBS volumes + nodeadm_extra_options=[{ + "content_type": "application/node.eks.aws", + "content": \"\"\"apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + instance: + localStorage: + strategy: RAID0 + \"\"\", + }]) + + ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. @@ -811,10 +963,162 @@ def __init__(__self__, args: ManagedNodeGroupArgs, opts: Optional[pulumi.ResourceOptions] = None): """ - ManagedNodeGroup is a component that wraps creating an AWS managed node group. - - See for more details: - https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. 
Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). + + ## Example Usage + ### Basic Managed Node Group + This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured. + + ```python + import pulumi + import json + import pulumi_aws as aws + import pulumi_awsx as awsx + import pulumi_eks as eks + + eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") + eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) + node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], + })) + worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") + cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") + registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + node_group = eks.ManagedNodeGroup("node-group", + cluster=eks_cluster, + node_role=node_role) + + ``` + ### Enabling EFA Support + + Enabling EFA support for a node group will do the following: + - All EFA interfaces supported by the instance will be exposed on the launch template used by the node group + - A `clustered` placement group will be created and passed to the launch template + - Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type + + The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI. + + You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers. + Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric). 
+ + ```python + import pulumi + import json + import pulumi_aws as aws + import pulumi_awsx as awsx + import pulumi_eks as eks + import pulumi_kubernetes as kubernetes + + eks_vpc = awsx.ec2.Vpc("eks-vpc", + enable_dns_hostnames=True, + cidr_block="10.0.0.0/16") + eks_cluster = eks.Cluster("eks-cluster", + vpc_id=eks_vpc.vpc_id, + authentication_mode=eks.AuthenticationMode.API, + public_subnet_ids=eks_vpc.public_subnet_ids, + private_subnet_ids=eks_vpc.private_subnet_ids, + skip_default_node_group=True) + k8_s_provider = kubernetes.Provider("k8sProvider", kubeconfig=eks_cluster.kubeconfig) + node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Sid": "", + "Principal": { + "Service": "ec2.amazonaws.com", + }, + }], + })) + worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy") + cni_policy = aws.iam.RolePolicyAttachment("cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy") + registry_policy = aws.iam.RolePolicyAttachment("registry-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly") + + # The node group for running system pods (e.g. coredns, etc.) + system_node_group = eks.ManagedNodeGroup("system-node-group", + cluster=eks_cluster, + node_role=node_role) + + # The EFA device plugin for exposing EFA interfaces as extended resources + device_plugin = kubernetes.helm.v3.Release("device-plugin", + version="0.5.7", + repository_opts={ + "repo": "https://aws.github.io/eks-charts", + }, + chart="aws-efa-k8s-device-plugin", + namespace="kube-system", + atomic=True, + values={ + "tolerations": [{ + "key": "efa-enabled", + "operator": "Exists", + "effect": "NoExecute", + }], + }, + opts = pulumi.ResourceOptions(provider=k8_s_provider)) + + # The node group for running EFA enabled workloads + efa_node_group = eks.ManagedNodeGroup("efa-node-group", + cluster=eks_cluster, + node_role=node_role, + instance_types=["g6.8xlarge"], + gpu=True, + scaling_config={ + "min_size": 2, + "desired_size": 2, + "max_size": 4, + }, + enable_efa_support=True, + placement_group_availability_zone="us-west-2b", + + # Taint the nodes so that only pods with the efa-enabled label can be scheduled on them + taints=[{ + "key": "efa-enabled", + "value": "true", + "effect": "NO_EXECUTE", + }], + + # Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd + # These are faster than the regular EBS volumes + nodeadm_extra_options=[{ + "content_type": "application/node.eks.aws", + "content": \"\"\"apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + instance: + localStorage: + strategy: RAID0 + \"\"\", + }]) + + ``` :param str resource_name: The name of the resource. :param ManagedNodeGroupArgs args: The arguments to use to populate this resource's properties.