[go: up one dir, main page]

Create AWS EKS Node Groups

The aws:eks/nodeGroup:NodeGroup resource, part of the Pulumi AWS provider, provisions and manages an EKS-compatible Auto Scaling Group of Kubernetes worker nodes. This guide focuses on three capabilities: IAM role configuration, subnet placement, and scaling and update behavior.

Node groups depend on an existing EKS cluster, an IAM role with specific AWS-managed policies, and VPC subnets for node placement. The examples are intentionally small. Combine them with your own cluster, VPC, and operational policies.

Create an IAM role with required policies

Node groups need an IAM role that grants worker nodes permissions to join the cluster, manage network interfaces, and pull container images.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// IAM role assumed by EKS worker nodes (EC2 instances) at launch.
// The trust policy restricts assumption to the EC2 service only.
const example = new aws.iam.Role("example", {
    name: "eks-node-group-example",
    assumeRolePolicy: JSON.stringify({
        Statement: [{
            Action: "sts:AssumeRole",
            Effect: "Allow",
            Principal: {
                Service: "ec2.amazonaws.com",
            },
        }],
        Version: "2012-10-17",
    }),
});
// Required for nodes to connect to the EKS cluster control plane.
const example_AmazonEKSWorkerNodePolicy = new aws.iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    role: example.name,
});
// Required for the VPC CNI plugin to manage pod network interfaces.
const example_AmazonEKSCNIPolicy = new aws.iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    role: example.name,
});
// Required for nodes to pull container images from ECR.
const example_AmazonEC2ContainerRegistryReadOnly = new aws.iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
    role: example.name,
});
import pulumi
import json
import pulumi_aws as aws

# IAM role assumed by EKS worker nodes (EC2 instances) at launch.
# The trust policy restricts assumption to the EC2 service only.
example = aws.iam.Role("example",
    name="eks-node-group-example",
    assume_role_policy=json.dumps({
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Principal": {
                "Service": "ec2.amazonaws.com",
            },
        }],
        "Version": "2012-10-17",
    }))
# Required for nodes to connect to the EKS cluster control plane.
example__amazon_eks_worker_node_policy = aws.iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    role=example.name)
# Required for the VPC CNI plugin to manage pod network interfaces.
example__amazon_ekscni_policy = aws.iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    role=example.name)
# Required for nodes to pull container images from ECR.
example__amazon_ec2_container_registry_read_only = aws.iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly",
    policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
    role=example.name)
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Trust policy document: only the EC2 service may assume the role.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"Statement": []map[string]interface{}{
				map[string]interface{}{
					"Action": "sts:AssumeRole",
					"Effect": "Allow",
					"Principal": map[string]interface{}{
						"Service": "ec2.amazonaws.com",
					},
				},
			},
			"Version": "2012-10-17",
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		// IAM role assumed by EKS worker nodes (EC2 instances) at launch.
		example, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			Name:             pulumi.String("eks-node-group-example"),
			AssumeRolePolicy: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		// Required for nodes to connect to the EKS cluster control plane.
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEKSWorkerNodePolicy", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		// Required for the VPC CNI plugin to manage pod network interfaces.
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEKS_CNI_Policy", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		// Required for nodes to pull container images from ECR.
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEC2ContainerRegistryReadOnly", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // IAM role assumed by EKS worker nodes (EC2 instances) at launch.
    // The trust policy restricts assumption to the EC2 service only.
    var example = new Aws.Iam.Role("example", new()
    {
        Name = "eks-node-group-example",
        AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["Statement"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["Action"] = "sts:AssumeRole",
                    ["Effect"] = "Allow",
                    ["Principal"] = new Dictionary<string, object?>
                    {
                        ["Service"] = "ec2.amazonaws.com",
                    },
                },
            },
            ["Version"] = "2012-10-17",
        }),
    });

    // Required for nodes to connect to the EKS cluster control plane.
    var example_AmazonEKSWorkerNodePolicy = new Aws.Iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
        Role = example.Name,
    });

    // Required for the VPC CNI plugin to manage pod network interfaces.
    var example_AmazonEKSCNIPolicy = new Aws.Iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
        Role = example.Name,
    });

    // Required for nodes to pull container images from ECR.
    var example_AmazonEC2ContainerRegistryReadOnly = new Aws.Iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
        Role = example.Name,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // IAM role assumed by EKS worker nodes (EC2 instances) at launch.
        // The trust policy restricts assumption to the EC2 service only.
        var example = new Role("example", RoleArgs.builder()
            .name("eks-node-group-example")
            .assumeRolePolicy(serializeJson(
                jsonObject(
                    jsonProperty("Statement", jsonArray(jsonObject(
                        jsonProperty("Action", "sts:AssumeRole"),
                        jsonProperty("Effect", "Allow"),
                        jsonProperty("Principal", jsonObject(
                            jsonProperty("Service", "ec2.amazonaws.com")
                        ))
                    ))),
                    jsonProperty("Version", "2012-10-17")
                )))
            .build());

        // Required for nodes to connect to the EKS cluster control plane.
        var example_AmazonEKSWorkerNodePolicy = new RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
            .role(example.name())
            .build());

        // Required for the VPC CNI plugin to manage pod network interfaces.
        // Resource name matches the other language examples ("example-AmazonEKS_CNI_Policy").
        var example_AmazonEKSCNIPolicy = new RolePolicyAttachment("example-AmazonEKS_CNI_Policy", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
            .role(example.name())
            .build());

        // Required for nodes to pull container images from ECR.
        var example_AmazonEC2ContainerRegistryReadOnly = new RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
            .role(example.name())
            .build());

    }
}
resources:
  # IAM role assumed by EKS worker nodes (EC2 instances) at launch.
  example:
    type: aws:iam:Role
    properties:
      name: eks-node-group-example
      # Trust policy: only the EC2 service may assume this role.
      assumeRolePolicy:
        fn::toJSON:
          Statement:
            - Action: sts:AssumeRole
              Effect: Allow
              Principal:
                Service: ec2.amazonaws.com
          Version: 2012-10-17
  # Required for nodes to connect to the EKS cluster control plane.
  example-AmazonEKSWorkerNodePolicy:
    type: aws:iam:RolePolicyAttachment
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
      role: ${example.name}
  # Required for the VPC CNI plugin to manage pod network interfaces.
  example-AmazonEKSCNIPolicy:
    type: aws:iam:RolePolicyAttachment
    name: example-AmazonEKS_CNI_Policy
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
      role: ${example.name}
  # Required for nodes to pull container images from ECR.
  example-AmazonEC2ContainerRegistryReadOnly:
    type: aws:iam:RolePolicyAttachment
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
      role: ${example.name}

The assumeRolePolicy allows EC2 instances to assume this role. The three RolePolicyAttachment resources attach AWS-managed policies: AmazonEKSWorkerNodePolicy for cluster operations, AmazonEKS_CNI_Policy for VPC networking, and AmazonEC2ContainerRegistryReadOnly for pulling images from ECR.

Provision subnets across availability zones

Node groups launch worker nodes into VPC subnets. Distributing nodes across multiple availability zones provides fault tolerance.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as std from "@pulumi/std";

// Availability zones currently accepting new resources.
const available = aws.getAvailabilityZones({
    state: "available",
});
// One subnet per iteration, each in a distinct AZ with a CIDR carved out
// of the VPC's address space (exampleAwsVpc is assumed defined elsewhere).
const example: aws.ec2.Subnet[] = [];
for (const range = {value: 0}; range.value < 2; range.value++) {
    example.push(new aws.ec2.Subnet(`example-${range.value}`, {
        availabilityZone: available.then(available => available.names[range.value]),
        cidrBlock: std.cidrsubnet({
            input: exampleAwsVpc.cidrBlock,
            newbits: 8,
            netnum: range.value,
        }).then(invoke => invoke.result),
        vpcId: exampleAwsVpc.id,
    }));
}
import pulumi
import pulumi_aws as aws
import pulumi_std as std

# Availability zones currently accepting new resources.
available = aws.get_availability_zones(state="available")

# One subnet per iteration, each in a distinct AZ with a CIDR carved out
# of the VPC's address space (example_aws_vpc is assumed defined elsewhere).
# The loop variable is named `i` rather than `range` so the builtin is not
# shadowed, as the original generated code did.
example = []
for i in range(0, 2):
    example.append(aws.ec2.Subnet(f"example-{i}",
        availability_zone=available.names[i],
        cidr_block=std.cidrsubnet(input=example_aws_vpc["cidrBlock"],
            newbits=8,
            netnum=i).result,
        vpc_id=example_aws_vpc["id"]))
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws"
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/ec2"
	"github.com/pulumi/pulumi-std/sdk/go/std"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Availability zones currently accepting new resources.
		available, err := aws.GetAvailabilityZones(ctx, &aws.GetAvailabilityZonesArgs{
			State: pulumi.StringRef("available"),
		}, nil)
		if err != nil {
			return err
		}
		// One subnet per iteration, each in a distinct AZ with a CIDR
		// carved out of the VPC's address space (exampleAwsVpc is assumed
		// defined elsewhere).
		var example []*ec2.Subnet
		for index := 0; index < 2; index++ {
			// Compute this subnet's CIDR inside the loop so each subnet
			// gets a distinct netnum. (The original hoisted this call
			// above the loop and referenced the loop variable before it
			// was declared, which does not compile.)
			invokeCidrsubnet, err := std.Cidrsubnet(ctx, &std.CidrsubnetArgs{
				Input:   exampleAwsVpc.CidrBlock,
				Newbits: 8,
				Netnum:  index,
			}, nil)
			if err != nil {
				return err
			}
			subnet, err := ec2.NewSubnet(ctx, fmt.Sprintf("example-%v", index), &ec2.SubnetArgs{
				AvailabilityZone: pulumi.String(available.Names[index]),
				CidrBlock:        pulumi.String(invokeCidrsubnet.Result),
				VpcId:            pulumi.Any(exampleAwsVpc.Id),
			})
			if err != nil {
				return err
			}
			example = append(example, subnet)
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() => 
{
    // Availability zones currently accepting new resources.
    var available = Aws.GetAvailabilityZones.Invoke(new()
    {
        State = "available",
    });

    // One subnet per iteration, each in a distinct AZ with a CIDR carved
    // out of the VPC's address space (exampleAwsVpc is assumed defined elsewhere).
    var example = new List<Aws.Ec2.Subnet>();
    for (var rangeIndex = 0; rangeIndex < 2; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        example.Add(new Aws.Ec2.Subnet($"example-{range.Value}", new()
        {
            AvailabilityZone = available.Apply(getAvailabilityZonesResult => getAvailabilityZonesResult.Names)[range.Value],
            CidrBlock = Std.Cidrsubnet.Invoke(new()
            {
                Input = exampleAwsVpc.CidrBlock,
                Newbits = 8,
                Netnum = range.Value,
            }).Apply(invoke => invoke.Result),
            VpcId = exampleAwsVpc.Id,
        }));
    }
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.AwsFunctions;
import com.pulumi.aws.inputs.GetAvailabilityZonesArgs;
import com.pulumi.aws.ec2.Subnet;
import com.pulumi.aws.ec2.SubnetArgs;
import com.pulumi.std.StdFunctions;
import com.pulumi.std.inputs.CidrsubnetArgs;
import com.pulumi.codegen.internal.KeyedValue;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Availability zones currently accepting new resources.
        final var available = AwsFunctions.getAvailabilityZones(GetAvailabilityZonesArgs.builder()
            .state("available")
            .build());

        // One subnet per iteration, each in a distinct AZ with a CIDR carved
        // out of the VPC's address space (exampleAwsVpc is assumed defined
        // elsewhere). The original body referenced an undefined `range`
        // variable and had mismatched braces; it now uses the loop index.
        for (var i = 0; i < 2; i++) {
            final var azIndex = i; // effectively-final copy for use in lambdas
            new Subnet("example-" + i, SubnetArgs.builder()
                .availabilityZone(available.applyValue(zones -> zones.names().get(azIndex)))
                .cidrBlock(StdFunctions.cidrsubnet(CidrsubnetArgs.builder()
                    .input(exampleAwsVpc.cidrBlock())
                    .newbits(8)
                    .netnum(azIndex)
                    .build()).applyValue(invoke -> invoke.result()))
                .vpcId(exampleAwsVpc.id())
                .build());
        }
    }
}

The loop creates two subnets in different availability zones, each with a CIDR block calculated from the VPC’s address space. The node group’s subnetIds property references these subnets to control where worker nodes launch.

Configure node group capacity and updates

After provisioning IAM roles and subnets, you define the node group with scaling parameters and update behavior.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Managed node group for an existing EKS cluster (the cluster, IAM role,
// and subnets are assumed to be defined elsewhere).
const example = new aws.eks.NodeGroup("example", {
    clusterName: exampleAwsEksCluster.name,
    nodeGroupName: "example",
    nodeRoleArn: exampleAwsIamRole.arn,
    // Launch worker nodes into the previously created subnets.
    subnetIds: exampleAwsSubnet.map(__item => __item.id),
    // Min/max bound the auto scaling group; desired sets the initial count.
    scalingConfig: {
        desiredSize: 1,
        maxSize: 2,
        minSize: 1,
    },
    // At most one node may be unavailable during a rolling update.
    updateConfig: {
        maxUnavailable: 1,
    },
}, {
    // Ensure IAM policies are attached before nodes launch.
    dependsOn: [
        example_AmazonEKSWorkerNodePolicy,
        example_AmazonEKSCNIPolicy,
        example_AmazonEC2ContainerRegistryReadOnly,
    ],
});
import pulumi
import pulumi_aws as aws

# Managed node group for an existing EKS cluster (the cluster, IAM role,
# and subnets are assumed to be defined elsewhere).
example = aws.eks.NodeGroup("example",
    cluster_name=example_aws_eks_cluster["name"],
    node_group_name="example",
    node_role_arn=example_aws_iam_role["arn"],
    # Launch worker nodes into the previously created subnets.
    subnet_ids=[__item["id"] for __item in example_aws_subnet],
    # min/max bound the auto scaling group; desired sets the initial count.
    scaling_config={
        "desired_size": 1,
        "max_size": 2,
        "min_size": 1,
    },
    # At most one node may be unavailable during a rolling update.
    update_config={
        "max_unavailable": 1,
    },
    # Ensure IAM policies are attached before nodes launch.
    opts = pulumi.ResourceOptions(depends_on=[
            example__amazon_eks_worker_node_policy,
            example__amazon_ekscni_policy,
            example__amazon_ec2_container_registry_read_only,
        ]))
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
var splat0 []interface{}
for _, val0 := range exampleAwsSubnet {
splat0 = append(splat0, val0.Id)
}
_, err := eks.NewNodeGroup(ctx, "example", &eks.NodeGroupArgs{
ClusterName: pulumi.Any(exampleAwsEksCluster.Name),
NodeGroupName: pulumi.String("example"),
NodeRoleArn: pulumi.Any(exampleAwsIamRole.Arn),
SubnetIds: toPulumiArray(splat0),
ScalingConfig: &eks.NodeGroupScalingConfigArgs{
DesiredSize: pulumi.Int(1),
MaxSize: pulumi.Int(2),
MinSize: pulumi.Int(1),
},
UpdateConfig: &eks.NodeGroupUpdateConfigArgs{
MaxUnavailable: pulumi.Int(1),
},
}, pulumi.DependsOn([]pulumi.Resource{
example_AmazonEKSWorkerNodePolicy,
example_AmazonEKSCNIPolicy,
example_AmazonEC2ContainerRegistryReadOnly,
}))
if err != nil {
return err
}
return nil
})
}
func toPulumiArray(arr []) pulumi.Array {
var pulumiArr pulumi.Array
for _, v := range arr {
pulumiArr = append(pulumiArr, pulumi.(v))
}
return pulumiArr
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Managed node group for an existing EKS cluster (the cluster, IAM
    // role, and subnets are assumed to be defined elsewhere).
    var example = new Aws.Eks.NodeGroup("example", new()
    {
        ClusterName = exampleAwsEksCluster.Name,
        NodeGroupName = "example",
        NodeRoleArn = exampleAwsIamRole.Arn,
        // Launch worker nodes into the previously created subnets.
        SubnetIds = exampleAwsSubnet.Select(__item => __item.Id).ToList(),
        // Min/max bound the auto scaling group; desired sets the initial count.
        ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs
        {
            DesiredSize = 1,
            MaxSize = 2,
            MinSize = 1,
        },
        // At most one node may be unavailable during a rolling update.
        UpdateConfig = new Aws.Eks.Inputs.NodeGroupUpdateConfigArgs
        {
            MaxUnavailable = 1,
        },
    }, new CustomResourceOptions
    {
        // Ensure IAM policies are attached before nodes launch.
        DependsOn =
        {
            example_AmazonEKSWorkerNodePolicy,
            example_AmazonEKSCNIPolicy,
            example_AmazonEC2ContainerRegistryReadOnly,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.eks.NodeGroup;
import com.pulumi.aws.eks.NodeGroupArgs;
import com.pulumi.aws.eks.inputs.NodeGroupScalingConfigArgs;
import com.pulumi.aws.eks.inputs.NodeGroupUpdateConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
// Needed for the bare toList() call below; missing in the original.
import static java.util.stream.Collectors.toList;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Managed node group for an existing EKS cluster (the cluster, IAM
        // role, and subnets are assumed to be defined elsewhere).
        var example = new NodeGroup("example", NodeGroupArgs.builder()
            .clusterName(exampleAwsEksCluster.name())
            .nodeGroupName("example")
            .nodeRoleArn(exampleAwsIamRole.arn())
            // Launch worker nodes into the previously created subnets.
            .subnetIds(exampleAwsSubnet.stream().map(element -> element.id()).collect(toList()))
            // Min/max bound the auto scaling group; desired sets the initial count.
            .scalingConfig(NodeGroupScalingConfigArgs.builder()
                .desiredSize(1)
                .maxSize(2)
                .minSize(1)
                .build())
            // At most one node may be unavailable during a rolling update.
            .updateConfig(NodeGroupUpdateConfigArgs.builder()
                .maxUnavailable(1)
                .build())
            // Ensure IAM policies are attached before nodes launch.
            .build(), CustomResourceOptions.builder()
                .dependsOn(                
                    example_AmazonEKSWorkerNodePolicy,
                    example_AmazonEKSCNIPolicy,
                    example_AmazonEC2ContainerRegistryReadOnly)
                .build());

    }
}

The scalingConfig block sets minimum, maximum, and desired node counts. The updateConfig block controls rolling update behavior; maxUnavailable limits how many nodes can be replaced simultaneously. The dependsOn ensures IAM policies are attached before node group creation, preventing permission errors during launch.

Allow external autoscaling to manage capacity

Kubernetes autoscalers adjust node counts based on pod scheduling needs. Pulumi’s ignoreChanges prevents drift detection from conflicting with external changes.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// ignoreChanges lets an external autoscaler (e.g. Cluster Autoscaler)
// adjust desiredSize without Pulumi reverting it on the next update,
// as described in the surrounding text (missing from the original snippet).
const example = new aws.eks.NodeGroup("example", {scalingConfig: {
    desiredSize: 2,
}}, {
    ignoreChanges: ["scalingConfig.desiredSize"],
});
import pulumi
import pulumi_aws as aws

# ignore_changes lets an external autoscaler (e.g. Cluster Autoscaler)
# adjust desired_size without Pulumi reverting it on the next update,
# as described in the surrounding text (missing from the original snippet).
example = aws.eks.NodeGroup("example",
    scaling_config={
        "desired_size": 2,
    },
    opts=pulumi.ResourceOptions(ignore_changes=["scalingConfig.desiredSize"]))
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := eks.NewNodeGroup(ctx, "example", &eks.NodeGroupArgs{
			ScalingConfig: &eks.NodeGroupScalingConfigArgs{
				DesiredSize: pulumi.Int(2),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // IgnoreChanges lets an external autoscaler (e.g. Cluster Autoscaler)
    // adjust DesiredSize without Pulumi reverting it on the next update,
    // as described in the surrounding text (missing from the original snippet).
    var example = new Aws.Eks.NodeGroup("example", new()
    {
        ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs
        {
            DesiredSize = 2,
        },
    }, new CustomResourceOptions
    {
        IgnoreChanges =
        {
            "scalingConfig.desiredSize",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.eks.NodeGroup;
import com.pulumi.aws.eks.NodeGroupArgs;
import com.pulumi.aws.eks.inputs.NodeGroupScalingConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // ignoreChanges lets an external autoscaler (e.g. Cluster
        // Autoscaler) adjust desiredSize without Pulumi reverting it on
        // the next update, as described in the surrounding text (missing
        // from the original snippet).
        var example = new NodeGroup("example", NodeGroupArgs.builder()
            .scalingConfig(NodeGroupScalingConfigArgs.builder()
                .desiredSize(2)
                .build())
            .build(), CustomResourceOptions.builder()
                .ignoreChanges("scalingConfig.desiredSize")
                .build());

    }
}
resources:
  example:
    type: aws:eks:NodeGroup
    properties:
      scalingConfig:
        desiredSize: 2
    # Let an external autoscaler manage desiredSize without Pulumi
    # reverting it on the next update (missing from the original snippet,
    # though described in the surrounding text).
    options:
      ignoreChanges:
        - scalingConfig.desiredSize

Setting ignoreChanges on desiredSize allows you to set an initial capacity while letting external autoscalers modify the count without Pulumi reverting it. The minSize and maxSize properties still constrain the autoscaler’s range.

Beyond these examples

These snippets focus on specific node group features: IAM role configuration with AWS-managed policies, subnet provisioning across availability zones, and scaling and update configuration. They’re intentionally minimal rather than full cluster deployments.

The examples may reference pre-existing infrastructure such as an EKS cluster and a VPC for subnet creation. They focus on configuring the node group rather than provisioning everything around it.

To keep things focused, common node group patterns are omitted, including:

  • Instance types and AMI selection (instanceTypes, amiType)
  • Spot instances (capacityType)
  • Launch templates for custom AMIs or user data
  • Kubernetes labels and taints
  • Remote access configuration (SSH keys, security groups)
  • Node repair and auto-recovery settings

These omissions are intentional: the goal is to illustrate how each node group feature is wired, not provide drop-in cluster modules. See the EKS NodeGroup resource reference for all available configuration options.

Let's create AWS EKS Node Groups

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

IAM & Permissions
What IAM policies does my node group role need?
The role specified in nodeRoleArn must have three AWS managed policies attached: AmazonEKSWorkerNodePolicy, AmazonEKS_CNI_Policy, and AmazonEC2ContainerRegistryReadOnly.
Why do I need to use dependsOn with IAM policy attachments?
The node group requires IAM policies to be attached before creation, but there’s no implicit dependency. Use dependsOn to ensure policy attachments complete first, as shown in the example.
Configuration & Immutability
What properties can't I change after creating a node group?
These properties are immutable and require replacement: amiType, capacityType, clusterName, diskSize, instanceTypes, nodeGroupName, nodeRoleArn, subnetIds, and remoteAccess.
What are the naming rules for node groups?
The nodeGroupName can’t exceed 63 characters and must start with a letter or digit (hyphens and underscores allowed for remaining characters). It conflicts with nodeGroupNamePrefix.
What are the default instance types and disk sizes?
Instance types default to ["t3.medium"]. Disk size defaults to 50 GiB for Windows nodes and 20 GiB for all other node groups.
Scaling & Updates
What's the difference between minSize, maxSize, and desiredSize in scalingConfig?
minSize and maxSize define the Auto Scaling Group’s capacity limits, while desiredSize sets the initial number of running instances. All three are required in scalingConfig.
How do I prevent Pulumi from reverting autoscaler changes to desiredSize?
Use ignoreChanges on scalingConfig.desiredSize to allow external autoscalers (like Cluster Autoscaler) to manage the instance count without Pulumi reverting it.
What does updateConfig control?
updateConfig is required and controls update behavior, including maxUnavailable (the maximum number of nodes unavailable during updates).
Advanced Configuration
Can I use both launchTemplate and remoteAccess?
No, launchTemplate and remoteAccess conflict. Use launchTemplate for advanced customization or remoteAccess for simple SSH access configuration.
How many taints can I apply to a node group?
You can apply a maximum of 50 taints per node group.
Why aren't all my Kubernetes labels being managed by Pulumi?
Only labels applied through the EKS API are managed by the labels argument. Other Kubernetes labels applied directly to the node group won’t be tracked by Pulumi.

Using a different cloud?

Explore containers guides for other cloud providers: